// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/vhost_types.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

static bool use_va = true;
module_param(use_va, bool, 0444);
MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

struct vdpasim_mm_work {
	struct kthread_work work;
	struct vdpasim *vdpasim;
	struct mm_struct *mm_to_bind;
	int ret;
};

static void vdpasim_mm_work_fn(struct kthread_work *work)
{
	struct vdpasim_mm_work *mm_work =
		container_of(work, struct vdpasim_mm_work, work);
	struct vdpasim *vdpasim = mm_work->vdpasim;

	mm_work->ret = 0;

	//TODO: should we attach the cgroup of the mm owner?
	vdpasim->mm_bound = mm_work->mm_to_bind;
}

static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
					  struct vdpasim_mm_work *mm_work)
{
	struct kthread_work *work = &mm_work->work;

	kthread_init_work(work, vdpasim_mm_work_fn);
	kthread_queue_work(vdpasim->worker, work);

	kthread_flush_work(work);
}

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	uint16_t last_avail_idx = vq->vring.last_avail_idx;
	struct vring_desc *desc = (struct vring_desc *)
				  (uintptr_t)vq->desc_addr;
	struct vring_avail *avail = (struct vring_avail *)
				    (uintptr_t)vq->driver_addr;
	struct vring_used *used = (struct vring_used *)
				  (uintptr_t)vq->device_addr;

	if (use_va && vdpasim->mm_bound) {
		vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
				     true, desc, avail, used);
	} else {
		vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
				  true, desc, avail, used);
	}

	vq->vring.last_avail_idx = last_avail_idx;

	/*
	 * Since vdpa_sim does not support receive inflight descriptors as a
	 * destination of a migration, let's set both avail_idx and used_idx
	 * the same at vq start.  This is how vhost-user works in a
	 * VHOST_SET_VRING_BASE call.
	 *
	 * Although the simple fix is to set last_used_idx at
	 * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
	 */
	vq->vring.last_used_idx = last_avail_idx;
	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	for (i = 0; i < vdpasim->dev_attr.nas; i++) {
		vhost_iotlb_reset(&vdpasim->iommu[i]);
		vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
				      0, VHOST_MAP_RW);
		vdpasim->iommu_pt[i] = true;
	}

	vdpasim->running = true;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

static void vdpasim_work_fn(struct kthread_work *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct mm_struct *mm = vdpasim->mm_bound;

	if (use_va && mm) {
		if (!mmget_not_zero(mm))
			return;
		kthread_use_mm(mm);
	}

	vdpasim->dev_attr.work_fn(vdpasim);

	if (use_va && mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}
}

struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpa_device *vdpa;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (!dev_attr->alloc_size)
		return ERR_PTR(-EINVAL);

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpa = __vdpa_alloc_device(NULL, ops,
				   dev_attr->ngroups, dev_attr->nas,
				   dev_attr->alloc_size,
				   dev_attr->name, use_va);
	if (IS_ERR(vdpa)) {
		ret = PTR_ERR(vdpa);
		goto err_alloc;
	}

	vdpasim = vdpa_to_sim(vdpa);
	vdpasim->dev_attr = *dev_attr;
	dev = &vdpasim->vdpa.dev;

	kthread_init_work(&vdpasim->work, vdpasim_work_fn);
	vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
						dev_attr->name);
	if (IS_ERR(vdpasim->worker))
		goto err_iommu;

	mutex_init(&vdpasim->mutex);
	spin_lock_init(&vdpasim->iommu_lock);

	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
					  sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
	if (!vdpasim->iommu_pt)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

void vdpasim_schedule_work(struct vdpasim *vdpasim)
{
	kthread_queue_work(vdpasim->worker, &vdpasim->work);
}
EXPORT_SYMBOL_GPL(vdpasim_schedule_work);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (!vdpasim->running &&
	    (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vdpasim->pending_kick = true;
		return;
	}

	if (vq->ready)
		vdpasim_schedule_work(vdpasim);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	mutex_lock(&vdpasim->mutex);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready) {
		vdpasim_queue_ready(vdpasim, idx);
	}
	mutex_unlock(&vdpasim->mutex);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	mutex_lock(&vdpasim->mutex);
	vrh->last_avail_idx = state->split.avail_index;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static int vdpasim_get_vq_stats(struct vdpa_device *vdpa, u16 idx,
				struct sk_buff *msg,
				struct netlink_ext_ack *extack)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (vdpasim->dev_attr.get_stats)
		return vdpasim->dev_attr.get_stats(vdpasim, idx,
						   msg, extack);
	return -EOPNOTSUPP;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belongs to group 0, CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static u64 vdpasim_get_backend_features(const struct vdpa_device *vdpa)
{
	return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	mutex_lock(&vdpasim->mutex);
	status = vdpasim->status;
	mutex_unlock(&vdpasim->mutex);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->status = status;
	mutex_unlock(&vdpasim->mutex);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = false;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_resume(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = true;

	if (vdpasim->pending_kick) {
		/* Process pending descriptors */
		for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
			vdpasim_kick_vq(vdpa, i);

		vdpasim->pending_kick = false;
	}

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	mutex_lock(&vdpasim->mutex);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);
	vdpasim->iommu_pt[asid] = false;

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = mm;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);

	return mm_work.ret;
}

static void vdpasim_unbind_mm(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = NULL;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	kthread_cancel_work_sync(&vdpasim->work);
	kthread_destroy_worker(vdpasim->worker);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	vdpasim->dev_attr.free(vdpasim);

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);
	kfree(vdpasim->iommu);
	kfree(vdpasim->iommu_pt);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address		= vdpasim_set_vq_address,
	.set_vq_num		= vdpasim_set_vq_num,
	.kick_vq		= vdpasim_kick_vq,
	.set_vq_cb		= vdpasim_set_vq_cb,
	.set_vq_ready		= vdpasim_set_vq_ready,
	.get_vq_ready		= vdpasim_get_vq_ready,
	.set_vq_state		= vdpasim_set_vq_state,
	.get_vendor_vq_stats	= vdpasim_get_vq_stats,
	.get_vq_state		= vdpasim_get_vq_state,
	.get_vq_align		= vdpasim_get_vq_align,
	.get_vq_group		= vdpasim_get_vq_group,
	.get_device_features	= vdpasim_get_device_features,
	.get_backend_features	= vdpasim_get_backend_features,
	.set_driver_features	= vdpasim_set_driver_features,
	.get_driver_features	= vdpasim_get_driver_features,
	.set_config_cb		= vdpasim_set_config_cb,
	.get_vq_num_max		= vdpasim_get_vq_num_max,
	.get_device_id		= vdpasim_get_device_id,
	.get_vendor_id		= vdpasim_get_vendor_id,
	.get_status		= vdpasim_get_status,
	.set_status		= vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size	= vdpasim_get_config_size,
	.get_config		= vdpasim_get_config,
	.set_config		= vdpasim_set_config,
	.get_generation		= vdpasim_get_generation,
	.get_iova_range		= vdpasim_get_iova_range,
	.set_group_asid		= vdpasim_set_group_asid,
	.dma_map		= vdpasim_dma_map,
	.dma_unmap		= vdpasim_dma_unmap,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free			= vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address		= vdpasim_set_vq_address,
	.set_vq_num		= vdpasim_set_vq_num,
	.kick_vq		= vdpasim_kick_vq,
	.set_vq_cb		= vdpasim_set_vq_cb,
	.set_vq_ready		= vdpasim_set_vq_ready,
	.get_vq_ready		= vdpasim_get_vq_ready,
	.set_vq_state		= vdpasim_set_vq_state,
	.get_vendor_vq_stats	= vdpasim_get_vq_stats,
	.get_vq_state		= vdpasim_get_vq_state,
	.get_vq_align		= vdpasim_get_vq_align,
	.get_vq_group		= vdpasim_get_vq_group,
	.get_device_features	= vdpasim_get_device_features,
	.get_backend_features	= vdpasim_get_backend_features,
	.set_driver_features	= vdpasim_set_driver_features,
	.get_driver_features	= vdpasim_get_driver_features,
	.set_config_cb		= vdpasim_set_config_cb,
	.get_vq_num_max		= vdpasim_get_vq_num_max,
	.get_device_id		= vdpasim_get_device_id,
	.get_vendor_id		= vdpasim_get_vendor_id,
	.get_status		= vdpasim_get_status,
	.set_status		= vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size	= vdpasim_get_config_size,
	.get_config		= vdpasim_get_config,
	.set_config		= vdpasim_set_config,
	.get_generation		= vdpasim_get_generation,
	.get_iova_range		= vdpasim_get_iova_range,
	.set_group_asid		= vdpasim_set_group_asid,
	.set_map		= vdpasim_set_map,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free			= vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);