// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for modern virtio-pci device
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 * Based on virtio_pci_modern.c.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>

#define VP_VDPA_QUEUE_MAX	256
#define VP_VDPA_DRIVER_NAME	"vp_vdpa"
#define VP_VDPA_NAME_SIZE	256

struct vp_vring {
	void __iomem *notify;
	char msix_name[VP_VDPA_NAME_SIZE];
	struct vdpa_callback cb;
	resource_size_t notify_pa;
	int irq;
};

struct vp_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_modern_device mdev;
	struct vp_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[VP_VDPA_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vp_vdpa, vdpa);
}

static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return &vp_vdpa->mdev;
}

static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_features(mdev);
}

static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_features(mdev, features);

	return 0;
}

static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_status(mdev);
}

static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	int irq = vp_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i;

	for (i = 0; i < vp_vdpa->queues; i++) {
		if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
				      &vp_vdpa->vring[i]);
			vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
		vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (vp_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		vp_vdpa->vectors = 0;
	}
}

static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
	struct vp_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
	struct vp_vdpa *vp_vdpa = arg;

	if (vp_vdpa->config_cb.callback)
		return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);

	return IRQ_HANDLED;
}
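/*
 * MSI-X layout used below: vp_vdpa_request_irq() allocates 'queues + 1'
 * vectors, maps vector i to virtqueue i and the last vector to config
 * change interrupts.  Vectors are only requested when the driver status
 * transitions to DRIVER_OK (see vp_vdpa_set_status()) and are released
 * again by vp_vdpa_free_irq() on reset.
 */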
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i, ret, irq;
	int queues = vp_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to allocate irq vectors want %d but %d\n",
			vectors, ret);
		return ret;
	}

	vp_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
			 "vp-vdpa[%s]-%d\n", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       vp_vdpa_vq_handler,
				       0, vp_vdpa->vring[i].msix_name,
				       &vp_vdpa->vring[i]);
		if (ret) {
			dev_err(&pdev->dev,
				"vp_vdpa: fail to request irq for vq %d\n", i);
			goto err;
		}
		vp_modern_queue_vector(mdev, i, i);
		vp_vdpa->vring[i].irq = irq;
	}

	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
			       vp_vdpa->msix_name, vp_vdpa);
	if (ret) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to request irq for config\n");
		goto err;
	}
	vp_modern_config_vector(mdev, queues);
	vp_vdpa->config_irq = irq;

	return 0;
err:
	vp_vdpa_free_irq(vp_vdpa);
	return ret;
}

static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 s = vp_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vp_vdpa_request_irq(vp_vdpa);
	}

	vp_modern_set_status(mdev, status);
}

static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 s = vp_vdpa_get_status(vdpa);

	vp_modern_set_status(mdev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_vdpa_free_irq(vp_vdpa);

	return 0;
}

static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VP_VDPA_QUEUE_MAX;
}

static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				struct vdpa_vq_state *state)
{
	/* Note that this is not supported by virtio specification, so
	 * we return -EOPNOTSUPP here. This means we can't support live
	 * migration, vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
				      const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_split *split = &state->split;

	if (split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
				       const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_packed *packed = &state->packed;

	if (packed->last_avail_counter == 1 &&
	    packed->last_avail_idx == 0 &&
	    packed->last_used_counter == 1 &&
	    packed->last_used_idx == 0)
		return 0;

	return -EOPNOTSUPP;
}
static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				const struct vdpa_vq_state *state)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	/* Note that this is not supported by virtio specification.
	 * But if the state is by chance equal to the device initial
	 * state, we can let it go.
	 */
	if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
	    !vp_modern_get_queue_enable(mdev, qid)) {
		if (vp_modern_get_driver_features(mdev) &
		    BIT_ULL(VIRTIO_F_RING_PACKED))
			return vp_vdpa_set_vq_state_packed(vdpa, state);
		else
			return vp_vdpa_set_vq_state_split(vdpa, state);
	}

	return -EOPNOTSUPP;
}

static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			      struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->vring[qid].cb = *cb;
}

static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
				 u16 qid, bool ready)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_enable(mdev, qid, ready);
}

static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_enable(mdev, qid);
}

static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
			       u32 num)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_size(mdev, qid, num);
}

static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_queue_address(mdev, qid, desc_area,
				driver_area, device_area);

	return 0;
}

static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}

static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_generation(mdev);
}

static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.device;
}

static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.vendor;
}

static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return PAGE_SIZE;
}

static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->device_len;
}

static void vp_vdpa_get_config(struct vdpa_device *vdpa,
			       unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 old, new;
	u8 *p;
	int i;

	do {
		old = vp_ioread8(&mdev->common->config_generation);
		p = buf;
		for (i = 0; i < len; i++)
			*p++ = vp_ioread8(mdev->device + offset + i);

		new = vp_ioread8(&mdev->common->config_generation);
	} while (old != new);
}

static void vp_vdpa_set_config(struct vdpa_device *vdpa,
			       unsigned int offset, const void *buf,
			       unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		vp_iowrite8(*p++, mdev->device + offset + i);
}
static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->config_cb = *cb;
}

static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct vdpa_notification_area notify;

	notify.addr = vp_vdpa->vring[qid].notify_pa;
	notify.size = mdev->notify_offset_multiplier;

	return notify;
}

static const struct vdpa_config_ops vp_vdpa_ops = {
	.get_features	= vp_vdpa_get_features,
	.set_features	= vp_vdpa_set_features,
	.get_status	= vp_vdpa_get_status,
	.set_status	= vp_vdpa_set_status,
	.reset		= vp_vdpa_reset,
	.get_vq_num_max	= vp_vdpa_get_vq_num_max,
	.get_vq_state	= vp_vdpa_get_vq_state,
	.get_vq_notification = vp_vdpa_get_vq_notification,
	.set_vq_state	= vp_vdpa_set_vq_state,
	.set_vq_cb	= vp_vdpa_set_vq_cb,
	.set_vq_ready	= vp_vdpa_set_vq_ready,
	.get_vq_ready	= vp_vdpa_get_vq_ready,
	.set_vq_num	= vp_vdpa_set_vq_num,
	.set_vq_address	= vp_vdpa_set_vq_address,
	.kick_vq	= vp_vdpa_kick_vq,
	.get_generation	= vp_vdpa_get_generation,
	.get_device_id	= vp_vdpa_get_device_id,
	.get_vendor_id	= vp_vdpa_get_vendor_id,
	.get_vq_align	= vp_vdpa_get_vq_align,
	.get_config_size = vp_vdpa_get_config_size,
	.get_config	= vp_vdpa_get_config,
	.set_config	= vp_vdpa_set_config,
	.set_config_cb	= vp_vdpa_set_config_cb,
	.get_vq_irq	= vp_vdpa_get_vq_irq,
};

static void vp_vdpa_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct virtio_pci_modern_device *mdev;
	struct device *dev = &pdev->dev;
	struct vp_vdpa *vp_vdpa;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
				    dev, &vp_vdpa_ops, NULL, false);
	if (IS_ERR(vp_vdpa)) {
		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
		return PTR_ERR(vp_vdpa);
	}

	mdev = &vp_vdpa->mdev;
	mdev->pci_dev = pdev;

	ret = vp_modern_probe(mdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vp_vdpa);

	vp_vdpa->vdpa.dma_dev = &pdev->dev;
	vp_vdpa->queues = vp_modern_get_num_queues(mdev);

	ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to add devres for freeing irq vectors\n");
		goto err;
	}

	vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
				      sizeof(*vp_vdpa->vring),
				      GFP_KERNEL);
	if (!vp_vdpa->vring) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Fail to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < vp_vdpa->queues; i++) {
		vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vp_vdpa->vring[i].notify =
			vp_modern_map_vq_notify(mdev, i,
						&vp_vdpa->vring[i].notify_pa);
		if (!vp_vdpa->vring[i].notify) {
			ret = -EINVAL;
			dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
			goto err;
		}
	}
	vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&vp_vdpa->vdpa.dev);
	return ret;
}
static void vp_vdpa_remove(struct pci_dev *pdev)
{
	struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&vp_vdpa->vdpa);
	vp_modern_remove(&vp_vdpa->mdev);
}

static struct pci_driver vp_vdpa_driver = {
	.name		= "vp-vdpa",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vp_vdpa_probe,
	.remove		= vp_vdpa_remove,
};

module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");
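/*
 * Usage sketch (illustrative only; the PCI address and device ID below are
 * examples, not values taken from this file): because .id_table is NULL,
 * the driver binds only to dynamically added IDs.  One way to hand a
 * modern virtio-pci device over to vp-vdpa from userspace is to unbind it
 * from virtio-pci and add its ID to this driver, e.g. for a virtio-net
 * device (1af4:1041) at 0000:00:04.0:
 *
 *   echo 0000:00:04.0 > /sys/bus/pci/drivers/virtio-pci/unbind
 *   echo 1af4 1041 > /sys/bus/pci/drivers/vp-vdpa/new_id
 *
 * Once probe succeeds, the vdpa device registered above can be consumed by
 * the virtio_vdpa or vhost_vdpa bus drivers.
 */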