// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for modern virtio-pci device
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 * Based on virtio_pci_modern.c.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>

#define VP_VDPA_QUEUE_MAX 256
#define VP_VDPA_DRIVER_NAME "vp_vdpa"
#define VP_VDPA_NAME_SIZE 256

struct vp_vring {
	void __iomem *notify;
	char msix_name[VP_VDPA_NAME_SIZE];
	struct vdpa_callback cb;
	resource_size_t notify_pa;
	int irq;
};

struct vp_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_modern_device mdev;
	struct vp_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[VP_VDPA_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vp_vdpa, vdpa);
}

static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return &vp_vdpa->mdev;
}

static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_features(mdev);
}

static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_features(mdev, features);

	return 0;
}

static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_driver_features(mdev);
}

static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_status(mdev);
}

static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	int irq = vp_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i;

	for (i = 0; i < vp_vdpa->queues; i++) {
		if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
				      &vp_vdpa->vring[i]);
			vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
		vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (vp_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		vp_vdpa->vectors = 0;
	}
}

static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
	struct vp_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

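/* MSI-X handler for the device's config-change vector. */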
static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
	struct vp_vdpa *vp_vdpa = arg;

	if (vp_vdpa->config_cb.callback)
		return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i, ret, irq;
	int queues = vp_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to allocate irq vectors want %d but %d\n",
			vectors, ret);
		return ret;
	}

	vp_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
			 "vp-vdpa[%s]-%d\n", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       vp_vdpa_vq_handler,
				       0, vp_vdpa->vring[i].msix_name,
				       &vp_vdpa->vring[i]);
		if (ret) {
			dev_err(&pdev->dev,
				"vp_vdpa: fail to request irq for vq %d\n", i);
			goto err;
		}
		vp_modern_queue_vector(mdev, i, i);
		vp_vdpa->vring[i].irq = irq;
	}

	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
			       vp_vdpa->msix_name, vp_vdpa);
	if (ret) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to request irq for vq %d\n", i);
		goto err;
	}
	vp_modern_config_vector(mdev, queues);
	vp_vdpa->config_irq = irq;

	return 0;
err:
	vp_vdpa_free_irq(vp_vdpa);
	return ret;
}

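/*
 * Interrupts are set up lazily: the MSI-X vectors are allocated and wired
 * up when the driver sets DRIVER_OK, and released again on reset.
 */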
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 s = vp_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vp_vdpa_request_irq(vp_vdpa);
	}

	vp_modern_set_status(mdev, status);
}

static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 s = vp_vdpa_get_status(vdpa);

	vp_modern_set_status(mdev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_vdpa_free_irq(vp_vdpa);

	return 0;
}

static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VP_VDPA_QUEUE_MAX;
}

static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				struct vdpa_vq_state *state)
{
	/* Note that this is not supported by virtio specification, so
	 * we return -EOPNOTSUPP here. This means we can't support live
	 * migration, vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
				      const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_split *split = &state->split;

	if (split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
				       const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_packed *packed = &state->packed;

	if (packed->last_avail_counter == 1 &&
	    packed->last_avail_idx == 0 &&
	    packed->last_used_counter == 1 &&
	    packed->last_used_idx == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				const struct vdpa_vq_state *state)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	/* Note that this is not supported by virtio specification.
	 * But if the state is by chance equal to the device initial
	 * state, we can let it go.
	 */
	if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
	    !vp_modern_get_queue_enable(mdev, qid)) {
		if (vp_modern_get_driver_features(mdev) &
		    BIT_ULL(VIRTIO_F_RING_PACKED))
			return vp_vdpa_set_vq_state_packed(vdpa, state);
		else
			return vp_vdpa_set_vq_state_split(vdpa, state);
	}

	return -EOPNOTSUPP;
}

static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			      struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->vring[qid].cb = *cb;
}

static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
				 u16 qid, bool ready)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_enable(mdev, qid, ready);
}

static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_enable(mdev, qid);
}

static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
			       u32 num)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_size(mdev, qid, num);
}

static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_queue_address(mdev, qid, desc_area,
				driver_area, device_area);

	return 0;
}

static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}

static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_generation(mdev);
}

static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.device;
}

static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.vendor;
}

static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return PAGE_SIZE;
}

static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->device_len;
}

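/*
 * Config space reads are retried until the generation counter reads the
 * same before and after the copy, so a concurrent device-side update
 * cannot produce a torn read.
 */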
static void vp_vdpa_get_config(struct vdpa_device *vdpa,
			       unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 old, new;
	u8 *p;
	int i;

	do {
		old = vp_ioread8(&mdev->common->config_generation);
		p = buf;
		for (i = 0; i < len; i++)
			*p++ = vp_ioread8(mdev->device + offset + i);

		new = vp_ioread8(&mdev->common->config_generation);
	} while (old != new);
}

static void vp_vdpa_set_config(struct vdpa_device *vdpa,
			       unsigned int offset, const void *buf,
			       unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		vp_iowrite8(*p++, mdev->device + offset + i);
}

static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->config_cb = *cb;
}

static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct vdpa_notification_area notify;

	notify.addr = vp_vdpa->vring[qid].notify_pa;
	notify.size = mdev->notify_offset_multiplier;

	return notify;
}

static const struct vdpa_config_ops vp_vdpa_ops = {
	.get_device_features = vp_vdpa_get_device_features,
	.set_driver_features = vp_vdpa_set_driver_features,
	.get_driver_features = vp_vdpa_get_driver_features,
	.get_status = vp_vdpa_get_status,
	.set_status = vp_vdpa_set_status,
	.reset = vp_vdpa_reset,
	.get_vq_num_max = vp_vdpa_get_vq_num_max,
	.get_vq_state = vp_vdpa_get_vq_state,
	.get_vq_notification = vp_vdpa_get_vq_notification,
	.set_vq_state = vp_vdpa_set_vq_state,
	.set_vq_cb = vp_vdpa_set_vq_cb,
	.set_vq_ready = vp_vdpa_set_vq_ready,
	.get_vq_ready = vp_vdpa_get_vq_ready,
	.set_vq_num = vp_vdpa_set_vq_num,
	.set_vq_address = vp_vdpa_set_vq_address,
	.kick_vq = vp_vdpa_kick_vq,
	.get_generation = vp_vdpa_get_generation,
	.get_device_id = vp_vdpa_get_device_id,
	.get_vendor_id = vp_vdpa_get_vendor_id,
	.get_vq_align = vp_vdpa_get_vq_align,
	.get_config_size = vp_vdpa_get_config_size,
	.get_config = vp_vdpa_get_config,
	.set_config = vp_vdpa_set_config,
	.set_config_cb = vp_vdpa_set_config_cb,
	.get_vq_irq = vp_vdpa_get_vq_irq,
};

static void vp_vdpa_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct virtio_pci_modern_device *mdev;
	struct device *dev = &pdev->dev;
	struct vp_vdpa *vp_vdpa;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
				    dev, &vp_vdpa_ops, NULL, false);
	if (IS_ERR(vp_vdpa)) {
		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
		return PTR_ERR(vp_vdpa);
	}

	mdev = &vp_vdpa->mdev;
	mdev->pci_dev = pdev;

	ret = vp_modern_probe(mdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vp_vdpa);

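	/* DMA is done by the PCI device itself, so expose it as the DMA device. */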
	vp_vdpa->vdpa.dma_dev = &pdev->dev;
	vp_vdpa->queues = vp_modern_get_num_queues(mdev);

	ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed for adding devres for freeing irq vectors\n");
		goto err;
	}

	vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
				      sizeof(*vp_vdpa->vring),
				      GFP_KERNEL);
	if (!vp_vdpa->vring) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Fail to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < vp_vdpa->queues; i++) {
		vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vp_vdpa->vring[i].notify =
			vp_modern_map_vq_notify(mdev, i,
						&vp_vdpa->vring[i].notify_pa);
		if (!vp_vdpa->vring[i].notify) {
			ret = -EINVAL;
			dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
			goto err;
		}
	}
	vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&vp_vdpa->vdpa.dev);
	return ret;
}

static void vp_vdpa_remove(struct pci_dev *pdev)
{
	struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&vp_vdpa->vdpa);
	vp_modern_remove(&vp_vdpa->mdev);
}

static struct pci_driver vp_vdpa_driver = {
	.name = "vp-vdpa",
	.id_table = NULL, /* only dynamic ids */
	.probe = vp_vdpa_probe,
	.remove = vp_vdpa_remove,
};

module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");