// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for modern virtio-pci device
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 * Based on virtio_pci_modern.c.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>

#define VP_VDPA_QUEUE_MAX	256
#define VP_VDPA_DRIVER_NAME	"vp_vdpa"
#define VP_VDPA_NAME_SIZE	256

struct vp_vring {
	void __iomem *notify;
	char msix_name[VP_VDPA_NAME_SIZE];
	struct vdpa_callback cb;
	resource_size_t notify_pa;
	int irq;
};

struct vp_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_modern_device mdev;
	struct vp_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[VP_VDPA_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vp_vdpa, vdpa);
}

static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return &vp_vdpa->mdev;
}

static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_features(mdev);
}

static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_features(mdev, features);

	return 0;
}

static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_status(mdev);
}

static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i;

	for (i = 0; i < vp_vdpa->queues; i++) {
		if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
				      &vp_vdpa->vring[i]);
			vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
		vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (vp_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		vp_vdpa->vectors = 0;
	}
}

static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
	struct vp_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
	struct vp_vdpa *vp_vdpa = arg;

	if (vp_vdpa->config_cb.callback)
		return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);

	return IRQ_HANDLED;
}
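/*
 * MSI-X layout used below: one vector per virtqueue (vector i services
 * queue i) plus one extra vector, at index 'queues', for config space
 * interrupts. vp_vdpa_free_irq() above undoes exactly this mapping.
 */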
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	int i, ret, irq;
	int queues = vp_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to allocate irq vectors want %d but %d\n",
			vectors, ret);
		return ret;
	}

	vp_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
			 "vp-vdpa[%s]-%d\n", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       vp_vdpa_vq_handler,
				       0, vp_vdpa->vring[i].msix_name,
				       &vp_vdpa->vring[i]);
		if (ret) {
			dev_err(&pdev->dev,
				"vp_vdpa: fail to request irq for vq %d\n", i);
			goto err;
		}
		vp_modern_queue_vector(mdev, i, i);
		vp_vdpa->vring[i].irq = irq;
	}

	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
			       vp_vdpa->msix_name, vp_vdpa);
	if (ret) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to request irq for config interrupt\n");
		goto err;
	}
	vp_modern_config_vector(mdev, queues);
	vp_vdpa->config_irq = irq;

	return 0;
err:
	vp_vdpa_free_irq(vp_vdpa);
	return ret;
}
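/*
 * Interrupts are requested only on the transition to DRIVER_OK and are
 * freed again when DRIVER_OK is cleared (e.g. on device reset), so the
 * vectors are held only while the device is actually driven.
 */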
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 s = vp_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vp_vdpa_request_irq(vp_vdpa);
	}

	vp_modern_set_status(mdev, status);

	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    (s & VIRTIO_CONFIG_S_DRIVER_OK))
		vp_vdpa_free_irq(vp_vdpa);
}

static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VP_VDPA_QUEUE_MAX;
}

static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				struct vdpa_vq_state *state)
{
	/* Note that this is not supported by virtio specification, so
	 * we return -EOPNOTSUPP here. This means we can't support live
	 * migration, vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				const struct vdpa_vq_state *state)
{
	/* Note that this is not supported by virtio specification, so
	 * we return -EOPNOTSUPP here. This means we can't support live
	 * migration, vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}

static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			      struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->vring[qid].cb = *cb;
}

static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
				 u16 qid, bool ready)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_enable(mdev, qid, ready);
}

static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_enable(mdev, qid);
}

static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
			       u32 num)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_size(mdev, qid, num);
}

static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_queue_address(mdev, qid, desc_area,
				driver_area, device_area);

	return 0;
}

static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}

static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_generation(mdev);
}

static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.device;
}

static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.vendor;
}

static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return PAGE_SIZE;
}

static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->device_len;
}

static void vp_vdpa_get_config(struct vdpa_device *vdpa,
			       unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	u8 old, new;
	u8 *p;
	int i;

	/* Re-read until the generation counter is stable, so the copy is
	 * a consistent snapshot of the device config space.
	 */
	do {
		old = vp_ioread8(&mdev->common->config_generation);
		p = buf;
		for (i = 0; i < len; i++)
			*p++ = vp_ioread8(mdev->device + offset + i);

		new = vp_ioread8(&mdev->common->config_generation);
	} while (old != new);
}

static void vp_vdpa_set_config(struct vdpa_device *vdpa,
			       unsigned int offset, const void *buf,
			       unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		vp_iowrite8(*p++, mdev->device + offset + i);
}

static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->config_cb = *cb;
}

static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
	struct vdpa_notification_area notify;

	notify.addr = vp_vdpa->vring[qid].notify_pa;
	notify.size = mdev->notify_offset_multiplier;

	return notify;
}
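/*
 * vDPA config ops: the vdpa core invokes these on behalf of the bus
 * drivers (virtio_vdpa/vhost_vdpa); each op forwards to the matching
 * virtio-pci modern helper or register access implemented above.
 */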
static const struct vdpa_config_ops vp_vdpa_ops = {
	.get_features	= vp_vdpa_get_features,
	.set_features	= vp_vdpa_set_features,
	.get_status	= vp_vdpa_get_status,
	.set_status	= vp_vdpa_set_status,
	.get_vq_num_max	= vp_vdpa_get_vq_num_max,
	.get_vq_state	= vp_vdpa_get_vq_state,
	.get_vq_notification = vp_vdpa_get_vq_notification,
	.set_vq_state	= vp_vdpa_set_vq_state,
	.set_vq_cb	= vp_vdpa_set_vq_cb,
	.set_vq_ready	= vp_vdpa_set_vq_ready,
	.get_vq_ready	= vp_vdpa_get_vq_ready,
	.set_vq_num	= vp_vdpa_set_vq_num,
	.set_vq_address	= vp_vdpa_set_vq_address,
	.kick_vq	= vp_vdpa_kick_vq,
	.get_generation	= vp_vdpa_get_generation,
	.get_device_id	= vp_vdpa_get_device_id,
	.get_vendor_id	= vp_vdpa_get_vendor_id,
	.get_vq_align	= vp_vdpa_get_vq_align,
	.get_config_size = vp_vdpa_get_config_size,
	.get_config	= vp_vdpa_get_config,
	.set_config	= vp_vdpa_set_config,
	.set_config_cb	= vp_vdpa_set_config_cb,
};

static void vp_vdpa_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct virtio_pci_modern_device *mdev;
	struct device *dev = &pdev->dev;
	struct vp_vdpa *vp_vdpa;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
				    dev, &vp_vdpa_ops, NULL);
	if (vp_vdpa == NULL) {
		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
		return -ENOMEM;
	}

	mdev = &vp_vdpa->mdev;
	mdev->pci_dev = pdev;

	ret = vp_modern_probe(mdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vp_vdpa);

	vp_vdpa->vdpa.dma_dev = &pdev->dev;
	vp_vdpa->queues = vp_modern_get_num_queues(mdev);

	ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to add devres for freeing irq vectors\n");
		goto err;
	}

	vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
				      sizeof(*vp_vdpa->vring),
				      GFP_KERNEL);
	if (!vp_vdpa->vring) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Fail to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < vp_vdpa->queues; i++) {
		vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vp_vdpa->vring[i].notify =
			vp_modern_map_vq_notify(mdev, i,
						&vp_vdpa->vring[i].notify_pa);
		if (!vp_vdpa->vring[i].notify) {
			ret = -EINVAL;
			dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
			goto err;
		}
	}
	vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&vp_vdpa->vdpa.dev);
	return ret;
}

static void vp_vdpa_remove(struct pci_dev *pdev)
{
	struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&vp_vdpa->vdpa);
	vp_modern_remove(&vp_vdpa->mdev);
}

static struct pci_driver vp_vdpa_driver = {
	.name		= "vp-vdpa",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vp_vdpa_probe,
	.remove		= vp_vdpa_remove,
};
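/*
 * With no static id_table, a device must be bound explicitly via sysfs.
 * An illustrative sequence (PCI address 0000:03:00.0 is only an example,
 * assumed to be currently bound to virtio-pci):
 *
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers/virtio-pci/unbind
 *   echo vp-vdpa > /sys/bus/pci/devices/0000:03:00.0/driver_override
 *   echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
 */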
module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");