/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Paul Brook <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/boards.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"
#include "sysemu/replay.h"
#include "trace.h"

#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
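
/*
 * With the legacy register layout, VIRTIO_PCI_CONFIG_OFF() works out to
 * 20 bytes of common header registers without MSI-X and 24 bytes when
 * MSI-X is enabled (two extra 16-bit vector registers), so the per-driver
 * configuration space begins immediately after them in the legacy BAR.
 */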

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        if (vector != VIRTIO_NO_VECTOR) {
            msix_notify(&proxy->pci_dev, vector);
        }
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}

static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);

        if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
            return -EINVAL;
        }
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    vdev->config_vector = vector;
    if (vector != VIRTIO_NO_VECTOR) {
        msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
        if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
            return -EINVAL;
        }
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

typedef struct VirtIOPCIIDInfo {
    /* virtio id */
    uint16_t vdev_id;
    /* pci device id for the transitional device */
    uint16_t trans_devid;
    uint16_t class_id;
} VirtIOPCIIDInfo;

static const VirtIOPCIIDInfo virtio_pci_id_info[] = {
    {
        .vdev_id = VIRTIO_ID_CRYPTO,
        .class_id = PCI_CLASS_OTHERS,
    }, {
        .vdev_id = VIRTIO_ID_FS,
        .class_id = PCI_CLASS_STORAGE_OTHER,
    }, {
        .vdev_id = VIRTIO_ID_NET,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_NET,
        .class_id = PCI_CLASS_NETWORK_ETHERNET,
    }, {
        .vdev_id = VIRTIO_ID_BLOCK,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_BLOCK,
        .class_id = PCI_CLASS_STORAGE_SCSI,
    }, {
        .vdev_id = VIRTIO_ID_CONSOLE,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_CONSOLE,
        .class_id = PCI_CLASS_COMMUNICATION_OTHER,
    }, {
        .vdev_id = VIRTIO_ID_SCSI,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_SCSI,
        .class_id = PCI_CLASS_STORAGE_SCSI
    }, {
        .vdev_id = VIRTIO_ID_9P,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_9P,
        .class_id = PCI_BASE_CLASS_NETWORK,
    }, {
        .vdev_id = VIRTIO_ID_BALLOON,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_BALLOON,
        .class_id = PCI_CLASS_OTHERS,
    }, {
        .vdev_id = VIRTIO_ID_RNG,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_RNG,
        .class_id = PCI_CLASS_OTHERS,
    },
};

static const VirtIOPCIIDInfo *virtio_pci_get_id_info(uint16_t vdev_id)
{
    const VirtIOPCIIDInfo *info = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(virtio_pci_id_info); i++) {
        if (virtio_pci_id_info[i].vdev_id == vdev_id) {
            info = &virtio_pci_id_info[i];
            break;
        }
    }

    if (!info) {
        /* The device id is invalid or not added to the id_info yet. */
        error_report("Invalid virtio device(id %u)", vdev_id);
        abort();
    }

    return info;
}

/*
 * Get the Transitional Device ID for the specific device, return
 * zero if the device is non-transitional.
 */
uint16_t virtio_pci_get_trans_devid(uint16_t device_id)
{
    return virtio_pci_get_id_info(device_id)->trans_devid;
}

/*
 * Get the Class ID for the specific device.
 */
uint16_t virtio_pci_get_class_id(uint16_t device_id)
{
    return virtio_pci_get_id_info(device_id)->class_id;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}
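
/*
 * For example, with page-per-vq enabled, queue 3's notify register lives
 * at offset 3 * 0x1000 = 0x3000 in the notify region; without it, notify
 * registers are packed at 4-byte strides (queue 3 at offset 12).
 */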

static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                      false, n, notifier);
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                      false, n, notifier);
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector, vq_idx;
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        vq_idx = val;
        if (vq_idx < VIRTIO_QUEUE_MAX && virtio_queue_get_num(vdev, vq_idx)) {
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA)) {
                VirtQueue *vq = virtio_get_queue(vdev, vq_idx);

                virtio_queue_set_shadow_avail_idx(vq, val >> 16);
            }
            virtio_queue_notify(vdev, vq_idx);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with the bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        if (vdev->config_vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        }
        /* Make it possible for the guest to discover that an error occurred. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        vector = virtio_queue_vector(vdev, vdev->queue_sel);
        if (vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vector);
        }
        /* Make it possible for the guest to discover that an error occurred. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: unexpected address 0x%x value 0x%x\n",
                      __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = qatomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}

static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

static void virtio_pci_ats_ctrl_trigger(PCIDevice *pci_dev, bool enable)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    vdev->device_iotlb_enabled = enable;

    if (k->toggle_device_iotlb) {
        k->toggle_device_iotlb(vdev);
    }
}

static void pcie_ats_config_write(PCIDevice *dev, uint32_t address,
                                  uint32_t val, int len)
{
    uint32_t off;
    uint16_t ats_cap = dev->exp.ats_cap;

    if (!ats_cap || address < ats_cap) {
        return;
    }
    off = address - ats_cap;
    if (off >= PCI_EXT_CAP_ATS_SIZEOF) {
        return;
    }

    if (range_covers_byte(off, len, PCI_ATS_CTRL + 1)) {
        virtio_pci_ats_ctrl_trigger(dev, !!(val & PCI_ATS_CTRL_ENABLE));
    }
}

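/*
 * Config-space accesses may also land in the VIRTIO_PCI_CAP_PCI_CFG window
 * handled below: the guest programs the capability's offset and length
 * fields, and any access to pci_cfg_data is then forwarded to the matching
 * modern region, giving it a config-space path to the modern registers
 * (the read side is mirrored in virtio_read_config()).
 */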
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
        pcie_ats_config_write(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t caplen;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        caplen = le32_to_cpu(cfg->cap.length);

        if (caplen == 1 || caplen == 2 || caplen == 4) {
            assert(caplen <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, caplen);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t caplen;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        caplen = le32_to_cpu(cfg->cap.length);

        if (caplen == 1 || caplen == 2 || caplen == 4) {
            assert(caplen <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, caplen);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

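/*
 * The kvm_virtio_pci_vq_vector_* helpers manage one KVM MSI route per
 * MSI-X vector: the first user of a vector allocates a virq via
 * kvm_irqchip_add_msi_route(), further users share it through the
 * irqfd->users refcount, and the route is released when the last user
 * drops it.
 */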
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
        ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        kvm_irqchip_commit_route_changes(&c);
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    EventNotifier *n,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         EventNotifier *n,
                                         unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
                                   EventNotifier **n, unsigned int *vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq;

    if (!proxy->vector_irqfd && vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) {
        return -1;
    }

    if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
        *n = virtio_config_get_guest_notifier(vdev);
        *vector = vdev->config_vector;
    } else {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            return -1;
        }
        *vector = virtio_queue_vector(vdev, queue_no);
        vq = virtio_get_queue(vdev, queue_no);
        *n = virtio_queue_get_guest_notifier(vq);
    }
    return 0;
}

static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
{
    unsigned int vector;
    int ret;
    EventNotifier *n;
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return ret;
    }
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return 0;
    }
    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
    if (ret < 0) {
        return ret;
    }
    /*
     * If the guest supports masking, set up the irqfd now.
     * Otherwise, delay until it is unmasked in the frontend.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
        if (ret < 0) {
            kvm_virtio_pci_vq_vector_release(proxy, vector);
            return ret;
        }
    }

    return 0;
}

static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs)
{
    int queue_no;
    int ret = 0;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            return -1;
        }
        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
    }
    return ret;
}

static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
{
    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}

static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
                                              int queue_no)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    EventNotifier *n;
    int ret;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    PCIDevice *dev = &proxy->pci_dev;

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return;
    }
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return;
    }
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
    kvm_virtio_pci_vq_vector_release(proxy, vector);
}

static void kvm_virtio_pci_vector_vq_release(VirtIOPCIProxy *proxy, int nvqs)
{
    int queue_no;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        kvm_virtio_pci_vector_release_one(proxy, queue_no);
    }
}

static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
{
    kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}

static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg,
                                        EventNotifier *n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If the guest supports masking, the irqfd is already set up; unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
    }
    return ret;
}

static void virtio_pci_one_vector_mask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       EventNotifier *n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If the guest supports masking, keep the irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    EventNotifier *n;
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            n = virtio_queue_get_guest_notifier(vq);
            ret = virtio_pci_one_vector_unmask(proxy, index, vector, msg, n);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    /* unmask config intr */
    if (vector == vdev->config_vector) {
        n = virtio_config_get_guest_notifier(vdev);
        ret = virtio_pci_one_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector,
                                           msg, n);
        if (ret < 0) {
            goto undo_config;
        }
    }
    return 0;
undo_config:
    n = virtio_config_get_guest_notifier(vdev);
    virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            n = virtio_queue_get_guest_notifier(vq);
            virtio_pci_one_vector_mask(proxy, index, vector, n);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    EventNotifier *n;
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        n = virtio_queue_get_guest_notifier(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_one_vector_mask(proxy, index, vector, n);
        }
        vq = virtio_vector_next_queue(vq);
    }

    if (vector == vdev->config_vector) {
        n = virtio_config_get_guest_notifier(vdev);
        virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    int ret;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        ret = virtio_pci_get_notifier(proxy, queue_no, &notifier, &vector);
        if (ret < 0) {
            break;
        }
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
    /* poll the config intr */
    ret = virtio_pci_get_notifier(proxy, VIRTIO_CONFIG_IRQ_IDX, &notifier,
                                  &vector);
    if (ret < 0) {
        return;
    }
    if (vector < vector_start || vector >= vector_end ||
        !msix_is_masked(dev, vector)) {
        return;
    }
    if (k->guest_notifier_pending) {
        if (k->guest_notifier_pending(vdev, VIRTIO_CONFIG_IRQ_IDX)) {
            msix_set_pending(dev, vector);
        }
    } else if (event_notifier_test_and_clear(notifier)) {
        msix_set_pending(dev, vector);
    }
}

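/*
 * Route a guest notifier either to the config interrupt handler or to the
 * per-queue handler.  When with_irqfd is true the interrupt is delivered
 * by KVM through the irqfd, so no userspace fd handler is installed.
 */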
void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq,
                                              int n, bool assign,
                                              bool with_irqfd)
{
    if (n == VIRTIO_CONFIG_IRQ_IDX) {
        virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, assign, with_irqfd);
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = NULL;
    EventNotifier *notifier = NULL;

    if (n == VIRTIO_CONFIG_IRQ_IDX) {
        notifier = virtio_config_get_guest_notifier(vdev);
    } else {
        vq = virtio_get_queue(vdev, n);
        notifier = virtio_queue_get_guest_notifier(vq);
    }

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, true, with_irqfd);
    } else {
        virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, false,
                                                 with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /*
     * When deassigning, pass a consistent nvqs value to avoid leaking
     * notifiers. But first check that we've actually been configured, and
     * exit early if we haven't.
     */
    if (!assign && !proxy->nvqs_with_notifiers) {
        return 0;
    }
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd ||
         (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) &&
        !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_vq_release(proxy, nvqs);
            kvm_virtio_pci_vector_config_release(proxy);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    r = virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, assign,
                                      with_irqfd);
    if (r < 0) {
        goto config_assign_error;
    }
    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd ||
         (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) &&
        assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_vq_use(proxy, nvqs);
            if (r < 0) {
                goto config_assign_error;
            }
            r = kvm_virtio_pci_vector_config_use(proxy);
            if (r < 0) {
                goto config_error;
            }
        }

        r = msix_set_vector_notifiers(&proxy->pci_dev, virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_vq_release(proxy, nvqs);
    }
config_error:
    if (with_irqfd) {
        kvm_virtio_pci_vector_config_release(proxy);
    }
config_assign_error:
    virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, !assign,
                                  with_irqfd);
assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    g_free(proxy->vector_irqfd);
    proxy->vector_irqfd = NULL;
    return r;
}

static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static bool virtio_pci_iommu_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;
    AddressSpace *dma_as = pci_device_iommu_address_space(dev);

    if (dma_as == &address_space_memory) {
        return false;
    }

    return true;
}

static bool virtio_pci_queue_enabled(DeviceState *d, int n)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return proxy->vqs[n].enabled;
    }

    return virtio_queue_enabled_legacy(vdev, n);
}

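/*
 * Add a vendor-specific PCI capability describing one of the modern
 * regions.  Everything in struct virtio_pci_cap past the generic
 * capability header (id/next) is copied verbatim into config space.
 */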
static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

static void virtio_pci_set_vector(VirtIODevice *vdev,
                                  VirtIOPCIProxy *proxy,
                                  int queue_no, uint16_t old_vector,
                                  uint16_t new_vector)
{
    bool kvm_irqfd = (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        msix_enabled(&proxy->pci_dev) && kvm_msi_via_irqfd_enabled();

    if (new_vector == old_vector) {
        return;
    }

    /*
     * If the device uses irqfd and the vector changes after DRIVER_OK is
     * set, we need to release the old vector and set up the new one.
     * Otherwise we just need to set the new vector on the device.
     */
    if (kvm_irqfd && old_vector != VIRTIO_NO_VECTOR) {
        kvm_virtio_pci_vector_release_one(proxy, queue_no);
    }
    /* Set the new vector on the device. */
    if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
        vdev->config_vector = new_vector;
    } else {
        virtio_queue_set_vector(vdev, queue_no, new_vector);
    }
    /* If the vector changed, set up the new one. */
    if (kvm_irqfd && new_vector != VIRTIO_NO_VECTOR) {
        kvm_virtio_pci_vector_use_one(proxy, queue_no);
    }
}

int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy,
                           uint8_t bar, uint64_t offset, uint64_t length,
                           uint8_t id)
{
    struct virtio_pci_cap64 cap = {
        .cap.cap_len = sizeof cap,
        .cap.cfg_type = VIRTIO_PCI_CAP_SHARED_MEMORY_CFG,
    };

    cap.cap.bar = bar;
    cap.cap.length = cpu_to_le32(length);
    cap.length_hi = cpu_to_le32(length >> 32);
    cap.cap.offset = cpu_to_le32(offset);
    cap.offset_hi = cpu_to_le32(offset >> 32);
    cap.cap.id = id;
    return virtio_pci_add_mem_cap(proxy, &cap.cap);
}
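
/*
 * A device realize function can use this to expose a shared-memory region
 * to the guest, e.g. (illustrative only; 'shm_bar_idx', 'cache_size' and
 * the id value 0 are hypothetical):
 *
 *     virtio_pci_add_shm_cap(proxy, shm_bar_idx, 0, cache_size, 0);
 *
 * which publishes a VIRTIO_PCI_CAP_SHARED_MEMORY_CFG capability with the
 * 64-bit offset/length split across the cap64 fields as above.
 */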

static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    case VIRTIO_PCI_COMMON_Q_RESET:
        val = proxy->vqs[vdev->queue_sel].reset;
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    if (vdev == NULL) {
        return;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        if (vdev->config_vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        }
        /* Make it possible for the guest to discover that an error occurred. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_pci_set_vector(vdev, proxy, VIRTIO_CONFIG_IRQ_IDX,
                              vdev->config_vector, val);
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_init_region_cache(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        vector = virtio_queue_vector(vdev, vdev->queue_sel);
        if (vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vector);
        }
        /* Make it possible for the guest to discover that an error occurred. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_pci_set_vector(vdev, proxy, vdev->queue_sel, vector, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        if (val == 1) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
            proxy->vqs[vdev->queue_sel].reset = 0;
            virtio_queue_enable(vdev, vdev->queue_sel);
        } else {
            virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
        }
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_RESET:
        if (val == 1) {
            proxy->vqs[vdev->queue_sel].reset = 1;

            virtio_queue_reset(vdev, vdev->queue_sel);

            proxy->vqs[vdev->queue_sel].reset = 0;
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    default:
        break;
    }
}

static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    if (virtio_bus_get_device(&proxy->bus) == NULL) {
        return UINT64_MAX;
    }

    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        trace_virtio_pci_notify_write(addr, val, size);
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = val;

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        trace_virtio_pci_notify_write_pio(addr, val, size);
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    val = qatomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);
    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    default:
        val = 0;
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
                                           const char *vdev_name)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    g_autoptr(GString) name = g_string_new(NULL);

    g_string_printf(name, "virtio-pci-common-%s", vdev_name);
    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          name->str,
                          proxy->common.size);

    g_string_printf(name, "virtio-pci-isr-%s", vdev_name);
    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          name->str,
                          proxy->isr.size);

    g_string_printf(name, "virtio-pci-device-%s", vdev_name);
    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          proxy,
                          name->str,
                          proxy->device.size);

    g_string_printf(name, "virtio-pci-notify-%s", vdev_name);
    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          proxy,
                          name->str,
                          proxy->notify.size);

    g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name);
    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          proxy,
                          name->str,
                          proxy->notify_pio.size);
}
1891
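/*
 * Expose one modern register block: map its MemoryRegion into the given
 * BAR and advertise its location to the guest through a vendor-specific
 * virtio_pci_cap in PCI config space.
 */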
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    /* Transitional quirk bit; a legacy driver that blindly acknowledges
     * every offered feature ends up setting it, which lets the device
     * spot such buggy drivers. */
    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuse guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (!virtio_legacy_allowed(vdev)) {
            /*
             * To avoid migration issues, we allow legacy mode when legacy
             * check is disabled in the old machine types (< 5.1).
             */
            if (virtio_legacy_check_disabled(vdev)) {
                warn_report("device is modern-only, but for backward "
                            "compatibility legacy is allowed");
            } else {
                error_setg(errp,
                           "device is modern-only, use disable-legacy=on");
                return;
            }
        }
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM is supported by"
                             " neither legacy nor transitional devices");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
        if (proxy->trans_devid) {
            pci_config_set_device_id(config, proxy->trans_devid);
        }
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     PCI_DEVICE_ID_VIRTIO_10_BASE + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }

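    /*
     * For example: virtio-net has virtio device ID 1, so a transitional
     * function keeps the classic PCI device ID 0x1000 with subsystem ID 1,
     * while a modern-only function gets device ID
     * PCI_DEVICE_ID_VIRTIO_10_BASE + 1 = 0x1041.
     */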
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy, vdev->name);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        /*
         * The VIRTIO_PCI_CAP_PCI_CFG capability is a window into the
         * regions above via config space accesses; mark its bar/offset/
         * length selector fields and its data window as guest-writable
         * in the device's wmask.
         */
        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSI-X can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}

static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 2   --  virtio modern io bar (off by default)
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx = 0;
    proxy->msix_bar_idx = 1;
    proxy->modern_io_bar_idx = 2;
    proxy->modern_mem_bar_idx = 4;

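    /* Region 3 is left unused; the 64-bit memory BAR consumes two
     * consecutive BAR registers (4 and 5), as PCI requires. */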

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));
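    /*
     * For example (assuming the default notify multiplier of 4 and
     * VIRTIO_QUEUE_MAX of 1024): notify.size = 4 * 1024 = 0x1000, so the
     * modern BAR is pow2ceil(0x3000 + 0x1000) = 0x4000 bytes. With
     * page-per-vq the multiplier grows to the page size and the BAR
     * grows accordingly.
     */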

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;
        uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
            pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
                          PCI_ERR_SIZEOF, NULL);
            last_pcie_cap_offset += PCI_ERR_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_PM_NO_SOFT_RESET) {
            pci_set_word(pci_dev->config + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_NO_SOFT_RESET);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, last_pcie_cap_offset,
                          proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
            last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    msix_uninit_exclusive_bar(pci_dev);
    if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port &&
        pci_is_express(pci_dev)) {
        pcie_aer_exit(pci_dev);
    }
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].reset = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }
}

static bool virtio_pci_no_soft_reset(PCIDevice *dev)
{
    uint16_t pmcsr;

    if (!pci_is_express(dev) || !dev->exp.pm_cap) {
        return false;
    }

    pmcsr = pci_get_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL);

    /*
     * When the No_Soft_Reset bit is set and the device is in the D3hot
     * power state (PowerState field == 3), don't reset the device.
     */
    return (pmcsr & PCI_PM_CTRL_NO_SOFT_RESET) &&
           (pmcsr & PCI_PM_CTRL_STATE_MASK) == 3;
}

static void virtio_pci_bus_reset_hold(Object *obj, ResetType type)
{
    PCIDevice *dev = PCI_DEVICE(obj);
    DeviceState *qdev = DEVICE(obj);

    if (virtio_pci_no_soft_reset(dev)) {
        return;
    }

    virtio_pci_reset(qdev);

    if (pci_is_express(dev)) {
        VirtIOPCIProxy *proxy = VIRTIO_PCI(dev);

        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            pci_word_test_and_clear_mask(
                dev->config + dev->exp.pm_cap + PCI_PM_CTRL,
                PCI_PM_CTRL_STATE_MASK);
        }
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-no-soft-reset", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PM_NO_SOFT_RESET_BIT, false),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_AER_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    rc->phases.hold = virtio_pci_bus_reset_hold;
}

static const TypeInfo virtio_pci_info = {
    .name = TYPE_VIRTIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_pci_class_init,
    .class_size = sizeof(VirtioPCIClass),
    .abstract = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}

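/*
 * Preset the disable-legacy/disable-modern properties: a transitional
 * device exposes both the legacy and the modern (virtio 1.0) interface,
 * while a non-transitional device is modern-only.
 */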
static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}

void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name = t->base_name,
        .parent = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .instance_finalize = t->instance_finalize,
        .class_size = t->class_size,
        .abstract = true,
        .interfaces = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name = t->non_transitional_name,
            .parent = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name = t->transitional_name,
            .parent = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    g_free(base_name);
}

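/*
 * A minimal usage sketch (hypothetical "virtio-foo" device; the names and
 * the VirtIOFooPCI type are illustrative, not part of this file):
 *
 *   static const VirtioPCIDeviceTypeInfo virtio_foo_pci_info = {
 *       .base_name             = "virtio-foo-pci-base",
 *       .generic_name          = "virtio-foo-pci",
 *       .transitional_name     = "virtio-foo-pci-transitional",
 *       .non_transitional_name = "virtio-foo-pci-non-transitional",
 *       .instance_size = sizeof(VirtIOFooPCI),
 *       .instance_init = virtio_foo_pci_instance_init,
 *       .class_init    = virtio_foo_pci_class_init,
 *   };
 *
 *   static void virtio_foo_pci_register(void)
 *   {
 *       virtio_pci_types_register(&virtio_foo_pci_info);
 *   }
 *   type_init(virtio_foo_pci_register)
 *
 * This registers the abstract base type plus the generic, transitional
 * and non-transitional user-creatable variants.
 */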

unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues)
{
    /*
     * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted
     * virtqueue buffers can handle their completion. When a different vCPU
     * handles completion it may need to IPI the vCPU that submitted the
     * request and this adds overhead.
     *
     * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in
     * guests with very many vCPUs and a device that is only used by a few
     * vCPUs. Unfortunately optimizing that case requires manual pinning inside
     * the guest, so those users might as well manually set the number of
     * queues. There is no upper limit that can be applied automatically and
     * doing so arbitrarily would result in a sudden performance drop once the
     * threshold number of vCPUs is exceeded.
     */
    unsigned num_queues = current_machine->smp.cpus;

    /*
     * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the
     * config change interrupt and the fixed virtqueues must be taken into
     * account too.
     */
    num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues);

    /*
     * There is a limit to how many virtqueues a device can have.
     */
    return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues);
}

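/*
 * Worked example (assuming PCI_MSIX_FLAGS_QSIZE == 0x7ff and
 * VIRTIO_QUEUE_MAX == 1024): a device with 2 fixed virtqueues, such as
 * virtio-scsi with its control and event queues, on an 8-vCPU guest gets
 * MIN(8, 2047 - 2, 1024 - 2) = 8 request queues, one per vCPU. Only with
 * over a thousand vCPUs would the VIRTIO_QUEUE_MAX cap kick in.
 */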

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->iommu_enabled = virtio_pci_iommu_enabled;
    k->queue_enabled = virtio_pci_queue_enabled;
}

static const TypeInfo virtio_pci_bus_info = {
    .name = TYPE_VIRTIO_PCI_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size = sizeof(VirtioPCIBusClass),
    .class_init = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)