Lines matching refs: dev. These are identifier-search hits over QEMU's MSI-X emulation (hw/pci/msix.c); each hit shows the source line number, the matching code, the enclosing function, and whether dev is an argument or a local variable there.

36 static MSIMessage msix_prepare_message(PCIDevice *dev, unsigned vector)  in msix_prepare_message()  argument
38 uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE; in msix_prepare_message()
46 MSIMessage msix_get_message(PCIDevice *dev, unsigned vector) in msix_get_message() argument
48 return dev->msix_prepare_message(dev, vector); in msix_get_message()
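
msix_prepare_message() builds the MSIMessage straight from the vector's 16-byte slot in the MSI-X table: address low/high dwords, message data, and a vector-control dword. A minimal standalone sketch of that layout (plain C, not QEMU code; offsets follow the PCI spec and correspond to QEMU's PCI_MSIX_ENTRY_* constants):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE        16u   /* one MSI-X table entry per vector */
#define ENTRY_LOWER_ADDR   0u
#define ENTRY_UPPER_ADDR   4u
#define ENTRY_DATA         8u
#define ENTRY_VECTOR_CTRL 12u

/* little-endian accessors, the equivalent of pci_get_long()/pci_set_long() */
static uint32_t get_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void put_le32(uint8_t *p, uint32_t v)
{
    p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

int main(void)
{
    uint8_t table[2 * ENTRY_SIZE] = { 0 };          /* table for two vectors */
    unsigned vector = 1;
    uint8_t *entry = table + vector * ENTRY_SIZE;   /* same indexing as msix.c */

    /* pretend the guest programmed vector 1 */
    put_le32(entry + ENTRY_LOWER_ADDR, 0xfee00000);
    put_le32(entry + ENTRY_UPPER_ADDR, 0);
    put_le32(entry + ENTRY_DATA, 0x4041);

    uint64_t addr = (uint64_t)get_le32(entry + ENTRY_UPPER_ADDR) << 32 |
                    get_le32(entry + ENTRY_LOWER_ADDR);
    printf("vector %u -> address 0x%" PRIx64 " data 0x%x\n",
           vector, addr, get_le32(entry + ENTRY_DATA));
    return 0;
}
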
55 void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg) in msix_set_message() argument
57 uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE; in msix_set_message()
69 static uint8_t *msix_pending_byte(PCIDevice *dev, int vector) in msix_pending_byte() argument
71 return dev->msix_pba + vector / 8; in msix_pending_byte()
74 static int msix_is_pending(PCIDevice *dev, int vector) in msix_is_pending() argument
76 return *msix_pending_byte(dev, vector) & msix_pending_mask(vector); in msix_is_pending()
79 void msix_set_pending(PCIDevice *dev, unsigned int vector) in msix_set_pending() argument
81 *msix_pending_byte(dev, vector) |= msix_pending_mask(vector); in msix_set_pending()
84 void msix_clr_pending(PCIDevice *dev, int vector) in msix_clr_pending() argument
86 *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector); in msix_clr_pending()
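
The pending helpers above implement the PCI Pending Bit Array: vector N is tracked in bit N % 8 of PBA byte N / 8. A small standalone sketch of that arithmetic (assumed to mirror msix_pending_byte()/msix_pending_mask()):

#include <stdint.h>
#include <stdio.h>

static uint8_t *pending_byte(uint8_t *pba, unsigned vector)
{
    return pba + vector / 8;          /* one PBA byte covers eight vectors */
}

static uint8_t pending_mask(unsigned vector)
{
    return (uint8_t)(1u << (vector % 8));
}

int main(void)
{
    uint8_t pba[8] = { 0 };           /* enough for 64 vectors */
    unsigned vector = 13;

    *pending_byte(pba, vector) |= pending_mask(vector);    /* set_pending */
    printf("pending: %d\n", !!(*pending_byte(pba, vector) & pending_mask(vector)));

    *pending_byte(pba, vector) &= ~pending_mask(vector);   /* clr_pending */
    printf("pending: %d\n", !!(*pending_byte(pba, vector) & pending_mask(vector)));
    return 0;
}
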
89 static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask) in msix_vector_masked() argument
92 uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA]; in msix_vector_masked()
98 return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] & in msix_vector_masked()
102 bool msix_is_masked(PCIDevice *dev, unsigned int vector) in msix_is_masked() argument
104 return msix_vector_masked(dev, vector, dev->msix_function_masked); in msix_is_masked()
107 static void msix_fire_vector_notifier(PCIDevice *dev, in msix_fire_vector_notifier() argument
113 if (!dev->msix_vector_use_notifier) { in msix_fire_vector_notifier()
117 dev->msix_vector_release_notifier(dev, vector); in msix_fire_vector_notifier()
119 msg = msix_get_message(dev, vector); in msix_fire_vector_notifier()
120 ret = dev->msix_vector_use_notifier(dev, vector, msg); in msix_fire_vector_notifier()
125 static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked) in msix_handle_mask_update() argument
127 bool is_masked = msix_is_masked(dev, vector); in msix_handle_mask_update()
130 MSIMessage msg = msix_prepare_message(dev, vector); in msix_handle_mask_update()
132 xen_evtchn_snoop_msi(dev, true, vector, msg.address, msg.data, in msix_handle_mask_update()
140 msix_fire_vector_notifier(dev, vector, is_masked); in msix_handle_mask_update()
142 if (!is_masked && msix_is_pending(dev, vector)) { in msix_handle_mask_update()
143 msix_clr_pending(dev, vector); in msix_handle_mask_update()
144 msix_notify(dev, vector); in msix_handle_mask_update()
148 void msix_set_mask(PCIDevice *dev, int vector, bool mask) in msix_set_mask() argument
153 assert(vector < dev->msix_entries_nr); in msix_set_mask()
157 was_masked = msix_is_masked(dev, vector); in msix_set_mask()
160 dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT; in msix_set_mask()
162 dev->msix_table[offset] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; in msix_set_mask()
165 msix_handle_mask_update(dev, vector, was_masked); in msix_set_mask()
168 static bool msix_masked(PCIDevice *dev) in msix_masked() argument
170 return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK; in msix_masked()
173 static void msix_update_function_masked(PCIDevice *dev) in msix_update_function_masked() argument
175 dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev); in msix_update_function_masked()
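
A vector counts as masked when either mask level is in effect: the function-wide state (MSI-X disabled, or the Function Mask bit set in the capability's Message Control register) or the per-vector mask bit in the entry's Vector Control dword. A standalone sketch of that combination; bit positions are taken from the PCI spec (QEMU's MSIX_*_MASK macros address the same bits byte-wise):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSG_CTRL_ENABLE    0x8000u   /* Message Control bit 15: MSI-X Enable */
#define MSG_CTRL_MASKALL   0x4000u   /* Message Control bit 14: Function Mask */
#define VECTOR_CTRL_MASK   0x0001u   /* Vector Control bit 0: per-vector mask */

static bool function_masked(uint16_t msg_ctrl)
{
    /* masked whenever MSI-X is disabled or the whole function is masked */
    return !(msg_ctrl & MSG_CTRL_ENABLE) || (msg_ctrl & MSG_CTRL_MASKALL);
}

static bool vector_masked(uint16_t msg_ctrl, uint32_t vector_ctrl)
{
    return function_masked(msg_ctrl) || (vector_ctrl & VECTOR_CTRL_MASK);
}

int main(void)
{
    printf("%d\n", vector_masked(MSG_CTRL_ENABLE, 0));                    /* 0 */
    printf("%d\n", vector_masked(MSG_CTRL_ENABLE, VECTOR_CTRL_MASK));     /* 1 */
    printf("%d\n", vector_masked(MSG_CTRL_ENABLE | MSG_CTRL_MASKALL, 0)); /* 1 */
    printf("%d\n", vector_masked(0, 0));              /* 1: MSI-X disabled */
    return 0;
}
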
179 void msix_write_config(PCIDevice *dev, uint32_t addr, in msix_write_config() argument
182 unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET; in msix_write_config()
186 if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) { in msix_write_config()
190 trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev)); in msix_write_config()
192 was_masked = dev->msix_function_masked; in msix_write_config()
193 msix_update_function_masked(dev); in msix_write_config()
195 if (!msix_enabled(dev)) { in msix_write_config()
199 pci_device_deassert_intx(dev); in msix_write_config()
201 if (dev->msix_function_masked == was_masked) { in msix_write_config()
205 for (vector = 0; vector < dev->msix_entries_nr; ++vector) { in msix_write_config()
206 msix_handle_mask_update(dev, vector, in msix_write_config()
207 msix_vector_masked(dev, vector, was_masked)); in msix_write_config()
214 PCIDevice *dev = opaque; in msix_table_mmio_read() local
216 assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE); in msix_table_mmio_read()
217 return pci_get_long(dev->msix_table + addr); in msix_table_mmio_read()
223 PCIDevice *dev = opaque; in msix_table_mmio_write() local
227 assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE); in msix_table_mmio_write()
229 was_masked = msix_is_masked(dev, vector); in msix_table_mmio_write()
230 pci_set_long(dev->msix_table + addr, val); in msix_table_mmio_write()
231 msix_handle_mask_update(dev, vector, was_masked); in msix_table_mmio_write()
250 PCIDevice *dev = opaque; in msix_pba_mmio_read() local
251 if (dev->msix_vector_poll_notifier) { in msix_pba_mmio_read()
253 unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr); in msix_pba_mmio_read()
254 dev->msix_vector_poll_notifier(dev, vector_start, vector_end); in msix_pba_mmio_read()
257 return pci_get_long(dev->msix_pba + addr); in msix_pba_mmio_read()
278 static void msix_mask_all(struct PCIDevice *dev, unsigned nentries) in msix_mask_all() argument
285 bool was_masked = msix_is_masked(dev, vector); in msix_mask_all()
287 dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT; in msix_mask_all()
288 msix_handle_mask_update(dev, vector, was_masked); in msix_mask_all()
312 int msix_init(struct PCIDevice *dev, unsigned short nentries, in msix_init() argument
347 cap = pci_add_capability(dev, PCI_CAP_ID_MSIX, in msix_init()
353 dev->msix_cap = cap; in msix_init()
354 dev->cap_present |= QEMU_PCI_CAP_MSIX; in msix_init()
355 config = dev->config + cap; in msix_init()
358 dev->msix_entries_nr = nentries; in msix_init()
359 dev->msix_function_masked = true; in msix_init()
365 dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK | in msix_init()
368 dev->msix_table = g_malloc0(table_size); in msix_init()
369 dev->msix_pba = g_malloc0(pba_size); in msix_init()
370 dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used); in msix_init()
372 msix_mask_all(dev, nentries); in msix_init()
374 memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev), &msix_table_mmio_ops, dev, in msix_init()
376 memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio); in msix_init()
377 memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev), &msix_pba_mmio_ops, dev, in msix_init()
379 memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio); in msix_init()
381 dev->msix_prepare_message = msix_prepare_message; in msix_init()
386 int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries, in msix_init_exclusive_bar() argument
412 name = g_strdup_printf("%s-msix", dev->name); in msix_init_exclusive_bar()
413 memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size); in msix_init_exclusive_bar()
416 ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr, in msix_init_exclusive_bar()
417 0, &dev->msix_exclusive_bar, in msix_init_exclusive_bar()
424 pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY, in msix_init_exclusive_bar()
425 &dev->msix_exclusive_bar); in msix_init_exclusive_bar()
430 static void msix_free_irq_entries(PCIDevice *dev) in msix_free_irq_entries() argument
434 for (vector = 0; vector < dev->msix_entries_nr; ++vector) { in msix_free_irq_entries()
435 dev->msix_entry_used[vector] = 0; in msix_free_irq_entries()
436 msix_clr_pending(dev, vector); in msix_free_irq_entries()
440 static void msix_clear_all_vectors(PCIDevice *dev) in msix_clear_all_vectors() argument
444 for (vector = 0; vector < dev->msix_entries_nr; ++vector) { in msix_clear_all_vectors()
445 msix_clr_pending(dev, vector); in msix_clear_all_vectors()
450 void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar) in msix_uninit() argument
452 if (!msix_present(dev)) { in msix_uninit()
455 pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH); in msix_uninit()
456 dev->msix_cap = 0; in msix_uninit()
457 msix_free_irq_entries(dev); in msix_uninit()
458 dev->msix_entries_nr = 0; in msix_uninit()
459 memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio); in msix_uninit()
460 g_free(dev->msix_pba); in msix_uninit()
461 dev->msix_pba = NULL; in msix_uninit()
462 memory_region_del_subregion(table_bar, &dev->msix_table_mmio); in msix_uninit()
463 g_free(dev->msix_table); in msix_uninit()
464 dev->msix_table = NULL; in msix_uninit()
465 g_free(dev->msix_entry_used); in msix_uninit()
466 dev->msix_entry_used = NULL; in msix_uninit()
467 dev->cap_present &= ~QEMU_PCI_CAP_MSIX; in msix_uninit()
468 dev->msix_prepare_message = NULL; in msix_uninit()
471 void msix_uninit_exclusive_bar(PCIDevice *dev) in msix_uninit_exclusive_bar() argument
473 if (msix_present(dev)) { in msix_uninit_exclusive_bar()
474 msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar); in msix_uninit_exclusive_bar()
478 void msix_save(PCIDevice *dev, QEMUFile *f) in msix_save() argument
480 unsigned n = dev->msix_entries_nr; in msix_save()
482 if (!msix_present(dev)) { in msix_save()
486 qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE); in msix_save()
487 qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8)); in msix_save()
491 void msix_load(PCIDevice *dev, QEMUFile *f) in msix_load() argument
493 unsigned n = dev->msix_entries_nr; in msix_load()
496 if (!msix_present(dev)) { in msix_load()
500 msix_clear_all_vectors(dev); in msix_load()
501 qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE); in msix_load()
502 qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8)); in msix_load()
503 msix_update_function_masked(dev); in msix_load()
506 msix_handle_mask_update(dev, vector, true); in msix_load()
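
msix_save()/msix_load() serialize the raw table plus the PBA and, on load, recompute the function mask and replay mask updates. A hedged sketch of a device's migration handlers forwarding to them; in-tree devices usually go through their vmstate description instead (a VMSTATE_MSIX() wrapper exists for that), so treat the handler names here as illustrative:

static void mydev_save_msix(PCIDevice *pdev, QEMUFile *f)
{
    msix_save(pdev, f);   /* n * PCI_MSIX_ENTRY_SIZE table bytes + PBA bytes */
}

static int mydev_load_msix(PCIDevice *pdev, QEMUFile *f)
{
    msix_load(pdev, f);   /* clears stale pending bits, then restores state */
    return 0;
}
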
511 int msix_present(PCIDevice *dev) in msix_present() argument
513 return dev->cap_present & QEMU_PCI_CAP_MSIX; in msix_present()
517 int msix_enabled(PCIDevice *dev) in msix_enabled() argument
519 return (dev->cap_present & QEMU_PCI_CAP_MSIX) && in msix_enabled()
520 (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & in msix_enabled()
525 void msix_notify(PCIDevice *dev, unsigned vector) in msix_notify() argument
529 assert(vector < dev->msix_entries_nr); in msix_notify()
531 if (!dev->msix_entry_used[vector]) { in msix_notify()
535 if (msix_is_masked(dev, vector)) { in msix_notify()
536 msix_set_pending(dev, vector); in msix_notify()
540 msg = msix_get_message(dev, vector); in msix_notify()
542 msi_send_message(dev, msg); in msix_notify()
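
Raising an interrupt from device code is then a single call; msix_notify() itself checks the mask state and either sends the message or latches the pending bit, so the caller only needs to know MSI-X is enabled. A short sketch, reusing the includes and illustrative names from the realize example above:

static void mydev_raise_irq(PCIDevice *pdev, unsigned vector)
{
    if (msix_enabled(pdev)) {
        msix_notify(pdev, vector);  /* masked vectors are parked in the PBA */
        return;
    }
    /* a real device would fall back to INTx or MSI here if it offers them */
}
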
545 void msix_reset(PCIDevice *dev) in msix_reset() argument
547 if (!msix_present(dev)) { in msix_reset()
550 msix_clear_all_vectors(dev); in msix_reset()
551 dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &= in msix_reset()
552 ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET]; in msix_reset()
553 memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE); in msix_reset()
554 memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8); in msix_reset()
555 msix_mask_all(dev, dev->msix_entries_nr); in msix_reset()
567 void msix_vector_use(PCIDevice *dev, unsigned vector) in msix_vector_use() argument
569 assert(vector < dev->msix_entries_nr); in msix_vector_use()
570 dev->msix_entry_used[vector]++; in msix_vector_use()
574 void msix_vector_unuse(PCIDevice *dev, unsigned vector) in msix_vector_unuse() argument
576 assert(vector < dev->msix_entries_nr); in msix_vector_unuse()
577 if (!dev->msix_entry_used[vector]) { in msix_vector_unuse()
580 if (--dev->msix_entry_used[vector]) { in msix_vector_unuse()
583 msix_clr_pending(dev, vector); in msix_vector_unuse()
586 void msix_unuse_all_vectors(PCIDevice *dev) in msix_unuse_all_vectors() argument
588 if (!msix_present(dev)) { in msix_unuse_all_vectors()
591 msix_free_irq_entries(dev); in msix_unuse_all_vectors()
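
On device reset the usual pattern is to drop every vector reference and re-acquire the ones the device needs; msix_reset() itself (restoring the capability, table and PBA defaults) is normally driven by the PCI core's reset path rather than by the device model. A hedged sketch, reusing the illustrative names from the realize example:

static void mydev_reset(PCIDevice *pdev)
{
    msix_unuse_all_vectors(pdev);               /* also clears pending bits */
    for (unsigned v = 0; v < MYDEV_MSIX_VECTORS; v++) {
        msix_vector_use(pdev, v);
    }
}
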
594 unsigned int msix_nr_vectors_allocated(const PCIDevice *dev) in msix_nr_vectors_allocated() argument
596 return dev->msix_entries_nr; in msix_nr_vectors_allocated()
599 static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector) in msix_set_notifier_for_vector() argument
603 if (msix_is_masked(dev, vector)) { in msix_set_notifier_for_vector()
606 msg = msix_get_message(dev, vector); in msix_set_notifier_for_vector()
607 return dev->msix_vector_use_notifier(dev, vector, msg); in msix_set_notifier_for_vector()
610 static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector) in msix_unset_notifier_for_vector() argument
612 if (msix_is_masked(dev, vector)) { in msix_unset_notifier_for_vector()
615 dev->msix_vector_release_notifier(dev, vector); in msix_unset_notifier_for_vector()
618 int msix_set_vector_notifiers(PCIDevice *dev, in msix_set_vector_notifiers() argument
627 dev->msix_vector_use_notifier = use_notifier; in msix_set_vector_notifiers()
628 dev->msix_vector_release_notifier = release_notifier; in msix_set_vector_notifiers()
629 dev->msix_vector_poll_notifier = poll_notifier; in msix_set_vector_notifiers()
631 if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & in msix_set_vector_notifiers()
633 for (vector = 0; vector < dev->msix_entries_nr; vector++) { in msix_set_vector_notifiers()
634 ret = msix_set_notifier_for_vector(dev, vector); in msix_set_vector_notifiers()
640 if (dev->msix_vector_poll_notifier) { in msix_set_vector_notifiers()
641 dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr); in msix_set_vector_notifiers()
647 msix_unset_notifier_for_vector(dev, vector); in msix_set_vector_notifiers()
649 dev->msix_vector_use_notifier = NULL; in msix_set_vector_notifiers()
650 dev->msix_vector_release_notifier = NULL; in msix_set_vector_notifiers()
651 dev->msix_vector_poll_notifier = NULL; in msix_set_vector_notifiers()
655 void msix_unset_vector_notifiers(PCIDevice *dev) in msix_unset_vector_notifiers() argument
659 assert(dev->msix_vector_use_notifier && in msix_unset_vector_notifiers()
660 dev->msix_vector_release_notifier); in msix_unset_vector_notifiers()
662 if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & in msix_unset_vector_notifiers()
664 for (vector = 0; vector < dev->msix_entries_nr; vector++) { in msix_unset_vector_notifiers()
665 msix_unset_notifier_for_vector(dev, vector); in msix_unset_vector_notifiers()
668 dev->msix_vector_use_notifier = NULL; in msix_unset_vector_notifiers()
669 dev->msix_vector_release_notifier = NULL; in msix_unset_vector_notifiers()
670 dev->msix_vector_poll_notifier = NULL; in msix_unset_vector_notifiers()
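
The notifier hooks at the end let a device learn when a vector becomes routable (unmasked with a valid message) and when it goes away again, which is how VFIO and virtio-pci move vectors onto KVM irqfd fast paths. A hedged sketch of registering and removing them; the callback bodies and names are illustrative, and the callback shapes (MSIVectorUseNotifier and friends) are assumed to match QEMU's PCI headers:

static int mydev_vector_use(PCIDevice *pdev, unsigned int vector, MSIMessage msg)
{
    /* e.g. install a KVM routing entry / attach an irqfd for this vector */
    return 0;
}

static void mydev_vector_release(PCIDevice *pdev, unsigned int vector)
{
    /* tear the routing entry / irqfd back down */
}

static void mydev_vector_poll(PCIDevice *pdev, unsigned int start, unsigned int end)
{
    /* refresh pending bits for [start, end) before the guest reads the PBA */
}

static void mydev_start_fast_path(PCIDevice *pdev)
{
    if (msix_set_vector_notifiers(pdev, mydev_vector_use,
                                  mydev_vector_release,
                                  mydev_vector_poll) < 0) {
        /* stay on the fully emulated delivery path */
    }
}

static void mydev_stop_fast_path(PCIDevice *pdev)
{
    msix_unset_vector_notifiers(pdev);
}
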