/openbmc/qemu/hw/vfio/
device.c
  in vfio_device_reset_handler() (vbasedev: local):
    58  VFIODevice *vbasedev;
    61  QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
    62  if (vbasedev->dev->realized) {
    63  vbasedev->ops->vfio_compute_needs_reset(vbasedev);
    67  QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
    68  if (vbasedev->dev->realized && vbasedev->needs_reset) {
    69  vbasedev->ops->vfio_hot_reset_multi(vbasedev);
  in vfio_device_irq_disable() (vbasedev: argument):
    77  void vfio_device_irq_disable(VFIODevice *vbasedev, int index)
    87  vbasedev->io_ops->set_irqs(vbasedev, &irq_set);
  in vfio_device_irq_unmask() (vbasedev: argument):
    90  void vfio_device_irq_unmask(VFIODevice *vbasedev, int index)
  [all …]

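The elided body of vfio_device_irq_disable() reduces to one VFIO_DEVICE_SET_IRQS request, issued through vbasedev->io_ops->set_irqs(). A minimal sketch against the raw Linux VFIO uapi (a plain device fd, not QEMU's io_ops indirection); the helper name is invented for illustration:

    #include <linux/vfio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Disable every interrupt in one IRQ index: DATA_NONE + ACTION_TRIGGER
     * with count == 0 is the kernel's "tear this IRQ down" idiom. */
    static int irq_disable_sketch(int device_fd, unsigned int index)
    {
        struct vfio_irq_set irq_set;

        memset(&irq_set, 0, sizeof(irq_set));
        irq_set.argsz = sizeof(irq_set);
        irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set.index = index;
        irq_set.start = 0;
        irq_set.count = 0;

        return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &irq_set);
    }
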
migration.c
  in vfio_migration_send_event() (vbasedev: argument):
    92  static void vfio_migration_send_event(VFIODevice *vbasedev)
    94  VFIOMigration *migration = vbasedev->migration;
    95  DeviceState *dev = vbasedev->dev;
    99  if (!vbasedev->migration_events) {
    103  g_assert(vbasedev->ops->vfio_get_object);
    104  obj = vbasedev->ops->vfio_get_object(vbasedev);
  in vfio_migration_set_device_state() (vbasedev: argument):
    112  static void vfio_migration_set_device_state(VFIODevice *vbasedev,
    115  VFIOMigration *migration = vbasedev->migration;
    117  trace_vfio_migration_set_device_state(vbasedev->name,
    121  vfio_migration_send_event(vbasedev);
  [all …]

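vfio_migration_set_device_state() drives the v2 migration state machine (plus the tracing and QAPI event shown above). A sketch of the underlying ioctl, assuming the Linux >= 5.19 uapi in <linux/vfio.h>; the helper name is invented, and error handling plus the data_fd the kernel returns for data-transfer states are omitted:

    #include <linux/vfio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static int set_mig_state_sketch(int device_fd, uint32_t new_state)
    {
        /* VFIO_DEVICE_FEATURE takes a header followed by a feature payload. */
        size_t argsz = sizeof(struct vfio_device_feature) +
                       sizeof(struct vfio_device_feature_mig_state);
        struct vfio_device_feature *feature = calloc(1, argsz);
        struct vfio_device_feature_mig_state *mig = (void *)feature->data;
        int ret;

        feature->argsz = argsz;
        feature->flags = VFIO_DEVICE_FEATURE_SET |
                         VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
        mig->device_state = new_state;      /* e.g. VFIO_DEVICE_STATE_STOP_COPY */

        ret = ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
        free(feature);
        return ret;
    }
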
iommufd.c
  in iommufd_cdev_kvm_device_add() (vbasedev: argument):
    90  static bool iommufd_cdev_kvm_device_add(VFIODevice *vbasedev, Error **errp)
    92  return !vfio_kvm_device_add_fd(vbasedev->fd, errp);
  in iommufd_cdev_kvm_device_del() (vbasedev: argument):
    95  static void iommufd_cdev_kvm_device_del(VFIODevice *vbasedev)
    99  if (vfio_kvm_device_del_fd(vbasedev->fd, &err)) {
  in iommufd_cdev_connect_and_bind() (vbasedev: argument):
    104  static bool iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp)
    106  IOMMUFDBackend *iommufd = vbasedev->iommufd;
    121  if (!iommufd_cdev_kvm_device_add(vbasedev, errp)) {
    131  if (ioctl(vbasedev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
    133  vbasedev->fd, bind.iommufd);
    137  vbasedev->devid = bind.out_devid;
  [all …]

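The bind at line 131 above uses the iommufd cdev uapi. A sketch of just that step, assuming the struct layout in recent <linux/vfio.h> (Linux >= 6.6); the helper name is invented, and the surrounding work (connecting the IOMMUFD backend, KVM bookkeeping, error paths) is left out:

    #include <linux/vfio.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    /* Bind a VFIO cdev fd (/dev/vfio/devices/vfioX) to an already-open iommufd
     * and return the device id the kernel assigned, as stored in vbasedev->devid. */
    static int bind_to_iommufd_sketch(int vfio_cdev_fd, int iommufd,
                                      uint32_t *out_devid)
    {
        struct vfio_device_bind_iommufd bind = {
            .argsz = sizeof(bind),
            .flags = 0,
            .iommufd = iommufd,
        };

        if (ioctl(vfio_cdev_fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) {
            return -1;
        }
        *out_devid = bind.out_devid;
        return 0;
    }
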
migration-multifd.c
  in vfio_load_config_after_iter() (vbasedev: argument):
    38  bool vfio_load_config_after_iter(VFIODevice *vbasedev)
    40  if (vbasedev->migration_load_config_after_iter == ON_OFF_AUTO_ON) {
    42  } else if (vbasedev->migration_load_config_after_iter == ON_OFF_AUTO_OFF) {
    46  assert(vbasedev->migration_load_config_after_iter == ON_OFF_AUTO_AUTO);
  in vfio_load_state_buffer_insert() (vbasedev: argument):
    123  static bool vfio_load_state_buffer_insert(VFIODevice *vbasedev,
    128  VFIOMigration *migration = vbasedev->migration;
    141  vbasedev->name, packet->idx);
    149  vbasedev->migration_max_queued_buffers_size) {
    153  vbasedev->name, packet->idx,
    154  vbasedev->migration_max_queued_buffers_size);
  [all …]

platform.c
  in vfio_init_intp() (vbasedev: argument):
    58  static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
    63  container_of(vbasedev, VFIOPlatformDevice, vbasedev);
  in vfio_set_trigger_eventfd() (vbasedev: local):
    116  VFIODevice *vbasedev = &intp->vdev->vbasedev;
    122  if (!vfio_device_irq_set_signaling(vbasedev, intp->pin, 0,
    124  error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
  in vfio_mmap_set_enabled():
    151  for (i = 0; i < vdev->vbasedev.num_initial_regions; i++) {
  in vfio_platform_eoi() (vbasedev: argument):
    286  static void vfio_platform_eoi(VFIODevice *vbasedev)
    290  container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    304  vfio_device_irq_unmask(vbasedev, intp->pin);
  in vfio_set_resample_eventfd() (vbasedev: local):
    356  VFIODevice *vbasedev = &intp->vdev->vbasedev;
  [all …]

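vfio_init_intp(), vfio_platform_eoi() and the PCI/AP/vfio-user variants further down all recover their wrapper structure from the embedded VFIODevice with container_of(). A standalone illustration of that idiom; the struct definitions below are simplified stand-ins, not the QEMU ones:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    typedef struct VFIODevice { const char *name; } VFIODevice;

    typedef struct VFIOPlatformDevice {
        int mmap_enabled;
        VFIODevice vbasedev;            /* embedded base object */
    } VFIOPlatformDevice;

    /* Callbacks receive only the base pointer, yet need the full device. */
    static void eoi_sketch(VFIODevice *vbasedev)
    {
        VFIOPlatformDevice *vdev =
            container_of(vbasedev, VFIOPlatformDevice, vbasedev);

        printf("%s: mmap_enabled=%d\n", vbasedev->name, vdev->mmap_enabled);
    }

    int main(void)
    {
        VFIOPlatformDevice dev = {
            .mmap_enabled = 1,
            .vbasedev = { .name = "fff51000.ethernet" },
        };

        eoi_sketch(&dev.vbasedev);
        return 0;
    }
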
cpr-iommufd.c
  in vfio_cpr_save_device() (vbasedev: argument):
    54  static void vfio_cpr_save_device(VFIODevice *vbasedev)
    58  elem->name = g_strdup(vbasedev->name);
    59  elem->namelen = strlen(vbasedev->name) + 1;
    60  elem->ioas_id = vbasedev->cpr.ioas_id;
    61  elem->devid = vbasedev->devid;
    62  elem->hwpt_id = vbasedev->cpr.hwpt_id;
  in vfio_cpr_find_device() (vbasedev: argument):
    90  static bool vfio_cpr_find_device(VFIODevice *vbasedev)
    92  CprVFIODevice *elem = find_device(vbasedev->name);
    95  vbasedev->cpr.ioas_id = elem->ioas_id;
    96  vbasedev->devid = elem->devid;
  [all …]

migration-multifd.h
    17  bool vfio_multifd_setup(VFIODevice *vbasedev, bool alloc_multifd, Error **errp);
    18  void vfio_multifd_cleanup(VFIODevice *vbasedev);
    21  bool vfio_multifd_transfer_enabled(VFIODevice *vbasedev);
    23  bool vfio_load_config_after_iter(VFIODevice *vbasedev);
    27  int vfio_load_state_config_load_ready(VFIODevice *vbasedev);
    29  void vfio_multifd_emit_dummy_eos(VFIODevice *vbasedev, QEMUFile *f);
    35  int vfio_multifd_switchover_start(VFIODevice *vbasedev);

pci.c
  in vfio_intx_interrupt():
    125  trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);
  in vfio_pci_intx_eoi() (vbasedev: argument):
    136  void vfio_pci_intx_eoi(VFIODevice *vbasedev)
    138  VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
    144  trace_vfio_pci_intx_eoi(vbasedev->name);
    148  vfio_device_irq_unmask(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
  in vfio_intx_enable_kvm():
    164  vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    181  if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0,
    189  vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    193  trace_vfio_intx_enable_kvm(vdev->vbasedev.name);
    204  vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
  [all …]

region.c
  in vfio_region_write() (vbasedev: local):
    41  VFIODevice *vbasedev = region->vbasedev;
    68  ret = vbasedev->io_ops->region_write(vbasedev, region->nr,
    73  __func__, vbasedev->name, region->nr,
    77  trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
    87  vbasedev->ops->vfio_eoi(vbasedev);
  in vfio_region_read() (vbasedev: local):
    94  VFIODevice *vbasedev = region->vbasedev;
    104  ret = vbasedev->io_ops->region_read(vbasedev, region->nr, addr, size, &buf);
    107  __func__, vbasedev->name, region->nr,
    129  trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
    132  vbasedev->ops->vfio_eoi(vbasedev);
  [all …]

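For kernel-backed devices, the region_read/region_write io_ops dispatched above end up as pread()/pwrite() at the offset VFIO_DEVICE_GET_REGION_INFO reports for that region index. A sketch of the read side against the raw uapi (invented helper name; QEMU additionally traces, byte-swaps and signals EOI as the matches show):

    #include <linux/vfio.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static int region_read_sketch(int device_fd, uint32_t index, uint64_t addr,
                                  size_t size, void *data)
    {
        struct vfio_region_info info = { .argsz = sizeof(info), .index = index };

        if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info)) {
            return -1;
        }
        /* Region offsets are absolute within the device fd's file space. */
        if (pread(device_fd, data, size, info.offset + addr) != (ssize_t)size) {
            return -1;
        }
        return 0;
    }
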
vfio-migration-internal.h
    40  struct VFIODevice *vbasedev;    (member)
    58  bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp);
    59  void vfio_migration_exit(VFIODevice *vbasedev);
    60  bool vfio_device_state_is_running(VFIODevice *vbasedev);
    61  bool vfio_device_state_is_precopy(VFIODevice *vbasedev);
    66  int vfio_migration_set_state(VFIODevice *vbasedev,

container.c
  in vfio_device_get() (vbasedev: argument):
    857  VFIODevice *vbasedev, Error **errp)
    884  if (vbasedev->ram_block_discard_allowed !=
    898  vfio_device_prepare(vbasedev, &group->container->bcontainer, info);
    900  vbasedev->fd = fd;
    901  vbasedev->group = group;
    902  QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
  in vfio_device_put() (vbasedev: argument):
    914  static void vfio_device_put(VFIODevice *vbasedev)
    916  if (!vbasedev->group) {
    919  QLIST_REMOVE(vbasedev, next);
    920  vbasedev->group = NULL;
  [all …]

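vfio_device_get() is the legacy group/container attach path; the fd stored in vbasedev->fd at line 900 comes from the group. A sketch of the kernel side of that one step (invented helper name; opening the group, container setup and the ram_block_discard check above are omitted):

    #include <fcntl.h>
    #include <linux/vfio.h>
    #include <sys/ioctl.h>

    /* name is the device's sysfs name, e.g. "0000:01:00.0".  The group must
     * already be attached to a container (VFIO_GROUP_SET_CONTAINER followed by
     * VFIO_SET_IOMMU) before this ioctl succeeds. */
    static int group_get_device_fd_sketch(const char *group_path, const char *name)
    {
        int group_fd = open(group_path, O_RDWR);    /* e.g. "/dev/vfio/26" */

        if (group_fd < 0) {
            return -1;
        }
        /* group_fd is intentionally left open: the group must outlive the
         * per-device fd returned here, which later serves region/IRQ ioctls. */
        return ioctl(group_fd, VFIO_GROUP_GET_DEVICE_FD, name);
    }
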
container-base.c
  in vfio_container_devices_dirty_tracking_is_started() (vbasedev: local):
    156  VFIODevice *vbasedev;
    158  QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
    159  if (!vbasedev->dirty_tracking) {
  in vfio_container_devices_dirty_tracking_is_supported() (vbasedev: local):
    177  VFIODevice *vbasedev;
    179  QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
    180  if (vbasedev->device_dirty_page_tracking == ON_OFF_AUTO_OFF) {
    183  if (!vbasedev->dirty_pages_supported) {
  in vfio_device_dma_logging_report() (vbasedev: argument):
    191  static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
    210  return vbasedev->io_ops->device_feature(vbasedev, feature);
  in vfio_container_devices_query_dirty_bitmap() (vbasedev: local):
    226  VFIODevice *vbasedev;
  [all …]

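vfio_device_dma_logging_report() at line 191 packs a VFIO_DEVICE_FEATURE request whose payload names one IOVA range and a user bitmap, then hands it to io_ops->device_feature(). A sketch of the equivalent direct ioctl, assuming the DMA-logging uapi in <linux/vfio.h> (Linux >= 6.1); the helper name is invented:

    #include <linux/vfio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static int dma_logging_report_sketch(int device_fd, uint64_t iova,
                                         uint64_t length, uint64_t page_size,
                                         uint64_t *bitmap)
    {
        size_t argsz = sizeof(struct vfio_device_feature) +
                       sizeof(struct vfio_device_feature_dma_logging_report);
        struct vfio_device_feature *feature = calloc(1, argsz);
        struct vfio_device_feature_dma_logging_report *report =
            (void *)feature->data;
        int ret;

        feature->argsz = argsz;
        feature->flags = VFIO_DEVICE_FEATURE_GET |
                         VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
        report->iova = iova;
        report->length = length;
        report->page_size = page_size;       /* one bit per page lands in *bitmap */
        report->bitmap = (uintptr_t)bitmap;  /* user buffer the kernel fills */

        ret = ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
        free(feature);
        return ret;
    }
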
listener.c
  in vfio_log_sync_needed() (vbasedev: local):
    57  VFIODevice *vbasedev;
    63  QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
    64  VFIOMigration *migration = vbasedev->migration;
    70  if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
    71  (vfio_device_state_is_running(vbasedev) ||
    72  vfio_device_state_is_precopy(vbasedev))) {
  in vfio_device_error_append() (vbasedev: argument):
    447  static void vfio_device_error_append(VFIODevice *vbasedev, Error **errp)
    453  if (vbasedev && vbasedev->type == VFIO_DEVICE_TYPE_PCI) {
    455  "on BARs are not supported.\n", vbasedev->name);
  in vfio_container_region_add() (vbasedev: local):
    613  VFIODevice *vbasedev =
  [all …]

ap.c
  in vfio_ap_realize() (vbasedev: local):
    236  VFIODevice *vbasedev = &vapdev->vdev;
    238  if (!vfio_device_get_name(vbasedev, errp)) {
    242  if (!vfio_device_attach(vbasedev->name, vbasedev,
    267  error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name);
    268  vfio_device_free_name(vbasedev);
  in vfio_ap_instance_init() (vbasedev: local):
    309  VFIODevice *vbasedev = &vapdev->vdev;
    317  vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_AP, &vfio_ap_ops,
    321  vbasedev->mdev = true;

pci-quirks.c
  in vfio_opt_rom_in_denylist():
    57  trace_vfio_quirk_rom_in_denylist(vdev->vbasedev.name,
  in vfio_generic_window_quirk_address_write():
    98  trace_vfio_quirk_generic_window_address_write(vdev->vbasedev.name,
  in vfio_generic_window_quirk_data_read():
    124  trace_vfio_quirk_generic_window_data_read(vdev->vbasedev.name,
  in vfio_generic_window_quirk_data_write():
    139  trace_vfio_quirk_generic_window_data_write(vdev->vbasedev.name,
  in vfio_generic_quirk_mirror_read():
    167  trace_vfio_quirk_generic_mirror_read(vdev->vbasedev.name,
  in vfio_generic_quirk_mirror_write():
    181  trace_vfio_quirk_generic_mirror_write(vdev->vbasedev.name,
  in vfio_ati_3c3_quirk_read():
    217  trace_vfio_quirk_ati_3c3_read(vdev->vbasedev.name, data);
  in vfio_ioeventfd_exit():
    260  if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd)) {
  in vfio_ioeventfd_init():
    347  ioeventfd->vfio = !ioctl(vdev->vbasedev.fd,
  in vfio_vga_probe_ati_3c3_quirk():
    387  trace_vfio_quirk_ati_3c3_probe(vdev->vbasedev.name);
  [all …]

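The quirk ioeventfd calls at lines 260 and 347 ask vfio-pci to complete a fixed register write in the kernel whenever an eventfd fires, so that hot guest writes never reach userspace (QEMU wires the same eventfd to KVM as an ioeventfd). A sketch of the registration, assuming the vfio_device_ioeventfd layout in <linux/vfio.h>; the helper name is invented, and tearing the mapping down again passes fd = -1 with the same offset/data:

    #include <linux/vfio.h>
    #include <stdint.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>

    static int ioeventfd_register_sketch(int device_fd, uint64_t region_offset,
                                         uint64_t data, int *out_eventfd)
    {
        struct vfio_device_ioeventfd vfio_ioeventfd = {
            .argsz = sizeof(vfio_ioeventfd),
            .flags = VFIO_DEVICE_IOEVENTFD_32,  /* width of the matched write */
            .offset = region_offset,            /* device-fd offset of the register */
            .data = data,                       /* value the kernel writes on signal */
            .fd = eventfd(0, EFD_CLOEXEC),
        };

        if (vfio_ioeventfd.fd < 0 ||
            ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd)) {
            return -1;
        }
        *out_eventfd = vfio_ioeventfd.fd;
        return 0;
    }
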
igd.c
  in vfio_pci_igd_opregion_init():
    160  ret = pread(vdev->vbasedev.fd, vdev->igd_opregion,
    185  trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name);
  in vfio_pci_igd_opregion_detect():
    195  ret = vfio_device_get_region_info_type(&vdev->vbasedev,
  in vfio_pci_igd_copy():
    246  ret = pread(vdev->vbasedev.fd, pdev->config + list[i].offset,
  in vfio_pci_igd_host_init():
    278  trace_vfio_pci_igd_host_bridge_enabled(vdev->vbasedev.name);
  in type_init():
    343  trace_vfio_pci_igd_lpc_bridge_enabled(vdev->vbasedev.name);
  in vfio_pci_igd_setup_lpc_bridge():
    382  ret = vfio_device_get_region_info_type(&vdev->vbasedev,
    390  ret = vfio_device_get_region_info_type(&vdev->vbasedev,
  in vfio_pci_igd_config_quirk():
    566  ret = vfio_device_get_region_info(&vdev->vbasedev,
    661  trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, (gms_size / MiB));
  [all …]

display.c
  in vfio_display_edid_link_up():
    44  int fd = vdev->vbasedev.fd;
  in vfio_display_edid_update():
    61  int fd = vdev->vbasedev.fd;
  in vfio_display_edid_init():
    129  int fd = vdev->vbasedev.fd;
    132  ret = vfio_device_get_region_info_type(&vdev->vbasedev,
  in vfio_display_get_dmabuf():
    222  ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
    242  fd = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);
  in vfio_display_region_update():
    413  ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
    449  ret = vfio_region_setup(OBJECT(vdev), &vdev->vbasedev,
  in vfio_display_probe():
    525  ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &probe);
    533  ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &probe);

/openbmc/qemu/include/hw/vfio/
vfio-device.h
    143  void vfio_device_irq_disable(VFIODevice *vbasedev, int index);
    144  void vfio_device_irq_unmask(VFIODevice *vbasedev, int index);
    145  void vfio_device_irq_mask(VFIODevice *vbasedev, int index);
    146  bool vfio_device_irq_set_signaling(VFIODevice *vbasedev, int index, int subindex,
    150  bool vfio_device_is_mdev(VFIODevice *vbasedev);
    151  bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev,
    153  bool vfio_device_attach(char *name, VFIODevice *vbasedev,
    156  VFIODevice *vbasedev, AddressSpace *as,
    158  void vfio_device_detach(VFIODevice *vbasedev);
    255  void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer,
  [all …]

vfio-cpr.h
    58  void vfio_iommufd_cpr_register_device(struct VFIODevice *vbasedev);
    59  void vfio_iommufd_cpr_unregister_device(struct VFIODevice *vbasedev);
    60  void vfio_cpr_load_device(struct VFIODevice *vbasedev);

vfio-region.h
    24  struct VFIODevice *vbasedev;    (member)
    40  int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,

/openbmc/qemu/hw/vfio-user/
pci.c
  in vfio_user_dma_read():
    89  VFIOUserProxy *proxy = vdev->vbasedev.proxy;
    130  vfio_user_send_error(vdev->vbasedev.proxy, &res->hdr, EINVAL);
  in vfio_user_dma_write():
    137  VFIOUserProxy *proxy = vdev->vbasedev.proxy;
    171  vfio_user_send_error(vdev->vbasedev.proxy, &msg->hdr, EINVAL);
  in vfio_user_pci_process_req():
    187  vfio_user_send_error(vdev->vbasedev.proxy, hdr, EINVAL);
    202  vfio_user_send_error(vdev->vbasedev.proxy, hdr, ENOSYS);
  in vfio_user_compute_needs_reset() (vbasedev: argument):
    209  static void vfio_user_compute_needs_reset(VFIODevice *vbasedev)
    211  vbasedev->needs_reset = false;
  in vfio_user_pci_get_object() (vbasedev: argument):
    214  static Object *vfio_user_pci_get_object(VFIODevice *vbasedev)
    216  VFIOUserPCIDevice *vdev = container_of(vbasedev, VFIOUserPCIDevice,
  [all …]

container.c
  in vfio_user_create_container() (vbasedev: argument):
    191  static VFIOUserContainer *vfio_user_create_container(VFIODevice *vbasedev,
    197  container->proxy = vbasedev->proxy;
  in vfio_user_container_connect() (vbasedev: argument):
    205  vfio_user_container_connect(AddressSpace *as, VFIODevice *vbasedev,
    216  container = vfio_user_create_container(vbasedev, errp);
  in vfio_user_device_get() (vbasedev: argument):
    283  VFIODevice *vbasedev, Error **errp)
    288  if (!vfio_user_get_device_info(vbasedev->proxy, &info, errp)) {
    292  vbasedev->fd = -1;
    294  vfio_device_prepare(vbasedev, &container->bcontainer, &info);
  in vfio_user_device_attach() (vbasedev: argument):
    302  static bool vfio_user_device_attach(const char *name, VFIODevice *vbasedev,
    307  container = vfio_user_container_connect(as, vbasedev, errp);
  [all …]

device.c
  in vfio_user_device_io_get_region_info() (vbasedev: argument):
    130  static int vfio_user_device_io_get_region_info(VFIODevice *vbasedev,
    137  if (info->index > vbasedev->num_initial_regions) {
    141  ret = vfio_user_get_region_info(vbasedev->proxy, info, &fds);
  in vfio_user_device_io_get_irq_info() (vbasedev: argument):
    155  static int vfio_user_device_io_get_irq_info(VFIODevice *vbasedev,
    158  VFIOUserProxy *proxy = vbasedev->proxy;
  in vfio_user_device_io_set_irqs() (vbasedev: argument):
    200  static int vfio_user_device_io_set_irqs(VFIODevice *vbasedev,
    203  VFIOUserProxy *proxy = vbasedev->proxy;
  in vfio_user_device_io_region_read() (vbasedev: argument):
    291  static int vfio_user_device_io_region_read(VFIODevice *vbasedev, uint8_t index,
    296  VFIOUserProxy *proxy = vbasedev->proxy;
  in vfio_user_device_io_region_write() (vbasedev: argument):
    334  static int vfio_user_device_io_region_write(VFIODevice *vbasedev, uint8_t index,
  [all …]

/openbmc/qemu/hw/core/
sysbus-fdt.c
  in add_calxeda_midway_xgmac_fdt_node() (vbasedev: local):
    226  VFIODevice *vbasedev = &vdev->vbasedev;
    230  vbasedev->name, mmio_base);
    239  reg_attr = g_new(uint32_t, vbasedev->num_initial_regions * 2);
    240  for (i = 0; i < vbasedev->num_initial_regions; i++) {
    247  vbasedev->num_initial_regions * 2 * sizeof(uint32_t));
    249  irq_attr = g_new(uint32_t, vbasedev->num_irqs * 3);
    250  for (i = 0; i < vbasedev->num_irqs; i++) {
    258  irq_attr, vbasedev->num_irqs * 3 * sizeof(uint32_t));
  in add_amd_xgbe_fdt_node() (vbasedev: local):
    297  VFIODevice *vbasedev = &vdev->vbasedev;
    311  dt_name = sysfs_to_dt_name(vbasedev->name);
  [all …]

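The elided loop bodies above fill the node's "reg" (and "interrupts") properties from the device's regions and IRQs: two 32-bit cells per region, matching reg_attr = g_new(uint32_t, num_initial_regions * 2). A sketch of the "reg" half under a #address-cells = #size-cells = 1 assumption; the cell packing is generic device-tree convention, the helper and its inputs are invented, and the real code takes bases/sizes from the VFIO region info before calling qemu_fdt_setprop():

    #include <arpa/inet.h>   /* htonl() stands in for cpu_to_be32() */
    #include <stdint.h>
    #include <stdlib.h>

    /* Pack (base, size) pairs as big-endian cells, two cells per region. */
    static uint32_t *build_reg_cells_sketch(unsigned int nr_regions,
                                            const uint32_t *bases,
                                            const uint32_t *sizes)
    {
        uint32_t *cells = calloc(nr_regions * 2, sizeof(uint32_t));

        for (unsigned int i = 0; i < nr_regions; i++) {
            cells[2 * i]     = htonl(bases[i]);   /* address cell */
            cells[2 * i + 1] = htonl(sizes[i]);   /* size cell */
        }
        return cells;   /* then: qemu_fdt_setprop(fdt, node, "reg", cells, nbytes) */
    }
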
/openbmc/qemu/hw/s390x/
s390-pci-vfio.c
  in s390_pci_start_dma_count():
    70  if (!vpdev->vbasedev.group) {
    74  id = vpdev->vbasedev.group->container->fd;
  in s390_pci_read_base():
    118  trace_s390_pci_clp_cap(vpci->vbasedev.name,
  in get_host_fh():
    171  trace_s390_pci_clp_cap(vpci->vbasedev.name,
  in s390_pci_read_group():
    198  trace_s390_pci_clp_cap(vpci->vbasedev.name,
    221  trace_s390_pci_clp_cap(vpci->vbasedev.name,
  in s390_pci_read_util():
    273  trace_s390_pci_clp_cap(vpci->vbasedev.name,
    280  trace_s390_pci_clp_cap_size(vpci->vbasedev.name, cap->size,
  in s390_pci_read_pfip():
    300  trace_s390_pci_clp_cap(vpci->vbasedev.name,
    307  trace_s390_pci_clp_cap_size(vpci->vbasedev.name, cap->size,
  [all …]