Lines matching "enable", "trace", "backends" in net/vhost-vdpa.c
2 * vhost-vdpa.c
4 * Copyright(c) 2017-2018 Intel Corporation.
8 * See the COPYING file in the top-level directory.
14 #include "hw/virtio/virtio-net.h"
16 #include "net/vhost-vdpa.h"
17 #include "hw/virtio/vhost-vdpa.h"
18 #include "qemu/config-file.h"
19 #include "qemu/error-report.h"
27 #include "standard-headers/linux/virtio_net.h"
31 #include "trace.h"
138 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_get_vhost_net()
139 return s->vhost_net; in vhost_vdpa_get_vhost_net()
164 VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START); in vhost_vdpa_net_valid_svq_features()
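The match above is only the tail of the transport-feature mask. A sketch of the full validity check, reconstructed from the surrounding file (the vdpa_svq_device_features list lives in the same file; exact wording may differ between QEMU versions):

    static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
    {
        uint64_t invalid_dev_features =
            features & ~vdpa_svq_device_features &
            /* Transport features are all accepted at this point */
            ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                             VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

        if (invalid_dev_features) {
            error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                       invalid_dev_features);
            return false;
        }

        /* SVQ itself must also support the negotiated feature set */
        return vhost_svq_valid_features(features, errp);
    }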
181 hdev = (struct vhost_dev *)&net->dev; in vhost_vdpa_net_check_device_id()
182 ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id); in vhost_vdpa_net_check_device_id()
184 return -ENOTSUP; in vhost_vdpa_net_check_device_id()
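For context, the device-id check these three matches come from, in sketch form (reconstructed; the key point is that anything other than VIRTIO_ID_NET is rejected):

    static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
    {
        uint32_t device_id;
        int ret;
        struct vhost_dev *hdev;

        hdev = (struct vhost_dev *)&net->dev;
        ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
        if (device_id != VIRTIO_ID_NET) {
            return -ENOTSUP;   /* only network vDPA devices are accepted */
        }
        return ret;
    }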
198 assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_add()
215 s->vhost_net = net; in vhost_vdpa_add()
225 return -1; in vhost_vdpa_add()
232 munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len()); in vhost_vdpa_cleanup()
233 munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len()); in vhost_vdpa_cleanup()
234 if (s->vhost_net) { in vhost_vdpa_cleanup()
235 vhost_net_cleanup(s->vhost_net); in vhost_vdpa_cleanup()
236 g_free(s->vhost_net); in vhost_vdpa_cleanup()
237 s->vhost_net = NULL; in vhost_vdpa_cleanup()
239 if (s->vhost_vdpa.index != 0) { in vhost_vdpa_cleanup()
242 qemu_close(s->vhost_vdpa.shared->device_fd); in vhost_vdpa_cleanup()
243 g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, vhost_iova_tree_delete); in vhost_vdpa_cleanup()
244 g_free(s->vhost_vdpa.shared); in vhost_vdpa_cleanup()
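Both munmap() calls above size the CVQ buffers with vhost_vdpa_net_cvq_cmd_page_len(), which does not appear among the matches. In the same file it rounds the largest possible CVQ command (a MAC_TABLE_SET with a full table) up to the host page size, roughly:

    static size_t vhost_vdpa_net_cvq_cmd_len(void)
    {
        /*
         * MAC_TABLE_SET is the ctrl command that produces the longest out
         * buffer: cmd header + 2 MAC table headers (uni + multi) + entries.
         */
        return sizeof(struct virtio_net_ctrl_hdr) +
               2 * sizeof(struct virtio_net_ctrl_mac) +
               MAC_TABLE_ENTRIES * ETH_ALEN;
    }

    static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
    {
        return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
    }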
249 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_has_vnet_hdr()
257 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_get_vnet_hash_supported_types()
259 uint64_t features = s->vhost_vdpa.dev->features; in vhost_vdpa_get_vnet_hash_supported_types()
260 int fd = s->vhost_vdpa.shared->device_fd; in vhost_vdpa_get_vnet_hash_supported_types()
282 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_has_ufo()
286 features = vhost_net_get_features(s->vhost_net, features); in vhost_vdpa_has_ufo()
295 * LE". Otherwise, on a BE machine, higher-level code would mistakely think
298 static int vhost_vdpa_set_vnet_le(NetClientState *nc, bool enable) in vhost_vdpa_set_vnet_le() argument
308 if (!g_str_has_prefix(driver, "virtio-net-")) { in vhost_vdpa_check_peer_type()
309 error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*"); in vhost_vdpa_check_peer_type()
324 /** From any vdpa net client, get the netclient of the i-th queue pair */
327 NICState *nic = qemu_get_nic(s->nc.peer); in vhost_vdpa_net_get_nc_vdpa()
328 NetClientState *nc_i = qemu_get_peer(nic->ncs, i); in vhost_vdpa_net_get_nc_vdpa()
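The helper is only two statements plus an upcast; reconstructed in full:

    static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
    {
        NICState *nic = qemu_get_nic(s->nc.peer);
        NetClientState *nc_i = qemu_get_peer(nic->ncs, i);

        return DO_UPCAST(VhostVDPAState, nc, nc_i);
    }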
338 static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable) in vhost_vdpa_net_log_global_enable() argument
340 struct vhost_vdpa *v = &s->vhost_vdpa; in vhost_vdpa_net_log_global_enable()
345 /* We are only called on the first data vq and only if x-svq is not set */ in vhost_vdpa_net_log_global_enable()
346 if (s->vhost_vdpa.shadow_vqs_enabled == enable) { in vhost_vdpa_net_log_global_enable()
350 vdev = v->dev->vdev; in vhost_vdpa_net_log_global_enable()
352 if (!n->vhost_started) { in vhost_vdpa_net_log_global_enable()
356 data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; in vhost_vdpa_net_log_global_enable()
358 n->max_ncs - n->max_queue_pairs : 0; in vhost_vdpa_net_log_global_enable()
359 v->shared->svq_switching = enable ? in vhost_vdpa_net_log_global_enable()
363 * in the future and resume the device if read-only operations between in vhost_vdpa_net_log_global_enable()
366 vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq); in vhost_vdpa_net_log_global_enable()
369 r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq); in vhost_vdpa_net_log_global_enable()
371 error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r); in vhost_vdpa_net_log_global_enable()
373 v->shared->svq_switching = SVQ_TSTATE_DONE; in vhost_vdpa_net_log_global_enable()
381 if (e->type == MIG_EVENT_PRECOPY_SETUP) { in vdpa_net_migration_state_notifier()
383 } else if (e->type == MIG_EVENT_PRECOPY_FAILED) { in vdpa_net_migration_state_notifier()
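These two branches belong to the migration notifier registered just below; a sketch of the whole callback (signature per the NotifierWithReturn-based migration API, which may vary by QEMU version). Shadow virtqueues are switched on when precopy setup starts and back off if migration fails:

    static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                                 MigrationEvent *e, Error **errp)
    {
        VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                         migration_state);

        if (e->type == MIG_EVENT_PRECOPY_SETUP) {
            vhost_vdpa_net_log_global_enable(s, true);   /* switch to SVQ */
        } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
            vhost_vdpa_net_log_global_enable(s, false);  /* switch back */
        }

        return 0;
    }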
391 migration_add_notifier(&s->migration_state, in vhost_vdpa_net_data_start_first()
398 struct vhost_vdpa *v = &s->vhost_vdpa; in vhost_vdpa_net_data_start()
400 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_data_start()
402 if (s->always_svq || migration_is_running()) { in vhost_vdpa_net_data_start()
403 v->shadow_vqs_enabled = true; in vhost_vdpa_net_data_start()
405 v->shadow_vqs_enabled = false; in vhost_vdpa_net_data_start()
408 if (v->index == 0) { in vhost_vdpa_net_data_start()
409 v->shared->shadow_data = v->shadow_vqs_enabled; in vhost_vdpa_net_data_start()
420 struct vhost_vdpa *v = &s->vhost_vdpa; in vhost_vdpa_net_data_load()
421 bool has_cvq = v->dev->vq_index_end % 2; in vhost_vdpa_net_data_load()
427 for (int i = 0; i < v->dev->nvqs; ++i) { in vhost_vdpa_net_data_load()
428 int ret = vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index); in vhost_vdpa_net_data_load()
440 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_client_stop()
442 if (s->vhost_vdpa.index == 0) { in vhost_vdpa_net_client_stop()
443 migration_remove_notifier(&s->migration_state); in vhost_vdpa_net_client_stop()
472 r = -errno; in vhost_vdpa_get_vring_group()
492 r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid); in vhost_vdpa_set_address_space_id()
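The two ioctl-based helpers these single-line matches come from pair up: one reads which group a vq belongs to, the other binds a group to an address space id. Reconstructed sketches (error strings approximate):

    static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                              Error **errp)
    {
        struct vhost_vring_state state = {
            .index = vq_index,
        };
        int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

        if (unlikely(r < 0)) {
            r = -errno;
            error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
            return r;
        }

        return state.num;
    }

    static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                               unsigned vq_group,
                                               unsigned asid_num)
    {
        struct vhost_vring_state asid = {
            .index = vq_group,
            .num = asid_num,
        };
        int r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);

        if (unlikely(r < 0)) {
            error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                         asid.index, asid.num, errno, g_strerror(errno));
        }

        return r;
    }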
502 VhostIOVATree *tree = v->shared->iova_tree; in vhost_vdpa_cvq_unmap_buf()
518 r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova, in vhost_vdpa_cvq_unmap_buf()
519 map->size + 1); in vhost_vdpa_cvq_unmap_buf()
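The unmap path looks up the host address in the IOVA tree, unmaps the inclusive-sized range (hence map->size + 1), and drops the tree entry; in sketch form:

    static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
    {
        VhostIOVATree *tree = v->shared->iova_tree;
        DMAMap needle = {
            /* .size can stay zero: the lookup keys on the translated addr */
            .translated_addr = (hwaddr)(uintptr_t)addr,
        };
        const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
        int r;

        if (unlikely(!map)) {
            error_report("Cannot locate expected map");
            return;
        }

        r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                                 map->size + 1);
        if (unlikely(r != 0)) {
            error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
        }

        vhost_iova_tree_remove(tree, *map);
    }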
535 map.size = size - 1; in vhost_vdpa_cvq_map_buf()
537 r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr); in vhost_vdpa_cvq_map_buf()
542 error_report("Insertion to IOVA->HVA tree failed"); in vhost_vdpa_cvq_map_buf()
543 /* Remove the mapping from the IOVA-only tree */ in vhost_vdpa_cvq_map_buf()
549 r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova, in vhost_vdpa_cvq_map_buf()
558 vhost_iova_tree_remove(v->shared->iova_tree, map); in vhost_vdpa_cvq_map_buf()
570 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_cvq_start()
573 v = &s->vhost_vdpa; in vhost_vdpa_net_cvq_start()
576 v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled; in vhost_vdpa_net_cvq_start()
577 s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID; in vhost_vdpa_net_cvq_start()
579 if (v->shared->shadow_data) { in vhost_vdpa_net_cvq_start()
586 * will be blocked as long as vhost-vdpa backends do not offer _F_LOG. in vhost_vdpa_net_cvq_start()
588 if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) { in vhost_vdpa_net_cvq_start()
592 if (!s->cvq_isolated) { in vhost_vdpa_net_cvq_start()
596 cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd, in vhost_vdpa_net_cvq_start()
597 v->dev->vq_index_end - 1, in vhost_vdpa_net_cvq_start()
609 v->shadow_vqs_enabled = true; in vhost_vdpa_net_cvq_start()
610 s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID; in vhost_vdpa_net_cvq_start()
613 if (!s->vhost_vdpa.shadow_vqs_enabled) { in vhost_vdpa_net_cvq_start()
617 r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer, in vhost_vdpa_net_cvq_start()
623 r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status, in vhost_vdpa_net_cvq_start()
626 vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); in vhost_vdpa_net_cvq_start()
636 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_cvq_stop()
638 if (s->vhost_vdpa.shadow_vqs_enabled) { in vhost_vdpa_net_cvq_stop()
639 vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); in vhost_vdpa_net_cvq_stop()
640 vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status); in vhost_vdpa_net_cvq_stop()
650 VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); in vhost_vdpa_net_cvq_add()
655 if (unlikely(r == -ENOSPC)) { in vhost_vdpa_net_cvq_add()
672 VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); in vhost_vdpa_net_svq_poll()
681 out_cursor->iov_base = s->cvq_cmd_out_buffer; in vhost_vdpa_net_load_cursor_reset()
682 out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); in vhost_vdpa_net_load_cursor_reset()
685 in_cursor->iov_base = s->status; in vhost_vdpa_net_load_cursor_reset()
686 in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); in vhost_vdpa_net_load_cursor_reset()
699 /* device uses a one-byte length ack for each control command */ in vhost_vdpa_net_svq_flush()
702 return -EIO; in vhost_vdpa_net_svq_flush()
707 if (s->status[i] != VIRTIO_NET_OK) { in vhost_vdpa_net_svq_flush()
708 return -EIO; in vhost_vdpa_net_svq_flush()
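Pieced together, the flush helper polls the shadow vq for the acks of all in-flight commands and then checks every status byte; a sketch (the poll helper's exact return convention may differ across versions):

    static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s,
                                            size_t cmds_in_flight)
    {
        /* device uses a one-byte length ack for each control command */
        ssize_t dev_written = vhost_vdpa_net_svq_poll(s, cmds_in_flight);
        if (unlikely(dev_written != (ssize_t)cmds_in_flight)) {
            return -EIO;
        }

        /* s->status holds one virtio_net_ctrl_ack per flushed command */
        for (size_t i = 0; i < cmds_in_flight; ++i) {
            if (s->status[i] != VIRTIO_NET_OK) {
                return -EIO;
            }
        }

        return 0;
    }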
728 VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); in vhost_vdpa_net_load_cmd()
730 assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl)); in vhost_vdpa_net_load_cmd()
742 r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base - in vhost_vdpa_net_load_cmd()
743 (void *)s->status); in vhost_vdpa_net_load_cmd()
753 /* pack the CVQ command's command-specific-data */ in vhost_vdpa_net_load_cmd()
755 out_cursor->iov_base + sizeof(ctrl), data_size); in vhost_vdpa_net_load_cmd()
760 iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status)); in vhost_vdpa_net_load_cmd()
772 iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status)); in vhost_vdpa_net_load_cmd()
781 if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in vhost_vdpa_net_load_mac()
783 .iov_base = (void *)n->mac, in vhost_vdpa_net_load_mac()
784 .iov_len = sizeof(n->mac), in vhost_vdpa_net_load_mac()
806 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) || in vhost_vdpa_net_load_mac()
807 n->mac_table.in_use == 0) { in vhost_vdpa_net_load_mac()
811 uint32_t uni_entries = n->mac_table.first_multi, in vhost_vdpa_net_load_mac()
813 mul_entries = n->mac_table.in_use - uni_entries, in vhost_vdpa_net_load_mac()
826 .iov_base = n->mac_table.macs, in vhost_vdpa_net_load_mac()
832 .iov_base = &n->mac_table.macs[uni_macs_size], in vhost_vdpa_net_load_mac()
866 if (!n->rss_data.enabled || in vhost_vdpa_net_load_rss()
867 n->rss_data.runtime_hash_types == VIRTIO_NET_HASH_REPORT_NONE) { in vhost_vdpa_net_load_rss()
871 table = g_malloc_n(n->rss_data.indirections_len, in vhost_vdpa_net_load_rss()
872 sizeof(n->rss_data.indirections_table[0])); in vhost_vdpa_net_load_rss()
873 cfg.hash_types = cpu_to_le32(n->rss_data.runtime_hash_types); in vhost_vdpa_net_load_rss()
880 cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len - in vhost_vdpa_net_load_rss()
882 cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue); in vhost_vdpa_net_load_rss()
883 for (int i = 0; i < n->rss_data.indirections_len; ++i) { in vhost_vdpa_net_load_rss()
884 table[i] = cpu_to_le16(n->rss_data.indirections_table[i]); in vhost_vdpa_net_load_rss()
886 cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs); in vhost_vdpa_net_load_rss()
908 * into n->rss_data and uses the maximum key length in other code, so in vhost_vdpa_net_load_rss()
911 cfg.hash_key_length = sizeof(n->rss_data.key); in vhost_vdpa_net_load_rss()
920 .iov_len = n->rss_data.indirections_len * in vhost_vdpa_net_load_rss()
921 sizeof(n->rss_data.indirections_table[0]), in vhost_vdpa_net_load_rss()
924 .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) - in vhost_vdpa_net_load_rss()
927 .iov_base = (void *)n->rss_data.key, in vhost_vdpa_net_load_rss()
928 .iov_len = sizeof(n->rss_data.key), in vhost_vdpa_net_load_rss()
952 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) { in vhost_vdpa_net_load_mq()
956 trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs); in vhost_vdpa_net_load_mq()
958 mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs); in vhost_vdpa_net_load_mq()
971 if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) { in vhost_vdpa_net_load_mq()
972 /* load the receive-side scaling state */ in vhost_vdpa_net_load_mq()
977 } else if (virtio_vdev_has_feature(&n->parent_obj, in vhost_vdpa_net_load_mq()
997 if (!virtio_vdev_has_feature(&n->parent_obj, in vhost_vdpa_net_load_offloads()
1002 if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) { in vhost_vdpa_net_load_offloads()
1018 offloads = cpu_to_le64(n->curr_guest_offloads); in vhost_vdpa_net_load_offloads()
1062 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) { in vhost_vdpa_net_load_rx()
1073 * QEMU marks `n->mac_table.uni_overflow` if guest sets too many in vhost_vdpa_net_load_rx()
1074 * non-multicast MAC addresses, indicating that promiscuous mode in vhost_vdpa_net_load_rx()
1078 * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off, in vhost_vdpa_net_load_rx()
1084 if (!n->mac_table.uni_overflow && !n->promisc) { in vhost_vdpa_net_load_rx()
1093 * According to virtio_net_reset(), device turns all-multicast mode in vhost_vdpa_net_load_rx()
1099 * `n->mac_table.multi_overflow` if guest sets too many in vhost_vdpa_net_load_rx()
1100 multicast MAC addresses. in vhost_vdpa_net_load_rx()
1103 * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on, in vhost_vdpa_net_load_rx()
1104 * which sets all-multicast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1109 if (n->mac_table.multi_overflow || n->allmulti) { in vhost_vdpa_net_load_rx()
1117 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) { in vhost_vdpa_net_load_rx()
1122 * According to virtio_net_reset(), device turns all-unicast mode in vhost_vdpa_net_load_rx()
1126 * sets all-unicast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1131 if (n->alluni) { in vhost_vdpa_net_load_rx()
1140 * According to virtio_net_reset(), device turns non-multicast mode in vhost_vdpa_net_load_rx()
1144 * sets non-multicast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1149 if (n->nomulti) { in vhost_vdpa_net_load_rx()
1158 * According to virtio_net_reset(), device turns non-unicast mode in vhost_vdpa_net_load_rx()
1162 * sets non-unicast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1167 if (n->nouni) { in vhost_vdpa_net_load_rx()
1176 * According to virtio_net_reset(), device turns non-broadcast mode in vhost_vdpa_net_load_rx()
1180 * sets non-broadcast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1185 if (n->nobcast) { in vhost_vdpa_net_load_rx()
1224 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) { in vhost_vdpa_net_load_vlan()
1229 for (int j = 0; n->vlans[i] && j <= 0x1f; j++) { in vhost_vdpa_net_load_vlan()
1230 if (n->vlans[i] & (1U << j)) { in vhost_vdpa_net_load_vlan()
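The VLAN loader walks the n->vlans bitmap (MAX_VLAN bits stored as 32-bit words, hence the >> 5 and j <= 0x1f) and replays one CVQ command per set bit. Reconstructed around the matched lines (vhost_vdpa_net_load_single_vlan is the per-vid helper in the same file):

    static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
    {
        int r;

        if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
            return 0;
        }

        for (int i = 0; i < MAX_VLAN >> 5; i++) {
            for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
                if (n->vlans[i] & (1U << j)) {
                    r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                        in_cursor,
                                                        (i << 5) + j);
                    if (unlikely(r != 0)) {
                        return r;
                    }
                }
            }
        }

        return 0;
    }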
1246 struct vhost_vdpa *v = &s->vhost_vdpa; in vhost_vdpa_net_cvq_load()
1251 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_cvq_load()
1253 r = vhost_vdpa_set_vring_ready(v, v->dev->vq_index); in vhost_vdpa_net_cvq_load()
1258 if (v->shadow_vqs_enabled) { in vhost_vdpa_net_cvq_load()
1259 n = VIRTIO_NET(v->dev->vdev); in vhost_vdpa_net_cvq_load()
1288 r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status); in vhost_vdpa_net_cvq_load()
1294 for (int i = 0; i < v->dev->vq_index; ++i) { in vhost_vdpa_net_cvq_load()
1325 * command to enable promiscuous mode to receive all packets,
1331 * marks `n->mac_table.x_overflow` accordingly, it should have
1333 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
1338 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
1340 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
1355 /* parse the non-multicast MAC address entries from CVQ command */ in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1357 r = iov_to_buf(elem->out_sg, elem->out_num, cursor, in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1364 *s->status = VIRTIO_NET_ERR; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1365 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1370 r = iov_to_buf(elem->out_sg, elem->out_num, cursor, in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1377 *s->status = VIRTIO_NET_ERR; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1378 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1383 if (iov_size(elem->out_sg, elem->out_num) != cursor) { in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1388 *s->status = VIRTIO_NET_ERR; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1389 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1399 * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1401 hdr_ptr = out->iov_base; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1402 out->iov_len = sizeof(*hdr_ptr) + sizeof(on); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1404 hdr_ptr->class = VIRTIO_NET_CTRL_RX; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1405 hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1417 if (unlikely(r < sizeof(*s->status))) { in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1420 if (*s->status != VIRTIO_NET_OK) { in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1421 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1427 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1430 * By doing so, the device model can mark `n->mac_table.uni_overflow` in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1431 * and `n->mac_table.multi_overflow`, enabling all packets to be in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1442 out->iov_len = fake_cvq_size; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1445 hdr_ptr = out->iov_base + cursor; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1446 hdr_ptr->class = VIRTIO_NET_CTRL_MAC; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1447 hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1451 * Pack the non-multicast MAC addresses part for fake CVQ command. in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1457 mac_ptr = out->iov_base + cursor; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1458 mac_ptr->entries = cpu_to_le32(fake_uni_entries); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1468 mac_ptr = out->iov_base + cursor; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1469 mac_ptr->entries = cpu_to_le32(fake_mul_entries); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1475 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1494 .iov_base = s->cvq_cmd_out_buffer, in vhost_vdpa_net_handle_ctrl_avail()
1503 .iov_base = s->status, in vhost_vdpa_net_handle_ctrl_avail()
1504 .iov_len = sizeof(*s->status), in vhost_vdpa_net_handle_ctrl_avail()
1506 ssize_t dev_written = -EINVAL; in vhost_vdpa_net_handle_ctrl_avail()
1508 out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0, in vhost_vdpa_net_handle_ctrl_avail()
1509 s->cvq_cmd_out_buffer, in vhost_vdpa_net_handle_ctrl_avail()
1512 ctrl = s->cvq_cmd_out_buffer; in vhost_vdpa_net_handle_ctrl_avail()
1513 if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) { in vhost_vdpa_net_handle_ctrl_avail()
1519 *s->status = VIRTIO_NET_OK; in vhost_vdpa_net_handle_ctrl_avail()
1520 } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC && in vhost_vdpa_net_handle_ctrl_avail()
1521 ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET && in vhost_vdpa_net_handle_ctrl_avail()
1522 iov_size(elem->out_sg, elem->out_num) > out.iov_len)) { in vhost_vdpa_net_handle_ctrl_avail()
1558 if (*s->status != VIRTIO_NET_OK) { in vhost_vdpa_net_handle_ctrl_avail()
1563 virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1); in vhost_vdpa_net_handle_ctrl_avail()
1569 in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, in vhost_vdpa_net_handle_ctrl_avail()
1578 * by a non-negative value of `dev_written`. Otherwise, it still in vhost_vdpa_net_handle_ctrl_avail()
1623 error_setg_errno(errp, -r, "Cannot set device status"); in vhost_vdpa_probe_cvq_isolation()
1629 error_setg_errno(errp, -r, "Cannot set features"); in vhost_vdpa_probe_cvq_isolation()
1636 error_setg_errno(errp, -r, "Cannot set device status"); in vhost_vdpa_probe_cvq_isolation()
1642 if (cvq_group != -ENOTSUP) { in vhost_vdpa_probe_cvq_isolation()
1715 s->vhost_vdpa.index = queue_pair_index; in net_vhost_vdpa_init()
1716 s->always_svq = svq; in net_vhost_vdpa_init()
1717 s->migration_state.notify = NULL; in net_vhost_vdpa_init()
1718 s->vhost_vdpa.shadow_vqs_enabled = svq; in net_vhost_vdpa_init()
1721 &s->vhost_vdpa.migration_blocker); in net_vhost_vdpa_init()
1722 s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1); in net_vhost_vdpa_init()
1723 s->vhost_vdpa.shared->device_fd = vdpa_device_fd; in net_vhost_vdpa_init()
1724 s->vhost_vdpa.shared->iova_range = iova_range; in net_vhost_vdpa_init()
1725 s->vhost_vdpa.shared->shadow_data = svq; in net_vhost_vdpa_init()
1726 s->vhost_vdpa.shared->iova_tree = vhost_iova_tree_new(iova_range.first, in net_vhost_vdpa_init()
1729 s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), in net_vhost_vdpa_init()
1731 MAP_SHARED | MAP_ANONYMOUS, -1, 0); in net_vhost_vdpa_init()
1732 s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), in net_vhost_vdpa_init()
1734 -1, 0); in net_vhost_vdpa_init()
1736 s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; in net_vhost_vdpa_init()
1737 s->vhost_vdpa.shadow_vq_ops_opaque = s; in net_vhost_vdpa_init()
1738 s->cvq_isolated = cvq_isolated; in net_vhost_vdpa_init()
1741 s->vhost_vdpa.shared = shared; in net_vhost_vdpa_init()
1744 ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); in net_vhost_vdpa_init()
1758 "Fail to query features from vhost-vDPA device"); in vhost_vdpa_get_features()
1779 config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs); in vhost_vdpa_get_max_queue_pairs()
1780 config->len = sizeof(*max_queue_pairs); in vhost_vdpa_get_max_queue_pairs()
1784 error_setg(errp, "Fail to get config from vhost-vDPA device"); in vhost_vdpa_get_max_queue_pairs()
1785 return -ret; in vhost_vdpa_get_max_queue_pairs()
1788 max_queue_pairs = (__virtio16 *)&config->buf; in vhost_vdpa_get_max_queue_pairs()
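Assembled from the matches, the probe reads max_virtqueue_pairs out of the device config space when VIRTIO_NET_F_MQ is offered, else assumes a single queue pair; a sketch:

    static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                              int *has_cvq, Error **errp)
    {
        unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
        g_autofree struct vhost_vdpa_config *config = NULL;
        __virtio16 *max_queue_pairs;
        int ret;

        *has_cvq = !!(features & (1ULL << VIRTIO_NET_F_CTRL_VQ));

        if (features & (1ULL << VIRTIO_NET_F_MQ)) {
            config = g_malloc0(config_size + sizeof(*max_queue_pairs));
            config->off = offsetof(struct virtio_net_config,
                                   max_virtqueue_pairs);
            config->len = sizeof(*max_queue_pairs);

            ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
            if (ret) {
                error_setg(errp, "Failed to get config from vhost-vDPA device");
                return -ret;
            }

            max_queue_pairs = (__virtio16 *)&config->buf;
            return lduw_le_p(max_queue_pairs);
        }

        return 1;   /* no MQ: a single queue pair */
    }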
1808 assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); in net_init_vhost_vdpa()
1809 opts = &netdev->u.vhost_vdpa; in net_init_vhost_vdpa()
1810 if (!opts->vhostdev && !opts->vhostfd) { in net_init_vhost_vdpa()
1812 "vhost-vdpa: neither vhostdev= nor vhostfd= was specified"); in net_init_vhost_vdpa()
1813 return -1; in net_init_vhost_vdpa()
1816 if (opts->vhostdev && opts->vhostfd) { in net_init_vhost_vdpa()
1818 "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive"); in net_init_vhost_vdpa()
1819 return -1; in net_init_vhost_vdpa()
1822 if (opts->vhostdev) { in net_init_vhost_vdpa()
1823 vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp); in net_init_vhost_vdpa()
1824 if (vdpa_device_fd == -1) { in net_init_vhost_vdpa()
1825 return -errno; in net_init_vhost_vdpa()
1829 vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp); in net_init_vhost_vdpa()
1830 if (vdpa_device_fd == -1) { in net_init_vhost_vdpa()
1831 error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: "); in net_init_vhost_vdpa()
1832 return -1; in net_init_vhost_vdpa()
1849 error_setg(errp, "vhost-vdpa: get iova range failed: %s", in net_init_vhost_vdpa()
1850 strerror(-r)); in net_init_vhost_vdpa()
1854 if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) { in net_init_vhost_vdpa()
1864 shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared; in net_init_vhost_vdpa()
1867 vdpa_device_fd, i, 2, true, opts->x_svq, in net_init_vhost_vdpa()
1875 VhostVDPAShared *shared = s0->vhost_vdpa.shared; in net_init_vhost_vdpa()
1879 opts->x_svq, iova_range, features, shared, in net_init_vhost_vdpa()
1889 for (i--; i >= 0; i--) { in net_init_vhost_vdpa()
1896 return -1; in net_init_vhost_vdpa()
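For reference, a command line that exercises this init path looks like the following (device path, id and MAC are illustrative; x-svq=on force-enables shadow virtqueues, matching opts->x_svq above, and is otherwise switched on on demand for migration):

    qemu-system-x86_64 ... \
        -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0,x-svq=on \
        -device virtio-net-pci,netdev=vdpa0,mac=52:54:00:12:34:56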