Lines matching "+full:num +full:-macs": excerpts from vhost-vdpa.c

 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 *
 * See the COPYING file in the top-level directory.

#include "hw/virtio/virtio-net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "standard-headers/linux/virtio_net.h"
/* in vhost_vdpa_get_vhost_net() */
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
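/*
 * Pieced together from the two matched lines above, the full accessor is
 * tiny; a sketch rather than a verbatim copy of the tree:
 */
VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}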
/* in vhost_vdpa_net_valid_svq_features() */
                             VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);
/* in vhost_vdpa_net_check_device_id() */
    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
        return -ENOTSUP;
/* in vhost_vdpa_add() */
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s->vhost_net = net;
        return -1;
/* in vhost_vdpa_cleanup() */
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_free(s->vhost_vdpa.shared);
/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
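/*
 * A minimal stub consistent with that comment (a sketch; the hook signature
 * is assumed from the NetClientInfo set_steering_ebpf callback):
 */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    /* the hardware does the steering, so accept and ignore the eBPF program */
    return true;
}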
/* in vhost_vdpa_has_vnet_hdr() */
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
/* in vhost_vdpa_has_ufo() */
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    features = vhost_net_get_features(s->vhost_net, features);
 * LE". Otherwise, on a BE machine, higher-level code would mistakenly think
/* in vhost_vdpa_check_peer_type() */
    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
/** From any vdpa net client, get the netclient of the i-th queue pair */
/* in vhost_vdpa_net_get_nc_vdpa() */
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc_i = qemu_get_peer(nic->ncs, i);
/* in vhost_vdpa_net_log_global_enable() */
    struct vhost_vdpa *v = &s->vhost_vdpa;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
                                  n->max_ncs - n->max_queue_pairs : 0;
    v->shared->svq_switching = enable ?
     * in the future and resume the device if read-only operations between
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
    v->shared->svq_switching = SVQ_TSTATE_DONE;
/* in vdpa_net_migration_state_notifier() */
    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
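/*
 * Filled in, the notifier plausibly toggles shadow virtqueues around
 * precopy, using vhost_vdpa_net_log_global_enable() from above (a sketch,
 * not verbatim from the tree):
 */
static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        /* switch to shadow virtqueues so dirty pages can be logged */
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        /* migration failed: go back to passthrough virtqueues */
        vhost_vdpa_net_log_global_enable(s, false);
    }
    return 0;
}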
/* in vhost_vdpa_net_data_start_first() */
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
    if (v->shadow_vqs_enabled) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
/* in vhost_vdpa_net_data_start() */
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq || migration_is_running()) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
/* in vhost_vdpa_net_data_load() */
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    for (int i = 0; i < v->dev->nvqs; ++i) {
        int ret = vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
/* in vhost_vdpa_net_client_stop() */
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
/* in vhost_vdpa_get_vring_group() */
        r = -errno;
    return state.num;
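/*
 * Reconstructed around the two matched lines, the group lookup is a plain
 * VHOST_VDPA_GET_VRING_GROUP ioctl (a sketch; error wording assumed):
 */
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    /* the kernel returns the group id in state.num */
    return state.num;
}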
/* in vhost_vdpa_set_address_space_id() */
        .num = asid_num,
    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
                     asid.index, asid.num, errno, g_strerror(errno));
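/*
 * Likewise for the ASID assignment: VHOST_VDPA_SET_GROUP_ASID binds a vring
 * group to an address space id (a sketch built from the fragments above):
 */
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}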
/* in vhost_vdpa_cvq_unmap_buf() */
    VhostIOVATree *tree = v->shared->iova_tree;
    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
/* in vhost_vdpa_cvq_map_buf() */
    map.size = size - 1;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr);
        error_report("Insertion to IOVA->HVA tree failed");
        /* Remove the mapping from the IOVA-only tree */
    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
        vhost_iova_tree_remove(v->shared->iova_tree, map);
/* in vhost_vdpa_net_cvq_start() */
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    v = &s->vhost_vdpa;
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
     * will be blocked as long as vhost-vdpa backends do not offer _F_LOG.
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
    if (!s->cvq_isolated) {
    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

    if (!s->vhost_vdpa.shadow_vqs_enabled) {
     * - The memory listener needs access to the guest's memory addresses
     *   allocated in the IOVA tree.
     * - There should be plenty of IOVA address space for both ASIDs not to
    if (!v->shared->iova_tree) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
/* in vhost_vdpa_net_cvq_stop() */
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }
/* in vhost_vdpa_net_cvq_add() */
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    if (unlikely(r == -ENOSPC)) {
/* in vhost_vdpa_net_svq_poll() */
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
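/*
 * The poll helper is a thin wrapper; a plausible reconstruction (assuming
 * vhost_svq_poll() takes the number of descriptors to wait for):
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    /* wait for the device to use all in-flight control commands */
    return vhost_svq_poll(svq, cmds_in_flight);
}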
/* in vhost_vdpa_net_load_cursor_reset() */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
/* in vhost_vdpa_net_svq_flush() */
    /* device uses a one-byte length ack for each control command */
        return -EIO;

        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
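/*
 * Putting the flush fragments together: poll once per in-flight command,
 * then check each one-byte ack (a sketch reusing the poll helper above):
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s,
                                        size_t cmds_in_flight)
{
    ssize_t dev_written;

    /* device uses a one-byte length ack for each control command */
    dev_written = vhost_vdpa_net_svq_poll(s, cmds_in_flight);
    if (unlikely(dev_written != cmds_in_flight)) {
        return -EIO;
    }

    /* check every ack byte written by the device */
    for (size_t i = 0; i < cmds_in_flight; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}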
/* in vhost_vdpa_net_load_cmd() */
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                        (void *)s->status);
    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
/* in vhost_vdpa_net_load_mac() */
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
    uint32_t uni_entries = n->mac_table.first_multi,
             mul_entries = n->mac_table.in_use - uni_entries,
            .iov_base = n->mac_table.macs,
            .iov_base = &n->mac_table.macs[uni_macs_size],
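/*
 * The load_mac fragments split the guest's filter table at first_multi:
 * entries before it are unicast, the rest multicast, and both halves go
 * out in one VIRTIO_NET_CTRL_MAC_TABLE_SET command (a sketch; `r`,
 * `out_cursor` and `in_cursor` come from the surrounding function):
 */
    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        { .iov_base = &uni, .iov_len = sizeof(uni) },
        { .iov_base = n->mac_table.macs, .iov_len = uni_macs_size },
        { .iov_base = &mul, .iov_len = sizeof(mul) },
        { .iov_base = &n->mac_table.macs[uni_macs_size],
          .iov_len = mul_macs_size },
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MAC,
                                VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                data, ARRAY_SIZE(data));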
/* in vhost_vdpa_net_load_rss() */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);
    cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                             1);
    cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
    for (int i = 0; i < n->rss_data.indirections_len; ++i) {
        table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
    }
    cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
     * into n->rss_data and uses the maximum key length in other code, so
    cfg.hash_key_length = sizeof(n->rss_data.key);

        .iov_len = n->rss_data.indirections_len *
                   sizeof(n->rss_data.indirections_table[0]),
        .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
        .iov_base = (void *)n->rss_data.key,
        .iov_len = sizeof(n->rss_data.key),
/* in vhost_vdpa_net_load_mq() */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs);

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
    } else if (virtio_vdev_has_feature(&n->parent_obj,
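/*
 * Condensed, the multiqueue restore sends one VQ_PAIRS_SET command and then
 * reloads RSS or hash-report state (a sketch reusing
 * vhost_vdpa_net_load_cmd() from above):
 */
    struct virtio_net_ctrl_mq mq;
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }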
/* in vhost_vdpa_net_load_offloads() */
    if (!virtio_vdev_has_feature(&n->parent_obj,
    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
/* in vhost_vdpa_net_load_rx() */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {

     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
    if (!n->mac_table.uni_overflow && !n->promisc) {

     * According to virtio_net_reset(), the device turns all-multicast mode
     * QEMU marks `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
    if (n->mac_table.multi_overflow || n->allmulti) {

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {

     * According to virtio_net_reset(), the device turns all-unicast mode
     * sets all-unicast mode on, different from the device's defaults.
    if (n->alluni) {

     * According to virtio_net_reset(), the device turns non-multicast mode
     * sets non-multicast mode on, different from the device's defaults.
    if (n->nomulti) {

     * According to virtio_net_reset(), the device turns non-unicast mode
     * sets non-unicast mode on, different from the device's defaults.
    if (n->nouni) {

     * According to virtio_net_reset(), the device turns non-broadcast mode
     * sets non-broadcast mode on, different from the device's defaults.
    if (n->nobcast) {
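/*
 * Each branch above emits a one-byte VIRTIO_NET_CTRL_RX command; for
 * example, turning promiscuous mode off (a sketch; the real code likely
 * wraps this in a small helper):
 */
    uint8_t on = 0;
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };

    /* the device defaults to promiscuous on, so explicitly turn it off */
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX,
                                VIRTIO_NET_CTRL_RX_PROMISC,
                                &data, 1);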
/* in vhost_vdpa_net_load_vlan() */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
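/*
 * n->vlans is a bitmap of 4096 VLAN ids stored in 32-bit words; the restore
 * walks every set bit and replays one VLAN_ADD per id (a sketch; the per-id
 * helper name vhost_vdpa_net_load_single_vlan is implied by the loop):
 */
    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                /* word i, bit j => VLAN id (i << 5) + j */
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }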
/* in vhost_vdpa_net_cvq_load() */
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    r = vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);

    for (int i = 0; i < v->dev->vq_index; ++i) {
/* from the comment above vhost_vdpa_net_excessive_mac_filter_cvq_add() */
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
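/*
 * The trick the comment describes: when the guest's MAC_TABLE_SET payload
 * is too large for the CVQ buffer, send the device model a fake table that
 * merely overflows both halves. The sizing plausibly works out to (sketch):
 */
    size_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
           fake_mul_entries = MAC_TABLE_ENTRIES + 1,
           fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                           2 * sizeof(struct virtio_net_ctrl_mac) +
                           (fake_uni_entries + fake_mul_entries) * ETH_ALEN;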
/* in vhost_vdpa_net_excessive_mac_filter_cvq_add() */
    /* parse the non-multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);

    /* validate that the CVQ command covers exactly the parsed payload */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /* send a VIRTIO_NET_CTRL_RX_PROMISC command to the real device */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;

    if (unlikely(r < sizeof(*s->status))) {
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;

    /* pack the non-multicast MAC addresses part of the fake CVQ command */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);

    /* pack the multicast MAC addresses part of the fake CVQ command */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    return sizeof(*s->status);
/* in vhost_vdpa_net_handle_ctrl_avail() */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec model_in = {
        .iov_base = s->status,
        .iov_len = sizeof(*s->status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {

    if (*s->status != VIRTIO_NET_OK) {
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
     * by a non-negative value of `dev_written`. Otherwise, it still
/* in vhost_vdpa_probe_cvq_isolation() */
    error_setg_errno(errp, -r, "Cannot set device status");
    error_setg_errno(errp, -r, "Cannot set features");
    error_setg_errno(errp, -r, "Cannot set device status");
    if (cvq_group != -ENOTSUP) {
/* in net_vhost_vdpa_init() */
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    vhost_vdpa_net_valid_svq_features(features,
                                      &s->vhost_vdpa.migration_blocker);
    s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
    s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
    s->vhost_vdpa.shared->iova_range = iova_range;
    s->vhost_vdpa.shared->shadow_data = svq;

    s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                 PROT_READ | PROT_WRITE,
                                 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                     -1, 0);
    s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
    s->vhost_vdpa.shadow_vq_ops_opaque = s;
    s->cvq_isolated = cvq_isolated;

    s->vhost_vdpa.shared = shared;

    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
1759 "Fail to query features from vhost-vDPA device"); in vhost_vdpa_get_features()
/* in vhost_vdpa_get_max_queue_pairs() */
    config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
    config->len = sizeof(*max_queue_pairs);
        error_setg(errp, "Failed to get config from vhost-vDPA device");
        return -ret;
    max_queue_pairs = (__virtio16 *)&config->buf;
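/*
 * Those fragments outline a VHOST_VDPA_GET_CONFIG read of
 * max_virtqueue_pairs; a rough reconstruction (the signature and the
 * flexible-array config buffer are assumptions):
 */
static int vhost_vdpa_get_max_queue_pairs(int fd, Error **errp)
{
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    config = g_malloc0(offsetof(struct vhost_vdpa_config, buf) +
                       sizeof(*max_queue_pairs));
    config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
    config->len = sizeof(*max_queue_pairs);

    ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
    if (ret) {
        error_setg(errp, "Failed to get config from vhost-vDPA device");
        return -ret;
    }

    max_queue_pairs = (__virtio16 *)&config->buf;
    /* the virtio config space is little-endian */
    return lduw_le_p(max_queue_pairs);
}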
/* in net_init_vhost_vdpa() */
    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {

    shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
                                 vdpa_device_fd, i, 2, true, opts->x_svq,
        VhostVDPAShared *shared = s0->vhost_vdpa.shared;
                                 opts->x_svq, iova_range, features, shared,

    for (i--; i >= 0; i--) {
        qemu_del_net_client(ncs[i]);
    }
    return -1;