/openbmc/qemu/net/

eth.c
     76  size_t size = iov_size(l2hdr_iov, iovcnt);  in eth_get_l3_proto()
     83  copied = iov_to_buf(l2hdr_iov, iovcnt, proto_offset,  in eth_get_l3_proto()
     91  const struct iovec *iov, int iovcnt,  in _eth_copy_chunk()  (argument)
    101  copied = iov_to_buf(iov, iovcnt, offset, buffer, length);  in _eth_copy_chunk()
    140  size_t input_size = iov_size(iov, iovcnt);  in eth_get_protocols()
    148  proto = eth_get_l3_proto(iov, iovcnt, *l3hdr_off);  in eth_get_protocols()
    189  iov, iovcnt,  in eth_get_protocols()
    207  iov, iovcnt,  in eth_get_protocols()
    229  size_t copied = iov_to_buf(iov, iovcnt, iovoff,  in eth_strip_vlan()
    253  copied = iov_to_buf(iov, iovcnt, *payload_offset,  in eth_strip_vlan()
    [all …]

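The eth.c helpers above work on a packet scattered across an iovec rather than a flat buffer: iov_size() totals the iovcnt chunks and iov_to_buf() gathers bytes at a logical offset into local storage. Below is a minimal stand-alone sketch of that gather pattern; the two helpers are simplified stand-ins written for illustration, not QEMU's iov_size()/iov_to_buf(), and sketch_get_l3_proto() is an invented name.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Total payload length across all iovcnt chunks (what iov_size() reports). */
    static size_t sketch_iov_size(const struct iovec *iov, int iovcnt)
    {
        size_t total = 0;
        for (int i = 0; i < iovcnt; i++) {
            total += iov[i].iov_len;
        }
        return total;
    }

    /* Copy "bytes" bytes starting at logical offset "offset" out of the
     * scattered iovec into a flat buffer (the job iov_to_buf() does). */
    static size_t sketch_iov_to_buf(const struct iovec *iov, int iovcnt,
                                    size_t offset, void *buf, size_t bytes)
    {
        size_t done = 0;
        for (int i = 0; i < iovcnt && done < bytes; i++) {
            if (offset >= iov[i].iov_len) {
                offset -= iov[i].iov_len;      /* chunk lies before the offset */
                continue;
            }
            size_t avail = iov[i].iov_len - offset;
            size_t n = avail < bytes - done ? avail : bytes - done;
            memcpy((char *)buf + done, (const char *)iov[i].iov_base + offset, n);
            done += n;
            offset = 0;
        }
        return done;                           /* may be short if the iovec ran out */
    }

    /* e.g. read the 2-byte EtherType that sits right after the L2 header,
     * the way eth_get_l3_proto() pulls it out with iov_to_buf(). */
    static uint16_t sketch_get_l3_proto(const struct iovec *iov, int iovcnt,
                                        size_t l2hdr_len)
    {
        uint8_t proto[2] = { 0, 0 };
        if (sketch_iov_size(iov, iovcnt) < l2hdr_len + sizeof(proto)) {
            return 0;                          /* frame too short for an EtherType */
        }
        sketch_iov_to_buf(iov, iovcnt, l2hdr_len, proto, sizeof(proto));
        return (uint16_t)((proto[0] << 8) | proto[1]);  /* network byte order */
    }
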
queue.c
    120  int iovcnt,  in qemu_net_queue_append_iov()  (argument)
    130  for (i = 0; i < iovcnt; i++) {  in qemu_net_queue_append_iov()
    140  for (i = 0; i < iovcnt; i++) {  in qemu_net_queue_append_iov()
    174  int iovcnt)  in qemu_net_queue_deliver_iov()  (argument)
    179  ret = queue->deliver(sender, flags, iov, iovcnt, queue->opaque);  in qemu_net_queue_deliver_iov()
    198  int iovcnt)  in qemu_net_queue_receive_iov()  (argument)
    204  return qemu_net_queue_deliver_iov(queue, NULL, 0, iov, iovcnt);  in qemu_net_queue_receive_iov()
    236  int iovcnt,  in qemu_net_queue_send_iov()  (argument)
    242  qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);  in qemu_net_queue_send_iov()
    246  ret = qemu_net_queue_deliver_iov(queue, sender, flags, iov, iovcnt);  in qemu_net_queue_send_iov()
    [all …]

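When a packet cannot be delivered right away, qemu_net_queue_append_iov() copies the scattered payload into one contiguous buffer, as the two iovcnt loops above suggest, so the queued copy no longer depends on the caller's iovec. A hedged sketch of that flatten-and-queue step follows; QueuedPacket is a made-up stand-in for QEMU's queued-packet type, and the actual queue bookkeeping is left out.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    typedef struct QueuedPacket {
        size_t  size;
        uint8_t data[];              /* flattened copy of the original iovec */
    } QueuedPacket;

    static QueuedPacket *queued_packet_from_iov(const struct iovec *iov, int iovcnt)
    {
        size_t size = 0;

        for (int i = 0; i < iovcnt; i++) {       /* first pass: total length */
            size += iov[i].iov_len;
        }

        QueuedPacket *pkt = malloc(sizeof(*pkt) + size);
        if (!pkt) {
            return NULL;
        }
        pkt->size = size;

        size_t off = 0;
        for (int i = 0; i < iovcnt; i++) {       /* second pass: copy each chunk */
            memcpy(pkt->data + off, iov[i].iov_base, iov[i].iov_len);
            off += iov[i].iov_len;
        }
        return pkt;                              /* caller links it into its queue */
    }
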
filter-replay.c
     36  int iovcnt, NetPacketSent *sent_cb)  in filter_replay_receive_iov()  (argument)
     42  replay_net_packet_event(nfrs->rns, flags, iov, iovcnt);  in filter_replay_receive_iov()
     43  return iov_size(iov, iovcnt);  in filter_replay_receive_iov()
     49  return iov_size(iov, iovcnt);  in filter_replay_receive_iov()

netmap.c
    158  const struct iovec *iov, int iovcnt)  in netmap_receive_iov()  (argument)
    172  if (nm_ring_space(ring) < iovcnt) {  in netmap_receive_iov()
    182  for (j = 0; j < iovcnt; j++) {  in netmap_receive_iov()
    260  int iovcnt = 0;  in netmap_send()  (local)
    267  s->iov[iovcnt].iov_base = (void *)NETMAP_BUF(ring, idx);  in netmap_send()
    268  s->iov[iovcnt].iov_len = ring->slot[i].len;  in netmap_send()
    269  iovcnt++;  in netmap_send()
    283  iovsize = qemu_sendv_packet_async(&s->nc, s->iov, iovcnt,  in netmap_send()

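netmap_send() builds its iovec on the fly: each netmap ring slot of the current frame contributes one entry (iov_base/iov_len taken from the slot), iovcnt counts how many entries were filled, and the array is then submitted in a single vectored call. A rough sketch of that gather loop; the ring/slot layout below is invented for illustration, and writev() stands in for qemu_sendv_packet_async().

    #include <stddef.h>
    #include <sys/uio.h>
    #include <unistd.h>

    #define MAX_SLOTS 64

    struct demo_slot {
        void   *buf;                 /* payload buffer backing this slot */
        size_t  len;                 /* valid bytes in that buffer */
    };

    static ssize_t send_ring_frame(int fd, const struct demo_slot *slots, int nslots)
    {
        struct iovec iov[MAX_SLOTS];
        int iovcnt = 0;

        for (int i = 0; i < nslots && iovcnt < MAX_SLOTS; i++) {
            iov[iovcnt].iov_base = slots[i].buf;   /* like NETMAP_BUF(ring, idx) */
            iov[iovcnt].iov_len  = slots[i].len;   /* like ring->slot[i].len */
            iovcnt++;
        }

        /* one vectored write covers the whole multi-slot frame */
        return writev(fd, iov, iovcnt);
    }
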
filter.c
     33  int iovcnt,  in qemu_netfilter_receive()  (argument)
     42  nf, sender, flags, iov, iovcnt, sent_cb);  in qemu_netfilter_receive()
     67  int iovcnt,  in qemu_netfilter_pass_to_next()  (argument)
    100  iovcnt, NULL);  in qemu_netfilter_pass_to_next()
    114  sender, flags, iov, iovcnt, NULL);  in qemu_netfilter_pass_to_next()
    119  return iov_size(iov, iovcnt);  in qemu_netfilter_pass_to_next()

filter-mirror.c
    108  int iovcnt)  in filter_send()  (argument)
    110  ssize_t size = iov_size(iov, iovcnt);  in filter_send()
    118  iov_to_buf(iov, iovcnt, 0, buf, size);  in filter_send()
    195  int iovcnt,  in filter_mirror_receive_iov()  (argument)
    201  ret = filter_send(s, iov, iovcnt);  in filter_mirror_receive_iov()
    217  int iovcnt,  in filter_redirector_receive_iov()  (argument)
    224  ret = filter_send(s, iov, iovcnt);  in filter_redirector_receive_iov()

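filter_send() measures the packet with iov_size(), linearizes it with iov_to_buf(), and forwards the copy to the mirror or redirector destination over a character device. The sketch below follows that shape and prefixes the payload with a 4-byte big-endian length, which is how I recall the filter-mirror chardev framing; treat that framing detail as an assumption, and note that plain write() stands in for qemu_chr_fe_write_all().

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <unistd.h>

    static int mirror_send(int fd, const struct iovec *iov, int iovcnt)
    {
        size_t size = 0;
        for (int i = 0; i < iovcnt; i++) {       /* iov_size() equivalent */
            size += iov[i].iov_len;
        }

        uint8_t *buf = malloc(size);
        if (!buf) {
            return -1;
        }

        size_t off = 0;
        for (int i = 0; i < iovcnt; i++) {       /* iov_to_buf() equivalent */
            memcpy(buf + off, iov[i].iov_base, iov[i].iov_len);
            off += iov[i].iov_len;
        }

        uint32_t len = htonl((uint32_t)size);    /* length header, then payload */
        int ret = 0;
        if (write(fd, &len, sizeof(len)) != (ssize_t)sizeof(len) ||
            write(fd, buf, size) != (ssize_t)size) {
            ret = -1;
        }
        free(buf);
        return ret;
    }
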
net.c
    260  int iovcnt,
    623  int iovcnt,  in filter_receive_iov()  (argument)
    758  int iovcnt)  in qemu_receive_packet_iov()  (argument)
    781  if (iovcnt == 1) {  in nc_sendv_compat()
    785  offset = iov_size(iov, iovcnt);  in nc_sendv_compat()
    803  int iovcnt,  in qemu_deliver_packet_iov()  (argument)
    814  return iov_size(iov, iovcnt);  in qemu_deliver_packet_iov()
    830  iov_copy = g_new(struct iovec, iovcnt + 1);  in qemu_deliver_packet_iov()
    838  ret = nc->info->receive_iov(nc, iov, iovcnt);  in qemu_deliver_packet_iov()
    840  ret = nc_sendv_compat(nc, iov, iovcnt, flags);  in qemu_deliver_packet_iov()
    [all …]

hub.c
     64  const struct iovec *iov, int iovcnt)  in net_hub_receive_iov()  (argument)
     67  ssize_t len = iov_size(iov, iovcnt);  in net_hub_receive_iov()
     74  qemu_sendv_packet(&port->nc, iov, iovcnt);  in net_hub_receive_iov()
    121  const struct iovec *iov, int iovcnt)  in net_hub_port_receive_iov()  (argument)
    125  return net_hub_receive_iov(port->hub, port, iov, iovcnt);  in net_hub_port_receive_iov()

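net_hub_receive_iov() floods the frame: the same iov/iovcnt pair is handed unchanged to every hub port except the one it arrived on, and the frame's own length (iov_size()) is returned. A small sketch of that loop; HubPort and the per-port file descriptor are illustrative stand-ins, with writev() in place of qemu_sendv_packet().

    #include <sys/uio.h>
    #include <unistd.h>

    typedef struct HubPort {
        int fd;                      /* where this port's traffic is written */
    } HubPort;

    static ssize_t hub_receive_iov(HubPort *ports, int nports,
                                   const HubPort *source_port,
                                   const struct iovec *iov, int iovcnt)
    {
        ssize_t len = 0;
        for (int i = 0; i < iovcnt; i++) {
            len += (ssize_t)iov[i].iov_len;      /* iov_size(): value returned */
        }

        for (int i = 0; i < nports; i++) {
            if (&ports[i] == source_port) {
                continue;                        /* never echo back to the sender */
            }
            (void)writev(ports[i].fd, iov, iovcnt);  /* qemu_sendv_packet() stand-in */
        }
        return len;
    }
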
filter-buffer.c
     63  int iovcnt,  in filter_buffer_receive_iov()  (argument)
     83  iov, iovcnt, NULL);  in filter_buffer_receive_iov()
     84  return iov_size(iov, iovcnt);  in filter_buffer_receive_iov()

/openbmc/qemu/migration/

qemu-file.c
     50  unsigned int iovcnt;  (member)
    233  if (idx >= f->iovcnt) {  in qemu_iovec_release_ram()
    242  while ((idx = find_next_bit(f->may_free, f->iovcnt, idx + 1)) < f->iovcnt) {  in qemu_iovec_release_ram()
    281  if (f->iovcnt > 0) {  in qemu_fflush()
    296  f->iovcnt = 0;  in qemu_fflush()
    388  if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +  in add_to_iovec()
    389  f->iov[f->iovcnt - 1].iov_len &&  in add_to_iovec()
    394  if (f->iovcnt >= MAX_IOV_SIZE) {  in add_to_iovec()
    403  f->iov[f->iovcnt++].iov_len = size;  in add_to_iovec()
    406  if (f->iovcnt >= MAX_IOV_SIZE) {  in add_to_iovec()
    [all …]

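The QEMUFile writer keeps a fixed iovec array plus an iovcnt member: add_to_iovec() extends the last entry when the new buffer is contiguous with it (the buf == iov_base + iov_len test visible at lines 388-389) and otherwise starts a new entry, flushing once iovcnt reaches MAX_IOV_SIZE. A hedged sketch of that coalescing logic; the Writer struct, writer_flush() and the MAX_IOV limit are illustrative, not QEMU's actual types.

    #include <stddef.h>
    #include <sys/uio.h>
    #include <unistd.h>

    #define MAX_IOV 64               /* arbitrary stand-in for MAX_IOV_SIZE */

    typedef struct Writer {
        int          fd;
        struct iovec iov[MAX_IOV];
        unsigned int iovcnt;
    } Writer;

    static void writer_flush(Writer *w)
    {
        if (w->iovcnt > 0) {
            (void)writev(w->fd, w->iov, (int)w->iovcnt);   /* push queued chunks */
            w->iovcnt = 0;
        }
    }

    static void writer_add(Writer *w, const void *buf, size_t size)
    {
        if (w->iovcnt > 0 &&
            buf == (const char *)w->iov[w->iovcnt - 1].iov_base +
                   w->iov[w->iovcnt - 1].iov_len) {
            /* contiguous with the previous chunk: grow it, no new slot needed */
            w->iov[w->iovcnt - 1].iov_len += size;
        } else {
            if (w->iovcnt >= MAX_IOV) {
                writer_flush(w);                 /* make room for a new entry */
            }
            w->iov[w->iovcnt].iov_base = (void *)buf;  /* cast away const for writev */
            w->iov[w->iovcnt].iov_len = size;
            w->iovcnt++;
        }

        if (w->iovcnt >= MAX_IOV) {
            writer_flush(w);                     /* array is full again */
        }
    }
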
/openbmc/qemu/include/net/

queue.h
     44  int iovcnt,
     53  int iovcnt,
     64  int iovcnt);
     77  int iovcnt,

filter.h
     31  int iovcnt,
     70  int iovcnt,
     77  int iovcnt,

eth.h
    318  eth_get_l2_hdr_length_iov(const struct iovec *iov, size_t iovcnt, size_t iovoff)  in eth_get_l2_hdr_length_iov()  (argument)
    321  size_t copied = iov_to_buf(iov, iovcnt, iovoff, p, ARRAY_SIZE(p));  in eth_get_l2_hdr_length_iov()
    345  eth_strip_vlan(const struct iovec *iov, int iovcnt, size_t iovoff,
    350  eth_strip_vlan_ex(const struct iovec *iov, int iovcnt, size_t iovoff, int index,
    355  eth_get_l3_proto(const struct iovec *l2hdr_iov, int iovcnt, size_t l2hdr_len);
    397  void eth_get_protocols(const struct iovec *iov, size_t iovcnt, size_t iovoff,

/openbmc/qemu/hw/net/

net_rx_pkt.c
     88  const struct iovec *iov, int iovcnt,  in net_rx_pkt_pull_data()  (argument)
     91  uint32_t pllen = iov_size(iov, iovcnt) - ploff;  in net_rx_pkt_pull_data()
     94  net_rx_pkt_iovec_realloc(pkt, iovcnt + 1);  in net_rx_pkt_pull_data()
    101  iov, iovcnt, ploff, pllen) + 1;  in net_rx_pkt_pull_data()
    103  net_rx_pkt_iovec_realloc(pkt, iovcnt);  in net_rx_pkt_pull_data()
    107  iov, iovcnt, ploff, pkt->tot_len);  in net_rx_pkt_pull_data()
    119  const struct iovec *iov, int iovcnt,  in net_rx_pkt_attach_iovec()  (argument)
    135  net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff);  in net_rx_pkt_attach_iovec()
    147  pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff,  in net_rx_pkt_attach_iovec_ex()
    154  net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff);  in net_rx_pkt_attach_iovec_ex()
    [all …]

net_rx_pkt.h
     64  const struct iovec *iov, size_t iovcnt,
    220  int iovcnt, size_t iovoff,
    236  const struct iovec *iov, int iovcnt,
    306  const struct iovec *iov, int iovcnt);

/openbmc/qemu/replay/

replay-net.c
     54  const struct iovec *iov, int iovcnt)  in replay_net_packet_event()  (argument)
     58  event->data = g_malloc(iov_size(iov, iovcnt));  in replay_net_packet_event()
     59  event->size = iov_size(iov, iovcnt);  in replay_net_packet_event()
     61  iov_to_buf(iov, iovcnt, 0, event->data, event->size);  in replay_net_packet_event()

/openbmc/qemu/hw/9pfs/

cofile.c
    247  struct iovec *iov, int iovcnt, int64_t offset)  in v9fs_co_pwritev()  (argument)
    255  fsdev_co_throttle_request(s->ctx.fst, THROTTLE_WRITE, iov, iovcnt);  in v9fs_co_pwritev()
    258  err = s->ops->pwritev(&s->ctx, &fidp->fs, iov, iovcnt, offset);  in v9fs_co_pwritev()
    267  struct iovec *iov, int iovcnt, int64_t offset)  in v9fs_co_preadv()  (argument)
    275  fsdev_co_throttle_request(s->ctx.fst, THROTTLE_READ, iov, iovcnt);  in v9fs_co_preadv()
    278  err = s->ops->preadv(&s->ctx, &fidp->fs, iov, iovcnt, offset);  in v9fs_co_preadv()

/openbmc/qemu/contrib/vhost-user-blk/

vhost-user-blk.c
    134  vub_readv(VubReq *req, struct iovec *iov, uint32_t iovcnt)  in vub_readv()  (argument)
    139  if (!iovcnt) {  in vub_readv()
    144  req->size = vub_iov_size(iov, iovcnt);  in vub_readv()
    145  rc = preadv(vdev_blk->blk_fd, iov, iovcnt, req->sector_num * 512);  in vub_readv()
    157  vub_writev(VubReq *req, struct iovec *iov, uint32_t iovcnt)  in vub_writev()  (argument)
    162  if (!iovcnt) {  in vub_writev()
    167  req->size = vub_iov_size(iov, iovcnt);  in vub_writev()
    168  rc = pwritev(vdev_blk->blk_fd, iov, iovcnt, req->sector_num * 512);  in vub_writev()
    180  vub_discard_write_zeroes(VubReq *req, struct iovec *iov, uint32_t iovcnt,  in vub_discard_write_zeroes()  (argument)
    187  size = vub_iov_size(iov, iovcnt);  in vub_discard_write_zeroes()
    [all …]

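vub_readv() and vub_writev() service a request by passing the guest's data descriptors, already shaped as an iovec, straight to POSIX preadv()/pwritev() on the backing file, with the byte offset computed as sector_num * 512. A minimal sketch of the read side under those assumptions; BlkReq is an illustrative stand-in for the real VubReq.

    #define _GNU_SOURCE              /* for preadv() on glibc */
    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    typedef struct BlkReq {
        int      fd;                 /* backing file descriptor */
        uint64_t sector_num;         /* request offset in 512-byte sectors */
        size_t   size;               /* total bytes described by the iovec */
    } BlkReq;

    static ssize_t blk_readv(BlkReq *req, struct iovec *iov, uint32_t iovcnt)
    {
        if (!iovcnt) {
            return -1;               /* nothing to transfer */
        }

        req->size = 0;
        for (uint32_t i = 0; i < iovcnt; i++) {
            req->size += iov[i].iov_len;         /* vub_iov_size() equivalent */
        }

        /* one gather read at the request's byte offset */
        return preadv(req->fd, iov, (int)iovcnt, (off_t)(req->sector_num * 512));
    }
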
/openbmc/qemu/hw/net/rocker/

rocker_fp.c
    120  int fp_port_eg(FpPort *port, const struct iovec *iov, int iovcnt)  in fp_port_eg()  (argument)
    125  qemu_sendv_packet(nc, iov, iovcnt);  in fp_port_eg()
    132  int iovcnt)  in fp_port_receive_iov()  (argument)
    146  return world_ingress(port->world, port->pport, iov, iovcnt);  in fp_port_receive_iov()

rocker_world.h
     30  const struct iovec *iov, int iovcnt);
     44  const struct iovec *iov, int iovcnt);

rocker.h
     80  const struct iovec *iov, int iovcnt, uint8_t copy_to_cpu);
     82  const struct iovec *iov, int iovcnt);

rocker_world.c
     30  const struct iovec *iov, int iovcnt)  in world_ingress()  (argument)
     33  return world->ops->ig(world, pport, iov, iovcnt);  in world_ingress()

rocker_tlv.h
    153  const unsigned int iovcnt)  in rocker_tlv_put_iov()  (argument)
    155  size_t len = iov_size(iov, iovcnt);  in rocker_tlv_put_iov()
    163  iov_to_buf(iov, iovcnt, 0, rocker_tlv_data(tlv), len);  in rocker_tlv_put_iov()

/openbmc/openbmc/meta-openembedded/meta-networking/recipes-support/open-vm-tools/open-vm-tools/

0011-Use-off64_t-instead-of-__off64_t.patch
     23  extern ssize_t preadv64(int fd, const struct iovec *iov, int iovcnt,
     27  extern ssize_t pwritev64(int fd, const struct iovec *iov, int iovcnt,

/openbmc/qemu/block/export/

virtio-blk-handler.c
     54  uint32_t iovcnt, uint32_t type)  in virtio_blk_discard_write_zeroes()  (argument)
     66  if (unlikely(iov_size(iov, iovcnt) > sizeof(desc))) {  in virtio_blk_discard_write_zeroes()
     70  size = iov_to_buf(iov, iovcnt, 0, &desc, sizeof(desc));  in virtio_blk_discard_write_zeroes()

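virtio_blk_discard_write_zeroes() gathers the request's discard/write-zeroes descriptor out of the iovec, but only after checking that the scattered payload is no larger than the descriptor it expects. A sketch of that guarded copy; the descriptor layout follows the VIRTIO discard/write-zeroes segment (sector, num_sectors, flags), endianness handling is omitted, and the helper name is invented.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/uio.h>

    struct dwz_desc {                /* VIRTIO discard/write-zeroes segment */
        uint64_t sector;
        uint32_t num_sectors;
        uint32_t flags;
    };

    static bool read_dwz_desc(const struct iovec *iov, int iovcnt,
                              struct dwz_desc *desc)
    {
        size_t total = 0;
        for (int i = 0; i < iovcnt; i++) {
            total += iov[i].iov_len;             /* iov_size() equivalent */
        }
        if (total > sizeof(*desc)) {
            return false;                        /* oversized request: reject */
        }

        size_t off = 0;
        for (int i = 0; i < iovcnt; i++) {       /* iov_to_buf() equivalent */
            memcpy((char *)desc + off, iov[i].iov_base, iov[i].iov_len);
            off += iov[i].iov_len;
        }
        return off == sizeof(*desc);             /* require a complete descriptor */
    }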