/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "net/queue.h"
#include "qemu/queue.h"
#include "net/net.h"

/* The delivery handler may only return zero if it will call
 * qemu_net_queue_flush() when it determines that it is once again able
 * to deliver packets. It must also call qemu_net_queue_purge() in its
 * cleanup path.
 *
 * If a sent callback is provided to send(), the caller must handle a
 * zero return from the delivery handler by not sending any more packets
 * until we have invoked the callback. Only in that case will we queue
 * the packet.
 *
 * If a sent callback isn't provided, we just drop the packet to avoid
 * unbounded queueing.
 */
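
/*
 * Illustrative sketch (not part of this file): a delivery handler honouring
 * the contract above might look roughly like the following.  MyBackend,
 * my_backend_ready() and deliver_to_backend() are hypothetical placeholders
 * for backend-specific state and logic; only qemu_net_queue_flush() and
 * qemu_net_queue_purge() are real entry points from this file.
 *
 *   static ssize_t my_deliver(NetClientState *sender, unsigned flags,
 *                             const struct iovec *iov, int iovcnt,
 *                             void *opaque)
 *   {
 *       MyBackend *b = opaque;
 *
 *       if (!my_backend_ready(b)) {
 *           return 0;   // packet will be queued; we promise to call
 *                       // qemu_net_queue_flush() once ready again
 *       }
 *       return deliver_to_backend(b, iov, iovcnt);
 *   }
 *
 *   // Later, when the backend can accept packets again:
 *   //     qemu_net_queue_flush(b->queue);
 *   // And in the cleanup path:
 *   //     qemu_net_queue_purge(b->queue, sender);
 */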

struct NetPacket {
    QTAILQ_ENTRY(NetPacket) entry;
    NetClientState *sender;
    unsigned flags;
    int size;
    NetPacketSent *sent_cb;
    uint8_t data[0];
};

struct NetQueue {
    void *opaque;
    uint32_t nq_maxlen;
    uint32_t nq_count;
    NetQueueDeliverFunc *deliver;

    QTAILQ_HEAD(packets, NetPacket) packets;

    unsigned delivering : 1;
};

NetQueue *qemu_new_net_queue(NetQueueDeliverFunc *deliver, void *opaque)
{
    NetQueue *queue;

    queue = g_new0(NetQueue, 1);

    queue->opaque = opaque;
    queue->nq_maxlen = 10000;
    queue->nq_count = 0;
    queue->deliver = deliver;

    QTAILQ_INIT(&queue->packets);

    queue->delivering = 0;

    return queue;
}

void qemu_del_net_queue(NetQueue *queue)
{
    NetPacket *packet, *next;

    QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
        QTAILQ_REMOVE(&queue->packets, packet, entry);
        g_free(packet);
    }

    g_free(queue);
}

static void qemu_net_queue_append(NetQueue *queue,
                                  NetClientState *sender,
                                  unsigned flags,
                                  const uint8_t *buf,
                                  size_t size,
                                  NetPacketSent *sent_cb)
{
    NetPacket *packet;

    if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
        return; /* drop if queue full and no callback */
    }
    packet = g_malloc(sizeof(NetPacket) + size);
    packet->sender = sender;
    packet->flags = flags;
    packet->size = size;
    packet->sent_cb = sent_cb;
    memcpy(packet->data, buf, size);

    queue->nq_count++;
    QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
}

void qemu_net_queue_append_iov(NetQueue *queue,
                               NetClientState *sender,
                               unsigned flags,
                               const struct iovec *iov,
                               int iovcnt,
                               NetPacketSent *sent_cb)
{
    NetPacket *packet;
    size_t max_len = 0;
    int i;

    if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
        return; /* drop if queue full and no callback */
    }
    for (i = 0; i < iovcnt; i++) {
        max_len += iov[i].iov_len;
    }

    packet = g_malloc(sizeof(NetPacket) + max_len);
    packet->sender = sender;
    packet->sent_cb = sent_cb;
    packet->flags = flags;
    packet->size = 0;

    for (i = 0; i < iovcnt; i++) {
        size_t len = iov[i].iov_len;

        memcpy(packet->data + packet->size, iov[i].iov_base, len);
        packet->size += len;
    }

    queue->nq_count++;
    QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
}

static ssize_t qemu_net_queue_deliver(NetQueue *queue,
                                      NetClientState *sender,
                                      unsigned flags,
                                      const uint8_t *data,
                                      size_t size)
{
    ssize_t ret = -1;
    struct iovec iov = {
        .iov_base = (void *)data,
        .iov_len = size
    };

    queue->delivering = 1;
    ret = queue->deliver(sender, flags, &iov, 1, queue->opaque);
    queue->delivering = 0;

    return ret;
}

static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
                                          NetClientState *sender,
                                          unsigned flags,
                                          const struct iovec *iov,
                                          int iovcnt)
{
    ssize_t ret = -1;

    queue->delivering = 1;
    ret = queue->deliver(sender, flags, iov, iovcnt, queue->opaque);
    queue->delivering = 0;

    return ret;
}

ssize_t qemu_net_queue_send(NetQueue *queue,
                            NetClientState *sender,
                            unsigned flags,
                            const uint8_t *data,
                            size_t size,
                            NetPacketSent *sent_cb)
{
    ssize_t ret;

    if (queue->delivering || !qemu_can_send_packet(sender)) {
        qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
        return 0;
    }
    ret = qemu_net_queue_deliver(queue, sender, flags, data, size);
    if (ret == 0) {
        qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
        return 0;
    }

    qemu_net_queue_flush(queue);

    return ret;
}

ssize_t qemu_net_queue_send_iov(NetQueue *queue,
                                NetClientState *sender,
                                unsigned flags,
                                const struct iovec *iov,
                                int iovcnt,
                                NetPacketSent *sent_cb)
{
    ssize_t ret;

    if (queue->delivering || !qemu_can_send_packet(sender)) {
        qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
        return 0;
    }

    ret = qemu_net_queue_deliver_iov(queue, sender, flags, iov, iovcnt);
    if (ret == 0) {
        qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
        return 0;
    }

    qemu_net_queue_flush(queue);

    return ret;
}

void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
{
    NetPacket *packet, *next;

    QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
        if (packet->sender == from) {
            QTAILQ_REMOVE(&queue->packets, packet, entry);
            queue->nq_count--;
            if (packet->sent_cb) {
                packet->sent_cb(packet->sender, 0);
            }
            g_free(packet);
        }
    }
}

bool qemu_net_queue_flush(NetQueue *queue)
{
    while (!QTAILQ_EMPTY(&queue->packets)) {
        NetPacket *packet;
        int ret;

        packet = QTAILQ_FIRST(&queue->packets);
        QTAILQ_REMOVE(&queue->packets, packet, entry);
        queue->nq_count--;

        ret = qemu_net_queue_deliver(queue,
                                     packet->sender,
                                     packet->flags,
                                     packet->data,
                                     packet->size);
        if (ret == 0) {
            queue->nq_count++;
            QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
            return false;
        }

        if (packet->sent_cb) {
            packet->sent_cb(packet->sender, ret);
        }

        g_free(packet);
    }
    return true;
}
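
/*
 * Caller-side sketch (hypothetical, not part of this file): a sender using
 * qemu_net_queue_send() with a sent callback must stop transmitting on a
 * zero return and resume only once the callback has run.  my_tx_done() and
 * the surrounding transmit logic are placeholders; the send call and the
 * NetPacketSent signature are the ones defined above and in net/queue.h.
 *
 *   static void my_tx_done(NetClientState *sender, ssize_t ret)
 *   {
 *       // the queued packet has been delivered (or purged with ret == 0);
 *       // it is now safe to hand more packets to the queue
 *   }
 *
 *   // in the transmit path:
 *   //   if (qemu_net_queue_send(queue, nc, 0, buf, len, my_tx_done) == 0) {
 *   //       stop transmitting until my_tx_done() is invoked
 *   //   }
 */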