xref: /openbmc/qemu/block/nbd.c (revision e1ecf8c8)
/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qemu/uri.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"

#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS    16

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))
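
/*
 * Illustrative note: XOR-ing the BlockDriverState pointer into the
 * handle keeps the index<->handle mapping trivially reversible, since
 * (x ^ k) ^ k == x for any k.  A minimal sketch of the round trip:
 *
 *     uint64_t handle = INDEX_TO_HANDLE(bs, 5);
 *     assert(HANDLE_TO_INDEX(bs, handle) == 5);
 *
 * A handle that was not generated for this BlockDriverState will
 * normally decode to an index that fails the i < MAX_NBD_REQUESTS
 * check in nbd_connection_entry().
 */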

typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* waiting for connection_co? */
} NBDClientRequest;

typedef enum NBDClientState {
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;

typedef struct BDRVNBDState {
    QIOChannelSocket *sioc; /* The master data channel */
    QIOChannel *ioc; /* The current I/O channel, which may differ (e.g. TLS) */
    NBDExportInfo info;

    CoMutex send_mutex;
    CoQueue free_sema;
    Coroutine *connection_co;
    int in_flight;
    NBDClientState state;

    NBDClientRequest requests[MAX_NBD_REQUESTS];
    NBDReply reply;
    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    SocketAddress *saddr;
    char *export, *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    const char *hostname;
    char *x_dirty_bitmap;
} BDRVNBDState;

/* @ret will be used for reconnect in the future */
static void nbd_channel_error(BDRVNBDState *s, int ret)
{
    s->state = NBD_CLIENT_QUIT;
}

static void nbd_recv_coroutines_wake_all(BDRVNBDState *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}

static void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
}

static void nbd_client_attach_aio_context_bh(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    /*
     * The node is still drained, so we know the coroutine has yielded in
     * nbd_read_eof(), the only place where bs->in_flight can reach 0, or it is
     * entered for the first time. Both places are safe for entering the
     * coroutine.
     */
    qemu_aio_coroutine_enter(bs->aio_context, s->connection_co);
    bdrv_dec_in_flight(bs);
}

static void nbd_client_attach_aio_context(BlockDriverState *bs,
                                          AioContext *new_context)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    qio_channel_attach_aio_context(QIO_CHANNEL(s->ioc), new_context);

    bdrv_inc_in_flight(bs);

    /*
     * Need to wait here for the BH to run because the BH must run while the
     * node is still drained.
     */
    aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs);
}


static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(s->ioc);

    /* finish any pending coroutines */
    qio_channel_shutdown(s->ioc,
                         QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);
    BDRV_POLL_WHILE(bs, s->connection_co);

    nbd_client_detach_aio_context(bs);
    object_unref(OBJECT(s->sioc));
    s->sioc = NULL;
    object_unref(OBJECT(s->ioc));
    s->ioc = NULL;
}

static coroutine_fn void nbd_connection_entry(void *opaque)
{
    BDRVNBDState *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (s->state != NBD_CLIENT_QUIT) {
        /*
         * The NBD client can only really be considered idle when it has
         * yielded from qio_channel_readv_all_eof(), waiting for data. This is
         * the point where the additional scheduled coroutine entry happens
         * after nbd_client_attach_aio_context().
         *
         * Therefore we keep an additional in_flight reference all the time and
         * only drop it temporarily here.
         */
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err);

        if (local_err) {
            trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err));
            error_free(local_err);
        }
        if (ret <= 0) {
            nbd_channel_error(s, ret ? ret : -EIO);
            break;
        }

        /*
         * There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving ||
            (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
        {
            nbd_channel_error(s, -EINVAL);
            break;
        }

        /*
         * We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering connection_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   connection_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }

    nbd_recv_coroutines_wake_all(s);
    bdrv_dec_in_flight(s->bs);

    s->connection_co = NULL;
    aio_wait_kick();
}

static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_co_mutex_lock(&s->send_mutex);
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }

    if (s->state != NBD_CLIENT_CONNECTED) {
        rc = -EIO;
        goto err;
    }

    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && s->state == NBD_CLIENT_CONNECTED) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        nbd_channel_error(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
            s->in_flight--;
        }
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}

static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
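
/*
 * Usage sketch (illustrative): the payload_advance* helpers decode a
 * big-endian payload buffer field by field; e.g. an OFFSET_HOLE
 * payload, a 64-bit offset followed by a 32-bit hole size, is consumed
 * as:
 *
 *     uint64_t offset = payload_advance64(&payload);
 *     uint32_t hole_size = payload_advance32(&payload);
 *
 * Each call returns the decoded value and leaves @payload pointing at
 * the next field, as nbd_parse_offset_hole_payload() below does.
 */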

static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}
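
/*
 * Worked example (illustrative) of the bounds check above: for a read
 * of 4096 bytes at orig_offset 0, a chunk with offset 4096 and
 * hole_size 1 is rejected because 4096 > 0 + 4096 - 1.  Writing the
 * comparison as offset > orig_offset + qiov->size - hole_size (with
 * hole_size already checked against qiov->size) also sidesteps the
 * wraparound that offset + hole_size could hit near UINT64_MAX.
 */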

/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_length,
                                         NBDExtent *extent, Error **errp)
{
    uint32_t context_id;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (s->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id,
                         s->info.context_id);
        return -EINVAL;
    }

    extent->length = payload_advance32(&payload);
    extent->flags = payload_advance32(&payload);

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                   "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                                   s->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. However, it's easy
     * enough to ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
        trace_nbd_parse_blockstatus_compliance("more than one extent");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    return 0;
}
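
/*
 * Worked example (illustrative) of the alignment fixup above, with
 * min_block 512: an extent of length 1000 that covers more than the
 * final block is truncated to QEMU_ALIGN_DOWN(1000, 512) == 512, while
 * an extent of length 300 (only the final block) is rounded up to 512
 * with flags cleared, i.e. reported as fully allocated.
 */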

/*
 * nbd_parse_error_payload
 * On success, @request_ret is set to the (negative) errno reported by
 * the server; @errp is set only for protocol errors.
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int nbd_co_receive_offset_data_payload(BDRVNBDState *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}

#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        BDRVNBDState *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If the function fails, @errp contains the corresponding error message,
 * and the connection with the server is suspect.  If it returns 0, then
 * the transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    /* Wait until we're woken up by nbd_connection_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    if (s->state != NBD_CLIENT_CONNECTED) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }
    assert(s->ioc);

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                       " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                       " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/*
 * nbd_co_receive_one_chunk
 * Read a reply, wake up connection_co and, on failure, set s->state to
 * NBD_CLIENT_QUIT (via nbd_channel_error).
 * Returns a fatal error code, or 0 with the server's per-request error
 * stored in *request_ret.
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        memset(reply, 0, sizeof(*reply));
        nbd_channel_error(s, ret);
    } else {
        /* For assert at loop start in nbd_connection_entry */
        *reply = s->reply;
        s->reply.handle = 0;
    }

    if (s->connection_co) {
        aio_co_wake(s->connection_co);
    }

    return ret;
}

typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}

/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload must be freed with g_free().
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
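
/*
 * Usage sketch (illustrative): a caller drains one reply chunk by chunk
 * and lets the iterator latch the first fatal error and the first
 * per-request error:
 *
 *     NBDReplyChunkIter iter;
 *
 *     NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
 *         // inspect each chunk here if needed
 *     }
 *     // iter.ret: fatal error code; iter.request_ret: server error
 *
 * nbd_co_receive_return_code() below is exactly this pattern.
 */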

/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload must be freed with g_free().
 */
static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;
    if (s->state != NBD_CLIENT_CONNECTED) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_channel_error(iter, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || s->state != NBD_CLIENT_CONNECTED) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}

static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
                                      int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
                                            uint64_t handle, uint64_t length,
                                            NBDExtent *extent,
                                            int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            if (received) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(s, &reply.structured,
                                                payload, length, extent,
                                                &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }
    ret = nbd_co_send_request(bs, request, write_qiov);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_return_code(s, request->handle,
                                     &request_ret, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request->from, request->len, request->handle,
                                  request->flags, request->type,
                                  nbd_cmd_lookup(request->type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    return ret ? ret : request_ret;
}

static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
                                uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);
    assert(!flags);

    if (!bytes) {
        return 0;
    }
    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the read exceeds the server's
     * advertised size because the block layer rounded size up, then
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= s->info.size) {
        assert(bytes < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, 0, 0, bytes);
        return 0;
    }
    if (offset + bytes > s->info.size) {
        uint64_t slop = offset + bytes - s->info.size;

        assert(slop < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }
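
    /*
     * Worked example (illustrative) of the tail-padding above: with an
     * export of 1000 bytes, a read of 512 bytes at offset 512 has
     * offset + bytes == 1024 > 1000, so slop == 24; bytes 488..511 of
     * @qiov are zeroed and the request sent to the server shrinks to
     * 488 bytes.
     */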

    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
                                       &request_ret, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                  request.flags, request.type,
                                  nbd_cmd_lookup(request.type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    return ret ? ret : request_ret;
}

static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
                                 uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, qiov);
}

static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
        request.flags |= NBD_CMD_FLAG_FAST_ZERO;
    }

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, NULL);
}

static int nbd_client_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int bytes)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

static int coroutine_fn nbd_client_co_block_status(
        BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
        int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int ret, request_ret;
    NBDExtent extent = { 0 };
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    Error *local_err = NULL;

    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(MIN_NON_ZERO(QEMU_ALIGN_DOWN(INT_MAX,
                                                bs->bl.request_alignment),
                                s->info.max_block),
                   MIN(bytes, s->info.size - offset)),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!s->info.base_allocation) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the status request exceeds the
     * server's advertised size because the block layer rounded size
     * up, we truncated the request to the server (above), or are
     * called on just the hole.
     */
    if (offset >= s->info.size) {
        *pnum = bytes;
        assert(bytes < BDRV_SECTOR_SIZE);
        /* Intentionally don't report offset_valid for the hole */
        return BDRV_BLOCK_ZERO;
    }

    if (s->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
    }
    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
                                           &extent, &request_ret, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                  request.flags, request.type,
                                  nbd_cmd_lookup(request.type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    if (ret < 0 || request_ret < 0) {
        return ret ? ret : request_ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    *map = offset;
    *file = bs;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
        (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
        BDRV_BLOCK_OFFSET_VALID;
}

static int nbd_client_reopen_prepare(BDRVReopenState *state,
                                     BlockReopenQueue *queue, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;

    if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
        error_setg(errp, "Can't reopen read-only NBD mount as read/write");
        return -EACCES;
    }
    return 0;
}

static void nbd_client_close(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_DISC };

    assert(s->ioc);

    nbd_send_request(s->ioc, &request);

    nbd_teardown_connection(bs);
}

static QIOChannelSocket *nbd_establish_connection(SocketAddress *saddr,
                                                  Error **errp)
{
    QIOChannelSocket *sioc;
    Error *local_err = NULL;

    sioc = qio_channel_socket_new();
    qio_channel_set_name(QIO_CHANNEL(sioc), "nbd-client");

    qio_channel_socket_connect_sync(sioc, saddr, &local_err);
    if (local_err) {
        object_unref(OBJECT(sioc));
        error_propagate(errp, local_err);
        return NULL;
    }

    qio_channel_set_delay(QIO_CHANNEL(sioc), false);

    return sioc;
}

static int nbd_client_connect(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;

    /*
     * establish TCP connection, return error if it fails
     * TODO: Configurable retry-until-timeout behaviour.
     */
    QIOChannelSocket *sioc = nbd_establish_connection(s->saddr, errp);

    if (!sioc) {
        return -ECONNREFUSED;
    }

    /* NBD handshake */
    trace_nbd_client_connect(s->export);
    qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
    qio_channel_attach_aio_context(QIO_CHANNEL(sioc), aio_context);

    s->info.request_sizes = true;
    s->info.structured_reply = true;
    s->info.base_allocation = true;
    s->info.x_dirty_bitmap = g_strdup(s->x_dirty_bitmap);
    s->info.name = g_strdup(s->export ?: "");
    ret = nbd_receive_negotiate(aio_context, QIO_CHANNEL(sioc), s->tlscreds,
                                s->hostname, &s->ioc, &s->info, errp);
    g_free(s->info.x_dirty_bitmap);
    g_free(s->info.name);
    if (ret < 0) {
        object_unref(OBJECT(sioc));
        return ret;
    }
    if (s->x_dirty_bitmap && !s->info.base_allocation) {
        error_setg(errp, "requested x-dirty-bitmap %s not found",
                   s->x_dirty_bitmap);
        ret = -EINVAL;
        goto fail;
    }
    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            goto fail;
        }
    }
    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }
    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    s->sioc = sioc;

    if (!s->ioc) {
        s->ioc = QIO_CHANNEL(sioc);
        object_ref(OBJECT(s->ioc));
    }

    trace_nbd_client_connect_success(s->export);

    return 0;

 fail:
    /*
     * We have connected, but must fail for other reasons.
     * Send NBD_CMD_DISC as a courtesy to the server.
     */
    {
        NBDRequest request = { .type = NBD_CMD_DISC };

        nbd_send_request(s->ioc ?: QIO_CHANNEL(sioc), &request);

        object_unref(OBJECT(sioc));

        return ret;
    }
}

/*
 * Parse nbd_open options
 */
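
/*
 * Accepted URI forms (see nbd_parse_uri() below):
 *
 *     nbd://host[:port]/export
 *     nbd+tcp://host[:port]/export
 *     nbd+unix:///export?socket=path
 *
 * Legacy filename forms such as nbd:host:port:exportname=name and
 * nbd:unix:path are handled by nbd_parse_filename() instead.
 */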

static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!g_strcmp0(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "/";
    p += strspn(p, "/");
    if (p[0]) {
        qdict_put_str(options, "export", p);
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", qp->p[0].value);
    } else {
        QString *host;
        char *port_str;

        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 1);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put(options, "server.host", host);

        port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
        qdict_put_str(options, "server.port", port_str);
        g_free(port_str);
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}

static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    g_autofree char *file = NULL;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            return;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        return;
    }

    if (!*host_spec) {
        return;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }
}

static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                       "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }

        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else if (host) {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}

static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;
    Error *local_err = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    visit_type_SocketAddress(iv, NULL, &saddr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);
    return saddr;
}

static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (creds->endpoint != QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT) {
        error_setg(errp,
                   "Expecting TLS credentials with a client endpoint");
        return NULL;
    }
    object_ref(obj);
    return creds;
}


static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "block status",
        },
        {
            .name = "reconnect-delay",
            .type = QEMU_OPT_NUMBER,
            .help = "On an unexpected disconnect, the nbd client tries to "
                    "connect again until succeeding or encountering a serious "
                    "error.  During the first @reconnect-delay seconds, all "
                    "requests are paused and will be rerun on a successful "
                    "reconnect. After that time, any delayed requests and all "
                    "future requests before a successful reconnect will "
                    "immediately fail. Default 0",
        },
        { /* end of list */ }
    },
};
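
/*
 * Example (illustrative) of how these options appear on the command
 * line, assuming a server listening on localhost:10809:
 *
 *     -drive driver=nbd,server.type=inet,server.host=localhost,server.port=10809,export=myexport
 *
 * The legacy host/port/path options above are translated into the
 * server.* form by nbd_process_legacy_socket_options().
 */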
1608 
1609 static int nbd_process_options(BlockDriverState *bs, QDict *options,
1610                                Error **errp)
1611 {
1612     BDRVNBDState *s = bs->opaque;
1613     QemuOpts *opts;
1614     Error *local_err = NULL;
1615     int ret = -EINVAL;
1616 
1617     opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
1618     qemu_opts_absorb_qdict(opts, options, &local_err);
1619     if (local_err) {
1620         error_propagate(errp, local_err);
1621         goto error;
1622     }
1623 
1624     /* Translate @host, @port, and @path to a SocketAddress */
1625     if (!nbd_process_legacy_socket_options(options, opts, errp)) {
1626         goto error;
1627     }
1628 
1629     /* Pop the config into our state object. Exit if invalid. */
1630     s->saddr = nbd_config(s, options, errp);
1631     if (!s->saddr) {
1632         goto error;
1633     }
1634 
1635     s->export = g_strdup(qemu_opt_get(opts, "export"));
1636 
1637     s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
1638     if (s->tlscredsid) {
1639         s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
1640         if (!s->tlscreds) {
1641             goto error;
1642         }
1643 
1644         /* TODO SOCKET_ADDRESS_KIND_FD where fd has AF_INET or AF_INET6 */
1645         if (s->saddr->type != SOCKET_ADDRESS_TYPE_INET) {
1646             error_setg(errp, "TLS only supported over IP sockets");
1647             goto error;
1648         }
1649         s->hostname = s->saddr->u.inet.host;
1650     }
1651 
1652     s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
1653     s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
1654 
1655     ret = 0;
1656 
1657  error:
1658     if (ret < 0) {
1659         object_unref(OBJECT(s->tlscreds));
1660         qapi_free_SocketAddress(s->saddr);
1661         g_free(s->export);
1662         g_free(s->tlscredsid);
1663     }
1664     qemu_opts_del(opts);
1665     return ret;
1666 }
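/*
 * A minimal sketch of the state this function leaves behind, assuming a
 * hypothetical Unix-socket connection: s->saddr describes /tmp/nbd.sock,
 * s->export holds "disk0" (or NULL for the default export), and
 * s->tlscreds/s->tlscredsid stay NULL unless tls-creds was given.
 * Everything allocated here is released either in the error path above
 * or later in nbd_close().
 */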
1667 
1668 static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
1669                     Error **errp)
1670 {
1671     int ret;
1672     BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1673 
1674     ret = nbd_process_options(bs, options, errp);
1675     if (ret < 0) {
1676         return ret;
1677     }
1678 
1679     s->bs = bs;
1680     qemu_co_mutex_init(&s->send_mutex);
1681     qemu_co_queue_init(&s->free_sema);
1682 
1683     ret = nbd_client_connect(bs, errp);
1684     if (ret < 0) {
1685         return ret;
1686     }
1687     /* successfully connected */
1688     s->state = NBD_CLIENT_CONNECTED;
1689 
1690     s->connection_co = qemu_coroutine_create(nbd_connection_entry, s);
1691     bdrv_inc_in_flight(bs);
1692     aio_co_schedule(bdrv_get_aio_context(bs), s->connection_co);
1693 
1694     return 0;
1695 }
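/*
 * Note on the lifecycle above: the bdrv_inc_in_flight() taken here is
 * expected to be paired with a bdrv_dec_in_flight() when connection_co
 * terminates (see nbd_connection_entry earlier in this file); the
 * coroutine keeps the node busy for as long as it processes replies.
 */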
1696 
1697 static int nbd_co_flush(BlockDriverState *bs)
1698 {
1699     return nbd_client_co_flush(bs);
1700 }
1701 
1702 static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
1703 {
1704     BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1705     uint32_t min = s->info.min_block;
1706     uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);
1707 
1708     /*
1709      * If the server did not advertise an alignment:
1710      * - a size that is not sector-aligned implies that an alignment
1711      *   of 1 can be used to access those tail bytes
1712      * - advertisement of block status requires an alignment of 1, so
1713      *   that we don't violate block layer constraints that block
1714      *   status is always aligned (as we can't control whether the
1715      *   server will report sub-sector extents, such as a hole at EOF
1716      *   on an unaligned POSIX file)
1717      * - otherwise, assume the server is so old that we are safer avoiding
1718      *   sub-sector requests
1719      */
1720     if (!min) {
1721         min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
1722                s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
1723     }
1724 
1725     bs->bl.request_alignment = min;
1726     bs->bl.max_pdiscard = max;
1727     bs->bl.max_pwrite_zeroes = max;
1728     bs->bl.max_transfer = max;
1729 
1730     if (s->info.opt_block &&
1731         s->info.opt_block > bs->bl.opt_transfer) {
1732         bs->bl.opt_transfer = s->info.opt_block;
1733     }
1734 }
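/*
 * Worked example with made-up numbers: a server that advertises no
 * min_block but reports size == 1000 has an unaligned tail, so min
 * becomes 1 and the tail bytes stay reachable; the same server with a
 * 512-aligned size and no base:allocation support would instead get
 * min == BDRV_SECTOR_SIZE, avoiding sub-sector requests that a server
 * this old might mishandle.  In all cases the transfer limits are capped
 * at NBD_MAX_BUFFER_SIZE, even if the server advertises a larger
 * max_block.
 */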
1735 
1736 static void nbd_close(BlockDriverState *bs)
1737 {
1738     BDRVNBDState *s = bs->opaque;
1739 
1740     nbd_client_close(bs);
1741 
1742     object_unref(OBJECT(s->tlscreds));
1743     qapi_free_SocketAddress(s->saddr);
1744     g_free(s->export);
1745     g_free(s->tlscredsid);
1746     g_free(s->x_dirty_bitmap);
1747 }
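/*
 * Ordering note: nbd_client_close() above is expected to shut the channel
 * down and wait for connection_co to finish before returning, so by the
 * time the fields below are freed nothing can still be referencing them.
 */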
1748 
1749 static int64_t nbd_getlength(BlockDriverState *bs)
1750 {
1751     BDRVNBDState *s = bs->opaque;
1752 
1753     return s->info.size;
1754 }
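/*
 * The size reported here is whatever the server advertised during
 * negotiation (s->info is filled in at connect time), so a resize on the
 * server side is not reflected by this revision of the driver.
 */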
1755 
1756 static void nbd_refresh_filename(BlockDriverState *bs)
1757 {
1758     BDRVNBDState *s = bs->opaque;
1759     const char *host = NULL, *port = NULL, *path = NULL;
1760 
1761     if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
1762         const InetSocketAddress *inet = &s->saddr->u.inet;
1763         if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
1764             host = inet->host;
1765             port = inet->port;
1766         }
1767     } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
1768         path = s->saddr->u.q_unix.path;
1769     } /* else can't represent as pseudo-filename */
1770 
1771     if (path && s->export) {
1772         snprintf(bs->exact_filename, sizeof(bs->exact_filename),
1773                  "nbd+unix:///%s?socket=%s", s->export, path);
1774     } else if (path && !s->export) {
1775         snprintf(bs->exact_filename, sizeof(bs->exact_filename),
1776                  "nbd+unix://?socket=%s", path);
1777     } else if (host && s->export) {
1778         snprintf(bs->exact_filename, sizeof(bs->exact_filename),
1779                  "nbd://%s:%s/%s", host, port, s->export);
1780     } else if (host && !s->export) {
1781         snprintf(bs->exact_filename, sizeof(bs->exact_filename),
1782                  "nbd://%s:%s", host, port);
1783     }
1784 }
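/*
 * For example (socket path and export name invented): a connection over
 * /tmp/nbd.sock to export "disk0" is rendered as
 *   nbd+unix:///disk0?socket=/tmp/nbd.sock
 * while a plain TCP connection to localhost:10809 with no export name
 * becomes
 *   nbd://localhost:10809
 */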
1785 
1786 static char *nbd_dirname(BlockDriverState *bs, Error **errp)
1787 {
1788     /* The generic bdrv_dirname() implementation could derive some directory
1789      * name for NBD nodes, but it would be meaningless: there is no
1790      * specification for how "export paths" would work, so NBD does not have
1791      * directory names. */
1792     error_setg(errp, "Cannot generate a base directory for NBD nodes");
1793     return NULL;
1794 }
1795 
1796 static const char *const nbd_strong_runtime_opts[] = {
1797     "path",
1798     "host",
1799     "port",
1800     "export",
1801     "tls-creds",
1802     "server.",
1803 
1804     NULL
1805 };
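/*
 * An entry ending in '.' (here "server.") is treated as a key prefix by
 * the generic block layer, so it covers server.type, server.host,
 * server.port and friends with a single entry.
 */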
1806 
1807 static BlockDriver bdrv_nbd = {
1808     .format_name                = "nbd",
1809     .protocol_name              = "nbd",
1810     .instance_size              = sizeof(BDRVNBDState),
1811     .bdrv_parse_filename        = nbd_parse_filename,
1812     .bdrv_file_open             = nbd_open,
1813     .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
1814     .bdrv_co_preadv             = nbd_client_co_preadv,
1815     .bdrv_co_pwritev            = nbd_client_co_pwritev,
1816     .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
1817     .bdrv_close                 = nbd_close,
1818     .bdrv_co_flush_to_os        = nbd_co_flush,
1819     .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
1820     .bdrv_refresh_limits        = nbd_refresh_limits,
1821     .bdrv_getlength             = nbd_getlength,
1822     .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
1823     .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
1824     .bdrv_refresh_filename      = nbd_refresh_filename,
1825     .bdrv_co_block_status       = nbd_client_co_block_status,
1826     .bdrv_dirname               = nbd_dirname,
1827     .strong_runtime_opts        = nbd_strong_runtime_opts,
1828 };
1829 
1830 static BlockDriver bdrv_nbd_tcp = {
1831     .format_name                = "nbd",
1832     .protocol_name              = "nbd+tcp",
1833     .instance_size              = sizeof(BDRVNBDState),
1834     .bdrv_parse_filename        = nbd_parse_filename,
1835     .bdrv_file_open             = nbd_open,
1836     .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
1837     .bdrv_co_preadv             = nbd_client_co_preadv,
1838     .bdrv_co_pwritev            = nbd_client_co_pwritev,
1839     .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
1840     .bdrv_close                 = nbd_close,
1841     .bdrv_co_flush_to_os        = nbd_co_flush,
1842     .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
1843     .bdrv_refresh_limits        = nbd_refresh_limits,
1844     .bdrv_getlength             = nbd_getlength,
1845     .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
1846     .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
1847     .bdrv_refresh_filename      = nbd_refresh_filename,
1848     .bdrv_co_block_status       = nbd_client_co_block_status,
1849     .bdrv_dirname               = nbd_dirname,
1850     .strong_runtime_opts        = nbd_strong_runtime_opts,
1851 };
1852 
1853 static BlockDriver bdrv_nbd_unix = {
1854     .format_name                = "nbd",
1855     .protocol_name              = "nbd+unix",
1856     .instance_size              = sizeof(BDRVNBDState),
1857     .bdrv_parse_filename        = nbd_parse_filename,
1858     .bdrv_file_open             = nbd_open,
1859     .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
1860     .bdrv_co_preadv             = nbd_client_co_preadv,
1861     .bdrv_co_pwritev            = nbd_client_co_pwritev,
1862     .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
1863     .bdrv_close                 = nbd_close,
1864     .bdrv_co_flush_to_os        = nbd_co_flush,
1865     .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
1866     .bdrv_refresh_limits        = nbd_refresh_limits,
1867     .bdrv_getlength             = nbd_getlength,
1868     .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
1869     .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
1870     .bdrv_refresh_filename      = nbd_refresh_filename,
1871     .bdrv_co_block_status       = nbd_client_co_block_status,
1872     .bdrv_dirname               = nbd_dirname,
1873     .strong_runtime_opts        = nbd_strong_runtime_opts,
1874 };
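/*
 * The three driver tables above are identical except for protocol_name;
 * registering all of them simply lets the block layer pick this driver
 * for any of the nbd://, nbd+tcp:// and nbd+unix:// pseudo-filename
 * schemes.
 */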
1875 
1876 static void bdrv_nbd_init(void)
1877 {
1878     bdrv_register(&bdrv_nbd);
1879     bdrv_register(&bdrv_nbd_tcp);
1880     bdrv_register(&bdrv_nbd_unix);
1881 }
1882 
1883 block_init(bdrv_nbd_init);
1884