xref: /openbmc/qemu/block/gluster.c (revision a5829ccf)
/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * Pipe handling mechanism in AIO implementation is derived from
 * block/rbd.c. Hence,
 *
 * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
 *                         Josh Durgin <josh.durgin@dreamhost.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
#include "qemu/sockets.h"
#include "qemu/uri.h"

typedef struct GlusterAIOCB {
    int64_t size;
    int ret;
    QEMUBH *bh;
    Coroutine *coroutine;
} GlusterAIOCB;

typedef struct BDRVGlusterState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterState;

#define GLUSTER_FD_READ  0
#define GLUSTER_FD_WRITE 1

typedef struct GlusterConf {
    char *server;
    int port;
    char *volname;
    char *image;
    char *transport;
} GlusterConf;

static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
    g_free(gconf->server);
    g_free(gconf->volname);
    g_free(gconf->image);
    g_free(gconf->transport);
    g_free(gconf);
}

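/*
 * Split the URI path ("[/]volname/image") into gconf->volname and
 * gconf->image. Returns -EINVAL if either component is missing.
 */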
static int parse_volume_options(GlusterConf *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volname = g_strndup(q, p - q);

    /* image */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->image = g_strdup(p);
    return 0;
}

/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to the gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, tcp is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be a hostname, an IPv4 address or
 * an IPv6 address; an IPv6 address must be enclosed in square brackets [ ].
 * If the transport type is 'unix', the 'server' field should not be
 * specified. Instead, the 'socket' field must be populated with the path
 * to the unix domain socket.
 *
 * 'port' is the port number on which glusterd is listening. It is optional;
 * if not specified, QEMU sends 0, which makes gluster use its default port.
 * If the transport type is unix, 'port' should not be specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on the gluster
 * volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server);
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

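/*
 * Create and initialize a glfs object for the volume named in @gconf:
 * parse @filename into @gconf, point the object at the volfile server,
 * set up logging and call glfs_init() to connect. Returns the glfs
 * object on success, or NULL on failure (callers rely on errno).
 */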
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_report("Usage: file=gluster[+transport]://[server[:port]]/"
            "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
            gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of the hard-coded value 4 here, once
     * GlusterFS makes the GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_report("Gluster connection failed for server=%s port=%d "
             "volume=%s image=%s transport=%s", gconf->server, gconf->port,
             gconf->volname, gconf->image, gconf->transport);
        goto out;
    }
    return glfs;

out:
    if (glfs) {
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}

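/*
 * Bottom half scheduled from gluster_finish_aiocb(): runs in the QEMU
 * context and re-enters the coroutine that issued the request.
 */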
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}

/*
 * AIO callback routine called from GlusterFS thread.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = ret; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

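    /*
     * This callback runs in a gluster thread, so defer the coroutine
     * re-entry to a bottom half that executes in the QEMU context.
     */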
    acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}

/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};

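/*
 * Open an image on a gluster volume: absorb the "filename" option,
 * connect to the volume with qemu_gluster_init() and open the image
 * with flags derived from the BDRV_O_* flags.
 */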
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = O_BINARY;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (error_is_set(&local_err)) {
        qerror_report_err(local_err);
        error_free(local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

    if (bdrv_flags & BDRV_O_RDWR) {
        open_flags |= O_RDWR;
    } else {
        open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        open_flags |= O_DIRECT;
    }

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}

#ifdef CONFIG_GLUSTERFS_ZEROFILL
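/*
 * Implement bdrv_co_write_zeroes using glfs_zerofill_async(): issue the
 * request and yield until gluster_finish_aiocb() re-enters the coroutine.
 */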
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif

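/*
 * Create a new image on a gluster volume: parse the size and preallocation
 * options, create and truncate the image, and zero-fill it when "full"
 * preallocation was requested and the zerofill API is available.
 */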
static int qemu_gluster_create(const char *filename,
        QEMUOptionParameter *options, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "full") &&
                    gluster_supports_zerofill()) {
                prealloc = 1;
            } else {
                error_setg(errp, "Invalid preallocation mode: '%s'"
                    " or GlusterFS doesn't support zerofill API",
                           options->value.s);
                ret = -EINVAL;
                goto out;
            }
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                    total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}

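/*
 * Common coroutine read/write path: issue glfs_preadv_async() or
 * glfs_pwritev_async() and yield until gluster_finish_aiocb() schedules
 * the completion BH that re-enters this coroutine.
 */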
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
{
    int ret;
    BDRVGlusterState *s = bs->opaque;

    ret = glfs_ftruncate(s->fd, offset);
    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}

static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}

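/*
 * Flush to disk via glfs_fsync_async(), yielding until the completion
 * callback re-enters the coroutine.
 */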
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

#ifdef CONFIG_GLUSTERFS_DISCARD
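/*
 * Discard (unmap) a byte range via glfs_discard_async(), following the
 * same async-and-yield pattern as the other coroutine helpers.
 */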
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
#endif

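/* Report the image length by seeking to the end of the gluster fd. */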
static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}

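/* Report the allocated size from st_blocks (counted in 512-byte units). */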
static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}

static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}

static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /*
     * A GlusterFS volume could be backed by a block device, so a freshly
     * created image is not guaranteed to read back as zeroes.
     */
    return 0;
}

static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, full)"
    },
    { NULL }
};

static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};

static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);