/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * Pipe handling mechanism in AIO implementation is derived from
 * block/rbd.c. Hence,
 *
 * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
 *                         Josh Durgin <josh.durgin@dreamhost.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
#include "qemu/sockets.h"
#include "qemu/uri.h"

typedef struct GlusterAIOCB {
    int64_t size;
    int ret;
    QEMUBH *bh;
    Coroutine *coroutine;
} GlusterAIOCB;

typedef struct BDRVGlusterState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterState;

#define GLUSTER_FD_READ  0
#define GLUSTER_FD_WRITE 1

typedef struct GlusterConf {
    char *server;
    int port;
    char *volname;
    char *image;
    char *transport;
} GlusterConf;

static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
    g_free(gconf->server);
    g_free(gconf->volname);
    g_free(gconf->image);
    g_free(gconf->transport);
    g_free(gconf);
}

static int parse_volume_options(GlusterConf *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volname = g_strndup(q, p - q);

    /* image */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->image = g_strdup(p);
    return 0;
}

/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to the gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, then tcp
 * type is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be either a hostname, an IPv4 address
 * or an IPv6 address. An IPv6 address needs to be within square brackets [ ].
 * If the transport type is 'unix', then the 'server' field should not be
 * specified. Instead, the 'socket' field needs to be populated with the path
 * to the unix domain socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0, which makes gluster use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on the gluster
 * volume.
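 *
 * These elements are parsed by qemu_gluster_parseuri() into a GlusterConf,
 * which qemu_gluster_init() then uses to establish the libgfapi connection.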
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
                                      Error **errp)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
                   "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
                                  gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of the hard-coded value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg_errno(errp, errno,
                         "Gluster connection failed for server=%s port=%d "
                         "volume=%s image=%s transport=%s", gconf->server,
                         gconf->port, gconf->volname, gconf->image,
                         gconf->transport);
        goto out;
    }
    return glfs;

out:
    if (glfs) {
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}

static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}

/*
 * AIO callback routine called from GlusterFS thread.
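 * The callback only records the result of the request; completion is
 * deferred to a QEMU bottom half so that the waiting coroutine is
 * re-entered from the main loop rather than from the gluster thread.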
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = ret; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}

/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};

static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = O_BINARY;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

    if (bdrv_flags & BDRV_O_RDWR) {
        open_flags |= O_RDWR;
    } else {
        open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        open_flags |= O_DIRECT;
    }

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}

#ifdef CONFIG_GLUSTERFS_ZEROFILL
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif

static int qemu_gluster_create(const char *filename,
                               QEMUOptionParameter *options, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

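    /*
     * qemu_gluster_init() parses the gluster URI in 'filename' into
     * 'gconf' and returns a connected glfs object, or NULL on failure.
     */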
    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -EINVAL;
        goto out;
    }

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "full") &&
                       gluster_supports_zerofill()) {
                prealloc = 1;
            } else {
                error_setg(errp, "Invalid preallocation mode: '%s'"
                           " or GlusterFS doesn't support zerofill API",
                           options->value.s);
                ret = -EINVAL;
                goto out;
            }
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
                    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                    total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}

/*
 * Common read/write handler: submit the request through libgfapi's async
 * API, then yield until the bottom half scheduled by gluster_finish_aiocb()
 * re-enters the coroutine with the result in acb->ret.
 */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
{
    int ret;
    BDRVGlusterState *s = bs->opaque;

    ret = glfs_ftruncate(s->fd, offset);
    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}

static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}

static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

#ifdef CONFIG_GLUSTERFS_DISCARD
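/*
 * Discard/unmap requests map onto glfs_discard_async(), using the same
 * submit-then-yield pattern as the read/write path above.
 */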
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
#endif

static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}

static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}

static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}

static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}

static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, full)"
    },
    { NULL }
};

static BlockDriver bdrv_gluster = {
    .format_name = "gluster",
    .protocol_name = "gluster",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
    .create_options = qemu_gluster_create_options,
};
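
/*
 * The gluster+tcp, gluster+unix and gluster+rdma drivers below are
 * identical to bdrv_gluster except for their .protocol_name; the actual
 * transport is selected in qemu_gluster_parseuri() from the URI scheme.
 */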
static BlockDriver bdrv_gluster_tcp = {
    .format_name = "gluster",
    .protocol_name = "gluster+tcp",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
    .create_options = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_unix = {
    .format_name = "gluster",
    .protocol_name = "gluster+unix",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
    .create_options = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_rdma = {
    .format_name = "gluster",
    .protocol_name = "gluster+rdma",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
    .create_options = qemu_gluster_create_options,
};

static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);