/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
#include "qemu/uri.h"

/*
 * Per-request state shared between the coroutine that issues an async
 * gluster call and the gluster callback thread that completes it
 * (see gluster_finish_aiocb / qemu_gluster_complete_aio).
 */
typedef struct GlusterAIOCB {
    int64_t size;           /* expected byte count; used to detect short I/O */
    int ret;                /* final result: 0 on success, -errno on failure */
    QEMUBH *bh;             /* bottom half that re-enters the coroutine */
    Coroutine *coroutine;   /* coroutine parked in qemu_coroutine_yield() */
    AioContext *aio_context;/* context the completion BH is scheduled in */
} GlusterAIOCB;

/* Per-BlockDriverState state: one gluster connection plus one open fd. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterState;

/* Parsed form of a gluster[+transport]:// URI; all strings are g_malloc'd. */
typedef struct GlusterConf {
    char *server;
    int port;
    char *volname;
    char *image;
    char *transport;
} GlusterConf;

/*
 * Free a GlusterConf and all of its owned strings.  NULL-safe, and safe on
 * a partially-filled struct since g_free(NULL) is a no-op.
 */
static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
    if (gconf) {
        g_free(gconf->server);
        g_free(gconf->volname);
        g_free(gconf->image);
        g_free(gconf->transport);
        g_free(gconf);
    }
}

/*
 * Split the URI path "/volname/image..." into gconf->volname and
 * gconf->image (both freshly allocated).  The image part may itself
 * contain '/' (e.g. "dir/a.img").  Returns 0 on success, -EINVAL if
 * either component is missing.
 */
static int parse_volume_options(GlusterConf *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume: first component between leading '/'s and the next '/' */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volname = g_strndup(q, p - q);

    /* image: everything after the volume name's trailing '/'s */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->image = g_strdup(p);
    return 0;
}

/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, then tcp
 * type is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be either hostname, ipv4 address
 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
 * If transport type is 'unix', then 'server' field should not be specified.
 * The 'socket' field needs to be populated with the path to unix domain
 * socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0 which will make gluster to use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on gluster volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
/*
 * Parse 'filename' (format documented above) into 'gconf'.
 * Returns 0 on success, -EINVAL on any malformed input.  On failure the
 * caller is expected to free gconf via qemu_gluster_gconf_free(), which
 * handles partially-populated structs.
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /*
     * Exactly one query parameter ("socket") is required for unix
     * transport, and none are allowed otherwise.
     */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* server/port belong to inet transports; reject them here */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        /* for unix transport, 'server' carries the socket path */
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        /* port 0 makes gluster pick its default (see comment above) */
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

/*
 * Parse 'filename', create a glfs connection per 'gconf' and bring it up.
 * Returns the connected glfs object, or NULL on failure with errno set
 * (callers derive their return code from errno) and *errp populated for
 * the parse/connect cases.
 */
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
                                      Error **errp)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
                   "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
                                  gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg_errno(errp, errno,
                         "Gluster connection failed for server=%s port=%d "
                         "volume=%s image=%s transport=%s", gconf->server,
                         gconf->port, gconf->volname, gconf->image,
                         gconf->transport);

        /* glfs_init sometimes doesn't set errno although docs suggest that */
        if (errno == 0)
            errno = EINVAL;

        goto out;
    }
    return glfs;

out:
    if (glfs) {
        /* preserve the failure's errno across glfs_fini() */
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}

/*
 * BH handler, run in the request's AioContext: tear down the BH and
 * re-enter the coroutine that is waiting in qemu_coroutine_yield().
 */
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}

/*
 * AIO callback routine called from GlusterFS thread.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = ret; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    /*
     * We are not in the QEMU thread here; hand completion to the
     * request's AioContext via a bottom half.
     */
    acb->bh = aio_bh_new(acb->aio_context, qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}

/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};

/*
 * Translate BDRV_O_* open flags into the O_* flags passed to
 * glfs_open()/glfs_creat().  *open_flags is OR'd into, not reset.
 */
static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
{
    assert(open_flags != NULL);

    *open_flags |= O_BINARY;

    if (bdrv_flags & BDRV_O_RDWR) {
        *open_flags |= O_RDWR;
    } else {
        *open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        *open_flags |= O_DIRECT;
    }
}

/*
 * .bdrv_file_open: establish the gluster connection and open the image.
 * On any failure, everything acquired so far (opts, gconf, fd, glfs) is
 * released and a negative errno is returned.
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        /* qemu_gluster_init() reports failure via errno */
        ret = -errno;
        goto out;
    }

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* error path: undo whatever was set up before the failure */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}

/*
 * Staging area for reopen: the new connection/fd live here until
 * _commit swaps them in or _abort throws them away.
 */
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;


/*
 * .bdrv_reopen_prepare: open a second connection + fd with the new flags
 * without disturbing the current ones.  Returns 0 on success or a
 * negative errno; partial state is left in state->opaque for _abort.
 */
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_malloc0(sizeof(BDRVGlusterReopenState));
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_malloc0(sizeof(GlusterConf));

    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* reops->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qemu_gluster_gconf_free(gconf);
    return ret;
}

/*
 * .bdrv_reopen_commit: the new fd/connection are good — close the old
 * ones and adopt the new.  Must not fail.
 */
static void qemu_gluster_reopen_commit(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;
    BDRVGlusterState *s = state->bs->opaque;


    /* close the old */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }

    /* use the newly opened image / connection */
    s->fd = reop_s->fd;
    s->glfs = reop_s->glfs;

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}


/*
 * .bdrv_reopen_abort: discard whatever _prepare managed to open,
 * leaving the original fd/connection untouched.
 */
static void qemu_gluster_reopen_abort(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;

    if (reop_s == NULL) {
        return;
    }

    if (reop_s->fd) {
        glfs_close(reop_s->fd);
    }

    if (reop_s->glfs) {
        glfs_fini(reop_s->glfs);
    }

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}

#ifdef CONFIG_GLUSTERFS_ZEROFILL
/*
 * .bdrv_co_write_zeroes: asynchronously zero a sector range via
 * glfs_zerofill_async(), yielding until gluster_finish_aiocb completes
 * the request.  Returns 0 or a negative errno.
 */
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    /* sleep until gluster_finish_aiocb's BH re-enters us */
    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

/* True when libgfapi was built with zerofill support (compile-time). */
static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

/* Synchronous zerofill used by image creation preallocation. */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
/* Zerofill unavailable in this libgfapi build. */
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

/* Stub: never called because gluster_supports_zerofill() returns 0. */
static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif

/*
 * .bdrv_create: create and size a new image on the gluster volume.
 * Supports BLOCK_OPT_SIZE and BLOCK_OPT_PREALLOC ("off" or, when the
 * zerofill API is available, "full").  Returns 0 or a negative errno.
 */
static int qemu_gluster_create(const char *filename,
        QEMUOptionParameter *options, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;   /* in sectors */
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "full") &&
                    gluster_supports_zerofill()) {
                prealloc = 1;
            } else {
                error_setg(errp, "Invalid preallocation mode: '%s'"
                    " or GlusterFS doesn't support zerofill API",
                           options->value.s);
                ret = -EINVAL;
                goto out;
            }
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        /* glfs_ftruncate() returns 0 on success */
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                    total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        /* close may surface a deferred write error; don't mask it */
        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
/*
 * Common read/write path: issue glfs_preadv_async/glfs_pwritev_async and
 * yield until gluster_finish_aiocb schedules our completion BH.
 * 'write' selects the direction.  Returns 0 on success (full transfer,
 * verified against acb->size by the callback) or a negative errno.
 */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        /* submission itself failed; the callback will never run */
        ret = -errno;
        goto out;
    }

    /* sleep until gluster_finish_aiocb's BH re-enters us */
    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

/* .bdrv_truncate: synchronous resize via glfs_ftruncate(). */
static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
{
    int ret;
    BDRVGlusterState *s = bs->opaque;

    ret = glfs_ftruncate(s->fd, offset);
    if (ret < 0) {
        return -errno;
    }

    return 0;
}

/* .bdrv_co_readv: thin wrapper around qemu_gluster_co_rw(write=0). */
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}

/* .bdrv_co_writev: thin wrapper around qemu_gluster_co_rw(write=1). */
static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}

/*
 * .bdrv_co_flush_to_disk: asynchronous fsync, same yield/BH completion
 * pattern as qemu_gluster_co_rw.  acb->size is 0 so any non-negative
 * callback result counts as success.
 */
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

#ifdef CONFIG_GLUSTERFS_DISCARD
/*
 * .bdrv_co_discard: asynchronous discard via glfs_discard_async(),
 * using the common yield/BH completion pattern.
 */
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();
    acb->aio_context = bdrv_get_aio_context(bs);

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
#endif

/*
 * .bdrv_getlength: image size in bytes, obtained by seeking to the end.
 * Returns a negative errno on failure.
 */
static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}

/*
 * .bdrv_get_allocated_file_size: bytes actually allocated on the volume,
 * computed from st_blocks (which counts 512-byte units).
 */
static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}

/* .bdrv_close: close the fd (if open) and tear down the connection. */
static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}

static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}

/* Creation options accepted by qemu_gluster_create(). */
static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, full)"
    },
    { NULL }
};

/*
 * Four driver tables follow, identical except for .protocol_name: one
 * per URI scheme (gluster, gluster+tcp, gluster+unix, gluster+rdma).
 * Keep them in sync when adding callbacks.
 */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};

/* Register all four scheme variants with the block layer. */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);