/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
#include "qemu/uri.h"

typedef struct GlusterAIOCB {
    int64_t size;
    int ret;
    QEMUBH *bh;
    Coroutine *coroutine;
} GlusterAIOCB;

typedef struct BDRVGlusterState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterState;

typedef struct GlusterConf {
    char *server;
    int port;
    char *volname;
    char *image;
    char *transport;
} GlusterConf;

static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
    if (gconf) {
        g_free(gconf->server);
        g_free(gconf->volname);
        g_free(gconf->image);
        g_free(gconf->transport);
        g_free(gconf);
    }
}

static int parse_volume_options(GlusterConf *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volname = g_strndup(q, p - q);

    /* image */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->image = g_strdup(p);
    return 0;
}

/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to the gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, then tcp
 * is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be either a hostname, an IPv4 address
 * or an IPv6 address. An IPv6 address needs to be within square brackets [ ].
 * If the transport type is 'unix', then the 'server' field should not be
 * specified. Instead, the 'socket' field needs to be populated with the path
 * to the unix domain socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0, which makes gluster use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on the gluster
 * volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
                                      Error **errp)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
                   "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
                                  gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg_errno(errp, errno,
                         "Gluster connection failed for server=%s port=%d "
                         "volume=%s image=%s transport=%s", gconf->server,
                         gconf->port, gconf->volname, gconf->image,
                         gconf->transport);
        goto out;
    }
    return glfs;

out:
    if (glfs) {
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}

static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}

/*
 * AIO callback routine called from GlusterFS thread.
234 */ 235 static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) 236 { 237 GlusterAIOCB *acb = (GlusterAIOCB *)arg; 238 239 if (!ret || ret == acb->size) { 240 acb->ret = 0; /* Success */ 241 } else if (ret < 0) { 242 acb->ret = ret; /* Read/Write failed */ 243 } else { 244 acb->ret = -EIO; /* Partial read/write - fail it */ 245 } 246 247 acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb); 248 qemu_bh_schedule(acb->bh); 249 } 250 251 /* TODO Convert to fine grained options */ 252 static QemuOptsList runtime_opts = { 253 .name = "gluster", 254 .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head), 255 .desc = { 256 { 257 .name = "filename", 258 .type = QEMU_OPT_STRING, 259 .help = "URL to the gluster image", 260 }, 261 { /* end of list */ } 262 }, 263 }; 264 265 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags) 266 { 267 assert(open_flags != NULL); 268 269 *open_flags |= O_BINARY; 270 271 if (bdrv_flags & BDRV_O_RDWR) { 272 *open_flags |= O_RDWR; 273 } else { 274 *open_flags |= O_RDONLY; 275 } 276 277 if ((bdrv_flags & BDRV_O_NOCACHE)) { 278 *open_flags |= O_DIRECT; 279 } 280 } 281 282 static int qemu_gluster_open(BlockDriverState *bs, QDict *options, 283 int bdrv_flags, Error **errp) 284 { 285 BDRVGlusterState *s = bs->opaque; 286 int open_flags = 0; 287 int ret = 0; 288 GlusterConf *gconf = g_malloc0(sizeof(GlusterConf)); 289 QemuOpts *opts; 290 Error *local_err = NULL; 291 const char *filename; 292 293 opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); 294 qemu_opts_absorb_qdict(opts, options, &local_err); 295 if (local_err) { 296 error_propagate(errp, local_err); 297 ret = -EINVAL; 298 goto out; 299 } 300 301 filename = qemu_opt_get(opts, "filename"); 302 303 s->glfs = qemu_gluster_init(gconf, filename, errp); 304 if (!s->glfs) { 305 ret = -errno; 306 goto out; 307 } 308 309 qemu_gluster_parse_flags(bdrv_flags, &open_flags); 310 311 s->fd = glfs_open(s->glfs, gconf->image, open_flags); 312 if (!s->fd) { 313 ret = -errno; 314 } 315 316 out: 317 qemu_opts_del(opts); 318 qemu_gluster_gconf_free(gconf); 319 if (!ret) { 320 return ret; 321 } 322 if (s->fd) { 323 glfs_close(s->fd); 324 } 325 if (s->glfs) { 326 glfs_fini(s->glfs); 327 } 328 return ret; 329 } 330 331 typedef struct BDRVGlusterReopenState { 332 struct glfs *glfs; 333 struct glfs_fd *fd; 334 } BDRVGlusterReopenState; 335 336 337 static int qemu_gluster_reopen_prepare(BDRVReopenState *state, 338 BlockReopenQueue *queue, Error **errp) 339 { 340 int ret = 0; 341 BDRVGlusterReopenState *reop_s; 342 GlusterConf *gconf = NULL; 343 int open_flags = 0; 344 345 assert(state != NULL); 346 assert(state->bs != NULL); 347 348 state->opaque = g_malloc0(sizeof(BDRVGlusterReopenState)); 349 reop_s = state->opaque; 350 351 qemu_gluster_parse_flags(state->flags, &open_flags); 352 353 gconf = g_malloc0(sizeof(GlusterConf)); 354 355 reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp); 356 if (reop_s->glfs == NULL) { 357 ret = -errno; 358 goto exit; 359 } 360 361 reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags); 362 if (reop_s->fd == NULL) { 363 /* reops->glfs will be cleaned up in _abort */ 364 ret = -errno; 365 goto exit; 366 } 367 368 exit: 369 /* state->opaque will be freed in either the _abort or _commit */ 370 qemu_gluster_gconf_free(gconf); 371 return ret; 372 } 373 374 static void qemu_gluster_reopen_commit(BDRVReopenState *state) 375 { 376 BDRVGlusterReopenState *reop_s = state->opaque; 377 BDRVGlusterState *s = state->bs->opaque; 378 379 380 /* close the old 
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }

    /* use the newly opened image / connection */
    s->fd = reop_s->fd;
    s->glfs = reop_s->glfs;

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}


static void qemu_gluster_reopen_abort(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;

    if (reop_s == NULL) {
        return;
    }

    if (reop_s->fd) {
        glfs_close(reop_s->fd);
    }

    if (reop_s->glfs) {
        glfs_fini(reop_s->glfs);
    }

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}

#ifdef CONFIG_GLUSTERFS_ZEROFILL
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif

static int qemu_gluster_create(const char *filename,
        QEMUOptionParameter *options, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -EINVAL;
        goto out;
    }

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "full") &&
                       gluster_supports_zerofill()) {
                prealloc = 1;
            } else {
                error_setg(errp, "Invalid preallocation mode: '%s'"
                           " or GlusterFS doesn't support zerofill API",
                           options->value.s);
                ret = -EINVAL;
                goto out;
            }
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
                    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY,
                    S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                    total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}

static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
{
    int ret;
    BDRVGlusterState *s = bs->opaque;

    ret = glfs_ftruncate(s->fd, offset);
    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}

static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}

static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

#ifdef CONFIG_GLUSTERFS_DISCARD
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
#endif

static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}

static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}

static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}

static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}

static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, full)"
    },
    { NULL }
};

static BlockDriver bdrv_gluster = {
    .format_name = "gluster",
    .protocol_name = "gluster",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort = qemu_gluster_reopen_abort,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
    .create_options = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_tcp = {
    .format_name = "gluster",
    .protocol_name = "gluster+tcp",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort = qemu_gluster_reopen_abort,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
    .create_options = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_unix = {
    .format_name = "gluster",
    .protocol_name = "gluster+unix",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort = qemu_gluster_reopen_abort,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
    .create_options = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_rdma = {
    .format_name = "gluster",
    .protocol_name = "gluster+rdma",
    .instance_size = sizeof(BDRVGlusterState),
    .bdrv_needs_filename = true,
    .bdrv_file_open = qemu_gluster_open,
    .bdrv_reopen_prepare = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort = qemu_gluster_reopen_abort,
    .bdrv_close = qemu_gluster_close,
    .bdrv_create = qemu_gluster_create,
    .bdrv_getlength = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate = qemu_gluster_truncate,
    .bdrv_co_readv = qemu_gluster_co_readv,
    .bdrv_co_writev = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes = qemu_gluster_co_write_zeroes,
#endif
    .create_options = qemu_gluster_create_options,
};

static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);
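
/*
 * Usage sketch (illustrative only, not part of the driver): with this
 * protocol driver compiled in, a raw image on a gluster volume can be
 * created and attached through the URI syntax documented above. The host,
 * volume and image names below are placeholders taken from the examples
 * in that comment.
 *
 *   qemu-img create -f raw gluster://1.2.3.4/testvol/a.img 10G
 *   qemu-system-x86_64 -drive file=gluster://1.2.3.4/testvol/a.img,if=virtio
 *
 * When built with CONFIG_GLUSTERFS_ZEROFILL, full preallocation can be
 * requested at create time via the "preallocation" create option handled
 * in qemu_gluster_create():
 *
 *   qemu-img create -f raw -o preallocation=full \
 *       gluster://1.2.3.4/testvol/a.img 10G
 */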