1 2 /* 3 rbd.c -- Export ceph rados objects as a Linux block device 4 5 6 based on drivers/block/osdblk.c: 7 8 Copyright 2009 Red Hat, Inc. 9 10 This program is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation. 13 14 This program is distributed in the hope that it will be useful, 15 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 GNU General Public License for more details. 18 19 You should have received a copy of the GNU General Public License 20 along with this program; see the file COPYING. If not, write to 21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 22 23 24 25 For usage instructions, please refer to: 26 27 Documentation/ABI/testing/sysfs-bus-rbd 28 29 */ 30 31 #include <linux/ceph/libceph.h> 32 #include <linux/ceph/osd_client.h> 33 #include <linux/ceph/mon_client.h> 34 #include <linux/ceph/cls_lock_client.h> 35 #include <linux/ceph/decode.h> 36 #include <linux/parser.h> 37 #include <linux/bsearch.h> 38 39 #include <linux/kernel.h> 40 #include <linux/device.h> 41 #include <linux/module.h> 42 #include <linux/blk-mq.h> 43 #include <linux/fs.h> 44 #include <linux/blkdev.h> 45 #include <linux/slab.h> 46 #include <linux/idr.h> 47 #include <linux/workqueue.h> 48 49 #include "rbd_types.h" 50 51 #define RBD_DEBUG /* Activate rbd_assert() calls */ 52 53 /* 54 * Increment the given counter and return its updated value. 55 * If the counter is already 0 it will not be incremented. 56 * If the counter is already at its maximum value returns 57 * -EINVAL without updating it. 58 */ 59 static int atomic_inc_return_safe(atomic_t *v) 60 { 61 unsigned int counter; 62 63 counter = (unsigned int)__atomic_add_unless(v, 1, 0); 64 if (counter <= (unsigned int)INT_MAX) 65 return (int)counter; 66 67 atomic_dec(v); 68 69 return -EINVAL; 70 } 71 72 /* Decrement the counter. Return the resulting value, or -EINVAL */ 73 static int atomic_dec_return_safe(atomic_t *v) 74 { 75 int counter; 76 77 counter = atomic_dec_return(v); 78 if (counter >= 0) 79 return counter; 80 81 atomic_inc(v); 82 83 return -EINVAL; 84 } 85 86 #define RBD_DRV_NAME "rbd" 87 88 #define RBD_MINORS_PER_MAJOR 256 89 #define RBD_SINGLE_MAJOR_PART_SHIFT 4 90 91 #define RBD_MAX_PARENT_CHAIN_LEN 16 92 93 #define RBD_SNAP_DEV_NAME_PREFIX "snap_" 94 #define RBD_MAX_SNAP_NAME_LEN \ 95 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1)) 96 97 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */ 98 99 #define RBD_SNAP_HEAD_NAME "-" 100 101 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */ 102 103 /* This allows a single page to hold an image name sent by OSD */ 104 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1) 105 #define RBD_IMAGE_ID_LEN_MAX 64 106 107 #define RBD_OBJ_PREFIX_LEN_MAX 64 108 109 #define RBD_NOTIFY_TIMEOUT 5 /* seconds */ 110 #define RBD_RETRY_DELAY msecs_to_jiffies(1000) 111 112 /* Feature bits */ 113 114 #define RBD_FEATURE_LAYERING (1ULL<<0) 115 #define RBD_FEATURE_STRIPINGV2 (1ULL<<1) 116 #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) 117 #define RBD_FEATURE_DATA_POOL (1ULL<<7) 118 #define RBD_FEATURE_OPERATIONS (1ULL<<8) 119 120 #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ 121 RBD_FEATURE_STRIPINGV2 | \ 122 RBD_FEATURE_EXCLUSIVE_LOCK | \ 123 RBD_FEATURE_DATA_POOL | \ 124 RBD_FEATURE_OPERATIONS) 125 126 /* Features supported by this (client software) implementation. */ 127 128 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL) 129 130 /* 131 * An RBD device name will be "rbd#", where the "rbd" comes from 132 * RBD_DRV_NAME above, and # is a unique integer identifier. 133 */ 134 #define DEV_NAME_LEN 32 135 136 /* 137 * block device image metadata (in-memory version) 138 */ 139 struct rbd_image_header { 140 /* These six fields never change for a given rbd image */ 141 char *object_prefix; 142 __u8 obj_order; 143 u64 stripe_unit; 144 u64 stripe_count; 145 s64 data_pool_id; 146 u64 features; /* Might be changeable someday? */ 147 148 /* The remaining fields need to be updated occasionally */ 149 u64 image_size; 150 struct ceph_snap_context *snapc; 151 char *snap_names; /* format 1 only */ 152 u64 *snap_sizes; /* format 1 only */ 153 }; 154 155 /* 156 * An rbd image specification. 157 * 158 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely 159 * identify an image. Each rbd_dev structure includes a pointer to 160 * an rbd_spec structure that encapsulates this identity. 161 * 162 * Each of the id's in an rbd_spec has an associated name. For a 163 * user-mapped image, the names are supplied and the id's associated 164 * with them are looked up. For a layered image, a parent image is 165 * defined by the tuple, and the names are looked up. 166 * 167 * An rbd_dev structure contains a parent_spec pointer which is 168 * non-null if the image it represents is a child in a layered 169 * image. This pointer will refer to the rbd_spec structure used 170 * by the parent rbd_dev for its own identity (i.e., the structure 171 * is shared between the parent and child). 172 * 173 * Since these structures are populated once, during the discovery 174 * phase of image construction, they are effectively immutable so 175 * we make no effort to synchronize access to them. 176 * 177 * Note that code herein does not assume the image name is known (it 178 * could be a null pointer). 179 */ 180 struct rbd_spec { 181 u64 pool_id; 182 const char *pool_name; 183 184 const char *image_id; 185 const char *image_name; 186 187 u64 snap_id; 188 const char *snap_name; 189 190 struct kref kref; 191 }; 192 193 /* 194 * an instance of the client. multiple devices may share an rbd client. 195 */ 196 struct rbd_client { 197 struct ceph_client *client; 198 struct kref kref; 199 struct list_head node; 200 }; 201 202 struct rbd_img_request; 203 typedef void (*rbd_img_callback_t)(struct rbd_img_request *); 204 205 #define BAD_WHICH U32_MAX /* Good which or bad which, which? */ 206 207 struct rbd_obj_request; 208 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *); 209 210 enum obj_request_type { 211 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES 212 }; 213 214 enum obj_operation_type { 215 OBJ_OP_WRITE, 216 OBJ_OP_READ, 217 OBJ_OP_DISCARD, 218 }; 219 220 enum obj_req_flags { 221 OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */ 222 OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */ 223 OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */ 224 OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */ 225 }; 226 227 struct rbd_obj_request { 228 u64 object_no; 229 u64 offset; /* object start byte */ 230 u64 length; /* bytes from offset */ 231 unsigned long flags; 232 233 /* 234 * An object request associated with an image will have its 235 * img_data flag set; a standalone object request will not. 236 * 237 * A standalone object request will have which == BAD_WHICH 238 * and a null obj_request pointer. 239 * 240 * An object request initiated in support of a layered image 241 * object (to check for its existence before a write) will 242 * have which == BAD_WHICH and a non-null obj_request pointer. 243 * 244 * Finally, an object request for rbd image data will have 245 * which != BAD_WHICH, and will have a non-null img_request 246 * pointer. The value of which will be in the range 247 * 0..(img_request->obj_request_count-1). 248 */ 249 union { 250 struct rbd_obj_request *obj_request; /* STAT op */ 251 struct { 252 struct rbd_img_request *img_request; 253 u64 img_offset; 254 /* links for img_request->obj_requests list */ 255 struct list_head links; 256 }; 257 }; 258 u32 which; /* posn image request list */ 259 260 enum obj_request_type type; 261 union { 262 struct bio *bio_list; 263 struct { 264 struct page **pages; 265 u32 page_count; 266 }; 267 }; 268 struct page **copyup_pages; 269 u32 copyup_page_count; 270 271 struct ceph_osd_request *osd_req; 272 273 u64 xferred; /* bytes transferred */ 274 int result; 275 276 rbd_obj_callback_t callback; 277 278 struct kref kref; 279 }; 280 281 enum img_req_flags { 282 IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */ 283 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */ 284 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */ 285 IMG_REQ_DISCARD, /* discard: normal = 0, discard request = 1 */ 286 }; 287 288 struct rbd_img_request { 289 struct rbd_device *rbd_dev; 290 u64 offset; /* starting image byte offset */ 291 u64 length; /* byte count from offset */ 292 unsigned long flags; 293 union { 294 u64 snap_id; /* for reads */ 295 struct ceph_snap_context *snapc; /* for writes */ 296 }; 297 union { 298 struct request *rq; /* block request */ 299 struct rbd_obj_request *obj_request; /* obj req initiator */ 300 }; 301 struct page **copyup_pages; 302 u32 copyup_page_count; 303 spinlock_t completion_lock;/* protects next_completion */ 304 u32 next_completion; 305 rbd_img_callback_t callback; 306 u64 xferred;/* aggregate bytes transferred */ 307 int result; /* first nonzero obj_request result */ 308 309 u32 obj_request_count; 310 struct list_head obj_requests; /* rbd_obj_request structs */ 311 312 struct kref kref; 313 }; 314 315 #define for_each_obj_request(ireq, oreq) \ 316 list_for_each_entry(oreq, &(ireq)->obj_requests, links) 317 #define for_each_obj_request_from(ireq, oreq) \ 318 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links) 319 #define for_each_obj_request_safe(ireq, oreq, n) \ 320 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links) 321 322 enum rbd_watch_state { 323 RBD_WATCH_STATE_UNREGISTERED, 324 RBD_WATCH_STATE_REGISTERED, 325 RBD_WATCH_STATE_ERROR, 326 }; 327 328 enum rbd_lock_state { 329 RBD_LOCK_STATE_UNLOCKED, 330 RBD_LOCK_STATE_LOCKED, 331 RBD_LOCK_STATE_RELEASING, 332 }; 333 334 /* WatchNotify::ClientId */ 335 struct rbd_client_id { 336 u64 gid; 337 u64 handle; 338 }; 339 340 struct rbd_mapping { 341 u64 size; 342 u64 features; 343 }; 344 345 /* 346 * a single device 347 */ 348 struct rbd_device { 349 int dev_id; /* blkdev unique id */ 350 351 int major; /* blkdev assigned major */ 352 int minor; 353 struct gendisk *disk; /* blkdev's gendisk and rq */ 354 355 u32 image_format; /* Either 1 or 2 */ 356 struct rbd_client *rbd_client; 357 358 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */ 359 360 spinlock_t lock; /* queue, flags, open_count */ 361 362 struct rbd_image_header header; 363 unsigned long flags; /* possibly lock protected */ 364 struct rbd_spec *spec; 365 struct rbd_options *opts; 366 char *config_info; /* add{,_single_major} string */ 367 368 struct ceph_object_id header_oid; 369 struct ceph_object_locator header_oloc; 370 371 struct ceph_file_layout layout; /* used for all rbd requests */ 372 373 struct mutex watch_mutex; 374 enum rbd_watch_state watch_state; 375 struct ceph_osd_linger_request *watch_handle; 376 u64 watch_cookie; 377 struct delayed_work watch_dwork; 378 379 struct rw_semaphore lock_rwsem; 380 enum rbd_lock_state lock_state; 381 char lock_cookie[32]; 382 struct rbd_client_id owner_cid; 383 struct work_struct acquired_lock_work; 384 struct work_struct released_lock_work; 385 struct delayed_work lock_dwork; 386 struct work_struct unlock_work; 387 wait_queue_head_t lock_waitq; 388 389 struct workqueue_struct *task_wq; 390 391 struct rbd_spec *parent_spec; 392 u64 parent_overlap; 393 atomic_t parent_ref; 394 struct rbd_device *parent; 395 396 /* Block layer tags. */ 397 struct blk_mq_tag_set tag_set; 398 399 /* protects updating the header */ 400 struct rw_semaphore header_rwsem; 401 402 struct rbd_mapping mapping; 403 404 struct list_head node; 405 406 /* sysfs related */ 407 struct device dev; 408 unsigned long open_count; /* protected by lock */ 409 }; 410 411 /* 412 * Flag bits for rbd_dev->flags: 413 * - REMOVING (which is coupled with rbd_dev->open_count) is protected 414 * by rbd_dev->lock 415 * - BLACKLISTED is protected by rbd_dev->lock_rwsem 416 */ 417 enum rbd_dev_flags { 418 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */ 419 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */ 420 RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */ 421 }; 422 423 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */ 424 425 static LIST_HEAD(rbd_dev_list); /* devices */ 426 static DEFINE_SPINLOCK(rbd_dev_list_lock); 427 428 static LIST_HEAD(rbd_client_list); /* clients */ 429 static DEFINE_SPINLOCK(rbd_client_list_lock); 430 431 /* Slab caches for frequently-allocated structures */ 432 433 static struct kmem_cache *rbd_img_request_cache; 434 static struct kmem_cache *rbd_obj_request_cache; 435 436 static struct bio_set *rbd_bio_clone; 437 438 static int rbd_major; 439 static DEFINE_IDA(rbd_dev_id_ida); 440 441 static struct workqueue_struct *rbd_wq; 442 443 /* 444 * single-major requires >= 0.75 version of userspace rbd utility. 445 */ 446 static bool single_major = true; 447 module_param(single_major, bool, S_IRUGO); 448 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)"); 449 450 static int rbd_img_request_submit(struct rbd_img_request *img_request); 451 452 static ssize_t rbd_add(struct bus_type *bus, const char *buf, 453 size_t count); 454 static ssize_t rbd_remove(struct bus_type *bus, const char *buf, 455 size_t count); 456 static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf, 457 size_t count); 458 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf, 459 size_t count); 460 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth); 461 static void rbd_spec_put(struct rbd_spec *spec); 462 463 static int rbd_dev_id_to_minor(int dev_id) 464 { 465 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT; 466 } 467 468 static int minor_to_rbd_dev_id(int minor) 469 { 470 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT; 471 } 472 473 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev) 474 { 475 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED || 476 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING; 477 } 478 479 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev) 480 { 481 bool is_lock_owner; 482 483 down_read(&rbd_dev->lock_rwsem); 484 is_lock_owner = __rbd_is_lock_owner(rbd_dev); 485 up_read(&rbd_dev->lock_rwsem); 486 return is_lock_owner; 487 } 488 489 static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf) 490 { 491 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED); 492 } 493 494 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add); 495 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove); 496 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major); 497 static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major); 498 static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL); 499 500 static struct attribute *rbd_bus_attrs[] = { 501 &bus_attr_add.attr, 502 &bus_attr_remove.attr, 503 &bus_attr_add_single_major.attr, 504 &bus_attr_remove_single_major.attr, 505 &bus_attr_supported_features.attr, 506 NULL, 507 }; 508 509 static umode_t rbd_bus_is_visible(struct kobject *kobj, 510 struct attribute *attr, int index) 511 { 512 if (!single_major && 513 (attr == &bus_attr_add_single_major.attr || 514 attr == &bus_attr_remove_single_major.attr)) 515 return 0; 516 517 return attr->mode; 518 } 519 520 static const struct attribute_group rbd_bus_group = { 521 .attrs = rbd_bus_attrs, 522 .is_visible = rbd_bus_is_visible, 523 }; 524 __ATTRIBUTE_GROUPS(rbd_bus); 525 526 static struct bus_type rbd_bus_type = { 527 .name = "rbd", 528 .bus_groups = rbd_bus_groups, 529 }; 530 531 static void rbd_root_dev_release(struct device *dev) 532 { 533 } 534 535 static struct device rbd_root_dev = { 536 .init_name = "rbd", 537 .release = rbd_root_dev_release, 538 }; 539 540 static __printf(2, 3) 541 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) 542 { 543 struct va_format vaf; 544 va_list args; 545 546 va_start(args, fmt); 547 vaf.fmt = fmt; 548 vaf.va = &args; 549 550 if (!rbd_dev) 551 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf); 552 else if (rbd_dev->disk) 553 printk(KERN_WARNING "%s: %s: %pV\n", 554 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf); 555 else if (rbd_dev->spec && rbd_dev->spec->image_name) 556 printk(KERN_WARNING "%s: image %s: %pV\n", 557 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf); 558 else if (rbd_dev->spec && rbd_dev->spec->image_id) 559 printk(KERN_WARNING "%s: id %s: %pV\n", 560 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf); 561 else /* punt */ 562 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n", 563 RBD_DRV_NAME, rbd_dev, &vaf); 564 va_end(args); 565 } 566 567 #ifdef RBD_DEBUG 568 #define rbd_assert(expr) \ 569 if (unlikely(!(expr))) { \ 570 printk(KERN_ERR "\nAssertion failure in %s() " \ 571 "at line %d:\n\n" \ 572 "\trbd_assert(%s);\n\n", \ 573 __func__, __LINE__, #expr); \ 574 BUG(); \ 575 } 576 #else /* !RBD_DEBUG */ 577 # define rbd_assert(expr) ((void) 0) 578 #endif /* !RBD_DEBUG */ 579 580 static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request); 581 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); 582 static void rbd_img_parent_read(struct rbd_obj_request *obj_request); 583 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); 584 585 static int rbd_dev_refresh(struct rbd_device *rbd_dev); 586 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev); 587 static int rbd_dev_header_info(struct rbd_device *rbd_dev); 588 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev); 589 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, 590 u64 snap_id); 591 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 592 u8 *order, u64 *snap_size); 593 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 594 u64 *snap_features); 595 596 static int rbd_open(struct block_device *bdev, fmode_t mode) 597 { 598 struct rbd_device *rbd_dev = bdev->bd_disk->private_data; 599 bool removing = false; 600 601 spin_lock_irq(&rbd_dev->lock); 602 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) 603 removing = true; 604 else 605 rbd_dev->open_count++; 606 spin_unlock_irq(&rbd_dev->lock); 607 if (removing) 608 return -ENOENT; 609 610 (void) get_device(&rbd_dev->dev); 611 612 return 0; 613 } 614 615 static void rbd_release(struct gendisk *disk, fmode_t mode) 616 { 617 struct rbd_device *rbd_dev = disk->private_data; 618 unsigned long open_count_before; 619 620 spin_lock_irq(&rbd_dev->lock); 621 open_count_before = rbd_dev->open_count--; 622 spin_unlock_irq(&rbd_dev->lock); 623 rbd_assert(open_count_before > 0); 624 625 put_device(&rbd_dev->dev); 626 } 627 628 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg) 629 { 630 int ro; 631 632 if (get_user(ro, (int __user *)arg)) 633 return -EFAULT; 634 635 /* Snapshots can't be marked read-write */ 636 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro) 637 return -EROFS; 638 639 /* Let blkdev_roset() handle it */ 640 return -ENOTTY; 641 } 642 643 static int rbd_ioctl(struct block_device *bdev, fmode_t mode, 644 unsigned int cmd, unsigned long arg) 645 { 646 struct rbd_device *rbd_dev = bdev->bd_disk->private_data; 647 int ret; 648 649 switch (cmd) { 650 case BLKROSET: 651 ret = rbd_ioctl_set_ro(rbd_dev, arg); 652 break; 653 default: 654 ret = -ENOTTY; 655 } 656 657 return ret; 658 } 659 660 #ifdef CONFIG_COMPAT 661 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode, 662 unsigned int cmd, unsigned long arg) 663 { 664 return rbd_ioctl(bdev, mode, cmd, arg); 665 } 666 #endif /* CONFIG_COMPAT */ 667 668 static const struct block_device_operations rbd_bd_ops = { 669 .owner = THIS_MODULE, 670 .open = rbd_open, 671 .release = rbd_release, 672 .ioctl = rbd_ioctl, 673 #ifdef CONFIG_COMPAT 674 .compat_ioctl = rbd_compat_ioctl, 675 #endif 676 }; 677 678 /* 679 * Initialize an rbd client instance. Success or not, this function 680 * consumes ceph_opts. Caller holds client_mutex. 681 */ 682 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) 683 { 684 struct rbd_client *rbdc; 685 int ret = -ENOMEM; 686 687 dout("%s:\n", __func__); 688 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL); 689 if (!rbdc) 690 goto out_opt; 691 692 kref_init(&rbdc->kref); 693 INIT_LIST_HEAD(&rbdc->node); 694 695 rbdc->client = ceph_create_client(ceph_opts, rbdc); 696 if (IS_ERR(rbdc->client)) 697 goto out_rbdc; 698 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */ 699 700 ret = ceph_open_session(rbdc->client); 701 if (ret < 0) 702 goto out_client; 703 704 spin_lock(&rbd_client_list_lock); 705 list_add_tail(&rbdc->node, &rbd_client_list); 706 spin_unlock(&rbd_client_list_lock); 707 708 dout("%s: rbdc %p\n", __func__, rbdc); 709 710 return rbdc; 711 out_client: 712 ceph_destroy_client(rbdc->client); 713 out_rbdc: 714 kfree(rbdc); 715 out_opt: 716 if (ceph_opts) 717 ceph_destroy_options(ceph_opts); 718 dout("%s: error %d\n", __func__, ret); 719 720 return ERR_PTR(ret); 721 } 722 723 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc) 724 { 725 kref_get(&rbdc->kref); 726 727 return rbdc; 728 } 729 730 /* 731 * Find a ceph client with specific addr and configuration. If 732 * found, bump its reference count. 733 */ 734 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) 735 { 736 struct rbd_client *client_node; 737 bool found = false; 738 739 if (ceph_opts->flags & CEPH_OPT_NOSHARE) 740 return NULL; 741 742 spin_lock(&rbd_client_list_lock); 743 list_for_each_entry(client_node, &rbd_client_list, node) { 744 if (!ceph_compare_options(ceph_opts, client_node->client)) { 745 __rbd_get_client(client_node); 746 747 found = true; 748 break; 749 } 750 } 751 spin_unlock(&rbd_client_list_lock); 752 753 return found ? client_node : NULL; 754 } 755 756 /* 757 * (Per device) rbd map options 758 */ 759 enum { 760 Opt_queue_depth, 761 Opt_last_int, 762 /* int args above */ 763 Opt_last_string, 764 /* string args above */ 765 Opt_read_only, 766 Opt_read_write, 767 Opt_lock_on_read, 768 Opt_exclusive, 769 Opt_err 770 }; 771 772 static match_table_t rbd_opts_tokens = { 773 {Opt_queue_depth, "queue_depth=%d"}, 774 /* int args above */ 775 /* string args above */ 776 {Opt_read_only, "read_only"}, 777 {Opt_read_only, "ro"}, /* Alternate spelling */ 778 {Opt_read_write, "read_write"}, 779 {Opt_read_write, "rw"}, /* Alternate spelling */ 780 {Opt_lock_on_read, "lock_on_read"}, 781 {Opt_exclusive, "exclusive"}, 782 {Opt_err, NULL} 783 }; 784 785 struct rbd_options { 786 int queue_depth; 787 bool read_only; 788 bool lock_on_read; 789 bool exclusive; 790 }; 791 792 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ 793 #define RBD_READ_ONLY_DEFAULT false 794 #define RBD_LOCK_ON_READ_DEFAULT false 795 #define RBD_EXCLUSIVE_DEFAULT false 796 797 static int parse_rbd_opts_token(char *c, void *private) 798 { 799 struct rbd_options *rbd_opts = private; 800 substring_t argstr[MAX_OPT_ARGS]; 801 int token, intval, ret; 802 803 token = match_token(c, rbd_opts_tokens, argstr); 804 if (token < Opt_last_int) { 805 ret = match_int(&argstr[0], &intval); 806 if (ret < 0) { 807 pr_err("bad mount option arg (not int) at '%s'\n", c); 808 return ret; 809 } 810 dout("got int token %d val %d\n", token, intval); 811 } else if (token > Opt_last_int && token < Opt_last_string) { 812 dout("got string token %d val %s\n", token, argstr[0].from); 813 } else { 814 dout("got token %d\n", token); 815 } 816 817 switch (token) { 818 case Opt_queue_depth: 819 if (intval < 1) { 820 pr_err("queue_depth out of range\n"); 821 return -EINVAL; 822 } 823 rbd_opts->queue_depth = intval; 824 break; 825 case Opt_read_only: 826 rbd_opts->read_only = true; 827 break; 828 case Opt_read_write: 829 rbd_opts->read_only = false; 830 break; 831 case Opt_lock_on_read: 832 rbd_opts->lock_on_read = true; 833 break; 834 case Opt_exclusive: 835 rbd_opts->exclusive = true; 836 break; 837 default: 838 /* libceph prints "bad option" msg */ 839 return -EINVAL; 840 } 841 842 return 0; 843 } 844 845 static char* obj_op_name(enum obj_operation_type op_type) 846 { 847 switch (op_type) { 848 case OBJ_OP_READ: 849 return "read"; 850 case OBJ_OP_WRITE: 851 return "write"; 852 case OBJ_OP_DISCARD: 853 return "discard"; 854 default: 855 return "???"; 856 } 857 } 858 859 /* 860 * Get a ceph client with specific addr and configuration, if one does 861 * not exist create it. Either way, ceph_opts is consumed by this 862 * function. 863 */ 864 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) 865 { 866 struct rbd_client *rbdc; 867 868 mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING); 869 rbdc = rbd_client_find(ceph_opts); 870 if (rbdc) /* using an existing client */ 871 ceph_destroy_options(ceph_opts); 872 else 873 rbdc = rbd_client_create(ceph_opts); 874 mutex_unlock(&client_mutex); 875 876 return rbdc; 877 } 878 879 /* 880 * Destroy ceph client 881 * 882 * Caller must hold rbd_client_list_lock. 883 */ 884 static void rbd_client_release(struct kref *kref) 885 { 886 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref); 887 888 dout("%s: rbdc %p\n", __func__, rbdc); 889 spin_lock(&rbd_client_list_lock); 890 list_del(&rbdc->node); 891 spin_unlock(&rbd_client_list_lock); 892 893 ceph_destroy_client(rbdc->client); 894 kfree(rbdc); 895 } 896 897 /* 898 * Drop reference to ceph client node. If it's not referenced anymore, release 899 * it. 900 */ 901 static void rbd_put_client(struct rbd_client *rbdc) 902 { 903 if (rbdc) 904 kref_put(&rbdc->kref, rbd_client_release); 905 } 906 907 static bool rbd_image_format_valid(u32 image_format) 908 { 909 return image_format == 1 || image_format == 2; 910 } 911 912 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) 913 { 914 size_t size; 915 u32 snap_count; 916 917 /* The header has to start with the magic rbd header text */ 918 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT))) 919 return false; 920 921 /* The bio layer requires at least sector-sized I/O */ 922 923 if (ondisk->options.order < SECTOR_SHIFT) 924 return false; 925 926 /* If we use u64 in a few spots we may be able to loosen this */ 927 928 if (ondisk->options.order > 8 * sizeof (int) - 1) 929 return false; 930 931 /* 932 * The size of a snapshot header has to fit in a size_t, and 933 * that limits the number of snapshots. 934 */ 935 snap_count = le32_to_cpu(ondisk->snap_count); 936 size = SIZE_MAX - sizeof (struct ceph_snap_context); 937 if (snap_count > size / sizeof (__le64)) 938 return false; 939 940 /* 941 * Not only that, but the size of the entire the snapshot 942 * header must also be representable in a size_t. 943 */ 944 size -= snap_count * sizeof (__le64); 945 if ((u64) size < le64_to_cpu(ondisk->snap_names_len)) 946 return false; 947 948 return true; 949 } 950 951 /* 952 * returns the size of an object in the image 953 */ 954 static u32 rbd_obj_bytes(struct rbd_image_header *header) 955 { 956 return 1U << header->obj_order; 957 } 958 959 static void rbd_init_layout(struct rbd_device *rbd_dev) 960 { 961 if (rbd_dev->header.stripe_unit == 0 || 962 rbd_dev->header.stripe_count == 0) { 963 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header); 964 rbd_dev->header.stripe_count = 1; 965 } 966 967 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit; 968 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count; 969 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header); 970 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ? 971 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id; 972 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL); 973 } 974 975 /* 976 * Fill an rbd image header with information from the given format 1 977 * on-disk header. 978 */ 979 static int rbd_header_from_disk(struct rbd_device *rbd_dev, 980 struct rbd_image_header_ondisk *ondisk) 981 { 982 struct rbd_image_header *header = &rbd_dev->header; 983 bool first_time = header->object_prefix == NULL; 984 struct ceph_snap_context *snapc; 985 char *object_prefix = NULL; 986 char *snap_names = NULL; 987 u64 *snap_sizes = NULL; 988 u32 snap_count; 989 int ret = -ENOMEM; 990 u32 i; 991 992 /* Allocate this now to avoid having to handle failure below */ 993 994 if (first_time) { 995 object_prefix = kstrndup(ondisk->object_prefix, 996 sizeof(ondisk->object_prefix), 997 GFP_KERNEL); 998 if (!object_prefix) 999 return -ENOMEM; 1000 } 1001 1002 /* Allocate the snapshot context and fill it in */ 1003 1004 snap_count = le32_to_cpu(ondisk->snap_count); 1005 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); 1006 if (!snapc) 1007 goto out_err; 1008 snapc->seq = le64_to_cpu(ondisk->snap_seq); 1009 if (snap_count) { 1010 struct rbd_image_snap_ondisk *snaps; 1011 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); 1012 1013 /* We'll keep a copy of the snapshot names... */ 1014 1015 if (snap_names_len > (u64)SIZE_MAX) 1016 goto out_2big; 1017 snap_names = kmalloc(snap_names_len, GFP_KERNEL); 1018 if (!snap_names) 1019 goto out_err; 1020 1021 /* ...as well as the array of their sizes. */ 1022 snap_sizes = kmalloc_array(snap_count, 1023 sizeof(*header->snap_sizes), 1024 GFP_KERNEL); 1025 if (!snap_sizes) 1026 goto out_err; 1027 1028 /* 1029 * Copy the names, and fill in each snapshot's id 1030 * and size. 1031 * 1032 * Note that rbd_dev_v1_header_info() guarantees the 1033 * ondisk buffer we're working with has 1034 * snap_names_len bytes beyond the end of the 1035 * snapshot id array, this memcpy() is safe. 1036 */ 1037 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len); 1038 snaps = ondisk->snaps; 1039 for (i = 0; i < snap_count; i++) { 1040 snapc->snaps[i] = le64_to_cpu(snaps[i].id); 1041 snap_sizes[i] = le64_to_cpu(snaps[i].image_size); 1042 } 1043 } 1044 1045 /* We won't fail any more, fill in the header */ 1046 1047 if (first_time) { 1048 header->object_prefix = object_prefix; 1049 header->obj_order = ondisk->options.order; 1050 rbd_init_layout(rbd_dev); 1051 } else { 1052 ceph_put_snap_context(header->snapc); 1053 kfree(header->snap_names); 1054 kfree(header->snap_sizes); 1055 } 1056 1057 /* The remaining fields always get updated (when we refresh) */ 1058 1059 header->image_size = le64_to_cpu(ondisk->image_size); 1060 header->snapc = snapc; 1061 header->snap_names = snap_names; 1062 header->snap_sizes = snap_sizes; 1063 1064 return 0; 1065 out_2big: 1066 ret = -EIO; 1067 out_err: 1068 kfree(snap_sizes); 1069 kfree(snap_names); 1070 ceph_put_snap_context(snapc); 1071 kfree(object_prefix); 1072 1073 return ret; 1074 } 1075 1076 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) 1077 { 1078 const char *snap_name; 1079 1080 rbd_assert(which < rbd_dev->header.snapc->num_snaps); 1081 1082 /* Skip over names until we find the one we are looking for */ 1083 1084 snap_name = rbd_dev->header.snap_names; 1085 while (which--) 1086 snap_name += strlen(snap_name) + 1; 1087 1088 return kstrdup(snap_name, GFP_KERNEL); 1089 } 1090 1091 /* 1092 * Snapshot id comparison function for use with qsort()/bsearch(). 1093 * Note that result is for snapshots in *descending* order. 1094 */ 1095 static int snapid_compare_reverse(const void *s1, const void *s2) 1096 { 1097 u64 snap_id1 = *(u64 *)s1; 1098 u64 snap_id2 = *(u64 *)s2; 1099 1100 if (snap_id1 < snap_id2) 1101 return 1; 1102 return snap_id1 == snap_id2 ? 0 : -1; 1103 } 1104 1105 /* 1106 * Search a snapshot context to see if the given snapshot id is 1107 * present. 1108 * 1109 * Returns the position of the snapshot id in the array if it's found, 1110 * or BAD_SNAP_INDEX otherwise. 1111 * 1112 * Note: The snapshot array is in kept sorted (by the osd) in 1113 * reverse order, highest snapshot id first. 1114 */ 1115 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id) 1116 { 1117 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 1118 u64 *found; 1119 1120 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps, 1121 sizeof (snap_id), snapid_compare_reverse); 1122 1123 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX; 1124 } 1125 1126 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, 1127 u64 snap_id) 1128 { 1129 u32 which; 1130 const char *snap_name; 1131 1132 which = rbd_dev_snap_index(rbd_dev, snap_id); 1133 if (which == BAD_SNAP_INDEX) 1134 return ERR_PTR(-ENOENT); 1135 1136 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which); 1137 return snap_name ? snap_name : ERR_PTR(-ENOMEM); 1138 } 1139 1140 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) 1141 { 1142 if (snap_id == CEPH_NOSNAP) 1143 return RBD_SNAP_HEAD_NAME; 1144 1145 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 1146 if (rbd_dev->image_format == 1) 1147 return rbd_dev_v1_snap_name(rbd_dev, snap_id); 1148 1149 return rbd_dev_v2_snap_name(rbd_dev, snap_id); 1150 } 1151 1152 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 1153 u64 *snap_size) 1154 { 1155 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 1156 if (snap_id == CEPH_NOSNAP) { 1157 *snap_size = rbd_dev->header.image_size; 1158 } else if (rbd_dev->image_format == 1) { 1159 u32 which; 1160 1161 which = rbd_dev_snap_index(rbd_dev, snap_id); 1162 if (which == BAD_SNAP_INDEX) 1163 return -ENOENT; 1164 1165 *snap_size = rbd_dev->header.snap_sizes[which]; 1166 } else { 1167 u64 size = 0; 1168 int ret; 1169 1170 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size); 1171 if (ret) 1172 return ret; 1173 1174 *snap_size = size; 1175 } 1176 return 0; 1177 } 1178 1179 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 1180 u64 *snap_features) 1181 { 1182 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 1183 if (snap_id == CEPH_NOSNAP) { 1184 *snap_features = rbd_dev->header.features; 1185 } else if (rbd_dev->image_format == 1) { 1186 *snap_features = 0; /* No features for format 1 */ 1187 } else { 1188 u64 features = 0; 1189 int ret; 1190 1191 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features); 1192 if (ret) 1193 return ret; 1194 1195 *snap_features = features; 1196 } 1197 return 0; 1198 } 1199 1200 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) 1201 { 1202 u64 snap_id = rbd_dev->spec->snap_id; 1203 u64 size = 0; 1204 u64 features = 0; 1205 int ret; 1206 1207 ret = rbd_snap_size(rbd_dev, snap_id, &size); 1208 if (ret) 1209 return ret; 1210 ret = rbd_snap_features(rbd_dev, snap_id, &features); 1211 if (ret) 1212 return ret; 1213 1214 rbd_dev->mapping.size = size; 1215 rbd_dev->mapping.features = features; 1216 1217 return 0; 1218 } 1219 1220 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) 1221 { 1222 rbd_dev->mapping.size = 0; 1223 rbd_dev->mapping.features = 0; 1224 } 1225 1226 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) 1227 { 1228 u64 segment_size = rbd_obj_bytes(&rbd_dev->header); 1229 1230 return offset & (segment_size - 1); 1231 } 1232 1233 static u64 rbd_segment_length(struct rbd_device *rbd_dev, 1234 u64 offset, u64 length) 1235 { 1236 u64 segment_size = rbd_obj_bytes(&rbd_dev->header); 1237 1238 offset &= segment_size - 1; 1239 1240 rbd_assert(length <= U64_MAX - offset); 1241 if (offset + length > segment_size) 1242 length = segment_size - offset; 1243 1244 return length; 1245 } 1246 1247 /* 1248 * bio helpers 1249 */ 1250 1251 static void bio_chain_put(struct bio *chain) 1252 { 1253 struct bio *tmp; 1254 1255 while (chain) { 1256 tmp = chain; 1257 chain = chain->bi_next; 1258 bio_put(tmp); 1259 } 1260 } 1261 1262 /* 1263 * zeros a bio chain, starting at specific offset 1264 */ 1265 static void zero_bio_chain(struct bio *chain, int start_ofs) 1266 { 1267 struct bio_vec bv; 1268 struct bvec_iter iter; 1269 unsigned long flags; 1270 void *buf; 1271 int pos = 0; 1272 1273 while (chain) { 1274 bio_for_each_segment(bv, chain, iter) { 1275 if (pos + bv.bv_len > start_ofs) { 1276 int remainder = max(start_ofs - pos, 0); 1277 buf = bvec_kmap_irq(&bv, &flags); 1278 memset(buf + remainder, 0, 1279 bv.bv_len - remainder); 1280 flush_dcache_page(bv.bv_page); 1281 bvec_kunmap_irq(buf, &flags); 1282 } 1283 pos += bv.bv_len; 1284 } 1285 1286 chain = chain->bi_next; 1287 } 1288 } 1289 1290 /* 1291 * similar to zero_bio_chain(), zeros data defined by a page array, 1292 * starting at the given byte offset from the start of the array and 1293 * continuing up to the given end offset. The pages array is 1294 * assumed to be big enough to hold all bytes up to the end. 1295 */ 1296 static void zero_pages(struct page **pages, u64 offset, u64 end) 1297 { 1298 struct page **page = &pages[offset >> PAGE_SHIFT]; 1299 1300 rbd_assert(end > offset); 1301 rbd_assert(end - offset <= (u64)SIZE_MAX); 1302 while (offset < end) { 1303 size_t page_offset; 1304 size_t length; 1305 unsigned long flags; 1306 void *kaddr; 1307 1308 page_offset = offset & ~PAGE_MASK; 1309 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset); 1310 local_irq_save(flags); 1311 kaddr = kmap_atomic(*page); 1312 memset(kaddr + page_offset, 0, length); 1313 flush_dcache_page(*page); 1314 kunmap_atomic(kaddr); 1315 local_irq_restore(flags); 1316 1317 offset += length; 1318 page++; 1319 } 1320 } 1321 1322 /* 1323 * Clone a portion of a bio, starting at the given byte offset 1324 * and continuing for the number of bytes indicated. 1325 */ 1326 static struct bio *bio_clone_range(struct bio *bio_src, 1327 unsigned int offset, 1328 unsigned int len, 1329 gfp_t gfpmask) 1330 { 1331 struct bio *bio; 1332 1333 bio = bio_clone_fast(bio_src, gfpmask, rbd_bio_clone); 1334 if (!bio) 1335 return NULL; /* ENOMEM */ 1336 1337 bio_advance(bio, offset); 1338 bio->bi_iter.bi_size = len; 1339 1340 return bio; 1341 } 1342 1343 /* 1344 * Clone a portion of a bio chain, starting at the given byte offset 1345 * into the first bio in the source chain and continuing for the 1346 * number of bytes indicated. The result is another bio chain of 1347 * exactly the given length, or a null pointer on error. 1348 * 1349 * The bio_src and offset parameters are both in-out. On entry they 1350 * refer to the first source bio and the offset into that bio where 1351 * the start of data to be cloned is located. 1352 * 1353 * On return, bio_src is updated to refer to the bio in the source 1354 * chain that contains first un-cloned byte, and *offset will 1355 * contain the offset of that byte within that bio. 1356 */ 1357 static struct bio *bio_chain_clone_range(struct bio **bio_src, 1358 unsigned int *offset, 1359 unsigned int len, 1360 gfp_t gfpmask) 1361 { 1362 struct bio *bi = *bio_src; 1363 unsigned int off = *offset; 1364 struct bio *chain = NULL; 1365 struct bio **end; 1366 1367 /* Build up a chain of clone bios up to the limit */ 1368 1369 if (!bi || off >= bi->bi_iter.bi_size || !len) 1370 return NULL; /* Nothing to clone */ 1371 1372 end = &chain; 1373 while (len) { 1374 unsigned int bi_size; 1375 struct bio *bio; 1376 1377 if (!bi) { 1378 rbd_warn(NULL, "bio_chain exhausted with %u left", len); 1379 goto out_err; /* EINVAL; ran out of bio's */ 1380 } 1381 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len); 1382 bio = bio_clone_range(bi, off, bi_size, gfpmask); 1383 if (!bio) 1384 goto out_err; /* ENOMEM */ 1385 1386 *end = bio; 1387 end = &bio->bi_next; 1388 1389 off += bi_size; 1390 if (off == bi->bi_iter.bi_size) { 1391 bi = bi->bi_next; 1392 off = 0; 1393 } 1394 len -= bi_size; 1395 } 1396 *bio_src = bi; 1397 *offset = off; 1398 1399 return chain; 1400 out_err: 1401 bio_chain_put(chain); 1402 1403 return NULL; 1404 } 1405 1406 /* 1407 * The default/initial value for all object request flags is 0. For 1408 * each flag, once its value is set to 1 it is never reset to 0 1409 * again. 1410 */ 1411 static void obj_request_img_data_set(struct rbd_obj_request *obj_request) 1412 { 1413 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) { 1414 struct rbd_device *rbd_dev; 1415 1416 rbd_dev = obj_request->img_request->rbd_dev; 1417 rbd_warn(rbd_dev, "obj_request %p already marked img_data", 1418 obj_request); 1419 } 1420 } 1421 1422 static bool obj_request_img_data_test(struct rbd_obj_request *obj_request) 1423 { 1424 smp_mb(); 1425 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0; 1426 } 1427 1428 static void obj_request_done_set(struct rbd_obj_request *obj_request) 1429 { 1430 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) { 1431 struct rbd_device *rbd_dev = NULL; 1432 1433 if (obj_request_img_data_test(obj_request)) 1434 rbd_dev = obj_request->img_request->rbd_dev; 1435 rbd_warn(rbd_dev, "obj_request %p already marked done", 1436 obj_request); 1437 } 1438 } 1439 1440 static bool obj_request_done_test(struct rbd_obj_request *obj_request) 1441 { 1442 smp_mb(); 1443 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0; 1444 } 1445 1446 /* 1447 * This sets the KNOWN flag after (possibly) setting the EXISTS 1448 * flag. The latter is set based on the "exists" value provided. 1449 * 1450 * Note that for our purposes once an object exists it never goes 1451 * away again. It's possible that the response from two existence 1452 * checks are separated by the creation of the target object, and 1453 * the first ("doesn't exist") response arrives *after* the second 1454 * ("does exist"). In that case we ignore the second one. 1455 */ 1456 static void obj_request_existence_set(struct rbd_obj_request *obj_request, 1457 bool exists) 1458 { 1459 if (exists) 1460 set_bit(OBJ_REQ_EXISTS, &obj_request->flags); 1461 set_bit(OBJ_REQ_KNOWN, &obj_request->flags); 1462 smp_mb(); 1463 } 1464 1465 static bool obj_request_known_test(struct rbd_obj_request *obj_request) 1466 { 1467 smp_mb(); 1468 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0; 1469 } 1470 1471 static bool obj_request_exists_test(struct rbd_obj_request *obj_request) 1472 { 1473 smp_mb(); 1474 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0; 1475 } 1476 1477 static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request) 1478 { 1479 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev; 1480 1481 return obj_request->img_offset < 1482 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header)); 1483 } 1484 1485 static void rbd_obj_request_get(struct rbd_obj_request *obj_request) 1486 { 1487 dout("%s: obj %p (was %d)\n", __func__, obj_request, 1488 kref_read(&obj_request->kref)); 1489 kref_get(&obj_request->kref); 1490 } 1491 1492 static void rbd_obj_request_destroy(struct kref *kref); 1493 static void rbd_obj_request_put(struct rbd_obj_request *obj_request) 1494 { 1495 rbd_assert(obj_request != NULL); 1496 dout("%s: obj %p (was %d)\n", __func__, obj_request, 1497 kref_read(&obj_request->kref)); 1498 kref_put(&obj_request->kref, rbd_obj_request_destroy); 1499 } 1500 1501 static void rbd_img_request_get(struct rbd_img_request *img_request) 1502 { 1503 dout("%s: img %p (was %d)\n", __func__, img_request, 1504 kref_read(&img_request->kref)); 1505 kref_get(&img_request->kref); 1506 } 1507 1508 static bool img_request_child_test(struct rbd_img_request *img_request); 1509 static void rbd_parent_request_destroy(struct kref *kref); 1510 static void rbd_img_request_destroy(struct kref *kref); 1511 static void rbd_img_request_put(struct rbd_img_request *img_request) 1512 { 1513 rbd_assert(img_request != NULL); 1514 dout("%s: img %p (was %d)\n", __func__, img_request, 1515 kref_read(&img_request->kref)); 1516 if (img_request_child_test(img_request)) 1517 kref_put(&img_request->kref, rbd_parent_request_destroy); 1518 else 1519 kref_put(&img_request->kref, rbd_img_request_destroy); 1520 } 1521 1522 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, 1523 struct rbd_obj_request *obj_request) 1524 { 1525 rbd_assert(obj_request->img_request == NULL); 1526 1527 /* Image request now owns object's original reference */ 1528 obj_request->img_request = img_request; 1529 obj_request->which = img_request->obj_request_count; 1530 rbd_assert(!obj_request_img_data_test(obj_request)); 1531 obj_request_img_data_set(obj_request); 1532 rbd_assert(obj_request->which != BAD_WHICH); 1533 img_request->obj_request_count++; 1534 list_add_tail(&obj_request->links, &img_request->obj_requests); 1535 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request, 1536 obj_request->which); 1537 } 1538 1539 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request, 1540 struct rbd_obj_request *obj_request) 1541 { 1542 rbd_assert(obj_request->which != BAD_WHICH); 1543 1544 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request, 1545 obj_request->which); 1546 list_del(&obj_request->links); 1547 rbd_assert(img_request->obj_request_count > 0); 1548 img_request->obj_request_count--; 1549 rbd_assert(obj_request->which == img_request->obj_request_count); 1550 obj_request->which = BAD_WHICH; 1551 rbd_assert(obj_request_img_data_test(obj_request)); 1552 rbd_assert(obj_request->img_request == img_request); 1553 obj_request->img_request = NULL; 1554 obj_request->callback = NULL; 1555 rbd_obj_request_put(obj_request); 1556 } 1557 1558 static bool obj_request_type_valid(enum obj_request_type type) 1559 { 1560 switch (type) { 1561 case OBJ_REQUEST_NODATA: 1562 case OBJ_REQUEST_BIO: 1563 case OBJ_REQUEST_PAGES: 1564 return true; 1565 default: 1566 return false; 1567 } 1568 } 1569 1570 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request); 1571 1572 static void rbd_obj_request_submit(struct rbd_obj_request *obj_request) 1573 { 1574 struct ceph_osd_request *osd_req = obj_request->osd_req; 1575 1576 dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__, 1577 obj_request, obj_request->object_no, obj_request->offset, 1578 obj_request->length, osd_req); 1579 if (obj_request_img_data_test(obj_request)) { 1580 WARN_ON(obj_request->callback != rbd_img_obj_callback); 1581 rbd_img_request_get(obj_request->img_request); 1582 } 1583 ceph_osdc_start_request(osd_req->r_osdc, osd_req, false); 1584 } 1585 1586 static void rbd_img_request_complete(struct rbd_img_request *img_request) 1587 { 1588 1589 dout("%s: img %p\n", __func__, img_request); 1590 1591 /* 1592 * If no error occurred, compute the aggregate transfer 1593 * count for the image request. We could instead use 1594 * atomic64_cmpxchg() to update it as each object request 1595 * completes; not clear which way is better off hand. 1596 */ 1597 if (!img_request->result) { 1598 struct rbd_obj_request *obj_request; 1599 u64 xferred = 0; 1600 1601 for_each_obj_request(img_request, obj_request) 1602 xferred += obj_request->xferred; 1603 img_request->xferred = xferred; 1604 } 1605 1606 if (img_request->callback) 1607 img_request->callback(img_request); 1608 else 1609 rbd_img_request_put(img_request); 1610 } 1611 1612 /* 1613 * The default/initial value for all image request flags is 0. Each 1614 * is conditionally set to 1 at image request initialization time 1615 * and currently never change thereafter. 1616 */ 1617 static void img_request_write_set(struct rbd_img_request *img_request) 1618 { 1619 set_bit(IMG_REQ_WRITE, &img_request->flags); 1620 smp_mb(); 1621 } 1622 1623 static bool img_request_write_test(struct rbd_img_request *img_request) 1624 { 1625 smp_mb(); 1626 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0; 1627 } 1628 1629 /* 1630 * Set the discard flag when the img_request is an discard request 1631 */ 1632 static void img_request_discard_set(struct rbd_img_request *img_request) 1633 { 1634 set_bit(IMG_REQ_DISCARD, &img_request->flags); 1635 smp_mb(); 1636 } 1637 1638 static bool img_request_discard_test(struct rbd_img_request *img_request) 1639 { 1640 smp_mb(); 1641 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0; 1642 } 1643 1644 static void img_request_child_set(struct rbd_img_request *img_request) 1645 { 1646 set_bit(IMG_REQ_CHILD, &img_request->flags); 1647 smp_mb(); 1648 } 1649 1650 static void img_request_child_clear(struct rbd_img_request *img_request) 1651 { 1652 clear_bit(IMG_REQ_CHILD, &img_request->flags); 1653 smp_mb(); 1654 } 1655 1656 static bool img_request_child_test(struct rbd_img_request *img_request) 1657 { 1658 smp_mb(); 1659 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0; 1660 } 1661 1662 static void img_request_layered_set(struct rbd_img_request *img_request) 1663 { 1664 set_bit(IMG_REQ_LAYERED, &img_request->flags); 1665 smp_mb(); 1666 } 1667 1668 static void img_request_layered_clear(struct rbd_img_request *img_request) 1669 { 1670 clear_bit(IMG_REQ_LAYERED, &img_request->flags); 1671 smp_mb(); 1672 } 1673 1674 static bool img_request_layered_test(struct rbd_img_request *img_request) 1675 { 1676 smp_mb(); 1677 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0; 1678 } 1679 1680 static enum obj_operation_type 1681 rbd_img_request_op_type(struct rbd_img_request *img_request) 1682 { 1683 if (img_request_write_test(img_request)) 1684 return OBJ_OP_WRITE; 1685 else if (img_request_discard_test(img_request)) 1686 return OBJ_OP_DISCARD; 1687 else 1688 return OBJ_OP_READ; 1689 } 1690 1691 static void 1692 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) 1693 { 1694 u64 xferred = obj_request->xferred; 1695 u64 length = obj_request->length; 1696 1697 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__, 1698 obj_request, obj_request->img_request, obj_request->result, 1699 xferred, length); 1700 /* 1701 * ENOENT means a hole in the image. We zero-fill the entire 1702 * length of the request. A short read also implies zero-fill 1703 * to the end of the request. An error requires the whole 1704 * length of the request to be reported finished with an error 1705 * to the block layer. In each case we update the xferred 1706 * count to indicate the whole request was satisfied. 1707 */ 1708 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA); 1709 if (obj_request->result == -ENOENT) { 1710 if (obj_request->type == OBJ_REQUEST_BIO) 1711 zero_bio_chain(obj_request->bio_list, 0); 1712 else 1713 zero_pages(obj_request->pages, 0, length); 1714 obj_request->result = 0; 1715 } else if (xferred < length && !obj_request->result) { 1716 if (obj_request->type == OBJ_REQUEST_BIO) 1717 zero_bio_chain(obj_request->bio_list, xferred); 1718 else 1719 zero_pages(obj_request->pages, xferred, length); 1720 } 1721 obj_request->xferred = length; 1722 obj_request_done_set(obj_request); 1723 } 1724 1725 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request) 1726 { 1727 dout("%s: obj %p cb %p\n", __func__, obj_request, 1728 obj_request->callback); 1729 obj_request->callback(obj_request); 1730 } 1731 1732 static void rbd_obj_request_error(struct rbd_obj_request *obj_request, int err) 1733 { 1734 obj_request->result = err; 1735 obj_request->xferred = 0; 1736 /* 1737 * kludge - mirror rbd_obj_request_submit() to match a put in 1738 * rbd_img_obj_callback() 1739 */ 1740 if (obj_request_img_data_test(obj_request)) { 1741 WARN_ON(obj_request->callback != rbd_img_obj_callback); 1742 rbd_img_request_get(obj_request->img_request); 1743 } 1744 obj_request_done_set(obj_request); 1745 rbd_obj_request_complete(obj_request); 1746 } 1747 1748 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request) 1749 { 1750 struct rbd_img_request *img_request = NULL; 1751 struct rbd_device *rbd_dev = NULL; 1752 bool layered = false; 1753 1754 if (obj_request_img_data_test(obj_request)) { 1755 img_request = obj_request->img_request; 1756 layered = img_request && img_request_layered_test(img_request); 1757 rbd_dev = img_request->rbd_dev; 1758 } 1759 1760 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__, 1761 obj_request, img_request, obj_request->result, 1762 obj_request->xferred, obj_request->length); 1763 if (layered && obj_request->result == -ENOENT && 1764 obj_request->img_offset < rbd_dev->parent_overlap) 1765 rbd_img_parent_read(obj_request); 1766 else if (img_request) 1767 rbd_img_obj_request_read_callback(obj_request); 1768 else 1769 obj_request_done_set(obj_request); 1770 } 1771 1772 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request) 1773 { 1774 dout("%s: obj %p result %d %llu\n", __func__, obj_request, 1775 obj_request->result, obj_request->length); 1776 /* 1777 * There is no such thing as a successful short write. Set 1778 * it to our originally-requested length. 1779 */ 1780 obj_request->xferred = obj_request->length; 1781 obj_request_done_set(obj_request); 1782 } 1783 1784 static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request) 1785 { 1786 dout("%s: obj %p result %d %llu\n", __func__, obj_request, 1787 obj_request->result, obj_request->length); 1788 /* 1789 * There is no such thing as a successful short discard. Set 1790 * it to our originally-requested length. 1791 */ 1792 obj_request->xferred = obj_request->length; 1793 /* discarding a non-existent object is not a problem */ 1794 if (obj_request->result == -ENOENT) 1795 obj_request->result = 0; 1796 obj_request_done_set(obj_request); 1797 } 1798 1799 /* 1800 * For a simple stat call there's nothing to do. We'll do more if 1801 * this is part of a write sequence for a layered image. 1802 */ 1803 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request) 1804 { 1805 dout("%s: obj %p\n", __func__, obj_request); 1806 obj_request_done_set(obj_request); 1807 } 1808 1809 static void rbd_osd_call_callback(struct rbd_obj_request *obj_request) 1810 { 1811 dout("%s: obj %p\n", __func__, obj_request); 1812 1813 if (obj_request_img_data_test(obj_request)) 1814 rbd_osd_copyup_callback(obj_request); 1815 else 1816 obj_request_done_set(obj_request); 1817 } 1818 1819 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req) 1820 { 1821 struct rbd_obj_request *obj_request = osd_req->r_priv; 1822 u16 opcode; 1823 1824 dout("%s: osd_req %p\n", __func__, osd_req); 1825 rbd_assert(osd_req == obj_request->osd_req); 1826 if (obj_request_img_data_test(obj_request)) { 1827 rbd_assert(obj_request->img_request); 1828 rbd_assert(obj_request->which != BAD_WHICH); 1829 } else { 1830 rbd_assert(obj_request->which == BAD_WHICH); 1831 } 1832 1833 if (osd_req->r_result < 0) 1834 obj_request->result = osd_req->r_result; 1835 1836 /* 1837 * We support a 64-bit length, but ultimately it has to be 1838 * passed to the block layer, which just supports a 32-bit 1839 * length field. 1840 */ 1841 obj_request->xferred = osd_req->r_ops[0].outdata_len; 1842 rbd_assert(obj_request->xferred < (u64)UINT_MAX); 1843 1844 opcode = osd_req->r_ops[0].op; 1845 switch (opcode) { 1846 case CEPH_OSD_OP_READ: 1847 rbd_osd_read_callback(obj_request); 1848 break; 1849 case CEPH_OSD_OP_SETALLOCHINT: 1850 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE || 1851 osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL); 1852 /* fall through */ 1853 case CEPH_OSD_OP_WRITE: 1854 case CEPH_OSD_OP_WRITEFULL: 1855 rbd_osd_write_callback(obj_request); 1856 break; 1857 case CEPH_OSD_OP_STAT: 1858 rbd_osd_stat_callback(obj_request); 1859 break; 1860 case CEPH_OSD_OP_DELETE: 1861 case CEPH_OSD_OP_TRUNCATE: 1862 case CEPH_OSD_OP_ZERO: 1863 rbd_osd_discard_callback(obj_request); 1864 break; 1865 case CEPH_OSD_OP_CALL: 1866 rbd_osd_call_callback(obj_request); 1867 break; 1868 default: 1869 rbd_warn(NULL, "unexpected OSD op: object_no %016llx opcode %d", 1870 obj_request->object_no, opcode); 1871 break; 1872 } 1873 1874 if (obj_request_done_test(obj_request)) 1875 rbd_obj_request_complete(obj_request); 1876 } 1877 1878 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request) 1879 { 1880 struct ceph_osd_request *osd_req = obj_request->osd_req; 1881 1882 rbd_assert(obj_request_img_data_test(obj_request)); 1883 osd_req->r_snapid = obj_request->img_request->snap_id; 1884 } 1885 1886 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request) 1887 { 1888 struct ceph_osd_request *osd_req = obj_request->osd_req; 1889 1890 ktime_get_real_ts(&osd_req->r_mtime); 1891 osd_req->r_data_offset = obj_request->offset; 1892 } 1893 1894 static struct ceph_osd_request * 1895 __rbd_osd_req_create(struct rbd_device *rbd_dev, 1896 struct ceph_snap_context *snapc, 1897 int num_ops, unsigned int flags, 1898 struct rbd_obj_request *obj_request) 1899 { 1900 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 1901 struct ceph_osd_request *req; 1902 const char *name_format = rbd_dev->image_format == 1 ? 1903 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT; 1904 1905 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO); 1906 if (!req) 1907 return NULL; 1908 1909 req->r_flags = flags; 1910 req->r_callback = rbd_osd_req_callback; 1911 req->r_priv = obj_request; 1912 1913 req->r_base_oloc.pool = rbd_dev->layout.pool_id; 1914 if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format, 1915 rbd_dev->header.object_prefix, obj_request->object_no)) 1916 goto err_req; 1917 1918 if (ceph_osdc_alloc_messages(req, GFP_NOIO)) 1919 goto err_req; 1920 1921 return req; 1922 1923 err_req: 1924 ceph_osdc_put_request(req); 1925 return NULL; 1926 } 1927 1928 /* 1929 * Create an osd request. A read request has one osd op (read). 1930 * A write request has either one (watch) or two (hint+write) osd ops. 1931 * (All rbd data writes are prefixed with an allocation hint op, but 1932 * technically osd watch is a write request, hence this distinction.) 1933 */ 1934 static struct ceph_osd_request *rbd_osd_req_create( 1935 struct rbd_device *rbd_dev, 1936 enum obj_operation_type op_type, 1937 unsigned int num_ops, 1938 struct rbd_obj_request *obj_request) 1939 { 1940 struct ceph_snap_context *snapc = NULL; 1941 1942 if (obj_request_img_data_test(obj_request) && 1943 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) { 1944 struct rbd_img_request *img_request = obj_request->img_request; 1945 if (op_type == OBJ_OP_WRITE) { 1946 rbd_assert(img_request_write_test(img_request)); 1947 } else { 1948 rbd_assert(img_request_discard_test(img_request)); 1949 } 1950 snapc = img_request->snapc; 1951 } 1952 1953 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2)); 1954 1955 return __rbd_osd_req_create(rbd_dev, snapc, num_ops, 1956 (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) ? 1957 CEPH_OSD_FLAG_WRITE : CEPH_OSD_FLAG_READ, obj_request); 1958 } 1959 1960 /* 1961 * Create a copyup osd request based on the information in the object 1962 * request supplied. A copyup request has two or three osd ops, a 1963 * copyup method call, potentially a hint op, and a write or truncate 1964 * or zero op. 1965 */ 1966 static struct ceph_osd_request * 1967 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) 1968 { 1969 struct rbd_img_request *img_request; 1970 int num_osd_ops = 3; 1971 1972 rbd_assert(obj_request_img_data_test(obj_request)); 1973 img_request = obj_request->img_request; 1974 rbd_assert(img_request); 1975 rbd_assert(img_request_write_test(img_request) || 1976 img_request_discard_test(img_request)); 1977 1978 if (img_request_discard_test(img_request)) 1979 num_osd_ops = 2; 1980 1981 return __rbd_osd_req_create(img_request->rbd_dev, 1982 img_request->snapc, num_osd_ops, 1983 CEPH_OSD_FLAG_WRITE, obj_request); 1984 } 1985 1986 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req) 1987 { 1988 ceph_osdc_put_request(osd_req); 1989 } 1990 1991 static struct rbd_obj_request * 1992 rbd_obj_request_create(enum obj_request_type type) 1993 { 1994 struct rbd_obj_request *obj_request; 1995 1996 rbd_assert(obj_request_type_valid(type)); 1997 1998 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO); 1999 if (!obj_request) 2000 return NULL; 2001 2002 obj_request->which = BAD_WHICH; 2003 obj_request->type = type; 2004 INIT_LIST_HEAD(&obj_request->links); 2005 kref_init(&obj_request->kref); 2006 2007 dout("%s %p\n", __func__, obj_request); 2008 return obj_request; 2009 } 2010 2011 static void rbd_obj_request_destroy(struct kref *kref) 2012 { 2013 struct rbd_obj_request *obj_request; 2014 2015 obj_request = container_of(kref, struct rbd_obj_request, kref); 2016 2017 dout("%s: obj %p\n", __func__, obj_request); 2018 2019 rbd_assert(obj_request->img_request == NULL); 2020 rbd_assert(obj_request->which == BAD_WHICH); 2021 2022 if (obj_request->osd_req) 2023 rbd_osd_req_destroy(obj_request->osd_req); 2024 2025 rbd_assert(obj_request_type_valid(obj_request->type)); 2026 switch (obj_request->type) { 2027 case OBJ_REQUEST_NODATA: 2028 break; /* Nothing to do */ 2029 case OBJ_REQUEST_BIO: 2030 if (obj_request->bio_list) 2031 bio_chain_put(obj_request->bio_list); 2032 break; 2033 case OBJ_REQUEST_PAGES: 2034 /* img_data requests don't own their page array */ 2035 if (obj_request->pages && 2036 !obj_request_img_data_test(obj_request)) 2037 ceph_release_page_vector(obj_request->pages, 2038 obj_request->page_count); 2039 break; 2040 } 2041 2042 kmem_cache_free(rbd_obj_request_cache, obj_request); 2043 } 2044 2045 /* It's OK to call this for a device with no parent */ 2046 2047 static void rbd_spec_put(struct rbd_spec *spec); 2048 static void rbd_dev_unparent(struct rbd_device *rbd_dev) 2049 { 2050 rbd_dev_remove_parent(rbd_dev); 2051 rbd_spec_put(rbd_dev->parent_spec); 2052 rbd_dev->parent_spec = NULL; 2053 rbd_dev->parent_overlap = 0; 2054 } 2055 2056 /* 2057 * Parent image reference counting is used to determine when an 2058 * image's parent fields can be safely torn down--after there are no 2059 * more in-flight requests to the parent image. When the last 2060 * reference is dropped, cleaning them up is safe. 2061 */ 2062 static void rbd_dev_parent_put(struct rbd_device *rbd_dev) 2063 { 2064 int counter; 2065 2066 if (!rbd_dev->parent_spec) 2067 return; 2068 2069 counter = atomic_dec_return_safe(&rbd_dev->parent_ref); 2070 if (counter > 0) 2071 return; 2072 2073 /* Last reference; clean up parent data structures */ 2074 2075 if (!counter) 2076 rbd_dev_unparent(rbd_dev); 2077 else 2078 rbd_warn(rbd_dev, "parent reference underflow"); 2079 } 2080 2081 /* 2082 * If an image has a non-zero parent overlap, get a reference to its 2083 * parent. 2084 * 2085 * Returns true if the rbd device has a parent with a non-zero 2086 * overlap and a reference for it was successfully taken, or 2087 * false otherwise. 2088 */ 2089 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) 2090 { 2091 int counter = 0; 2092 2093 if (!rbd_dev->parent_spec) 2094 return false; 2095 2096 down_read(&rbd_dev->header_rwsem); 2097 if (rbd_dev->parent_overlap) 2098 counter = atomic_inc_return_safe(&rbd_dev->parent_ref); 2099 up_read(&rbd_dev->header_rwsem); 2100 2101 if (counter < 0) 2102 rbd_warn(rbd_dev, "parent reference overflow"); 2103 2104 return counter > 0; 2105 } 2106 2107 /* 2108 * Caller is responsible for filling in the list of object requests 2109 * that comprises the image request, and the Linux request pointer 2110 * (if there is one). 2111 */ 2112 static struct rbd_img_request *rbd_img_request_create( 2113 struct rbd_device *rbd_dev, 2114 u64 offset, u64 length, 2115 enum obj_operation_type op_type, 2116 struct ceph_snap_context *snapc) 2117 { 2118 struct rbd_img_request *img_request; 2119 2120 img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO); 2121 if (!img_request) 2122 return NULL; 2123 2124 img_request->rbd_dev = rbd_dev; 2125 img_request->offset = offset; 2126 img_request->length = length; 2127 if (op_type == OBJ_OP_DISCARD) { 2128 img_request_discard_set(img_request); 2129 img_request->snapc = snapc; 2130 } else if (op_type == OBJ_OP_WRITE) { 2131 img_request_write_set(img_request); 2132 img_request->snapc = snapc; 2133 } else { 2134 img_request->snap_id = rbd_dev->spec->snap_id; 2135 } 2136 if (rbd_dev_parent_get(rbd_dev)) 2137 img_request_layered_set(img_request); 2138 2139 spin_lock_init(&img_request->completion_lock); 2140 INIT_LIST_HEAD(&img_request->obj_requests); 2141 kref_init(&img_request->kref); 2142 2143 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev, 2144 obj_op_name(op_type), offset, length, img_request); 2145 2146 return img_request; 2147 } 2148 2149 static void rbd_img_request_destroy(struct kref *kref) 2150 { 2151 struct rbd_img_request *img_request; 2152 struct rbd_obj_request *obj_request; 2153 struct rbd_obj_request *next_obj_request; 2154 2155 img_request = container_of(kref, struct rbd_img_request, kref); 2156 2157 dout("%s: img %p\n", __func__, img_request); 2158 2159 for_each_obj_request_safe(img_request, obj_request, next_obj_request) 2160 rbd_img_obj_request_del(img_request, obj_request); 2161 rbd_assert(img_request->obj_request_count == 0); 2162 2163 if (img_request_layered_test(img_request)) { 2164 img_request_layered_clear(img_request); 2165 rbd_dev_parent_put(img_request->rbd_dev); 2166 } 2167 2168 if (img_request_write_test(img_request) || 2169 img_request_discard_test(img_request)) 2170 ceph_put_snap_context(img_request->snapc); 2171 2172 kmem_cache_free(rbd_img_request_cache, img_request); 2173 } 2174 2175 static struct rbd_img_request *rbd_parent_request_create( 2176 struct rbd_obj_request *obj_request, 2177 u64 img_offset, u64 length) 2178 { 2179 struct rbd_img_request *parent_request; 2180 struct rbd_device *rbd_dev; 2181 2182 rbd_assert(obj_request->img_request); 2183 rbd_dev = obj_request->img_request->rbd_dev; 2184 2185 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset, 2186 length, OBJ_OP_READ, NULL); 2187 if (!parent_request) 2188 return NULL; 2189 2190 img_request_child_set(parent_request); 2191 rbd_obj_request_get(obj_request); 2192 parent_request->obj_request = obj_request; 2193 2194 return parent_request; 2195 } 2196 2197 static void rbd_parent_request_destroy(struct kref *kref) 2198 { 2199 struct rbd_img_request *parent_request; 2200 struct rbd_obj_request *orig_request; 2201 2202 parent_request = container_of(kref, struct rbd_img_request, kref); 2203 orig_request = parent_request->obj_request; 2204 2205 parent_request->obj_request = NULL; 2206 rbd_obj_request_put(orig_request); 2207 img_request_child_clear(parent_request); 2208 2209 rbd_img_request_destroy(kref); 2210 } 2211 2212 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) 2213 { 2214 struct rbd_img_request *img_request; 2215 unsigned int xferred; 2216 int result; 2217 bool more; 2218 2219 rbd_assert(obj_request_img_data_test(obj_request)); 2220 img_request = obj_request->img_request; 2221 2222 rbd_assert(obj_request->xferred <= (u64)UINT_MAX); 2223 xferred = (unsigned int)obj_request->xferred; 2224 result = obj_request->result; 2225 if (result) { 2226 struct rbd_device *rbd_dev = img_request->rbd_dev; 2227 enum obj_operation_type op_type; 2228 2229 if (img_request_discard_test(img_request)) 2230 op_type = OBJ_OP_DISCARD; 2231 else if (img_request_write_test(img_request)) 2232 op_type = OBJ_OP_WRITE; 2233 else 2234 op_type = OBJ_OP_READ; 2235 2236 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)", 2237 obj_op_name(op_type), obj_request->length, 2238 obj_request->img_offset, obj_request->offset); 2239 rbd_warn(rbd_dev, " result %d xferred %x", 2240 result, xferred); 2241 if (!img_request->result) 2242 img_request->result = result; 2243 /* 2244 * Need to end I/O on the entire obj_request worth of 2245 * bytes in case of error. 2246 */ 2247 xferred = obj_request->length; 2248 } 2249 2250 if (img_request_child_test(img_request)) { 2251 rbd_assert(img_request->obj_request != NULL); 2252 more = obj_request->which < img_request->obj_request_count - 1; 2253 } else { 2254 blk_status_t status = errno_to_blk_status(result); 2255 2256 rbd_assert(img_request->rq != NULL); 2257 2258 more = blk_update_request(img_request->rq, status, xferred); 2259 if (!more) 2260 __blk_mq_end_request(img_request->rq, status); 2261 } 2262 2263 return more; 2264 } 2265 2266 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) 2267 { 2268 struct rbd_img_request *img_request; 2269 u32 which = obj_request->which; 2270 bool more = true; 2271 2272 rbd_assert(obj_request_img_data_test(obj_request)); 2273 img_request = obj_request->img_request; 2274 2275 dout("%s: img %p obj %p\n", __func__, img_request, obj_request); 2276 rbd_assert(img_request != NULL); 2277 rbd_assert(img_request->obj_request_count > 0); 2278 rbd_assert(which != BAD_WHICH); 2279 rbd_assert(which < img_request->obj_request_count); 2280 2281 spin_lock_irq(&img_request->completion_lock); 2282 if (which != img_request->next_completion) 2283 goto out; 2284 2285 for_each_obj_request_from(img_request, obj_request) { 2286 rbd_assert(more); 2287 rbd_assert(which < img_request->obj_request_count); 2288 2289 if (!obj_request_done_test(obj_request)) 2290 break; 2291 more = rbd_img_obj_end_request(obj_request); 2292 which++; 2293 } 2294 2295 rbd_assert(more ^ (which == img_request->obj_request_count)); 2296 img_request->next_completion = which; 2297 out: 2298 spin_unlock_irq(&img_request->completion_lock); 2299 rbd_img_request_put(img_request); 2300 2301 if (!more) 2302 rbd_img_request_complete(img_request); 2303 } 2304 2305 /* 2306 * Add individual osd ops to the given ceph_osd_request and prepare 2307 * them for submission. num_ops is the current number of 2308 * osd operations already to the object request. 2309 */ 2310 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request, 2311 struct ceph_osd_request *osd_request, 2312 enum obj_operation_type op_type, 2313 unsigned int num_ops) 2314 { 2315 struct rbd_img_request *img_request = obj_request->img_request; 2316 struct rbd_device *rbd_dev = img_request->rbd_dev; 2317 u64 object_size = rbd_obj_bytes(&rbd_dev->header); 2318 u64 offset = obj_request->offset; 2319 u64 length = obj_request->length; 2320 u64 img_end; 2321 u16 opcode; 2322 2323 if (op_type == OBJ_OP_DISCARD) { 2324 if (!offset && length == object_size && 2325 (!img_request_layered_test(img_request) || 2326 !obj_request_overlaps_parent(obj_request))) { 2327 opcode = CEPH_OSD_OP_DELETE; 2328 } else if ((offset + length == object_size)) { 2329 opcode = CEPH_OSD_OP_TRUNCATE; 2330 } else { 2331 down_read(&rbd_dev->header_rwsem); 2332 img_end = rbd_dev->header.image_size; 2333 up_read(&rbd_dev->header_rwsem); 2334 2335 if (obj_request->img_offset + length == img_end) 2336 opcode = CEPH_OSD_OP_TRUNCATE; 2337 else 2338 opcode = CEPH_OSD_OP_ZERO; 2339 } 2340 } else if (op_type == OBJ_OP_WRITE) { 2341 if (!offset && length == object_size) 2342 opcode = CEPH_OSD_OP_WRITEFULL; 2343 else 2344 opcode = CEPH_OSD_OP_WRITE; 2345 osd_req_op_alloc_hint_init(osd_request, num_ops, 2346 object_size, object_size); 2347 num_ops++; 2348 } else { 2349 opcode = CEPH_OSD_OP_READ; 2350 } 2351 2352 if (opcode == CEPH_OSD_OP_DELETE) 2353 osd_req_op_init(osd_request, num_ops, opcode, 0); 2354 else 2355 osd_req_op_extent_init(osd_request, num_ops, opcode, 2356 offset, length, 0, 0); 2357 2358 if (obj_request->type == OBJ_REQUEST_BIO) 2359 osd_req_op_extent_osd_data_bio(osd_request, num_ops, 2360 obj_request->bio_list, length); 2361 else if (obj_request->type == OBJ_REQUEST_PAGES) 2362 osd_req_op_extent_osd_data_pages(osd_request, num_ops, 2363 obj_request->pages, length, 2364 offset & ~PAGE_MASK, false, false); 2365 2366 /* Discards are also writes */ 2367 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) 2368 rbd_osd_req_format_write(obj_request); 2369 else 2370 rbd_osd_req_format_read(obj_request); 2371 } 2372 2373 /* 2374 * Split up an image request into one or more object requests, each 2375 * to a different object. The "type" parameter indicates whether 2376 * "data_desc" is the pointer to the head of a list of bio 2377 * structures, or the base of a page array. In either case this 2378 * function assumes data_desc describes memory sufficient to hold 2379 * all data described by the image request. 2380 */ 2381 static int rbd_img_request_fill(struct rbd_img_request *img_request, 2382 enum obj_request_type type, 2383 void *data_desc) 2384 { 2385 struct rbd_device *rbd_dev = img_request->rbd_dev; 2386 struct rbd_obj_request *obj_request = NULL; 2387 struct rbd_obj_request *next_obj_request; 2388 struct bio *bio_list = NULL; 2389 unsigned int bio_offset = 0; 2390 struct page **pages = NULL; 2391 enum obj_operation_type op_type; 2392 u64 img_offset; 2393 u64 resid; 2394 2395 dout("%s: img %p type %d data_desc %p\n", __func__, img_request, 2396 (int)type, data_desc); 2397 2398 img_offset = img_request->offset; 2399 resid = img_request->length; 2400 rbd_assert(resid > 0); 2401 op_type = rbd_img_request_op_type(img_request); 2402 2403 if (type == OBJ_REQUEST_BIO) { 2404 bio_list = data_desc; 2405 rbd_assert(img_offset == 2406 bio_list->bi_iter.bi_sector << SECTOR_SHIFT); 2407 } else if (type == OBJ_REQUEST_PAGES) { 2408 pages = data_desc; 2409 } 2410 2411 while (resid) { 2412 struct ceph_osd_request *osd_req; 2413 u64 object_no = img_offset >> rbd_dev->header.obj_order; 2414 u64 offset = rbd_segment_offset(rbd_dev, img_offset); 2415 u64 length = rbd_segment_length(rbd_dev, img_offset, resid); 2416 2417 obj_request = rbd_obj_request_create(type); 2418 if (!obj_request) 2419 goto out_unwind; 2420 2421 obj_request->object_no = object_no; 2422 obj_request->offset = offset; 2423 obj_request->length = length; 2424 2425 /* 2426 * set obj_request->img_request before creating the 2427 * osd_request so that it gets the right snapc 2428 */ 2429 rbd_img_obj_request_add(img_request, obj_request); 2430 2431 if (type == OBJ_REQUEST_BIO) { 2432 unsigned int clone_size; 2433 2434 rbd_assert(length <= (u64)UINT_MAX); 2435 clone_size = (unsigned int)length; 2436 obj_request->bio_list = 2437 bio_chain_clone_range(&bio_list, 2438 &bio_offset, 2439 clone_size, 2440 GFP_NOIO); 2441 if (!obj_request->bio_list) 2442 goto out_unwind; 2443 } else if (type == OBJ_REQUEST_PAGES) { 2444 unsigned int page_count; 2445 2446 obj_request->pages = pages; 2447 page_count = (u32)calc_pages_for(offset, length); 2448 obj_request->page_count = page_count; 2449 if ((offset + length) & ~PAGE_MASK) 2450 page_count--; /* more on last page */ 2451 pages += page_count; 2452 } 2453 2454 osd_req = rbd_osd_req_create(rbd_dev, op_type, 2455 (op_type == OBJ_OP_WRITE) ? 2 : 1, 2456 obj_request); 2457 if (!osd_req) 2458 goto out_unwind; 2459 2460 obj_request->osd_req = osd_req; 2461 obj_request->callback = rbd_img_obj_callback; 2462 obj_request->img_offset = img_offset; 2463 2464 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0); 2465 2466 img_offset += length; 2467 resid -= length; 2468 } 2469 2470 return 0; 2471 2472 out_unwind: 2473 for_each_obj_request_safe(img_request, obj_request, next_obj_request) 2474 rbd_img_obj_request_del(img_request, obj_request); 2475 2476 return -ENOMEM; 2477 } 2478 2479 static void 2480 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request) 2481 { 2482 struct rbd_img_request *img_request; 2483 struct rbd_device *rbd_dev; 2484 struct page **pages; 2485 u32 page_count; 2486 2487 dout("%s: obj %p\n", __func__, obj_request); 2488 2489 rbd_assert(obj_request->type == OBJ_REQUEST_BIO || 2490 obj_request->type == OBJ_REQUEST_NODATA); 2491 rbd_assert(obj_request_img_data_test(obj_request)); 2492 img_request = obj_request->img_request; 2493 rbd_assert(img_request); 2494 2495 rbd_dev = img_request->rbd_dev; 2496 rbd_assert(rbd_dev); 2497 2498 pages = obj_request->copyup_pages; 2499 rbd_assert(pages != NULL); 2500 obj_request->copyup_pages = NULL; 2501 page_count = obj_request->copyup_page_count; 2502 rbd_assert(page_count); 2503 obj_request->copyup_page_count = 0; 2504 ceph_release_page_vector(pages, page_count); 2505 2506 /* 2507 * We want the transfer count to reflect the size of the 2508 * original write request. There is no such thing as a 2509 * successful short write, so if the request was successful 2510 * we can just set it to the originally-requested length. 2511 */ 2512 if (!obj_request->result) 2513 obj_request->xferred = obj_request->length; 2514 2515 obj_request_done_set(obj_request); 2516 } 2517 2518 static void 2519 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) 2520 { 2521 struct rbd_obj_request *orig_request; 2522 struct ceph_osd_request *osd_req; 2523 struct rbd_device *rbd_dev; 2524 struct page **pages; 2525 enum obj_operation_type op_type; 2526 u32 page_count; 2527 int img_result; 2528 u64 parent_length; 2529 2530 rbd_assert(img_request_child_test(img_request)); 2531 2532 /* First get what we need from the image request */ 2533 2534 pages = img_request->copyup_pages; 2535 rbd_assert(pages != NULL); 2536 img_request->copyup_pages = NULL; 2537 page_count = img_request->copyup_page_count; 2538 rbd_assert(page_count); 2539 img_request->copyup_page_count = 0; 2540 2541 orig_request = img_request->obj_request; 2542 rbd_assert(orig_request != NULL); 2543 rbd_assert(obj_request_type_valid(orig_request->type)); 2544 img_result = img_request->result; 2545 parent_length = img_request->length; 2546 rbd_assert(img_result || parent_length == img_request->xferred); 2547 rbd_img_request_put(img_request); 2548 2549 rbd_assert(orig_request->img_request); 2550 rbd_dev = orig_request->img_request->rbd_dev; 2551 rbd_assert(rbd_dev); 2552 2553 /* 2554 * If the overlap has become 0 (most likely because the 2555 * image has been flattened) we need to free the pages 2556 * and re-submit the original write request. 2557 */ 2558 if (!rbd_dev->parent_overlap) { 2559 ceph_release_page_vector(pages, page_count); 2560 rbd_obj_request_submit(orig_request); 2561 return; 2562 } 2563 2564 if (img_result) 2565 goto out_err; 2566 2567 /* 2568 * The original osd request is of no use to use any more. 2569 * We need a new one that can hold the three ops in a copyup 2570 * request. Allocate the new copyup osd request for the 2571 * original request, and release the old one. 2572 */ 2573 img_result = -ENOMEM; 2574 osd_req = rbd_osd_req_create_copyup(orig_request); 2575 if (!osd_req) 2576 goto out_err; 2577 rbd_osd_req_destroy(orig_request->osd_req); 2578 orig_request->osd_req = osd_req; 2579 orig_request->copyup_pages = pages; 2580 orig_request->copyup_page_count = page_count; 2581 2582 /* Initialize the copyup op */ 2583 2584 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); 2585 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0, 2586 false, false); 2587 2588 /* Add the other op(s) */ 2589 2590 op_type = rbd_img_request_op_type(orig_request->img_request); 2591 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1); 2592 2593 /* All set, send it off. */ 2594 2595 rbd_obj_request_submit(orig_request); 2596 return; 2597 2598 out_err: 2599 ceph_release_page_vector(pages, page_count); 2600 rbd_obj_request_error(orig_request, img_result); 2601 } 2602 2603 /* 2604 * Read from the parent image the range of data that covers the 2605 * entire target of the given object request. This is used for 2606 * satisfying a layered image write request when the target of an 2607 * object request from the image request does not exist. 2608 * 2609 * A page array big enough to hold the returned data is allocated 2610 * and supplied to rbd_img_request_fill() as the "data descriptor." 2611 * When the read completes, this page array will be transferred to 2612 * the original object request for the copyup operation. 2613 * 2614 * If an error occurs, it is recorded as the result of the original 2615 * object request in rbd_img_obj_exists_callback(). 2616 */ 2617 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) 2618 { 2619 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev; 2620 struct rbd_img_request *parent_request = NULL; 2621 u64 img_offset; 2622 u64 length; 2623 struct page **pages = NULL; 2624 u32 page_count; 2625 int result; 2626 2627 rbd_assert(rbd_dev->parent != NULL); 2628 2629 /* 2630 * Determine the byte range covered by the object in the 2631 * child image to which the original request was to be sent. 2632 */ 2633 img_offset = obj_request->img_offset - obj_request->offset; 2634 length = rbd_obj_bytes(&rbd_dev->header); 2635 2636 /* 2637 * There is no defined parent data beyond the parent 2638 * overlap, so limit what we read at that boundary if 2639 * necessary. 2640 */ 2641 if (img_offset + length > rbd_dev->parent_overlap) { 2642 rbd_assert(img_offset < rbd_dev->parent_overlap); 2643 length = rbd_dev->parent_overlap - img_offset; 2644 } 2645 2646 /* 2647 * Allocate a page array big enough to receive the data read 2648 * from the parent. 2649 */ 2650 page_count = (u32)calc_pages_for(0, length); 2651 pages = ceph_alloc_page_vector(page_count, GFP_NOIO); 2652 if (IS_ERR(pages)) { 2653 result = PTR_ERR(pages); 2654 pages = NULL; 2655 goto out_err; 2656 } 2657 2658 result = -ENOMEM; 2659 parent_request = rbd_parent_request_create(obj_request, 2660 img_offset, length); 2661 if (!parent_request) 2662 goto out_err; 2663 2664 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); 2665 if (result) 2666 goto out_err; 2667 2668 parent_request->copyup_pages = pages; 2669 parent_request->copyup_page_count = page_count; 2670 parent_request->callback = rbd_img_obj_parent_read_full_callback; 2671 2672 result = rbd_img_request_submit(parent_request); 2673 if (!result) 2674 return 0; 2675 2676 parent_request->copyup_pages = NULL; 2677 parent_request->copyup_page_count = 0; 2678 out_err: 2679 if (pages) 2680 ceph_release_page_vector(pages, page_count); 2681 if (parent_request) 2682 rbd_img_request_put(parent_request); 2683 return result; 2684 } 2685 2686 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) 2687 { 2688 struct rbd_obj_request *orig_request; 2689 struct rbd_device *rbd_dev; 2690 int result; 2691 2692 rbd_assert(!obj_request_img_data_test(obj_request)); 2693 2694 /* 2695 * All we need from the object request is the original 2696 * request and the result of the STAT op. Grab those, then 2697 * we're done with the request. 2698 */ 2699 orig_request = obj_request->obj_request; 2700 obj_request->obj_request = NULL; 2701 rbd_obj_request_put(orig_request); 2702 rbd_assert(orig_request); 2703 rbd_assert(orig_request->img_request); 2704 2705 result = obj_request->result; 2706 obj_request->result = 0; 2707 2708 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__, 2709 obj_request, orig_request, result, 2710 obj_request->xferred, obj_request->length); 2711 rbd_obj_request_put(obj_request); 2712 2713 /* 2714 * If the overlap has become 0 (most likely because the 2715 * image has been flattened) we need to re-submit the 2716 * original request. 2717 */ 2718 rbd_dev = orig_request->img_request->rbd_dev; 2719 if (!rbd_dev->parent_overlap) { 2720 rbd_obj_request_submit(orig_request); 2721 return; 2722 } 2723 2724 /* 2725 * Our only purpose here is to determine whether the object 2726 * exists, and we don't want to treat the non-existence as 2727 * an error. If something else comes back, transfer the 2728 * error to the original request and complete it now. 2729 */ 2730 if (!result) { 2731 obj_request_existence_set(orig_request, true); 2732 } else if (result == -ENOENT) { 2733 obj_request_existence_set(orig_request, false); 2734 } else { 2735 goto fail_orig_request; 2736 } 2737 2738 /* 2739 * Resubmit the original request now that we have recorded 2740 * whether the target object exists. 2741 */ 2742 result = rbd_img_obj_request_submit(orig_request); 2743 if (result) 2744 goto fail_orig_request; 2745 2746 return; 2747 2748 fail_orig_request: 2749 rbd_obj_request_error(orig_request, result); 2750 } 2751 2752 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) 2753 { 2754 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev; 2755 struct rbd_obj_request *stat_request; 2756 struct page **pages; 2757 u32 page_count; 2758 size_t size; 2759 int ret; 2760 2761 stat_request = rbd_obj_request_create(OBJ_REQUEST_PAGES); 2762 if (!stat_request) 2763 return -ENOMEM; 2764 2765 stat_request->object_no = obj_request->object_no; 2766 2767 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, 2768 stat_request); 2769 if (!stat_request->osd_req) { 2770 ret = -ENOMEM; 2771 goto fail_stat_request; 2772 } 2773 2774 /* 2775 * The response data for a STAT call consists of: 2776 * le64 length; 2777 * struct { 2778 * le32 tv_sec; 2779 * le32 tv_nsec; 2780 * } mtime; 2781 */ 2782 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32); 2783 page_count = (u32)calc_pages_for(0, size); 2784 pages = ceph_alloc_page_vector(page_count, GFP_NOIO); 2785 if (IS_ERR(pages)) { 2786 ret = PTR_ERR(pages); 2787 goto fail_stat_request; 2788 } 2789 2790 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0); 2791 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0, 2792 false, false); 2793 2794 rbd_obj_request_get(obj_request); 2795 stat_request->obj_request = obj_request; 2796 stat_request->pages = pages; 2797 stat_request->page_count = page_count; 2798 stat_request->callback = rbd_img_obj_exists_callback; 2799 2800 rbd_obj_request_submit(stat_request); 2801 return 0; 2802 2803 fail_stat_request: 2804 rbd_obj_request_put(stat_request); 2805 return ret; 2806 } 2807 2808 static bool img_obj_request_simple(struct rbd_obj_request *obj_request) 2809 { 2810 struct rbd_img_request *img_request = obj_request->img_request; 2811 struct rbd_device *rbd_dev = img_request->rbd_dev; 2812 2813 /* Reads */ 2814 if (!img_request_write_test(img_request) && 2815 !img_request_discard_test(img_request)) 2816 return true; 2817 2818 /* Non-layered writes */ 2819 if (!img_request_layered_test(img_request)) 2820 return true; 2821 2822 /* 2823 * Layered writes outside of the parent overlap range don't 2824 * share any data with the parent. 2825 */ 2826 if (!obj_request_overlaps_parent(obj_request)) 2827 return true; 2828 2829 /* 2830 * Entire-object layered writes - we will overwrite whatever 2831 * parent data there is anyway. 2832 */ 2833 if (!obj_request->offset && 2834 obj_request->length == rbd_obj_bytes(&rbd_dev->header)) 2835 return true; 2836 2837 /* 2838 * If the object is known to already exist, its parent data has 2839 * already been copied. 2840 */ 2841 if (obj_request_known_test(obj_request) && 2842 obj_request_exists_test(obj_request)) 2843 return true; 2844 2845 return false; 2846 } 2847 2848 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request) 2849 { 2850 rbd_assert(obj_request_img_data_test(obj_request)); 2851 rbd_assert(obj_request_type_valid(obj_request->type)); 2852 rbd_assert(obj_request->img_request); 2853 2854 if (img_obj_request_simple(obj_request)) { 2855 rbd_obj_request_submit(obj_request); 2856 return 0; 2857 } 2858 2859 /* 2860 * It's a layered write. The target object might exist but 2861 * we may not know that yet. If we know it doesn't exist, 2862 * start by reading the data for the full target object from 2863 * the parent so we can use it for a copyup to the target. 2864 */ 2865 if (obj_request_known_test(obj_request)) 2866 return rbd_img_obj_parent_read_full(obj_request); 2867 2868 /* We don't know whether the target exists. Go find out. */ 2869 2870 return rbd_img_obj_exists_submit(obj_request); 2871 } 2872 2873 static int rbd_img_request_submit(struct rbd_img_request *img_request) 2874 { 2875 struct rbd_obj_request *obj_request; 2876 struct rbd_obj_request *next_obj_request; 2877 int ret = 0; 2878 2879 dout("%s: img %p\n", __func__, img_request); 2880 2881 rbd_img_request_get(img_request); 2882 for_each_obj_request_safe(img_request, obj_request, next_obj_request) { 2883 ret = rbd_img_obj_request_submit(obj_request); 2884 if (ret) 2885 goto out_put_ireq; 2886 } 2887 2888 out_put_ireq: 2889 rbd_img_request_put(img_request); 2890 return ret; 2891 } 2892 2893 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) 2894 { 2895 struct rbd_obj_request *obj_request; 2896 struct rbd_device *rbd_dev; 2897 u64 obj_end; 2898 u64 img_xferred; 2899 int img_result; 2900 2901 rbd_assert(img_request_child_test(img_request)); 2902 2903 /* First get what we need from the image request and release it */ 2904 2905 obj_request = img_request->obj_request; 2906 img_xferred = img_request->xferred; 2907 img_result = img_request->result; 2908 rbd_img_request_put(img_request); 2909 2910 /* 2911 * If the overlap has become 0 (most likely because the 2912 * image has been flattened) we need to re-submit the 2913 * original request. 2914 */ 2915 rbd_assert(obj_request); 2916 rbd_assert(obj_request->img_request); 2917 rbd_dev = obj_request->img_request->rbd_dev; 2918 if (!rbd_dev->parent_overlap) { 2919 rbd_obj_request_submit(obj_request); 2920 return; 2921 } 2922 2923 obj_request->result = img_result; 2924 if (obj_request->result) 2925 goto out; 2926 2927 /* 2928 * We need to zero anything beyond the parent overlap 2929 * boundary. Since rbd_img_obj_request_read_callback() 2930 * will zero anything beyond the end of a short read, an 2931 * easy way to do this is to pretend the data from the 2932 * parent came up short--ending at the overlap boundary. 2933 */ 2934 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); 2935 obj_end = obj_request->img_offset + obj_request->length; 2936 if (obj_end > rbd_dev->parent_overlap) { 2937 u64 xferred = 0; 2938 2939 if (obj_request->img_offset < rbd_dev->parent_overlap) 2940 xferred = rbd_dev->parent_overlap - 2941 obj_request->img_offset; 2942 2943 obj_request->xferred = min(img_xferred, xferred); 2944 } else { 2945 obj_request->xferred = img_xferred; 2946 } 2947 out: 2948 rbd_img_obj_request_read_callback(obj_request); 2949 rbd_obj_request_complete(obj_request); 2950 } 2951 2952 static void rbd_img_parent_read(struct rbd_obj_request *obj_request) 2953 { 2954 struct rbd_img_request *img_request; 2955 int result; 2956 2957 rbd_assert(obj_request_img_data_test(obj_request)); 2958 rbd_assert(obj_request->img_request != NULL); 2959 rbd_assert(obj_request->result == (s32) -ENOENT); 2960 rbd_assert(obj_request_type_valid(obj_request->type)); 2961 2962 /* rbd_read_finish(obj_request, obj_request->length); */ 2963 img_request = rbd_parent_request_create(obj_request, 2964 obj_request->img_offset, 2965 obj_request->length); 2966 result = -ENOMEM; 2967 if (!img_request) 2968 goto out_err; 2969 2970 if (obj_request->type == OBJ_REQUEST_BIO) 2971 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, 2972 obj_request->bio_list); 2973 else 2974 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES, 2975 obj_request->pages); 2976 if (result) 2977 goto out_err; 2978 2979 img_request->callback = rbd_img_parent_read_callback; 2980 result = rbd_img_request_submit(img_request); 2981 if (result) 2982 goto out_err; 2983 2984 return; 2985 out_err: 2986 if (img_request) 2987 rbd_img_request_put(img_request); 2988 obj_request->result = result; 2989 obj_request->xferred = 0; 2990 obj_request_done_set(obj_request); 2991 } 2992 2993 static const struct rbd_client_id rbd_empty_cid; 2994 2995 static bool rbd_cid_equal(const struct rbd_client_id *lhs, 2996 const struct rbd_client_id *rhs) 2997 { 2998 return lhs->gid == rhs->gid && lhs->handle == rhs->handle; 2999 } 3000 3001 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev) 3002 { 3003 struct rbd_client_id cid; 3004 3005 mutex_lock(&rbd_dev->watch_mutex); 3006 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client); 3007 cid.handle = rbd_dev->watch_cookie; 3008 mutex_unlock(&rbd_dev->watch_mutex); 3009 return cid; 3010 } 3011 3012 /* 3013 * lock_rwsem must be held for write 3014 */ 3015 static void rbd_set_owner_cid(struct rbd_device *rbd_dev, 3016 const struct rbd_client_id *cid) 3017 { 3018 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev, 3019 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle, 3020 cid->gid, cid->handle); 3021 rbd_dev->owner_cid = *cid; /* struct */ 3022 } 3023 3024 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf) 3025 { 3026 mutex_lock(&rbd_dev->watch_mutex); 3027 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie); 3028 mutex_unlock(&rbd_dev->watch_mutex); 3029 } 3030 3031 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie) 3032 { 3033 struct rbd_client_id cid = rbd_get_cid(rbd_dev); 3034 3035 strcpy(rbd_dev->lock_cookie, cookie); 3036 rbd_set_owner_cid(rbd_dev, &cid); 3037 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); 3038 } 3039 3040 /* 3041 * lock_rwsem must be held for write 3042 */ 3043 static int rbd_lock(struct rbd_device *rbd_dev) 3044 { 3045 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3046 char cookie[32]; 3047 int ret; 3048 3049 WARN_ON(__rbd_is_lock_owner(rbd_dev) || 3050 rbd_dev->lock_cookie[0] != '\0'); 3051 3052 format_lock_cookie(rbd_dev, cookie); 3053 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 3054 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, 3055 RBD_LOCK_TAG, "", 0); 3056 if (ret) 3057 return ret; 3058 3059 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; 3060 __rbd_lock(rbd_dev, cookie); 3061 return 0; 3062 } 3063 3064 /* 3065 * lock_rwsem must be held for write 3066 */ 3067 static void rbd_unlock(struct rbd_device *rbd_dev) 3068 { 3069 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3070 int ret; 3071 3072 WARN_ON(!__rbd_is_lock_owner(rbd_dev) || 3073 rbd_dev->lock_cookie[0] == '\0'); 3074 3075 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 3076 RBD_LOCK_NAME, rbd_dev->lock_cookie); 3077 if (ret && ret != -ENOENT) 3078 rbd_warn(rbd_dev, "failed to unlock: %d", ret); 3079 3080 /* treat errors as the image is unlocked */ 3081 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED; 3082 rbd_dev->lock_cookie[0] = '\0'; 3083 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 3084 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work); 3085 } 3086 3087 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev, 3088 enum rbd_notify_op notify_op, 3089 struct page ***preply_pages, 3090 size_t *preply_len) 3091 { 3092 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3093 struct rbd_client_id cid = rbd_get_cid(rbd_dev); 3094 int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN; 3095 char buf[buf_size]; 3096 void *p = buf; 3097 3098 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op); 3099 3100 /* encode *LockPayload NotifyMessage (op + ClientId) */ 3101 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN); 3102 ceph_encode_32(&p, notify_op); 3103 ceph_encode_64(&p, cid.gid); 3104 ceph_encode_64(&p, cid.handle); 3105 3106 return ceph_osdc_notify(osdc, &rbd_dev->header_oid, 3107 &rbd_dev->header_oloc, buf, buf_size, 3108 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len); 3109 } 3110 3111 static void rbd_notify_op_lock(struct rbd_device *rbd_dev, 3112 enum rbd_notify_op notify_op) 3113 { 3114 struct page **reply_pages; 3115 size_t reply_len; 3116 3117 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len); 3118 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len)); 3119 } 3120 3121 static void rbd_notify_acquired_lock(struct work_struct *work) 3122 { 3123 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 3124 acquired_lock_work); 3125 3126 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK); 3127 } 3128 3129 static void rbd_notify_released_lock(struct work_struct *work) 3130 { 3131 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 3132 released_lock_work); 3133 3134 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK); 3135 } 3136 3137 static int rbd_request_lock(struct rbd_device *rbd_dev) 3138 { 3139 struct page **reply_pages; 3140 size_t reply_len; 3141 bool lock_owner_responded = false; 3142 int ret; 3143 3144 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3145 3146 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK, 3147 &reply_pages, &reply_len); 3148 if (ret && ret != -ETIMEDOUT) { 3149 rbd_warn(rbd_dev, "failed to request lock: %d", ret); 3150 goto out; 3151 } 3152 3153 if (reply_len > 0 && reply_len <= PAGE_SIZE) { 3154 void *p = page_address(reply_pages[0]); 3155 void *const end = p + reply_len; 3156 u32 n; 3157 3158 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */ 3159 while (n--) { 3160 u8 struct_v; 3161 u32 len; 3162 3163 ceph_decode_need(&p, end, 8 + 8, e_inval); 3164 p += 8 + 8; /* skip gid and cookie */ 3165 3166 ceph_decode_32_safe(&p, end, len, e_inval); 3167 if (!len) 3168 continue; 3169 3170 if (lock_owner_responded) { 3171 rbd_warn(rbd_dev, 3172 "duplicate lock owners detected"); 3173 ret = -EIO; 3174 goto out; 3175 } 3176 3177 lock_owner_responded = true; 3178 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage", 3179 &struct_v, &len); 3180 if (ret) { 3181 rbd_warn(rbd_dev, 3182 "failed to decode ResponseMessage: %d", 3183 ret); 3184 goto e_inval; 3185 } 3186 3187 ret = ceph_decode_32(&p); 3188 } 3189 } 3190 3191 if (!lock_owner_responded) { 3192 rbd_warn(rbd_dev, "no lock owners detected"); 3193 ret = -ETIMEDOUT; 3194 } 3195 3196 out: 3197 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len)); 3198 return ret; 3199 3200 e_inval: 3201 ret = -EINVAL; 3202 goto out; 3203 } 3204 3205 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all) 3206 { 3207 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all); 3208 3209 cancel_delayed_work(&rbd_dev->lock_dwork); 3210 if (wake_all) 3211 wake_up_all(&rbd_dev->lock_waitq); 3212 else 3213 wake_up(&rbd_dev->lock_waitq); 3214 } 3215 3216 static int get_lock_owner_info(struct rbd_device *rbd_dev, 3217 struct ceph_locker **lockers, u32 *num_lockers) 3218 { 3219 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3220 u8 lock_type; 3221 char *lock_tag; 3222 int ret; 3223 3224 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3225 3226 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid, 3227 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3228 &lock_type, &lock_tag, lockers, num_lockers); 3229 if (ret) 3230 return ret; 3231 3232 if (*num_lockers == 0) { 3233 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev); 3234 goto out; 3235 } 3236 3237 if (strcmp(lock_tag, RBD_LOCK_TAG)) { 3238 rbd_warn(rbd_dev, "locked by external mechanism, tag %s", 3239 lock_tag); 3240 ret = -EBUSY; 3241 goto out; 3242 } 3243 3244 if (lock_type == CEPH_CLS_LOCK_SHARED) { 3245 rbd_warn(rbd_dev, "shared lock type detected"); 3246 ret = -EBUSY; 3247 goto out; 3248 } 3249 3250 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, 3251 strlen(RBD_LOCK_COOKIE_PREFIX))) { 3252 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s", 3253 (*lockers)[0].id.cookie); 3254 ret = -EBUSY; 3255 goto out; 3256 } 3257 3258 out: 3259 kfree(lock_tag); 3260 return ret; 3261 } 3262 3263 static int find_watcher(struct rbd_device *rbd_dev, 3264 const struct ceph_locker *locker) 3265 { 3266 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3267 struct ceph_watch_item *watchers; 3268 u32 num_watchers; 3269 u64 cookie; 3270 int i; 3271 int ret; 3272 3273 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, 3274 &rbd_dev->header_oloc, &watchers, 3275 &num_watchers); 3276 if (ret) 3277 return ret; 3278 3279 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); 3280 for (i = 0; i < num_watchers; i++) { 3281 if (!memcmp(&watchers[i].addr, &locker->info.addr, 3282 sizeof(locker->info.addr)) && 3283 watchers[i].cookie == cookie) { 3284 struct rbd_client_id cid = { 3285 .gid = le64_to_cpu(watchers[i].name.num), 3286 .handle = cookie, 3287 }; 3288 3289 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__, 3290 rbd_dev, cid.gid, cid.handle); 3291 rbd_set_owner_cid(rbd_dev, &cid); 3292 ret = 1; 3293 goto out; 3294 } 3295 } 3296 3297 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev); 3298 ret = 0; 3299 out: 3300 kfree(watchers); 3301 return ret; 3302 } 3303 3304 /* 3305 * lock_rwsem must be held for write 3306 */ 3307 static int rbd_try_lock(struct rbd_device *rbd_dev) 3308 { 3309 struct ceph_client *client = rbd_dev->rbd_client->client; 3310 struct ceph_locker *lockers; 3311 u32 num_lockers; 3312 int ret; 3313 3314 for (;;) { 3315 ret = rbd_lock(rbd_dev); 3316 if (ret != -EBUSY) 3317 return ret; 3318 3319 /* determine if the current lock holder is still alive */ 3320 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers); 3321 if (ret) 3322 return ret; 3323 3324 if (num_lockers == 0) 3325 goto again; 3326 3327 ret = find_watcher(rbd_dev, lockers); 3328 if (ret) { 3329 if (ret > 0) 3330 ret = 0; /* have to request lock */ 3331 goto out; 3332 } 3333 3334 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock", 3335 ENTITY_NAME(lockers[0].id.name)); 3336 3337 ret = ceph_monc_blacklist_add(&client->monc, 3338 &lockers[0].info.addr); 3339 if (ret) { 3340 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d", 3341 ENTITY_NAME(lockers[0].id.name), ret); 3342 goto out; 3343 } 3344 3345 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid, 3346 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3347 lockers[0].id.cookie, 3348 &lockers[0].id.name); 3349 if (ret && ret != -ENOENT) 3350 goto out; 3351 3352 again: 3353 ceph_free_lockers(lockers, num_lockers); 3354 } 3355 3356 out: 3357 ceph_free_lockers(lockers, num_lockers); 3358 return ret; 3359 } 3360 3361 /* 3362 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED 3363 */ 3364 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev, 3365 int *pret) 3366 { 3367 enum rbd_lock_state lock_state; 3368 3369 down_read(&rbd_dev->lock_rwsem); 3370 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev, 3371 rbd_dev->lock_state); 3372 if (__rbd_is_lock_owner(rbd_dev)) { 3373 lock_state = rbd_dev->lock_state; 3374 up_read(&rbd_dev->lock_rwsem); 3375 return lock_state; 3376 } 3377 3378 up_read(&rbd_dev->lock_rwsem); 3379 down_write(&rbd_dev->lock_rwsem); 3380 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev, 3381 rbd_dev->lock_state); 3382 if (!__rbd_is_lock_owner(rbd_dev)) { 3383 *pret = rbd_try_lock(rbd_dev); 3384 if (*pret) 3385 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret); 3386 } 3387 3388 lock_state = rbd_dev->lock_state; 3389 up_write(&rbd_dev->lock_rwsem); 3390 return lock_state; 3391 } 3392 3393 static void rbd_acquire_lock(struct work_struct *work) 3394 { 3395 struct rbd_device *rbd_dev = container_of(to_delayed_work(work), 3396 struct rbd_device, lock_dwork); 3397 enum rbd_lock_state lock_state; 3398 int ret = 0; 3399 3400 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3401 again: 3402 lock_state = rbd_try_acquire_lock(rbd_dev, &ret); 3403 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) { 3404 if (lock_state == RBD_LOCK_STATE_LOCKED) 3405 wake_requests(rbd_dev, true); 3406 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__, 3407 rbd_dev, lock_state, ret); 3408 return; 3409 } 3410 3411 ret = rbd_request_lock(rbd_dev); 3412 if (ret == -ETIMEDOUT) { 3413 goto again; /* treat this as a dead client */ 3414 } else if (ret == -EROFS) { 3415 rbd_warn(rbd_dev, "peer will not release lock"); 3416 /* 3417 * If this is rbd_add_acquire_lock(), we want to fail 3418 * immediately -- reuse BLACKLISTED flag. Otherwise we 3419 * want to block. 3420 */ 3421 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) { 3422 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags); 3423 /* wake "rbd map --exclusive" process */ 3424 wake_requests(rbd_dev, false); 3425 } 3426 } else if (ret < 0) { 3427 rbd_warn(rbd_dev, "error requesting lock: %d", ret); 3428 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 3429 RBD_RETRY_DELAY); 3430 } else { 3431 /* 3432 * lock owner acked, but resend if we don't see them 3433 * release the lock 3434 */ 3435 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__, 3436 rbd_dev); 3437 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 3438 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC)); 3439 } 3440 } 3441 3442 /* 3443 * lock_rwsem must be held for write 3444 */ 3445 static bool rbd_release_lock(struct rbd_device *rbd_dev) 3446 { 3447 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev, 3448 rbd_dev->lock_state); 3449 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED) 3450 return false; 3451 3452 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING; 3453 downgrade_write(&rbd_dev->lock_rwsem); 3454 /* 3455 * Ensure that all in-flight IO is flushed. 3456 * 3457 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which 3458 * may be shared with other devices. 3459 */ 3460 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc); 3461 up_read(&rbd_dev->lock_rwsem); 3462 3463 down_write(&rbd_dev->lock_rwsem); 3464 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev, 3465 rbd_dev->lock_state); 3466 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) 3467 return false; 3468 3469 rbd_unlock(rbd_dev); 3470 /* 3471 * Give others a chance to grab the lock - we would re-acquire 3472 * almost immediately if we got new IO during ceph_osdc_sync() 3473 * otherwise. We need to ack our own notifications, so this 3474 * lock_dwork will be requeued from rbd_wait_state_locked() 3475 * after wake_requests() in rbd_handle_released_lock(). 3476 */ 3477 cancel_delayed_work(&rbd_dev->lock_dwork); 3478 return true; 3479 } 3480 3481 static void rbd_release_lock_work(struct work_struct *work) 3482 { 3483 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 3484 unlock_work); 3485 3486 down_write(&rbd_dev->lock_rwsem); 3487 rbd_release_lock(rbd_dev); 3488 up_write(&rbd_dev->lock_rwsem); 3489 } 3490 3491 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v, 3492 void **p) 3493 { 3494 struct rbd_client_id cid = { 0 }; 3495 3496 if (struct_v >= 2) { 3497 cid.gid = ceph_decode_64(p); 3498 cid.handle = ceph_decode_64(p); 3499 } 3500 3501 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 3502 cid.handle); 3503 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { 3504 down_write(&rbd_dev->lock_rwsem); 3505 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { 3506 /* 3507 * we already know that the remote client is 3508 * the owner 3509 */ 3510 up_write(&rbd_dev->lock_rwsem); 3511 return; 3512 } 3513 3514 rbd_set_owner_cid(rbd_dev, &cid); 3515 downgrade_write(&rbd_dev->lock_rwsem); 3516 } else { 3517 down_read(&rbd_dev->lock_rwsem); 3518 } 3519 3520 if (!__rbd_is_lock_owner(rbd_dev)) 3521 wake_requests(rbd_dev, false); 3522 up_read(&rbd_dev->lock_rwsem); 3523 } 3524 3525 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v, 3526 void **p) 3527 { 3528 struct rbd_client_id cid = { 0 }; 3529 3530 if (struct_v >= 2) { 3531 cid.gid = ceph_decode_64(p); 3532 cid.handle = ceph_decode_64(p); 3533 } 3534 3535 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 3536 cid.handle); 3537 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { 3538 down_write(&rbd_dev->lock_rwsem); 3539 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { 3540 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n", 3541 __func__, rbd_dev, cid.gid, cid.handle, 3542 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle); 3543 up_write(&rbd_dev->lock_rwsem); 3544 return; 3545 } 3546 3547 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 3548 downgrade_write(&rbd_dev->lock_rwsem); 3549 } else { 3550 down_read(&rbd_dev->lock_rwsem); 3551 } 3552 3553 if (!__rbd_is_lock_owner(rbd_dev)) 3554 wake_requests(rbd_dev, false); 3555 up_read(&rbd_dev->lock_rwsem); 3556 } 3557 3558 /* 3559 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no 3560 * ResponseMessage is needed. 3561 */ 3562 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v, 3563 void **p) 3564 { 3565 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev); 3566 struct rbd_client_id cid = { 0 }; 3567 int result = 1; 3568 3569 if (struct_v >= 2) { 3570 cid.gid = ceph_decode_64(p); 3571 cid.handle = ceph_decode_64(p); 3572 } 3573 3574 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 3575 cid.handle); 3576 if (rbd_cid_equal(&cid, &my_cid)) 3577 return result; 3578 3579 down_read(&rbd_dev->lock_rwsem); 3580 if (__rbd_is_lock_owner(rbd_dev)) { 3581 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED && 3582 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) 3583 goto out_unlock; 3584 3585 /* 3586 * encode ResponseMessage(0) so the peer can detect 3587 * a missing owner 3588 */ 3589 result = 0; 3590 3591 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) { 3592 if (!rbd_dev->opts->exclusive) { 3593 dout("%s rbd_dev %p queueing unlock_work\n", 3594 __func__, rbd_dev); 3595 queue_work(rbd_dev->task_wq, 3596 &rbd_dev->unlock_work); 3597 } else { 3598 /* refuse to release the lock */ 3599 result = -EROFS; 3600 } 3601 } 3602 } 3603 3604 out_unlock: 3605 up_read(&rbd_dev->lock_rwsem); 3606 return result; 3607 } 3608 3609 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev, 3610 u64 notify_id, u64 cookie, s32 *result) 3611 { 3612 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3613 int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN; 3614 char buf[buf_size]; 3615 int ret; 3616 3617 if (result) { 3618 void *p = buf; 3619 3620 /* encode ResponseMessage */ 3621 ceph_start_encoding(&p, 1, 1, 3622 buf_size - CEPH_ENCODING_START_BLK_LEN); 3623 ceph_encode_32(&p, *result); 3624 } else { 3625 buf_size = 0; 3626 } 3627 3628 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid, 3629 &rbd_dev->header_oloc, notify_id, cookie, 3630 buf, buf_size); 3631 if (ret) 3632 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret); 3633 } 3634 3635 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id, 3636 u64 cookie) 3637 { 3638 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3639 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL); 3640 } 3641 3642 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev, 3643 u64 notify_id, u64 cookie, s32 result) 3644 { 3645 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result); 3646 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result); 3647 } 3648 3649 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie, 3650 u64 notifier_id, void *data, size_t data_len) 3651 { 3652 struct rbd_device *rbd_dev = arg; 3653 void *p = data; 3654 void *const end = p + data_len; 3655 u8 struct_v = 0; 3656 u32 len; 3657 u32 notify_op; 3658 int ret; 3659 3660 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n", 3661 __func__, rbd_dev, cookie, notify_id, data_len); 3662 if (data_len) { 3663 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage", 3664 &struct_v, &len); 3665 if (ret) { 3666 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d", 3667 ret); 3668 return; 3669 } 3670 3671 notify_op = ceph_decode_32(&p); 3672 } else { 3673 /* legacy notification for header updates */ 3674 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE; 3675 len = 0; 3676 } 3677 3678 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op); 3679 switch (notify_op) { 3680 case RBD_NOTIFY_OP_ACQUIRED_LOCK: 3681 rbd_handle_acquired_lock(rbd_dev, struct_v, &p); 3682 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 3683 break; 3684 case RBD_NOTIFY_OP_RELEASED_LOCK: 3685 rbd_handle_released_lock(rbd_dev, struct_v, &p); 3686 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 3687 break; 3688 case RBD_NOTIFY_OP_REQUEST_LOCK: 3689 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p); 3690 if (ret <= 0) 3691 rbd_acknowledge_notify_result(rbd_dev, notify_id, 3692 cookie, ret); 3693 else 3694 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 3695 break; 3696 case RBD_NOTIFY_OP_HEADER_UPDATE: 3697 ret = rbd_dev_refresh(rbd_dev); 3698 if (ret) 3699 rbd_warn(rbd_dev, "refresh failed: %d", ret); 3700 3701 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 3702 break; 3703 default: 3704 if (rbd_is_lock_owner(rbd_dev)) 3705 rbd_acknowledge_notify_result(rbd_dev, notify_id, 3706 cookie, -EOPNOTSUPP); 3707 else 3708 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 3709 break; 3710 } 3711 } 3712 3713 static void __rbd_unregister_watch(struct rbd_device *rbd_dev); 3714 3715 static void rbd_watch_errcb(void *arg, u64 cookie, int err) 3716 { 3717 struct rbd_device *rbd_dev = arg; 3718 3719 rbd_warn(rbd_dev, "encountered watch error: %d", err); 3720 3721 down_write(&rbd_dev->lock_rwsem); 3722 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 3723 up_write(&rbd_dev->lock_rwsem); 3724 3725 mutex_lock(&rbd_dev->watch_mutex); 3726 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) { 3727 __rbd_unregister_watch(rbd_dev); 3728 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR; 3729 3730 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0); 3731 } 3732 mutex_unlock(&rbd_dev->watch_mutex); 3733 } 3734 3735 /* 3736 * watch_mutex must be locked 3737 */ 3738 static int __rbd_register_watch(struct rbd_device *rbd_dev) 3739 { 3740 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3741 struct ceph_osd_linger_request *handle; 3742 3743 rbd_assert(!rbd_dev->watch_handle); 3744 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3745 3746 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid, 3747 &rbd_dev->header_oloc, rbd_watch_cb, 3748 rbd_watch_errcb, rbd_dev); 3749 if (IS_ERR(handle)) 3750 return PTR_ERR(handle); 3751 3752 rbd_dev->watch_handle = handle; 3753 return 0; 3754 } 3755 3756 /* 3757 * watch_mutex must be locked 3758 */ 3759 static void __rbd_unregister_watch(struct rbd_device *rbd_dev) 3760 { 3761 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3762 int ret; 3763 3764 rbd_assert(rbd_dev->watch_handle); 3765 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3766 3767 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle); 3768 if (ret) 3769 rbd_warn(rbd_dev, "failed to unwatch: %d", ret); 3770 3771 rbd_dev->watch_handle = NULL; 3772 } 3773 3774 static int rbd_register_watch(struct rbd_device *rbd_dev) 3775 { 3776 int ret; 3777 3778 mutex_lock(&rbd_dev->watch_mutex); 3779 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED); 3780 ret = __rbd_register_watch(rbd_dev); 3781 if (ret) 3782 goto out; 3783 3784 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; 3785 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; 3786 3787 out: 3788 mutex_unlock(&rbd_dev->watch_mutex); 3789 return ret; 3790 } 3791 3792 static void cancel_tasks_sync(struct rbd_device *rbd_dev) 3793 { 3794 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3795 3796 cancel_delayed_work_sync(&rbd_dev->watch_dwork); 3797 cancel_work_sync(&rbd_dev->acquired_lock_work); 3798 cancel_work_sync(&rbd_dev->released_lock_work); 3799 cancel_delayed_work_sync(&rbd_dev->lock_dwork); 3800 cancel_work_sync(&rbd_dev->unlock_work); 3801 } 3802 3803 static void rbd_unregister_watch(struct rbd_device *rbd_dev) 3804 { 3805 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq)); 3806 cancel_tasks_sync(rbd_dev); 3807 3808 mutex_lock(&rbd_dev->watch_mutex); 3809 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) 3810 __rbd_unregister_watch(rbd_dev); 3811 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; 3812 mutex_unlock(&rbd_dev->watch_mutex); 3813 3814 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); 3815 } 3816 3817 /* 3818 * lock_rwsem must be held for write 3819 */ 3820 static void rbd_reacquire_lock(struct rbd_device *rbd_dev) 3821 { 3822 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3823 char cookie[32]; 3824 int ret; 3825 3826 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); 3827 3828 format_lock_cookie(rbd_dev, cookie); 3829 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid, 3830 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3831 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie, 3832 RBD_LOCK_TAG, cookie); 3833 if (ret) { 3834 if (ret != -EOPNOTSUPP) 3835 rbd_warn(rbd_dev, "failed to update lock cookie: %d", 3836 ret); 3837 3838 /* 3839 * Lock cookie cannot be updated on older OSDs, so do 3840 * a manual release and queue an acquire. 3841 */ 3842 if (rbd_release_lock(rbd_dev)) 3843 queue_delayed_work(rbd_dev->task_wq, 3844 &rbd_dev->lock_dwork, 0); 3845 } else { 3846 __rbd_lock(rbd_dev, cookie); 3847 } 3848 } 3849 3850 static void rbd_reregister_watch(struct work_struct *work) 3851 { 3852 struct rbd_device *rbd_dev = container_of(to_delayed_work(work), 3853 struct rbd_device, watch_dwork); 3854 int ret; 3855 3856 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3857 3858 mutex_lock(&rbd_dev->watch_mutex); 3859 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) { 3860 mutex_unlock(&rbd_dev->watch_mutex); 3861 return; 3862 } 3863 3864 ret = __rbd_register_watch(rbd_dev); 3865 if (ret) { 3866 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); 3867 if (ret == -EBLACKLISTED || ret == -ENOENT) { 3868 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags); 3869 wake_requests(rbd_dev, true); 3870 } else { 3871 queue_delayed_work(rbd_dev->task_wq, 3872 &rbd_dev->watch_dwork, 3873 RBD_RETRY_DELAY); 3874 } 3875 mutex_unlock(&rbd_dev->watch_mutex); 3876 return; 3877 } 3878 3879 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; 3880 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; 3881 mutex_unlock(&rbd_dev->watch_mutex); 3882 3883 down_write(&rbd_dev->lock_rwsem); 3884 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) 3885 rbd_reacquire_lock(rbd_dev); 3886 up_write(&rbd_dev->lock_rwsem); 3887 3888 ret = rbd_dev_refresh(rbd_dev); 3889 if (ret) 3890 rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret); 3891 } 3892 3893 /* 3894 * Synchronous osd object method call. Returns the number of bytes 3895 * returned in the outbound buffer, or a negative error code. 3896 */ 3897 static int rbd_obj_method_sync(struct rbd_device *rbd_dev, 3898 struct ceph_object_id *oid, 3899 struct ceph_object_locator *oloc, 3900 const char *method_name, 3901 const void *outbound, 3902 size_t outbound_size, 3903 void *inbound, 3904 size_t inbound_size) 3905 { 3906 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3907 struct page *req_page = NULL; 3908 struct page *reply_page; 3909 int ret; 3910 3911 /* 3912 * Method calls are ultimately read operations. The result 3913 * should placed into the inbound buffer provided. They 3914 * also supply outbound data--parameters for the object 3915 * method. Currently if this is present it will be a 3916 * snapshot id. 3917 */ 3918 if (outbound) { 3919 if (outbound_size > PAGE_SIZE) 3920 return -E2BIG; 3921 3922 req_page = alloc_page(GFP_KERNEL); 3923 if (!req_page) 3924 return -ENOMEM; 3925 3926 memcpy(page_address(req_page), outbound, outbound_size); 3927 } 3928 3929 reply_page = alloc_page(GFP_KERNEL); 3930 if (!reply_page) { 3931 if (req_page) 3932 __free_page(req_page); 3933 return -ENOMEM; 3934 } 3935 3936 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name, 3937 CEPH_OSD_FLAG_READ, req_page, outbound_size, 3938 reply_page, &inbound_size); 3939 if (!ret) { 3940 memcpy(inbound, page_address(reply_page), inbound_size); 3941 ret = inbound_size; 3942 } 3943 3944 if (req_page) 3945 __free_page(req_page); 3946 __free_page(reply_page); 3947 return ret; 3948 } 3949 3950 /* 3951 * lock_rwsem must be held for read 3952 */ 3953 static void rbd_wait_state_locked(struct rbd_device *rbd_dev) 3954 { 3955 DEFINE_WAIT(wait); 3956 3957 do { 3958 /* 3959 * Note the use of mod_delayed_work() in rbd_acquire_lock() 3960 * and cancel_delayed_work() in wake_requests(). 3961 */ 3962 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev); 3963 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); 3964 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, 3965 TASK_UNINTERRUPTIBLE); 3966 up_read(&rbd_dev->lock_rwsem); 3967 schedule(); 3968 down_read(&rbd_dev->lock_rwsem); 3969 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 3970 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); 3971 3972 finish_wait(&rbd_dev->lock_waitq, &wait); 3973 } 3974 3975 static void rbd_queue_workfn(struct work_struct *work) 3976 { 3977 struct request *rq = blk_mq_rq_from_pdu(work); 3978 struct rbd_device *rbd_dev = rq->q->queuedata; 3979 struct rbd_img_request *img_request; 3980 struct ceph_snap_context *snapc = NULL; 3981 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; 3982 u64 length = blk_rq_bytes(rq); 3983 enum obj_operation_type op_type; 3984 u64 mapping_size; 3985 bool must_be_locked; 3986 int result; 3987 3988 switch (req_op(rq)) { 3989 case REQ_OP_DISCARD: 3990 case REQ_OP_WRITE_ZEROES: 3991 op_type = OBJ_OP_DISCARD; 3992 break; 3993 case REQ_OP_WRITE: 3994 op_type = OBJ_OP_WRITE; 3995 break; 3996 case REQ_OP_READ: 3997 op_type = OBJ_OP_READ; 3998 break; 3999 default: 4000 dout("%s: non-fs request type %d\n", __func__, req_op(rq)); 4001 result = -EIO; 4002 goto err; 4003 } 4004 4005 /* Ignore/skip any zero-length requests */ 4006 4007 if (!length) { 4008 dout("%s: zero-length request\n", __func__); 4009 result = 0; 4010 goto err_rq; 4011 } 4012 4013 rbd_assert(op_type == OBJ_OP_READ || 4014 rbd_dev->spec->snap_id == CEPH_NOSNAP); 4015 4016 /* 4017 * Quit early if the mapped snapshot no longer exists. It's 4018 * still possible the snapshot will have disappeared by the 4019 * time our request arrives at the osd, but there's no sense in 4020 * sending it if we already know. 4021 */ 4022 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) { 4023 dout("request for non-existent snapshot"); 4024 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP); 4025 result = -ENXIO; 4026 goto err_rq; 4027 } 4028 4029 if (offset && length > U64_MAX - offset + 1) { 4030 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset, 4031 length); 4032 result = -EINVAL; 4033 goto err_rq; /* Shouldn't happen */ 4034 } 4035 4036 blk_mq_start_request(rq); 4037 4038 down_read(&rbd_dev->header_rwsem); 4039 mapping_size = rbd_dev->mapping.size; 4040 if (op_type != OBJ_OP_READ) { 4041 snapc = rbd_dev->header.snapc; 4042 ceph_get_snap_context(snapc); 4043 } 4044 up_read(&rbd_dev->header_rwsem); 4045 4046 if (offset + length > mapping_size) { 4047 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset, 4048 length, mapping_size); 4049 result = -EIO; 4050 goto err_rq; 4051 } 4052 4053 must_be_locked = 4054 (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) && 4055 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); 4056 if (must_be_locked) { 4057 down_read(&rbd_dev->lock_rwsem); 4058 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 4059 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 4060 if (rbd_dev->opts->exclusive) { 4061 rbd_warn(rbd_dev, "exclusive lock required"); 4062 result = -EROFS; 4063 goto err_unlock; 4064 } 4065 rbd_wait_state_locked(rbd_dev); 4066 } 4067 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 4068 result = -EBLACKLISTED; 4069 goto err_unlock; 4070 } 4071 } 4072 4073 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type, 4074 snapc); 4075 if (!img_request) { 4076 result = -ENOMEM; 4077 goto err_unlock; 4078 } 4079 img_request->rq = rq; 4080 snapc = NULL; /* img_request consumes a ref */ 4081 4082 if (op_type == OBJ_OP_DISCARD) 4083 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA, 4084 NULL); 4085 else 4086 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, 4087 rq->bio); 4088 if (result) 4089 goto err_img_request; 4090 4091 result = rbd_img_request_submit(img_request); 4092 if (result) 4093 goto err_img_request; 4094 4095 if (must_be_locked) 4096 up_read(&rbd_dev->lock_rwsem); 4097 return; 4098 4099 err_img_request: 4100 rbd_img_request_put(img_request); 4101 err_unlock: 4102 if (must_be_locked) 4103 up_read(&rbd_dev->lock_rwsem); 4104 err_rq: 4105 if (result) 4106 rbd_warn(rbd_dev, "%s %llx at %llx result %d", 4107 obj_op_name(op_type), length, offset, result); 4108 ceph_put_snap_context(snapc); 4109 err: 4110 blk_mq_end_request(rq, errno_to_blk_status(result)); 4111 } 4112 4113 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx, 4114 const struct blk_mq_queue_data *bd) 4115 { 4116 struct request *rq = bd->rq; 4117 struct work_struct *work = blk_mq_rq_to_pdu(rq); 4118 4119 queue_work(rbd_wq, work); 4120 return BLK_STS_OK; 4121 } 4122 4123 static void rbd_free_disk(struct rbd_device *rbd_dev) 4124 { 4125 blk_cleanup_queue(rbd_dev->disk->queue); 4126 blk_mq_free_tag_set(&rbd_dev->tag_set); 4127 put_disk(rbd_dev->disk); 4128 rbd_dev->disk = NULL; 4129 } 4130 4131 static int rbd_obj_read_sync(struct rbd_device *rbd_dev, 4132 struct ceph_object_id *oid, 4133 struct ceph_object_locator *oloc, 4134 void *buf, int buf_len) 4135 4136 { 4137 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4138 struct ceph_osd_request *req; 4139 struct page **pages; 4140 int num_pages = calc_pages_for(0, buf_len); 4141 int ret; 4142 4143 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL); 4144 if (!req) 4145 return -ENOMEM; 4146 4147 ceph_oid_copy(&req->r_base_oid, oid); 4148 ceph_oloc_copy(&req->r_base_oloc, oloc); 4149 req->r_flags = CEPH_OSD_FLAG_READ; 4150 4151 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); 4152 if (ret) 4153 goto out_req; 4154 4155 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 4156 if (IS_ERR(pages)) { 4157 ret = PTR_ERR(pages); 4158 goto out_req; 4159 } 4160 4161 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0); 4162 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false, 4163 true); 4164 4165 ceph_osdc_start_request(osdc, req, false); 4166 ret = ceph_osdc_wait_request(osdc, req); 4167 if (ret >= 0) 4168 ceph_copy_from_page_vector(pages, buf, 0, ret); 4169 4170 out_req: 4171 ceph_osdc_put_request(req); 4172 return ret; 4173 } 4174 4175 /* 4176 * Read the complete header for the given rbd device. On successful 4177 * return, the rbd_dev->header field will contain up-to-date 4178 * information about the image. 4179 */ 4180 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) 4181 { 4182 struct rbd_image_header_ondisk *ondisk = NULL; 4183 u32 snap_count = 0; 4184 u64 names_size = 0; 4185 u32 want_count; 4186 int ret; 4187 4188 /* 4189 * The complete header will include an array of its 64-bit 4190 * snapshot ids, followed by the names of those snapshots as 4191 * a contiguous block of NUL-terminated strings. Note that 4192 * the number of snapshots could change by the time we read 4193 * it in, in which case we re-read it. 4194 */ 4195 do { 4196 size_t size; 4197 4198 kfree(ondisk); 4199 4200 size = sizeof (*ondisk); 4201 size += snap_count * sizeof (struct rbd_image_snap_ondisk); 4202 size += names_size; 4203 ondisk = kmalloc(size, GFP_KERNEL); 4204 if (!ondisk) 4205 return -ENOMEM; 4206 4207 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid, 4208 &rbd_dev->header_oloc, ondisk, size); 4209 if (ret < 0) 4210 goto out; 4211 if ((size_t)ret < size) { 4212 ret = -ENXIO; 4213 rbd_warn(rbd_dev, "short header read (want %zd got %d)", 4214 size, ret); 4215 goto out; 4216 } 4217 if (!rbd_dev_ondisk_valid(ondisk)) { 4218 ret = -ENXIO; 4219 rbd_warn(rbd_dev, "invalid header"); 4220 goto out; 4221 } 4222 4223 names_size = le64_to_cpu(ondisk->snap_names_len); 4224 want_count = snap_count; 4225 snap_count = le32_to_cpu(ondisk->snap_count); 4226 } while (snap_count != want_count); 4227 4228 ret = rbd_header_from_disk(rbd_dev, ondisk); 4229 out: 4230 kfree(ondisk); 4231 4232 return ret; 4233 } 4234 4235 /* 4236 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to 4237 * has disappeared from the (just updated) snapshot context. 4238 */ 4239 static void rbd_exists_validate(struct rbd_device *rbd_dev) 4240 { 4241 u64 snap_id; 4242 4243 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) 4244 return; 4245 4246 snap_id = rbd_dev->spec->snap_id; 4247 if (snap_id == CEPH_NOSNAP) 4248 return; 4249 4250 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX) 4251 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 4252 } 4253 4254 static void rbd_dev_update_size(struct rbd_device *rbd_dev) 4255 { 4256 sector_t size; 4257 4258 /* 4259 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't 4260 * try to update its size. If REMOVING is set, updating size 4261 * is just useless work since the device can't be opened. 4262 */ 4263 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) && 4264 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) { 4265 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 4266 dout("setting size to %llu sectors", (unsigned long long)size); 4267 set_capacity(rbd_dev->disk, size); 4268 revalidate_disk(rbd_dev->disk); 4269 } 4270 } 4271 4272 static int rbd_dev_refresh(struct rbd_device *rbd_dev) 4273 { 4274 u64 mapping_size; 4275 int ret; 4276 4277 down_write(&rbd_dev->header_rwsem); 4278 mapping_size = rbd_dev->mapping.size; 4279 4280 ret = rbd_dev_header_info(rbd_dev); 4281 if (ret) 4282 goto out; 4283 4284 /* 4285 * If there is a parent, see if it has disappeared due to the 4286 * mapped image getting flattened. 4287 */ 4288 if (rbd_dev->parent) { 4289 ret = rbd_dev_v2_parent_info(rbd_dev); 4290 if (ret) 4291 goto out; 4292 } 4293 4294 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) { 4295 rbd_dev->mapping.size = rbd_dev->header.image_size; 4296 } else { 4297 /* validate mapped snapshot's EXISTS flag */ 4298 rbd_exists_validate(rbd_dev); 4299 } 4300 4301 out: 4302 up_write(&rbd_dev->header_rwsem); 4303 if (!ret && mapping_size != rbd_dev->mapping.size) 4304 rbd_dev_update_size(rbd_dev); 4305 4306 return ret; 4307 } 4308 4309 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq, 4310 unsigned int hctx_idx, unsigned int numa_node) 4311 { 4312 struct work_struct *work = blk_mq_rq_to_pdu(rq); 4313 4314 INIT_WORK(work, rbd_queue_workfn); 4315 return 0; 4316 } 4317 4318 static const struct blk_mq_ops rbd_mq_ops = { 4319 .queue_rq = rbd_queue_rq, 4320 .init_request = rbd_init_request, 4321 }; 4322 4323 static int rbd_init_disk(struct rbd_device *rbd_dev) 4324 { 4325 struct gendisk *disk; 4326 struct request_queue *q; 4327 u64 segment_size; 4328 int err; 4329 4330 /* create gendisk info */ 4331 disk = alloc_disk(single_major ? 4332 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) : 4333 RBD_MINORS_PER_MAJOR); 4334 if (!disk) 4335 return -ENOMEM; 4336 4337 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", 4338 rbd_dev->dev_id); 4339 disk->major = rbd_dev->major; 4340 disk->first_minor = rbd_dev->minor; 4341 if (single_major) 4342 disk->flags |= GENHD_FL_EXT_DEVT; 4343 disk->fops = &rbd_bd_ops; 4344 disk->private_data = rbd_dev; 4345 4346 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); 4347 rbd_dev->tag_set.ops = &rbd_mq_ops; 4348 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth; 4349 rbd_dev->tag_set.numa_node = NUMA_NO_NODE; 4350 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 4351 rbd_dev->tag_set.nr_hw_queues = 1; 4352 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct); 4353 4354 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set); 4355 if (err) 4356 goto out_disk; 4357 4358 q = blk_mq_init_queue(&rbd_dev->tag_set); 4359 if (IS_ERR(q)) { 4360 err = PTR_ERR(q); 4361 goto out_tag_set; 4362 } 4363 4364 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 4365 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ 4366 4367 /* set io sizes to object size */ 4368 segment_size = rbd_obj_bytes(&rbd_dev->header); 4369 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); 4370 q->limits.max_sectors = queue_max_hw_sectors(q); 4371 blk_queue_max_segments(q, USHRT_MAX); 4372 blk_queue_max_segment_size(q, segment_size); 4373 blk_queue_io_min(q, segment_size); 4374 blk_queue_io_opt(q, segment_size); 4375 4376 /* enable the discard support */ 4377 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 4378 q->limits.discard_granularity = segment_size; 4379 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); 4380 blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); 4381 4382 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) 4383 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; 4384 4385 /* 4386 * disk_release() expects a queue ref from add_disk() and will 4387 * put it. Hold an extra ref until add_disk() is called. 4388 */ 4389 WARN_ON(!blk_get_queue(q)); 4390 disk->queue = q; 4391 q->queuedata = rbd_dev; 4392 4393 rbd_dev->disk = disk; 4394 4395 return 0; 4396 out_tag_set: 4397 blk_mq_free_tag_set(&rbd_dev->tag_set); 4398 out_disk: 4399 put_disk(disk); 4400 return err; 4401 } 4402 4403 /* 4404 sysfs 4405 */ 4406 4407 static struct rbd_device *dev_to_rbd_dev(struct device *dev) 4408 { 4409 return container_of(dev, struct rbd_device, dev); 4410 } 4411 4412 static ssize_t rbd_size_show(struct device *dev, 4413 struct device_attribute *attr, char *buf) 4414 { 4415 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4416 4417 return sprintf(buf, "%llu\n", 4418 (unsigned long long)rbd_dev->mapping.size); 4419 } 4420 4421 /* 4422 * Note this shows the features for whatever's mapped, which is not 4423 * necessarily the base image. 4424 */ 4425 static ssize_t rbd_features_show(struct device *dev, 4426 struct device_attribute *attr, char *buf) 4427 { 4428 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4429 4430 return sprintf(buf, "0x%016llx\n", 4431 (unsigned long long)rbd_dev->mapping.features); 4432 } 4433 4434 static ssize_t rbd_major_show(struct device *dev, 4435 struct device_attribute *attr, char *buf) 4436 { 4437 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4438 4439 if (rbd_dev->major) 4440 return sprintf(buf, "%d\n", rbd_dev->major); 4441 4442 return sprintf(buf, "(none)\n"); 4443 } 4444 4445 static ssize_t rbd_minor_show(struct device *dev, 4446 struct device_attribute *attr, char *buf) 4447 { 4448 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4449 4450 return sprintf(buf, "%d\n", rbd_dev->minor); 4451 } 4452 4453 static ssize_t rbd_client_addr_show(struct device *dev, 4454 struct device_attribute *attr, char *buf) 4455 { 4456 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4457 struct ceph_entity_addr *client_addr = 4458 ceph_client_addr(rbd_dev->rbd_client->client); 4459 4460 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr, 4461 le32_to_cpu(client_addr->nonce)); 4462 } 4463 4464 static ssize_t rbd_client_id_show(struct device *dev, 4465 struct device_attribute *attr, char *buf) 4466 { 4467 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4468 4469 return sprintf(buf, "client%lld\n", 4470 ceph_client_gid(rbd_dev->rbd_client->client)); 4471 } 4472 4473 static ssize_t rbd_cluster_fsid_show(struct device *dev, 4474 struct device_attribute *attr, char *buf) 4475 { 4476 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4477 4478 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid); 4479 } 4480 4481 static ssize_t rbd_config_info_show(struct device *dev, 4482 struct device_attribute *attr, char *buf) 4483 { 4484 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4485 4486 return sprintf(buf, "%s\n", rbd_dev->config_info); 4487 } 4488 4489 static ssize_t rbd_pool_show(struct device *dev, 4490 struct device_attribute *attr, char *buf) 4491 { 4492 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4493 4494 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name); 4495 } 4496 4497 static ssize_t rbd_pool_id_show(struct device *dev, 4498 struct device_attribute *attr, char *buf) 4499 { 4500 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4501 4502 return sprintf(buf, "%llu\n", 4503 (unsigned long long) rbd_dev->spec->pool_id); 4504 } 4505 4506 static ssize_t rbd_name_show(struct device *dev, 4507 struct device_attribute *attr, char *buf) 4508 { 4509 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4510 4511 if (rbd_dev->spec->image_name) 4512 return sprintf(buf, "%s\n", rbd_dev->spec->image_name); 4513 4514 return sprintf(buf, "(unknown)\n"); 4515 } 4516 4517 static ssize_t rbd_image_id_show(struct device *dev, 4518 struct device_attribute *attr, char *buf) 4519 { 4520 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4521 4522 return sprintf(buf, "%s\n", rbd_dev->spec->image_id); 4523 } 4524 4525 /* 4526 * Shows the name of the currently-mapped snapshot (or 4527 * RBD_SNAP_HEAD_NAME for the base image). 4528 */ 4529 static ssize_t rbd_snap_show(struct device *dev, 4530 struct device_attribute *attr, 4531 char *buf) 4532 { 4533 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4534 4535 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name); 4536 } 4537 4538 static ssize_t rbd_snap_id_show(struct device *dev, 4539 struct device_attribute *attr, char *buf) 4540 { 4541 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4542 4543 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id); 4544 } 4545 4546 /* 4547 * For a v2 image, shows the chain of parent images, separated by empty 4548 * lines. For v1 images or if there is no parent, shows "(no parent 4549 * image)". 4550 */ 4551 static ssize_t rbd_parent_show(struct device *dev, 4552 struct device_attribute *attr, 4553 char *buf) 4554 { 4555 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4556 ssize_t count = 0; 4557 4558 if (!rbd_dev->parent) 4559 return sprintf(buf, "(no parent image)\n"); 4560 4561 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) { 4562 struct rbd_spec *spec = rbd_dev->parent_spec; 4563 4564 count += sprintf(&buf[count], "%s" 4565 "pool_id %llu\npool_name %s\n" 4566 "image_id %s\nimage_name %s\n" 4567 "snap_id %llu\nsnap_name %s\n" 4568 "overlap %llu\n", 4569 !count ? "" : "\n", /* first? */ 4570 spec->pool_id, spec->pool_name, 4571 spec->image_id, spec->image_name ?: "(unknown)", 4572 spec->snap_id, spec->snap_name, 4573 rbd_dev->parent_overlap); 4574 } 4575 4576 return count; 4577 } 4578 4579 static ssize_t rbd_image_refresh(struct device *dev, 4580 struct device_attribute *attr, 4581 const char *buf, 4582 size_t size) 4583 { 4584 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4585 int ret; 4586 4587 ret = rbd_dev_refresh(rbd_dev); 4588 if (ret) 4589 return ret; 4590 4591 return size; 4592 } 4593 4594 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL); 4595 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL); 4596 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL); 4597 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL); 4598 static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL); 4599 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL); 4600 static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL); 4601 static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL); 4602 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL); 4603 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL); 4604 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); 4605 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL); 4606 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); 4607 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); 4608 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL); 4609 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL); 4610 4611 static struct attribute *rbd_attrs[] = { 4612 &dev_attr_size.attr, 4613 &dev_attr_features.attr, 4614 &dev_attr_major.attr, 4615 &dev_attr_minor.attr, 4616 &dev_attr_client_addr.attr, 4617 &dev_attr_client_id.attr, 4618 &dev_attr_cluster_fsid.attr, 4619 &dev_attr_config_info.attr, 4620 &dev_attr_pool.attr, 4621 &dev_attr_pool_id.attr, 4622 &dev_attr_name.attr, 4623 &dev_attr_image_id.attr, 4624 &dev_attr_current_snap.attr, 4625 &dev_attr_snap_id.attr, 4626 &dev_attr_parent.attr, 4627 &dev_attr_refresh.attr, 4628 NULL 4629 }; 4630 4631 static struct attribute_group rbd_attr_group = { 4632 .attrs = rbd_attrs, 4633 }; 4634 4635 static const struct attribute_group *rbd_attr_groups[] = { 4636 &rbd_attr_group, 4637 NULL 4638 }; 4639 4640 static void rbd_dev_release(struct device *dev); 4641 4642 static const struct device_type rbd_device_type = { 4643 .name = "rbd", 4644 .groups = rbd_attr_groups, 4645 .release = rbd_dev_release, 4646 }; 4647 4648 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec) 4649 { 4650 kref_get(&spec->kref); 4651 4652 return spec; 4653 } 4654 4655 static void rbd_spec_free(struct kref *kref); 4656 static void rbd_spec_put(struct rbd_spec *spec) 4657 { 4658 if (spec) 4659 kref_put(&spec->kref, rbd_spec_free); 4660 } 4661 4662 static struct rbd_spec *rbd_spec_alloc(void) 4663 { 4664 struct rbd_spec *spec; 4665 4666 spec = kzalloc(sizeof (*spec), GFP_KERNEL); 4667 if (!spec) 4668 return NULL; 4669 4670 spec->pool_id = CEPH_NOPOOL; 4671 spec->snap_id = CEPH_NOSNAP; 4672 kref_init(&spec->kref); 4673 4674 return spec; 4675 } 4676 4677 static void rbd_spec_free(struct kref *kref) 4678 { 4679 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref); 4680 4681 kfree(spec->pool_name); 4682 kfree(spec->image_id); 4683 kfree(spec->image_name); 4684 kfree(spec->snap_name); 4685 kfree(spec); 4686 } 4687 4688 static void rbd_dev_free(struct rbd_device *rbd_dev) 4689 { 4690 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED); 4691 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED); 4692 4693 ceph_oid_destroy(&rbd_dev->header_oid); 4694 ceph_oloc_destroy(&rbd_dev->header_oloc); 4695 kfree(rbd_dev->config_info); 4696 4697 rbd_put_client(rbd_dev->rbd_client); 4698 rbd_spec_put(rbd_dev->spec); 4699 kfree(rbd_dev->opts); 4700 kfree(rbd_dev); 4701 } 4702 4703 static void rbd_dev_release(struct device *dev) 4704 { 4705 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4706 bool need_put = !!rbd_dev->opts; 4707 4708 if (need_put) { 4709 destroy_workqueue(rbd_dev->task_wq); 4710 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id); 4711 } 4712 4713 rbd_dev_free(rbd_dev); 4714 4715 /* 4716 * This is racy, but way better than putting module outside of 4717 * the release callback. The race window is pretty small, so 4718 * doing something similar to dm (dm-builtin.c) is overkill. 4719 */ 4720 if (need_put) 4721 module_put(THIS_MODULE); 4722 } 4723 4724 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, 4725 struct rbd_spec *spec) 4726 { 4727 struct rbd_device *rbd_dev; 4728 4729 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL); 4730 if (!rbd_dev) 4731 return NULL; 4732 4733 spin_lock_init(&rbd_dev->lock); 4734 INIT_LIST_HEAD(&rbd_dev->node); 4735 init_rwsem(&rbd_dev->header_rwsem); 4736 4737 rbd_dev->header.data_pool_id = CEPH_NOPOOL; 4738 ceph_oid_init(&rbd_dev->header_oid); 4739 rbd_dev->header_oloc.pool = spec->pool_id; 4740 4741 mutex_init(&rbd_dev->watch_mutex); 4742 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; 4743 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch); 4744 4745 init_rwsem(&rbd_dev->lock_rwsem); 4746 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED; 4747 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock); 4748 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock); 4749 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock); 4750 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work); 4751 init_waitqueue_head(&rbd_dev->lock_waitq); 4752 4753 rbd_dev->dev.bus = &rbd_bus_type; 4754 rbd_dev->dev.type = &rbd_device_type; 4755 rbd_dev->dev.parent = &rbd_root_dev; 4756 device_initialize(&rbd_dev->dev); 4757 4758 rbd_dev->rbd_client = rbdc; 4759 rbd_dev->spec = spec; 4760 4761 return rbd_dev; 4762 } 4763 4764 /* 4765 * Create a mapping rbd_dev. 4766 */ 4767 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, 4768 struct rbd_spec *spec, 4769 struct rbd_options *opts) 4770 { 4771 struct rbd_device *rbd_dev; 4772 4773 rbd_dev = __rbd_dev_create(rbdc, spec); 4774 if (!rbd_dev) 4775 return NULL; 4776 4777 rbd_dev->opts = opts; 4778 4779 /* get an id and fill in device name */ 4780 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0, 4781 minor_to_rbd_dev_id(1 << MINORBITS), 4782 GFP_KERNEL); 4783 if (rbd_dev->dev_id < 0) 4784 goto fail_rbd_dev; 4785 4786 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id); 4787 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM, 4788 rbd_dev->name); 4789 if (!rbd_dev->task_wq) 4790 goto fail_dev_id; 4791 4792 /* we have a ref from do_rbd_add() */ 4793 __module_get(THIS_MODULE); 4794 4795 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id); 4796 return rbd_dev; 4797 4798 fail_dev_id: 4799 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id); 4800 fail_rbd_dev: 4801 rbd_dev_free(rbd_dev); 4802 return NULL; 4803 } 4804 4805 static void rbd_dev_destroy(struct rbd_device *rbd_dev) 4806 { 4807 if (rbd_dev) 4808 put_device(&rbd_dev->dev); 4809 } 4810 4811 /* 4812 * Get the size and object order for an image snapshot, or if 4813 * snap_id is CEPH_NOSNAP, gets this information for the base 4814 * image. 4815 */ 4816 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 4817 u8 *order, u64 *snap_size) 4818 { 4819 __le64 snapid = cpu_to_le64(snap_id); 4820 int ret; 4821 struct { 4822 u8 order; 4823 __le64 size; 4824 } __attribute__ ((packed)) size_buf = { 0 }; 4825 4826 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4827 &rbd_dev->header_oloc, "get_size", 4828 &snapid, sizeof(snapid), 4829 &size_buf, sizeof(size_buf)); 4830 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4831 if (ret < 0) 4832 return ret; 4833 if (ret < sizeof (size_buf)) 4834 return -ERANGE; 4835 4836 if (order) { 4837 *order = size_buf.order; 4838 dout(" order %u", (unsigned int)*order); 4839 } 4840 *snap_size = le64_to_cpu(size_buf.size); 4841 4842 dout(" snap_id 0x%016llx snap_size = %llu\n", 4843 (unsigned long long)snap_id, 4844 (unsigned long long)*snap_size); 4845 4846 return 0; 4847 } 4848 4849 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev) 4850 { 4851 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, 4852 &rbd_dev->header.obj_order, 4853 &rbd_dev->header.image_size); 4854 } 4855 4856 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) 4857 { 4858 void *reply_buf; 4859 int ret; 4860 void *p; 4861 4862 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL); 4863 if (!reply_buf) 4864 return -ENOMEM; 4865 4866 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4867 &rbd_dev->header_oloc, "get_object_prefix", 4868 NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX); 4869 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4870 if (ret < 0) 4871 goto out; 4872 4873 p = reply_buf; 4874 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, 4875 p + ret, NULL, GFP_NOIO); 4876 ret = 0; 4877 4878 if (IS_ERR(rbd_dev->header.object_prefix)) { 4879 ret = PTR_ERR(rbd_dev->header.object_prefix); 4880 rbd_dev->header.object_prefix = NULL; 4881 } else { 4882 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); 4883 } 4884 out: 4885 kfree(reply_buf); 4886 4887 return ret; 4888 } 4889 4890 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 4891 u64 *snap_features) 4892 { 4893 __le64 snapid = cpu_to_le64(snap_id); 4894 struct { 4895 __le64 features; 4896 __le64 incompat; 4897 } __attribute__ ((packed)) features_buf = { 0 }; 4898 u64 unsup; 4899 int ret; 4900 4901 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4902 &rbd_dev->header_oloc, "get_features", 4903 &snapid, sizeof(snapid), 4904 &features_buf, sizeof(features_buf)); 4905 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4906 if (ret < 0) 4907 return ret; 4908 if (ret < sizeof (features_buf)) 4909 return -ERANGE; 4910 4911 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED; 4912 if (unsup) { 4913 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx", 4914 unsup); 4915 return -ENXIO; 4916 } 4917 4918 *snap_features = le64_to_cpu(features_buf.features); 4919 4920 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n", 4921 (unsigned long long)snap_id, 4922 (unsigned long long)*snap_features, 4923 (unsigned long long)le64_to_cpu(features_buf.incompat)); 4924 4925 return 0; 4926 } 4927 4928 static int rbd_dev_v2_features(struct rbd_device *rbd_dev) 4929 { 4930 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, 4931 &rbd_dev->header.features); 4932 } 4933 4934 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) 4935 { 4936 struct rbd_spec *parent_spec; 4937 size_t size; 4938 void *reply_buf = NULL; 4939 __le64 snapid; 4940 void *p; 4941 void *end; 4942 u64 pool_id; 4943 char *image_id; 4944 u64 snap_id; 4945 u64 overlap; 4946 int ret; 4947 4948 parent_spec = rbd_spec_alloc(); 4949 if (!parent_spec) 4950 return -ENOMEM; 4951 4952 size = sizeof (__le64) + /* pool_id */ 4953 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */ 4954 sizeof (__le64) + /* snap_id */ 4955 sizeof (__le64); /* overlap */ 4956 reply_buf = kmalloc(size, GFP_KERNEL); 4957 if (!reply_buf) { 4958 ret = -ENOMEM; 4959 goto out_err; 4960 } 4961 4962 snapid = cpu_to_le64(rbd_dev->spec->snap_id); 4963 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4964 &rbd_dev->header_oloc, "get_parent", 4965 &snapid, sizeof(snapid), reply_buf, size); 4966 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4967 if (ret < 0) 4968 goto out_err; 4969 4970 p = reply_buf; 4971 end = reply_buf + ret; 4972 ret = -ERANGE; 4973 ceph_decode_64_safe(&p, end, pool_id, out_err); 4974 if (pool_id == CEPH_NOPOOL) { 4975 /* 4976 * Either the parent never existed, or we have 4977 * record of it but the image got flattened so it no 4978 * longer has a parent. When the parent of a 4979 * layered image disappears we immediately set the 4980 * overlap to 0. The effect of this is that all new 4981 * requests will be treated as if the image had no 4982 * parent. 4983 */ 4984 if (rbd_dev->parent_overlap) { 4985 rbd_dev->parent_overlap = 0; 4986 rbd_dev_parent_put(rbd_dev); 4987 pr_info("%s: clone image has been flattened\n", 4988 rbd_dev->disk->disk_name); 4989 } 4990 4991 goto out; /* No parent? No problem. */ 4992 } 4993 4994 /* The ceph file layout needs to fit pool id in 32 bits */ 4995 4996 ret = -EIO; 4997 if (pool_id > (u64)U32_MAX) { 4998 rbd_warn(NULL, "parent pool id too large (%llu > %u)", 4999 (unsigned long long)pool_id, U32_MAX); 5000 goto out_err; 5001 } 5002 5003 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 5004 if (IS_ERR(image_id)) { 5005 ret = PTR_ERR(image_id); 5006 goto out_err; 5007 } 5008 ceph_decode_64_safe(&p, end, snap_id, out_err); 5009 ceph_decode_64_safe(&p, end, overlap, out_err); 5010 5011 /* 5012 * The parent won't change (except when the clone is 5013 * flattened, already handled that). So we only need to 5014 * record the parent spec we have not already done so. 5015 */ 5016 if (!rbd_dev->parent_spec) { 5017 parent_spec->pool_id = pool_id; 5018 parent_spec->image_id = image_id; 5019 parent_spec->snap_id = snap_id; 5020 rbd_dev->parent_spec = parent_spec; 5021 parent_spec = NULL; /* rbd_dev now owns this */ 5022 } else { 5023 kfree(image_id); 5024 } 5025 5026 /* 5027 * We always update the parent overlap. If it's zero we issue 5028 * a warning, as we will proceed as if there was no parent. 5029 */ 5030 if (!overlap) { 5031 if (parent_spec) { 5032 /* refresh, careful to warn just once */ 5033 if (rbd_dev->parent_overlap) 5034 rbd_warn(rbd_dev, 5035 "clone now standalone (overlap became 0)"); 5036 } else { 5037 /* initial probe */ 5038 rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); 5039 } 5040 } 5041 rbd_dev->parent_overlap = overlap; 5042 5043 out: 5044 ret = 0; 5045 out_err: 5046 kfree(reply_buf); 5047 rbd_spec_put(parent_spec); 5048 5049 return ret; 5050 } 5051 5052 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) 5053 { 5054 struct { 5055 __le64 stripe_unit; 5056 __le64 stripe_count; 5057 } __attribute__ ((packed)) striping_info_buf = { 0 }; 5058 size_t size = sizeof (striping_info_buf); 5059 void *p; 5060 u64 obj_size; 5061 u64 stripe_unit; 5062 u64 stripe_count; 5063 int ret; 5064 5065 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5066 &rbd_dev->header_oloc, "get_stripe_unit_count", 5067 NULL, 0, &striping_info_buf, size); 5068 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5069 if (ret < 0) 5070 return ret; 5071 if (ret < size) 5072 return -ERANGE; 5073 5074 /* 5075 * We don't actually support the "fancy striping" feature 5076 * (STRIPINGV2) yet, but if the striping sizes are the 5077 * defaults the behavior is the same as before. So find 5078 * out, and only fail if the image has non-default values. 5079 */ 5080 ret = -EINVAL; 5081 obj_size = rbd_obj_bytes(&rbd_dev->header); 5082 p = &striping_info_buf; 5083 stripe_unit = ceph_decode_64(&p); 5084 if (stripe_unit != obj_size) { 5085 rbd_warn(rbd_dev, "unsupported stripe unit " 5086 "(got %llu want %llu)", 5087 stripe_unit, obj_size); 5088 return -EINVAL; 5089 } 5090 stripe_count = ceph_decode_64(&p); 5091 if (stripe_count != 1) { 5092 rbd_warn(rbd_dev, "unsupported stripe count " 5093 "(got %llu want 1)", stripe_count); 5094 return -EINVAL; 5095 } 5096 rbd_dev->header.stripe_unit = stripe_unit; 5097 rbd_dev->header.stripe_count = stripe_count; 5098 5099 return 0; 5100 } 5101 5102 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev) 5103 { 5104 __le64 data_pool_id; 5105 int ret; 5106 5107 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5108 &rbd_dev->header_oloc, "get_data_pool", 5109 NULL, 0, &data_pool_id, sizeof(data_pool_id)); 5110 if (ret < 0) 5111 return ret; 5112 if (ret < sizeof(data_pool_id)) 5113 return -EBADMSG; 5114 5115 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id); 5116 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL); 5117 return 0; 5118 } 5119 5120 static char *rbd_dev_image_name(struct rbd_device *rbd_dev) 5121 { 5122 CEPH_DEFINE_OID_ONSTACK(oid); 5123 size_t image_id_size; 5124 char *image_id; 5125 void *p; 5126 void *end; 5127 size_t size; 5128 void *reply_buf = NULL; 5129 size_t len = 0; 5130 char *image_name = NULL; 5131 int ret; 5132 5133 rbd_assert(!rbd_dev->spec->image_name); 5134 5135 len = strlen(rbd_dev->spec->image_id); 5136 image_id_size = sizeof (__le32) + len; 5137 image_id = kmalloc(image_id_size, GFP_KERNEL); 5138 if (!image_id) 5139 return NULL; 5140 5141 p = image_id; 5142 end = image_id + image_id_size; 5143 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len); 5144 5145 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX; 5146 reply_buf = kmalloc(size, GFP_KERNEL); 5147 if (!reply_buf) 5148 goto out; 5149 5150 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY); 5151 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc, 5152 "dir_get_name", image_id, image_id_size, 5153 reply_buf, size); 5154 if (ret < 0) 5155 goto out; 5156 p = reply_buf; 5157 end = reply_buf + ret; 5158 5159 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL); 5160 if (IS_ERR(image_name)) 5161 image_name = NULL; 5162 else 5163 dout("%s: name is %s len is %zd\n", __func__, image_name, len); 5164 out: 5165 kfree(reply_buf); 5166 kfree(image_id); 5167 5168 return image_name; 5169 } 5170 5171 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5172 { 5173 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 5174 const char *snap_name; 5175 u32 which = 0; 5176 5177 /* Skip over names until we find the one we are looking for */ 5178 5179 snap_name = rbd_dev->header.snap_names; 5180 while (which < snapc->num_snaps) { 5181 if (!strcmp(name, snap_name)) 5182 return snapc->snaps[which]; 5183 snap_name += strlen(snap_name) + 1; 5184 which++; 5185 } 5186 return CEPH_NOSNAP; 5187 } 5188 5189 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5190 { 5191 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 5192 u32 which; 5193 bool found = false; 5194 u64 snap_id; 5195 5196 for (which = 0; !found && which < snapc->num_snaps; which++) { 5197 const char *snap_name; 5198 5199 snap_id = snapc->snaps[which]; 5200 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); 5201 if (IS_ERR(snap_name)) { 5202 /* ignore no-longer existing snapshots */ 5203 if (PTR_ERR(snap_name) == -ENOENT) 5204 continue; 5205 else 5206 break; 5207 } 5208 found = !strcmp(name, snap_name); 5209 kfree(snap_name); 5210 } 5211 return found ? snap_id : CEPH_NOSNAP; 5212 } 5213 5214 /* 5215 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if 5216 * no snapshot by that name is found, or if an error occurs. 5217 */ 5218 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5219 { 5220 if (rbd_dev->image_format == 1) 5221 return rbd_v1_snap_id_by_name(rbd_dev, name); 5222 5223 return rbd_v2_snap_id_by_name(rbd_dev, name); 5224 } 5225 5226 /* 5227 * An image being mapped will have everything but the snap id. 5228 */ 5229 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev) 5230 { 5231 struct rbd_spec *spec = rbd_dev->spec; 5232 5233 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name); 5234 rbd_assert(spec->image_id && spec->image_name); 5235 rbd_assert(spec->snap_name); 5236 5237 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) { 5238 u64 snap_id; 5239 5240 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name); 5241 if (snap_id == CEPH_NOSNAP) 5242 return -ENOENT; 5243 5244 spec->snap_id = snap_id; 5245 } else { 5246 spec->snap_id = CEPH_NOSNAP; 5247 } 5248 5249 return 0; 5250 } 5251 5252 /* 5253 * A parent image will have all ids but none of the names. 5254 * 5255 * All names in an rbd spec are dynamically allocated. It's OK if we 5256 * can't figure out the name for an image id. 5257 */ 5258 static int rbd_spec_fill_names(struct rbd_device *rbd_dev) 5259 { 5260 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 5261 struct rbd_spec *spec = rbd_dev->spec; 5262 const char *pool_name; 5263 const char *image_name; 5264 const char *snap_name; 5265 int ret; 5266 5267 rbd_assert(spec->pool_id != CEPH_NOPOOL); 5268 rbd_assert(spec->image_id); 5269 rbd_assert(spec->snap_id != CEPH_NOSNAP); 5270 5271 /* Get the pool name; we have to make our own copy of this */ 5272 5273 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id); 5274 if (!pool_name) { 5275 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id); 5276 return -EIO; 5277 } 5278 pool_name = kstrdup(pool_name, GFP_KERNEL); 5279 if (!pool_name) 5280 return -ENOMEM; 5281 5282 /* Fetch the image name; tolerate failure here */ 5283 5284 image_name = rbd_dev_image_name(rbd_dev); 5285 if (!image_name) 5286 rbd_warn(rbd_dev, "unable to get image name"); 5287 5288 /* Fetch the snapshot name */ 5289 5290 snap_name = rbd_snap_name(rbd_dev, spec->snap_id); 5291 if (IS_ERR(snap_name)) { 5292 ret = PTR_ERR(snap_name); 5293 goto out_err; 5294 } 5295 5296 spec->pool_name = pool_name; 5297 spec->image_name = image_name; 5298 spec->snap_name = snap_name; 5299 5300 return 0; 5301 5302 out_err: 5303 kfree(image_name); 5304 kfree(pool_name); 5305 return ret; 5306 } 5307 5308 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) 5309 { 5310 size_t size; 5311 int ret; 5312 void *reply_buf; 5313 void *p; 5314 void *end; 5315 u64 seq; 5316 u32 snap_count; 5317 struct ceph_snap_context *snapc; 5318 u32 i; 5319 5320 /* 5321 * We'll need room for the seq value (maximum snapshot id), 5322 * snapshot count, and array of that many snapshot ids. 5323 * For now we have a fixed upper limit on the number we're 5324 * prepared to receive. 5325 */ 5326 size = sizeof (__le64) + sizeof (__le32) + 5327 RBD_MAX_SNAP_COUNT * sizeof (__le64); 5328 reply_buf = kzalloc(size, GFP_KERNEL); 5329 if (!reply_buf) 5330 return -ENOMEM; 5331 5332 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5333 &rbd_dev->header_oloc, "get_snapcontext", 5334 NULL, 0, reply_buf, size); 5335 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5336 if (ret < 0) 5337 goto out; 5338 5339 p = reply_buf; 5340 end = reply_buf + ret; 5341 ret = -ERANGE; 5342 ceph_decode_64_safe(&p, end, seq, out); 5343 ceph_decode_32_safe(&p, end, snap_count, out); 5344 5345 /* 5346 * Make sure the reported number of snapshot ids wouldn't go 5347 * beyond the end of our buffer. But before checking that, 5348 * make sure the computed size of the snapshot context we 5349 * allocate is representable in a size_t. 5350 */ 5351 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context)) 5352 / sizeof (u64)) { 5353 ret = -EINVAL; 5354 goto out; 5355 } 5356 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64))) 5357 goto out; 5358 ret = 0; 5359 5360 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); 5361 if (!snapc) { 5362 ret = -ENOMEM; 5363 goto out; 5364 } 5365 snapc->seq = seq; 5366 for (i = 0; i < snap_count; i++) 5367 snapc->snaps[i] = ceph_decode_64(&p); 5368 5369 ceph_put_snap_context(rbd_dev->header.snapc); 5370 rbd_dev->header.snapc = snapc; 5371 5372 dout(" snap context seq = %llu, snap_count = %u\n", 5373 (unsigned long long)seq, (unsigned int)snap_count); 5374 out: 5375 kfree(reply_buf); 5376 5377 return ret; 5378 } 5379 5380 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, 5381 u64 snap_id) 5382 { 5383 size_t size; 5384 void *reply_buf; 5385 __le64 snapid; 5386 int ret; 5387 void *p; 5388 void *end; 5389 char *snap_name; 5390 5391 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN; 5392 reply_buf = kmalloc(size, GFP_KERNEL); 5393 if (!reply_buf) 5394 return ERR_PTR(-ENOMEM); 5395 5396 snapid = cpu_to_le64(snap_id); 5397 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5398 &rbd_dev->header_oloc, "get_snapshot_name", 5399 &snapid, sizeof(snapid), reply_buf, size); 5400 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5401 if (ret < 0) { 5402 snap_name = ERR_PTR(ret); 5403 goto out; 5404 } 5405 5406 p = reply_buf; 5407 end = reply_buf + ret; 5408 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 5409 if (IS_ERR(snap_name)) 5410 goto out; 5411 5412 dout(" snap_id 0x%016llx snap_name = %s\n", 5413 (unsigned long long)snap_id, snap_name); 5414 out: 5415 kfree(reply_buf); 5416 5417 return snap_name; 5418 } 5419 5420 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) 5421 { 5422 bool first_time = rbd_dev->header.object_prefix == NULL; 5423 int ret; 5424 5425 ret = rbd_dev_v2_image_size(rbd_dev); 5426 if (ret) 5427 return ret; 5428 5429 if (first_time) { 5430 ret = rbd_dev_v2_header_onetime(rbd_dev); 5431 if (ret) 5432 return ret; 5433 } 5434 5435 ret = rbd_dev_v2_snap_context(rbd_dev); 5436 if (ret && first_time) { 5437 kfree(rbd_dev->header.object_prefix); 5438 rbd_dev->header.object_prefix = NULL; 5439 } 5440 5441 return ret; 5442 } 5443 5444 static int rbd_dev_header_info(struct rbd_device *rbd_dev) 5445 { 5446 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 5447 5448 if (rbd_dev->image_format == 1) 5449 return rbd_dev_v1_header_info(rbd_dev); 5450 5451 return rbd_dev_v2_header_info(rbd_dev); 5452 } 5453 5454 /* 5455 * Skips over white space at *buf, and updates *buf to point to the 5456 * first found non-space character (if any). Returns the length of 5457 * the token (string of non-white space characters) found. Note 5458 * that *buf must be terminated with '\0'. 5459 */ 5460 static inline size_t next_token(const char **buf) 5461 { 5462 /* 5463 * These are the characters that produce nonzero for 5464 * isspace() in the "C" and "POSIX" locales. 5465 */ 5466 const char *spaces = " \f\n\r\t\v"; 5467 5468 *buf += strspn(*buf, spaces); /* Find start of token */ 5469 5470 return strcspn(*buf, spaces); /* Return token length */ 5471 } 5472 5473 /* 5474 * Finds the next token in *buf, dynamically allocates a buffer big 5475 * enough to hold a copy of it, and copies the token into the new 5476 * buffer. The copy is guaranteed to be terminated with '\0'. Note 5477 * that a duplicate buffer is created even for a zero-length token. 5478 * 5479 * Returns a pointer to the newly-allocated duplicate, or a null 5480 * pointer if memory for the duplicate was not available. If 5481 * the lenp argument is a non-null pointer, the length of the token 5482 * (not including the '\0') is returned in *lenp. 5483 * 5484 * If successful, the *buf pointer will be updated to point beyond 5485 * the end of the found token. 5486 * 5487 * Note: uses GFP_KERNEL for allocation. 5488 */ 5489 static inline char *dup_token(const char **buf, size_t *lenp) 5490 { 5491 char *dup; 5492 size_t len; 5493 5494 len = next_token(buf); 5495 dup = kmemdup(*buf, len + 1, GFP_KERNEL); 5496 if (!dup) 5497 return NULL; 5498 *(dup + len) = '\0'; 5499 *buf += len; 5500 5501 if (lenp) 5502 *lenp = len; 5503 5504 return dup; 5505 } 5506 5507 /* 5508 * Parse the options provided for an "rbd add" (i.e., rbd image 5509 * mapping) request. These arrive via a write to /sys/bus/rbd/add, 5510 * and the data written is passed here via a NUL-terminated buffer. 5511 * Returns 0 if successful or an error code otherwise. 5512 * 5513 * The information extracted from these options is recorded in 5514 * the other parameters which return dynamically-allocated 5515 * structures: 5516 * ceph_opts 5517 * The address of a pointer that will refer to a ceph options 5518 * structure. Caller must release the returned pointer using 5519 * ceph_destroy_options() when it is no longer needed. 5520 * rbd_opts 5521 * Address of an rbd options pointer. Fully initialized by 5522 * this function; caller must release with kfree(). 5523 * spec 5524 * Address of an rbd image specification pointer. Fully 5525 * initialized by this function based on parsed options. 5526 * Caller must release with rbd_spec_put(). 5527 * 5528 * The options passed take this form: 5529 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>] 5530 * where: 5531 * <mon_addrs> 5532 * A comma-separated list of one or more monitor addresses. 5533 * A monitor address is an ip address, optionally followed 5534 * by a port number (separated by a colon). 5535 * I.e.: ip1[:port1][,ip2[:port2]...] 5536 * <options> 5537 * A comma-separated list of ceph and/or rbd options. 5538 * <pool_name> 5539 * The name of the rados pool containing the rbd image. 5540 * <image_name> 5541 * The name of the image in that pool to map. 5542 * <snap_id> 5543 * An optional snapshot id. If provided, the mapping will 5544 * present data from the image at the time that snapshot was 5545 * created. The image head is used if no snapshot id is 5546 * provided. Snapshot mappings are always read-only. 5547 */ 5548 static int rbd_add_parse_args(const char *buf, 5549 struct ceph_options **ceph_opts, 5550 struct rbd_options **opts, 5551 struct rbd_spec **rbd_spec) 5552 { 5553 size_t len; 5554 char *options; 5555 const char *mon_addrs; 5556 char *snap_name; 5557 size_t mon_addrs_size; 5558 struct rbd_spec *spec = NULL; 5559 struct rbd_options *rbd_opts = NULL; 5560 struct ceph_options *copts; 5561 int ret; 5562 5563 /* The first four tokens are required */ 5564 5565 len = next_token(&buf); 5566 if (!len) { 5567 rbd_warn(NULL, "no monitor address(es) provided"); 5568 return -EINVAL; 5569 } 5570 mon_addrs = buf; 5571 mon_addrs_size = len + 1; 5572 buf += len; 5573 5574 ret = -EINVAL; 5575 options = dup_token(&buf, NULL); 5576 if (!options) 5577 return -ENOMEM; 5578 if (!*options) { 5579 rbd_warn(NULL, "no options provided"); 5580 goto out_err; 5581 } 5582 5583 spec = rbd_spec_alloc(); 5584 if (!spec) 5585 goto out_mem; 5586 5587 spec->pool_name = dup_token(&buf, NULL); 5588 if (!spec->pool_name) 5589 goto out_mem; 5590 if (!*spec->pool_name) { 5591 rbd_warn(NULL, "no pool name provided"); 5592 goto out_err; 5593 } 5594 5595 spec->image_name = dup_token(&buf, NULL); 5596 if (!spec->image_name) 5597 goto out_mem; 5598 if (!*spec->image_name) { 5599 rbd_warn(NULL, "no image name provided"); 5600 goto out_err; 5601 } 5602 5603 /* 5604 * Snapshot name is optional; default is to use "-" 5605 * (indicating the head/no snapshot). 5606 */ 5607 len = next_token(&buf); 5608 if (!len) { 5609 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */ 5610 len = sizeof (RBD_SNAP_HEAD_NAME) - 1; 5611 } else if (len > RBD_MAX_SNAP_NAME_LEN) { 5612 ret = -ENAMETOOLONG; 5613 goto out_err; 5614 } 5615 snap_name = kmemdup(buf, len + 1, GFP_KERNEL); 5616 if (!snap_name) 5617 goto out_mem; 5618 *(snap_name + len) = '\0'; 5619 spec->snap_name = snap_name; 5620 5621 /* Initialize all rbd options to the defaults */ 5622 5623 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL); 5624 if (!rbd_opts) 5625 goto out_mem; 5626 5627 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; 5628 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; 5629 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; 5630 rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; 5631 5632 copts = ceph_parse_options(options, mon_addrs, 5633 mon_addrs + mon_addrs_size - 1, 5634 parse_rbd_opts_token, rbd_opts); 5635 if (IS_ERR(copts)) { 5636 ret = PTR_ERR(copts); 5637 goto out_err; 5638 } 5639 kfree(options); 5640 5641 *ceph_opts = copts; 5642 *opts = rbd_opts; 5643 *rbd_spec = spec; 5644 5645 return 0; 5646 out_mem: 5647 ret = -ENOMEM; 5648 out_err: 5649 kfree(rbd_opts); 5650 rbd_spec_put(spec); 5651 kfree(options); 5652 5653 return ret; 5654 } 5655 5656 /* 5657 * Return pool id (>= 0) or a negative error code. 5658 */ 5659 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name) 5660 { 5661 struct ceph_options *opts = rbdc->client->options; 5662 u64 newest_epoch; 5663 int tries = 0; 5664 int ret; 5665 5666 again: 5667 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name); 5668 if (ret == -ENOENT && tries++ < 1) { 5669 ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap", 5670 &newest_epoch); 5671 if (ret < 0) 5672 return ret; 5673 5674 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) { 5675 ceph_osdc_maybe_request_map(&rbdc->client->osdc); 5676 (void) ceph_monc_wait_osdmap(&rbdc->client->monc, 5677 newest_epoch, 5678 opts->mount_timeout); 5679 goto again; 5680 } else { 5681 /* the osdmap we have is new enough */ 5682 return -ENOENT; 5683 } 5684 } 5685 5686 return ret; 5687 } 5688 5689 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev) 5690 { 5691 down_write(&rbd_dev->lock_rwsem); 5692 if (__rbd_is_lock_owner(rbd_dev)) 5693 rbd_unlock(rbd_dev); 5694 up_write(&rbd_dev->lock_rwsem); 5695 } 5696 5697 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) 5698 { 5699 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { 5700 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); 5701 return -EINVAL; 5702 } 5703 5704 /* FIXME: "rbd map --exclusive" should be in interruptible */ 5705 down_read(&rbd_dev->lock_rwsem); 5706 rbd_wait_state_locked(rbd_dev); 5707 up_read(&rbd_dev->lock_rwsem); 5708 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 5709 rbd_warn(rbd_dev, "failed to acquire exclusive lock"); 5710 return -EROFS; 5711 } 5712 5713 return 0; 5714 } 5715 5716 /* 5717 * An rbd format 2 image has a unique identifier, distinct from the 5718 * name given to it by the user. Internally, that identifier is 5719 * what's used to specify the names of objects related to the image. 5720 * 5721 * A special "rbd id" object is used to map an rbd image name to its 5722 * id. If that object doesn't exist, then there is no v2 rbd image 5723 * with the supplied name. 5724 * 5725 * This function will record the given rbd_dev's image_id field if 5726 * it can be determined, and in that case will return 0. If any 5727 * errors occur a negative errno will be returned and the rbd_dev's 5728 * image_id field will be unchanged (and should be NULL). 5729 */ 5730 static int rbd_dev_image_id(struct rbd_device *rbd_dev) 5731 { 5732 int ret; 5733 size_t size; 5734 CEPH_DEFINE_OID_ONSTACK(oid); 5735 void *response; 5736 char *image_id; 5737 5738 /* 5739 * When probing a parent image, the image id is already 5740 * known (and the image name likely is not). There's no 5741 * need to fetch the image id again in this case. We 5742 * do still need to set the image format though. 5743 */ 5744 if (rbd_dev->spec->image_id) { 5745 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1; 5746 5747 return 0; 5748 } 5749 5750 /* 5751 * First, see if the format 2 image id file exists, and if 5752 * so, get the image's persistent id from it. 5753 */ 5754 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX, 5755 rbd_dev->spec->image_name); 5756 if (ret) 5757 return ret; 5758 5759 dout("rbd id object name is %s\n", oid.name); 5760 5761 /* Response will be an encoded string, which includes a length */ 5762 5763 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX; 5764 response = kzalloc(size, GFP_NOIO); 5765 if (!response) { 5766 ret = -ENOMEM; 5767 goto out; 5768 } 5769 5770 /* If it doesn't exist we'll assume it's a format 1 image */ 5771 5772 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc, 5773 "get_id", NULL, 0, 5774 response, RBD_IMAGE_ID_LEN_MAX); 5775 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5776 if (ret == -ENOENT) { 5777 image_id = kstrdup("", GFP_KERNEL); 5778 ret = image_id ? 0 : -ENOMEM; 5779 if (!ret) 5780 rbd_dev->image_format = 1; 5781 } else if (ret >= 0) { 5782 void *p = response; 5783 5784 image_id = ceph_extract_encoded_string(&p, p + ret, 5785 NULL, GFP_NOIO); 5786 ret = PTR_ERR_OR_ZERO(image_id); 5787 if (!ret) 5788 rbd_dev->image_format = 2; 5789 } 5790 5791 if (!ret) { 5792 rbd_dev->spec->image_id = image_id; 5793 dout("image_id is %s\n", image_id); 5794 } 5795 out: 5796 kfree(response); 5797 ceph_oid_destroy(&oid); 5798 return ret; 5799 } 5800 5801 /* 5802 * Undo whatever state changes are made by v1 or v2 header info 5803 * call. 5804 */ 5805 static void rbd_dev_unprobe(struct rbd_device *rbd_dev) 5806 { 5807 struct rbd_image_header *header; 5808 5809 rbd_dev_parent_put(rbd_dev); 5810 5811 /* Free dynamic fields from the header, then zero it out */ 5812 5813 header = &rbd_dev->header; 5814 ceph_put_snap_context(header->snapc); 5815 kfree(header->snap_sizes); 5816 kfree(header->snap_names); 5817 kfree(header->object_prefix); 5818 memset(header, 0, sizeof (*header)); 5819 } 5820 5821 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) 5822 { 5823 int ret; 5824 5825 ret = rbd_dev_v2_object_prefix(rbd_dev); 5826 if (ret) 5827 goto out_err; 5828 5829 /* 5830 * Get the and check features for the image. Currently the 5831 * features are assumed to never change. 5832 */ 5833 ret = rbd_dev_v2_features(rbd_dev); 5834 if (ret) 5835 goto out_err; 5836 5837 /* If the image supports fancy striping, get its parameters */ 5838 5839 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { 5840 ret = rbd_dev_v2_striping_info(rbd_dev); 5841 if (ret < 0) 5842 goto out_err; 5843 } 5844 5845 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) { 5846 ret = rbd_dev_v2_data_pool(rbd_dev); 5847 if (ret) 5848 goto out_err; 5849 } 5850 5851 rbd_init_layout(rbd_dev); 5852 return 0; 5853 5854 out_err: 5855 rbd_dev->header.features = 0; 5856 kfree(rbd_dev->header.object_prefix); 5857 rbd_dev->header.object_prefix = NULL; 5858 return ret; 5859 } 5860 5861 /* 5862 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() -> 5863 * rbd_dev_image_probe() recursion depth, which means it's also the 5864 * length of the already discovered part of the parent chain. 5865 */ 5866 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) 5867 { 5868 struct rbd_device *parent = NULL; 5869 int ret; 5870 5871 if (!rbd_dev->parent_spec) 5872 return 0; 5873 5874 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) { 5875 pr_info("parent chain is too long (%d)\n", depth); 5876 ret = -EINVAL; 5877 goto out_err; 5878 } 5879 5880 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec); 5881 if (!parent) { 5882 ret = -ENOMEM; 5883 goto out_err; 5884 } 5885 5886 /* 5887 * Images related by parent/child relationships always share 5888 * rbd_client and spec/parent_spec, so bump their refcounts. 5889 */ 5890 __rbd_get_client(rbd_dev->rbd_client); 5891 rbd_spec_get(rbd_dev->parent_spec); 5892 5893 ret = rbd_dev_image_probe(parent, depth); 5894 if (ret < 0) 5895 goto out_err; 5896 5897 rbd_dev->parent = parent; 5898 atomic_set(&rbd_dev->parent_ref, 1); 5899 return 0; 5900 5901 out_err: 5902 rbd_dev_unparent(rbd_dev); 5903 rbd_dev_destroy(parent); 5904 return ret; 5905 } 5906 5907 static void rbd_dev_device_release(struct rbd_device *rbd_dev) 5908 { 5909 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5910 rbd_dev_mapping_clear(rbd_dev); 5911 rbd_free_disk(rbd_dev); 5912 if (!single_major) 5913 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5914 } 5915 5916 /* 5917 * rbd_dev->header_rwsem must be locked for write and will be unlocked 5918 * upon return. 5919 */ 5920 static int rbd_dev_device_setup(struct rbd_device *rbd_dev) 5921 { 5922 int ret; 5923 5924 /* Record our major and minor device numbers. */ 5925 5926 if (!single_major) { 5927 ret = register_blkdev(0, rbd_dev->name); 5928 if (ret < 0) 5929 goto err_out_unlock; 5930 5931 rbd_dev->major = ret; 5932 rbd_dev->minor = 0; 5933 } else { 5934 rbd_dev->major = rbd_major; 5935 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id); 5936 } 5937 5938 /* Set up the blkdev mapping. */ 5939 5940 ret = rbd_init_disk(rbd_dev); 5941 if (ret) 5942 goto err_out_blkdev; 5943 5944 ret = rbd_dev_mapping_set(rbd_dev); 5945 if (ret) 5946 goto err_out_disk; 5947 5948 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); 5949 set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only); 5950 5951 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id); 5952 if (ret) 5953 goto err_out_mapping; 5954 5955 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5956 up_write(&rbd_dev->header_rwsem); 5957 return 0; 5958 5959 err_out_mapping: 5960 rbd_dev_mapping_clear(rbd_dev); 5961 err_out_disk: 5962 rbd_free_disk(rbd_dev); 5963 err_out_blkdev: 5964 if (!single_major) 5965 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5966 err_out_unlock: 5967 up_write(&rbd_dev->header_rwsem); 5968 return ret; 5969 } 5970 5971 static int rbd_dev_header_name(struct rbd_device *rbd_dev) 5972 { 5973 struct rbd_spec *spec = rbd_dev->spec; 5974 int ret; 5975 5976 /* Record the header object name for this rbd image. */ 5977 5978 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 5979 if (rbd_dev->image_format == 1) 5980 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s", 5981 spec->image_name, RBD_SUFFIX); 5982 else 5983 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s", 5984 RBD_HEADER_PREFIX, spec->image_id); 5985 5986 return ret; 5987 } 5988 5989 static void rbd_dev_image_release(struct rbd_device *rbd_dev) 5990 { 5991 rbd_dev_unprobe(rbd_dev); 5992 if (rbd_dev->opts) 5993 rbd_unregister_watch(rbd_dev); 5994 rbd_dev->image_format = 0; 5995 kfree(rbd_dev->spec->image_id); 5996 rbd_dev->spec->image_id = NULL; 5997 } 5998 5999 /* 6000 * Probe for the existence of the header object for the given rbd 6001 * device. If this image is the one being mapped (i.e., not a 6002 * parent), initiate a watch on its header object before using that 6003 * object to get detailed information about the rbd image. 6004 */ 6005 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) 6006 { 6007 int ret; 6008 6009 /* 6010 * Get the id from the image id object. Unless there's an 6011 * error, rbd_dev->spec->image_id will be filled in with 6012 * a dynamically-allocated string, and rbd_dev->image_format 6013 * will be set to either 1 or 2. 6014 */ 6015 ret = rbd_dev_image_id(rbd_dev); 6016 if (ret) 6017 return ret; 6018 6019 ret = rbd_dev_header_name(rbd_dev); 6020 if (ret) 6021 goto err_out_format; 6022 6023 if (!depth) { 6024 ret = rbd_register_watch(rbd_dev); 6025 if (ret) { 6026 if (ret == -ENOENT) 6027 pr_info("image %s/%s does not exist\n", 6028 rbd_dev->spec->pool_name, 6029 rbd_dev->spec->image_name); 6030 goto err_out_format; 6031 } 6032 } 6033 6034 ret = rbd_dev_header_info(rbd_dev); 6035 if (ret) 6036 goto err_out_watch; 6037 6038 /* 6039 * If this image is the one being mapped, we have pool name and 6040 * id, image name and id, and snap name - need to fill snap id. 6041 * Otherwise this is a parent image, identified by pool, image 6042 * and snap ids - need to fill in names for those ids. 6043 */ 6044 if (!depth) 6045 ret = rbd_spec_fill_snap_id(rbd_dev); 6046 else 6047 ret = rbd_spec_fill_names(rbd_dev); 6048 if (ret) { 6049 if (ret == -ENOENT) 6050 pr_info("snap %s/%s@%s does not exist\n", 6051 rbd_dev->spec->pool_name, 6052 rbd_dev->spec->image_name, 6053 rbd_dev->spec->snap_name); 6054 goto err_out_probe; 6055 } 6056 6057 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { 6058 ret = rbd_dev_v2_parent_info(rbd_dev); 6059 if (ret) 6060 goto err_out_probe; 6061 6062 /* 6063 * Need to warn users if this image is the one being 6064 * mapped and has a parent. 6065 */ 6066 if (!depth && rbd_dev->parent_spec) 6067 rbd_warn(rbd_dev, 6068 "WARNING: kernel layering is EXPERIMENTAL!"); 6069 } 6070 6071 ret = rbd_dev_probe_parent(rbd_dev, depth); 6072 if (ret) 6073 goto err_out_probe; 6074 6075 dout("discovered format %u image, header name is %s\n", 6076 rbd_dev->image_format, rbd_dev->header_oid.name); 6077 return 0; 6078 6079 err_out_probe: 6080 rbd_dev_unprobe(rbd_dev); 6081 err_out_watch: 6082 if (!depth) 6083 rbd_unregister_watch(rbd_dev); 6084 err_out_format: 6085 rbd_dev->image_format = 0; 6086 kfree(rbd_dev->spec->image_id); 6087 rbd_dev->spec->image_id = NULL; 6088 return ret; 6089 } 6090 6091 static ssize_t do_rbd_add(struct bus_type *bus, 6092 const char *buf, 6093 size_t count) 6094 { 6095 struct rbd_device *rbd_dev = NULL; 6096 struct ceph_options *ceph_opts = NULL; 6097 struct rbd_options *rbd_opts = NULL; 6098 struct rbd_spec *spec = NULL; 6099 struct rbd_client *rbdc; 6100 int rc; 6101 6102 if (!try_module_get(THIS_MODULE)) 6103 return -ENODEV; 6104 6105 /* parse add command */ 6106 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); 6107 if (rc < 0) 6108 goto out; 6109 6110 rbdc = rbd_get_client(ceph_opts); 6111 if (IS_ERR(rbdc)) { 6112 rc = PTR_ERR(rbdc); 6113 goto err_out_args; 6114 } 6115 6116 /* pick the pool */ 6117 rc = rbd_add_get_pool_id(rbdc, spec->pool_name); 6118 if (rc < 0) { 6119 if (rc == -ENOENT) 6120 pr_info("pool %s does not exist\n", spec->pool_name); 6121 goto err_out_client; 6122 } 6123 spec->pool_id = (u64)rc; 6124 6125 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts); 6126 if (!rbd_dev) { 6127 rc = -ENOMEM; 6128 goto err_out_client; 6129 } 6130 rbdc = NULL; /* rbd_dev now owns this */ 6131 spec = NULL; /* rbd_dev now owns this */ 6132 rbd_opts = NULL; /* rbd_dev now owns this */ 6133 6134 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL); 6135 if (!rbd_dev->config_info) { 6136 rc = -ENOMEM; 6137 goto err_out_rbd_dev; 6138 } 6139 6140 down_write(&rbd_dev->header_rwsem); 6141 rc = rbd_dev_image_probe(rbd_dev, 0); 6142 if (rc < 0) { 6143 up_write(&rbd_dev->header_rwsem); 6144 goto err_out_rbd_dev; 6145 } 6146 6147 /* If we are mapping a snapshot it must be marked read-only */ 6148 if (rbd_dev->spec->snap_id != CEPH_NOSNAP) 6149 rbd_dev->opts->read_only = true; 6150 6151 rc = rbd_dev_device_setup(rbd_dev); 6152 if (rc) 6153 goto err_out_image_probe; 6154 6155 if (rbd_dev->opts->exclusive) { 6156 rc = rbd_add_acquire_lock(rbd_dev); 6157 if (rc) 6158 goto err_out_device_setup; 6159 } 6160 6161 /* Everything's ready. Announce the disk to the world. */ 6162 6163 rc = device_add(&rbd_dev->dev); 6164 if (rc) 6165 goto err_out_image_lock; 6166 6167 add_disk(rbd_dev->disk); 6168 /* see rbd_init_disk() */ 6169 blk_put_queue(rbd_dev->disk->queue); 6170 6171 spin_lock(&rbd_dev_list_lock); 6172 list_add_tail(&rbd_dev->node, &rbd_dev_list); 6173 spin_unlock(&rbd_dev_list_lock); 6174 6175 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name, 6176 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT, 6177 rbd_dev->header.features); 6178 rc = count; 6179 out: 6180 module_put(THIS_MODULE); 6181 return rc; 6182 6183 err_out_image_lock: 6184 rbd_dev_image_unlock(rbd_dev); 6185 err_out_device_setup: 6186 rbd_dev_device_release(rbd_dev); 6187 err_out_image_probe: 6188 rbd_dev_image_release(rbd_dev); 6189 err_out_rbd_dev: 6190 rbd_dev_destroy(rbd_dev); 6191 err_out_client: 6192 rbd_put_client(rbdc); 6193 err_out_args: 6194 rbd_spec_put(spec); 6195 kfree(rbd_opts); 6196 goto out; 6197 } 6198 6199 static ssize_t rbd_add(struct bus_type *bus, 6200 const char *buf, 6201 size_t count) 6202 { 6203 if (single_major) 6204 return -EINVAL; 6205 6206 return do_rbd_add(bus, buf, count); 6207 } 6208 6209 static ssize_t rbd_add_single_major(struct bus_type *bus, 6210 const char *buf, 6211 size_t count) 6212 { 6213 return do_rbd_add(bus, buf, count); 6214 } 6215 6216 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) 6217 { 6218 while (rbd_dev->parent) { 6219 struct rbd_device *first = rbd_dev; 6220 struct rbd_device *second = first->parent; 6221 struct rbd_device *third; 6222 6223 /* 6224 * Follow to the parent with no grandparent and 6225 * remove it. 6226 */ 6227 while (second && (third = second->parent)) { 6228 first = second; 6229 second = third; 6230 } 6231 rbd_assert(second); 6232 rbd_dev_image_release(second); 6233 rbd_dev_destroy(second); 6234 first->parent = NULL; 6235 first->parent_overlap = 0; 6236 6237 rbd_assert(first->parent_spec); 6238 rbd_spec_put(first->parent_spec); 6239 first->parent_spec = NULL; 6240 } 6241 } 6242 6243 static ssize_t do_rbd_remove(struct bus_type *bus, 6244 const char *buf, 6245 size_t count) 6246 { 6247 struct rbd_device *rbd_dev = NULL; 6248 struct list_head *tmp; 6249 int dev_id; 6250 char opt_buf[6]; 6251 bool already = false; 6252 bool force = false; 6253 int ret; 6254 6255 dev_id = -1; 6256 opt_buf[0] = '\0'; 6257 sscanf(buf, "%d %5s", &dev_id, opt_buf); 6258 if (dev_id < 0) { 6259 pr_err("dev_id out of range\n"); 6260 return -EINVAL; 6261 } 6262 if (opt_buf[0] != '\0') { 6263 if (!strcmp(opt_buf, "force")) { 6264 force = true; 6265 } else { 6266 pr_err("bad remove option at '%s'\n", opt_buf); 6267 return -EINVAL; 6268 } 6269 } 6270 6271 ret = -ENOENT; 6272 spin_lock(&rbd_dev_list_lock); 6273 list_for_each(tmp, &rbd_dev_list) { 6274 rbd_dev = list_entry(tmp, struct rbd_device, node); 6275 if (rbd_dev->dev_id == dev_id) { 6276 ret = 0; 6277 break; 6278 } 6279 } 6280 if (!ret) { 6281 spin_lock_irq(&rbd_dev->lock); 6282 if (rbd_dev->open_count && !force) 6283 ret = -EBUSY; 6284 else 6285 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING, 6286 &rbd_dev->flags); 6287 spin_unlock_irq(&rbd_dev->lock); 6288 } 6289 spin_unlock(&rbd_dev_list_lock); 6290 if (ret < 0 || already) 6291 return ret; 6292 6293 if (force) { 6294 /* 6295 * Prevent new IO from being queued and wait for existing 6296 * IO to complete/fail. 6297 */ 6298 blk_mq_freeze_queue(rbd_dev->disk->queue); 6299 blk_set_queue_dying(rbd_dev->disk->queue); 6300 } 6301 6302 del_gendisk(rbd_dev->disk); 6303 spin_lock(&rbd_dev_list_lock); 6304 list_del_init(&rbd_dev->node); 6305 spin_unlock(&rbd_dev_list_lock); 6306 device_del(&rbd_dev->dev); 6307 6308 rbd_dev_image_unlock(rbd_dev); 6309 rbd_dev_device_release(rbd_dev); 6310 rbd_dev_image_release(rbd_dev); 6311 rbd_dev_destroy(rbd_dev); 6312 return count; 6313 } 6314 6315 static ssize_t rbd_remove(struct bus_type *bus, 6316 const char *buf, 6317 size_t count) 6318 { 6319 if (single_major) 6320 return -EINVAL; 6321 6322 return do_rbd_remove(bus, buf, count); 6323 } 6324 6325 static ssize_t rbd_remove_single_major(struct bus_type *bus, 6326 const char *buf, 6327 size_t count) 6328 { 6329 return do_rbd_remove(bus, buf, count); 6330 } 6331 6332 /* 6333 * create control files in sysfs 6334 * /sys/bus/rbd/... 6335 */ 6336 static int rbd_sysfs_init(void) 6337 { 6338 int ret; 6339 6340 ret = device_register(&rbd_root_dev); 6341 if (ret < 0) 6342 return ret; 6343 6344 ret = bus_register(&rbd_bus_type); 6345 if (ret < 0) 6346 device_unregister(&rbd_root_dev); 6347 6348 return ret; 6349 } 6350 6351 static void rbd_sysfs_cleanup(void) 6352 { 6353 bus_unregister(&rbd_bus_type); 6354 device_unregister(&rbd_root_dev); 6355 } 6356 6357 static int rbd_slab_init(void) 6358 { 6359 rbd_assert(!rbd_img_request_cache); 6360 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0); 6361 if (!rbd_img_request_cache) 6362 return -ENOMEM; 6363 6364 rbd_assert(!rbd_obj_request_cache); 6365 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0); 6366 if (!rbd_obj_request_cache) 6367 goto out_err; 6368 6369 rbd_assert(!rbd_bio_clone); 6370 rbd_bio_clone = bioset_create(BIO_POOL_SIZE, 0, 0); 6371 if (!rbd_bio_clone) 6372 goto out_err_clone; 6373 6374 return 0; 6375 6376 out_err_clone: 6377 kmem_cache_destroy(rbd_obj_request_cache); 6378 rbd_obj_request_cache = NULL; 6379 out_err: 6380 kmem_cache_destroy(rbd_img_request_cache); 6381 rbd_img_request_cache = NULL; 6382 return -ENOMEM; 6383 } 6384 6385 static void rbd_slab_exit(void) 6386 { 6387 rbd_assert(rbd_obj_request_cache); 6388 kmem_cache_destroy(rbd_obj_request_cache); 6389 rbd_obj_request_cache = NULL; 6390 6391 rbd_assert(rbd_img_request_cache); 6392 kmem_cache_destroy(rbd_img_request_cache); 6393 rbd_img_request_cache = NULL; 6394 6395 rbd_assert(rbd_bio_clone); 6396 bioset_free(rbd_bio_clone); 6397 rbd_bio_clone = NULL; 6398 } 6399 6400 static int __init rbd_init(void) 6401 { 6402 int rc; 6403 6404 if (!libceph_compatible(NULL)) { 6405 rbd_warn(NULL, "libceph incompatibility (quitting)"); 6406 return -EINVAL; 6407 } 6408 6409 rc = rbd_slab_init(); 6410 if (rc) 6411 return rc; 6412 6413 /* 6414 * The number of active work items is limited by the number of 6415 * rbd devices * queue depth, so leave @max_active at default. 6416 */ 6417 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0); 6418 if (!rbd_wq) { 6419 rc = -ENOMEM; 6420 goto err_out_slab; 6421 } 6422 6423 if (single_major) { 6424 rbd_major = register_blkdev(0, RBD_DRV_NAME); 6425 if (rbd_major < 0) { 6426 rc = rbd_major; 6427 goto err_out_wq; 6428 } 6429 } 6430 6431 rc = rbd_sysfs_init(); 6432 if (rc) 6433 goto err_out_blkdev; 6434 6435 if (single_major) 6436 pr_info("loaded (major %d)\n", rbd_major); 6437 else 6438 pr_info("loaded\n"); 6439 6440 return 0; 6441 6442 err_out_blkdev: 6443 if (single_major) 6444 unregister_blkdev(rbd_major, RBD_DRV_NAME); 6445 err_out_wq: 6446 destroy_workqueue(rbd_wq); 6447 err_out_slab: 6448 rbd_slab_exit(); 6449 return rc; 6450 } 6451 6452 static void __exit rbd_exit(void) 6453 { 6454 ida_destroy(&rbd_dev_id_ida); 6455 rbd_sysfs_cleanup(); 6456 if (single_major) 6457 unregister_blkdev(rbd_major, RBD_DRV_NAME); 6458 destroy_workqueue(rbd_wq); 6459 rbd_slab_exit(); 6460 } 6461 6462 module_init(rbd_init); 6463 module_exit(rbd_exit); 6464 6465 MODULE_AUTHOR("Alex Elder <elder@inktank.com>"); 6466 MODULE_AUTHOR("Sage Weil <sage@newdream.net>"); 6467 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>"); 6468 /* following authorship retained from original osdblk.c */ 6469 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>"); 6470 6471 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver"); 6472 MODULE_LICENSE("GPL"); 6473