/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
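/*
 * Editor's sketch (illustrative only, nothing below is called by the
 * driver): how the two helpers above pair up for reference counting.
 * The names example_ref_get()/example_ref_put() are hypothetical.
 */
#if 0
static bool example_ref_get(atomic_t *ref)
{
	/* Fails both when the count is pinned at 0 and on overflow. */
	return atomic_inc_return_safe(ref) > 0;
}

static void example_ref_put(atomic_t *ref)
{
	/* A negative return value here would indicate an underflow bug. */
	if (atomic_dec_return_safe(ref) < 0)
		pr_warn("reference underflow\n");
}
#endif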
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
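/*
 * Editor's illustration (hypothetical values): for a clone mapped at
 * HEAD whose parent is snapshot "gold" of image "base" in pool 2, the
 * two specs would look roughly like:
 *
 *	child rbd_dev->spec        = { .pool_id = 2, .image_name = "clone1",
 *				       .snap_id = CEPH_NOSNAP }
 *	child rbd_dev->parent_spec = { .pool_id = 2, .image_name = "base",
 *				       .snap_id = <id of "gold"> }
 *
 * and parent_spec is the very same kref-counted structure the parent
 * rbd_dev uses as its own spec.
 */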
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	u64			object_no;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn in image request list */

	enum obj_request_type	type;
	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;

	struct kref		kref;
};
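/*
 * Editor's summary of the three flavors described in the comment
 * above:
 *
 *	flavor			which		union member used
 *	---------------------	---------	-----------------------------
 *	standalone		BAD_WHICH	neither (obj_request is NULL)
 *	existence check (STAT)	BAD_WHICH	obj_request (original request)
 *	image data		0..count-1	img_request/img_offset/links
 */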
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	spinlock_t		completion_lock; /* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;	/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64	size;
	u64	features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
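/*
 * Editor's worked example: with RBD_SINGLE_MAJOR_PART_SHIFT = 4 each
 * device owns 16 consecutive minors, so dev_id 3 maps to first minor
 * 3 << 4 = 48, minors 48..63 cover rbd3 and its partitions, and
 * minor_to_rbd_dev_id() recovers 48 >> 4 = 3.
 */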
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
/* Wrapped in do/while (0) so the macro is safe in if/else bodies. */
#define rbd_assert(expr)						\
		do {							\
			if (unlikely(!(expr))) {			\
				printk(KERN_ERR "\nAssertion failure in %s() " \
							"at line %d:\n\n" \
						"\trbd_assert(%s);\n\n", \
						__func__, __LINE__, #expr); \
				BUG();					\
			}						\
		} while (0)
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				     u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client)) {
		ret = PTR_ERR(rbdc->client);	/* propagate the real error */
		goto out_rbdc;
	}
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
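/*
 * Editor's note on ownership (summarizing the comments above): a
 * caller of rbd_client_create() gives up ceph_opts unconditionally,
 * while rbd_client_find() only borrows it for comparison.  That is
 * why rbd_get_client() below must destroy ceph_opts itself whenever
 * it finds an existing client to share, and why its callers may not
 * touch ceph_opts afterwards, even on failure.
 */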
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	case Opt_lock_on_read:
		rbd_opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		rbd_opts->exclusive = true;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
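/*
 * Editor's example (hypothetical option string): libceph splits the
 * per-device options on commas and hands each rbd-specific token to
 * parse_rbd_opts_token() above, so mapping with
 *
 *	queue_depth=128,lock_on_read,ro
 *
 * leaves rbd_opts with .queue_depth = 128, .lock_on_read = true,
 * .read_only = true and .exclusive still at RBD_EXCLUSIVE_DEFAULT.
 */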
static char *obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to remove
 * the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
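/*
 * Editor's illustration (hypothetical snapshots "a" and "bb"): the
 * format 1 header stores the id/size array first, then every name
 * packed back to back and NUL-terminated, which is why the walk above
 * advances by strlen() + 1:
 *
 *	snaps[0] = { .id = 12, .image_size = ... }	(name "a")
 *	snaps[1] = { .id = 7,  .image_size = ... }	(name "bb")
 *	snap_names = "a\0bb\0"
 *
 * so _rbd_dev_v1_snap_name(rbd_dev, 1) returns a copy of "bb".
 */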
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = rbd_obj_bytes(&rbd_dev->header);

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
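/*
 * Editor's worked example: with the default object order of 22
 * (4 MiB objects, segment_size = 0x400000), an image I/O at offset
 * 0x7ff000 for 0x3000 bytes gives rbd_segment_offset() = 0x3ff000 and
 * rbd_segment_length() = 0x1000: the request is clipped at the object
 * boundary, leaving 0x2000 bytes for the next object.
 */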
static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the late "doesn't exist"
 * response.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
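/*
 * Editor's sketch of the ordering the set-only flags above tolerate:
 *
 *	obj_request_existence_set(req, true);	- EXISTS and KNOWN set
 *	obj_request_existence_set(req, false);	- stale answer; EXISTS stays
 *	obj_request_exists_test(req);		- still returns true
 */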
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		return true;
	default:
		return false;
	}
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request);

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->object_no, obj_request->offset,
	     obj_request->length, osd_req);
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bios(&obj_request->bio_pos, 0, length);
		else
			zero_bvecs(&obj_request->bvec_pos, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bios(&obj_request->bio_pos, xferred,
				  length - xferred);
		else
			zero_bvecs(&obj_request->bvec_pos, xferred,
				   length - xferred);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
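/*
 * Editor's worked example for the cases above (hypothetical 8 KiB
 * read, length = 8192):
 *
 *	result == -ENOENT            - hole: zero bytes 0..8191, result = 0
 *	result == 0, xferred == 4096 - short read: zero bytes 4096..8191
 *	result == -EIO               - data left alone, error reported
 *
 * In every case xferred is forced to 8192 so the block layer sees the
 * request fully accounted for.
 */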
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	obj_request->callback(obj_request);
}

static void rbd_obj_request_error(struct rbd_obj_request *obj_request, int err)
{
	obj_request->result = err;
	obj_request->xferred = 0;
	/*
	 * kludge - mirror rbd_obj_request_submit() to match a put in
	 * rbd_img_obj_callback()
	 */
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	obj_request_done_set(obj_request);
	rbd_obj_request_complete(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p\n", __func__, osd_req);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "unexpected OSD op: object_no %016llx opcode %d",
			 obj_request->object_no, opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	ktime_get_real_ts(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->offset;
}

static struct ceph_osd_request *
__rbd_osd_req_create(struct rbd_device *rbd_dev,
		     struct ceph_snap_context *snapc,
		     int num_ops, unsigned int flags,
		     struct rbd_obj_request *obj_request)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				  RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_flags = flags;
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_request;

	req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_request->object_no))
		goto err_req;

	if (ceph_osdc_alloc_messages(req, GFP_NOIO))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}

/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	return __rbd_osd_req_create(rbd_dev, snapc, num_ops,
	    (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD) ?
	    CEPH_OSD_FLAG_WRITE : CEPH_OSD_FLAG_READ, obj_request);
}
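/*
 * Editor's summary of the op layouts these helpers produce:
 *
 *	read:    r_ops[0] = READ
 *	write:   r_ops[0] = SETALLOCHINT, r_ops[1] = WRITE/WRITEFULL
 *	discard: r_ops[0] = DELETE/TRUNCATE/ZERO
 *	copyup:  r_ops[0] = CALL, then the hint+write or the discard op
 *	         (see rbd_osd_req_create_copyup() below)
 *
 * which is why rbd_osd_req_callback() above dispatches on r_ops[0].op
 * and treats SETALLOCHINT as a prefix of the write cases.
 */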
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	return __rbd_osd_req_create(img_request->rbd_dev,
				    img_request->snapc, num_osd_ops,
				    CEPH_OSD_FLAG_WRITE, obj_request);
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

static struct rbd_obj_request *
rbd_obj_request_create(enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(obj_request_type_valid(type));

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	default:
		rbd_assert(0);
	}

	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
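/*
 * Editor's note: this get/put pair is what the "safe" counters at the
 * top of the file exist for.  Every image request against a layered
 * image brackets its lifetime with rbd_dev_parent_get() and
 * rbd_dev_parent_put() (see rbd_img_request_create() and
 * rbd_img_request_destroy() below); once the count falls to zero,
 * rbd_dev_unparent() runs and the count stays pinned at zero, so a
 * late rbd_dev_parent_get() cannot resurrect the torn-down parent.
 */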
1966 * 1967 * Returns true if the rbd device has a parent with a non-zero 1968 * overlap and a reference for it was successfully taken, or 1969 * false otherwise. 1970 */ 1971 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) 1972 { 1973 int counter = 0; 1974 1975 if (!rbd_dev->parent_spec) 1976 return false; 1977 1978 down_read(&rbd_dev->header_rwsem); 1979 if (rbd_dev->parent_overlap) 1980 counter = atomic_inc_return_safe(&rbd_dev->parent_ref); 1981 up_read(&rbd_dev->header_rwsem); 1982 1983 if (counter < 0) 1984 rbd_warn(rbd_dev, "parent reference overflow"); 1985 1986 return counter > 0; 1987 } 1988 1989 /* 1990 * Caller is responsible for filling in the list of object requests 1991 * that comprises the image request, and the Linux request pointer 1992 * (if there is one). 1993 */ 1994 static struct rbd_img_request *rbd_img_request_create( 1995 struct rbd_device *rbd_dev, 1996 u64 offset, u64 length, 1997 enum obj_operation_type op_type, 1998 struct ceph_snap_context *snapc) 1999 { 2000 struct rbd_img_request *img_request; 2001 2002 img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO); 2003 if (!img_request) 2004 return NULL; 2005 2006 img_request->rbd_dev = rbd_dev; 2007 img_request->offset = offset; 2008 img_request->length = length; 2009 if (op_type == OBJ_OP_DISCARD) { 2010 img_request_discard_set(img_request); 2011 img_request->snapc = snapc; 2012 } else if (op_type == OBJ_OP_WRITE) { 2013 img_request_write_set(img_request); 2014 img_request->snapc = snapc; 2015 } else { 2016 img_request->snap_id = rbd_dev->spec->snap_id; 2017 } 2018 if (rbd_dev_parent_get(rbd_dev)) 2019 img_request_layered_set(img_request); 2020 2021 spin_lock_init(&img_request->completion_lock); 2022 INIT_LIST_HEAD(&img_request->obj_requests); 2023 kref_init(&img_request->kref); 2024 2025 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev, 2026 obj_op_name(op_type), offset, length, img_request); 2027 2028 return img_request; 2029 } 2030 2031 static void rbd_img_request_destroy(struct kref *kref) 2032 { 2033 struct rbd_img_request *img_request; 2034 struct rbd_obj_request *obj_request; 2035 struct rbd_obj_request *next_obj_request; 2036 2037 img_request = container_of(kref, struct rbd_img_request, kref); 2038 2039 dout("%s: img %p\n", __func__, img_request); 2040 2041 for_each_obj_request_safe(img_request, obj_request, next_obj_request) 2042 rbd_img_obj_request_del(img_request, obj_request); 2043 rbd_assert(img_request->obj_request_count == 0); 2044 2045 if (img_request_layered_test(img_request)) { 2046 img_request_layered_clear(img_request); 2047 rbd_dev_parent_put(img_request->rbd_dev); 2048 } 2049 2050 if (img_request_write_test(img_request) || 2051 img_request_discard_test(img_request)) 2052 ceph_put_snap_context(img_request->snapc); 2053 2054 kmem_cache_free(rbd_img_request_cache, img_request); 2055 } 2056 2057 static struct rbd_img_request *rbd_parent_request_create( 2058 struct rbd_obj_request *obj_request, 2059 u64 img_offset, u64 length) 2060 { 2061 struct rbd_img_request *parent_request; 2062 struct rbd_device *rbd_dev; 2063 2064 rbd_assert(obj_request->img_request); 2065 rbd_dev = obj_request->img_request->rbd_dev; 2066 2067 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset, 2068 length, OBJ_OP_READ, NULL); 2069 if (!parent_request) 2070 return NULL; 2071 2072 img_request_child_set(parent_request); 2073 rbd_obj_request_get(obj_request); 2074 parent_request->obj_request = obj_request; 2075 2076 return parent_request; 2077 } 2078 2079 static void 
rbd_parent_request_destroy(struct kref *kref)
2080 {
2081 struct rbd_img_request *parent_request;
2082 struct rbd_obj_request *orig_request;
2083
2084 parent_request = container_of(kref, struct rbd_img_request, kref);
2085 orig_request = parent_request->obj_request;
2086
2087 parent_request->obj_request = NULL;
2088 rbd_obj_request_put(orig_request);
2089 img_request_child_clear(parent_request);
2090
2091 rbd_img_request_destroy(kref);
2092 }
2093
2094 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2095 {
2096 struct rbd_img_request *img_request;
2097 unsigned int xferred;
2098 int result;
2099 bool more;
2100
2101 rbd_assert(obj_request_img_data_test(obj_request));
2102 img_request = obj_request->img_request;
2103
2104 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2105 xferred = (unsigned int)obj_request->xferred;
2106 result = obj_request->result;
2107 if (result) {
2108 struct rbd_device *rbd_dev = img_request->rbd_dev;
2109 enum obj_operation_type op_type;
2110
2111 if (img_request_discard_test(img_request))
2112 op_type = OBJ_OP_DISCARD;
2113 else if (img_request_write_test(img_request))
2114 op_type = OBJ_OP_WRITE;
2115 else
2116 op_type = OBJ_OP_READ;
2117
2118 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2119 obj_op_name(op_type), obj_request->length,
2120 obj_request->img_offset, obj_request->offset);
2121 rbd_warn(rbd_dev, " result %d xferred %x",
2122 result, xferred);
2123 if (!img_request->result)
2124 img_request->result = result;
2125 /*
2126 * Need to end I/O on the entire obj_request worth of
2127 * bytes in case of error.
2128 */
2129 xferred = obj_request->length;
2130 }
2131
2132 if (img_request_child_test(img_request)) {
2133 rbd_assert(img_request->obj_request != NULL);
2134 more = obj_request->which < img_request->obj_request_count - 1;
2135 } else {
2136 blk_status_t status = errno_to_blk_status(result);
2137
2138 rbd_assert(img_request->rq != NULL);
2139
2140 more = blk_update_request(img_request->rq, status, xferred);
2141 if (!more)
2142 __blk_mq_end_request(img_request->rq, status);
2143 }
2144
2145 return more;
2146 }
2147
2148 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2149 {
2150 struct rbd_img_request *img_request;
2151 u32 which = obj_request->which;
2152 bool more = true;
2153
2154 rbd_assert(obj_request_img_data_test(obj_request));
2155 img_request = obj_request->img_request;
2156
2157 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2158 rbd_assert(img_request != NULL);
2159 rbd_assert(img_request->obj_request_count > 0);
2160 rbd_assert(which != BAD_WHICH);
2161 rbd_assert(which < img_request->obj_request_count);
2162
2163 spin_lock_irq(&img_request->completion_lock);
2164 if (which != img_request->next_completion)
2165 goto out;
2166
2167 for_each_obj_request_from(img_request, obj_request) {
2168 rbd_assert(more);
2169 rbd_assert(which < img_request->obj_request_count);
2170
2171 if (!obj_request_done_test(obj_request))
2172 break;
2173 more = rbd_img_obj_end_request(obj_request);
2174 which++;
2175 }
2176
2177 rbd_assert(more ^ (which == img_request->obj_request_count));
2178 img_request->next_completion = which;
2179 out:
2180 spin_unlock_irq(&img_request->completion_lock);
2181 rbd_img_request_put(img_request);
2182
2183 if (!more)
2184 rbd_img_request_complete(img_request);
2185 }
2186
2187 /*
2188 * Add individual osd ops to the given ceph_osd_request and prepare
2189 * them for submission. num_ops is the current number of
2190 * osd operations already added to the osd request.
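 * For example, a plain object write is filled with num_ops == 0: the
 * allocation hint op is set up at index 0 and the write op at index 1.
 * The copyup path passes num_ops == 1 because index 0 of its osd
 * request already holds the rbd.copyup call.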
2191 */
2192 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2193 struct ceph_osd_request *osd_request,
2194 enum obj_operation_type op_type,
2195 unsigned int num_ops)
2196 {
2197 struct rbd_img_request *img_request = obj_request->img_request;
2198 struct rbd_device *rbd_dev = img_request->rbd_dev;
2199 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2200 u64 offset = obj_request->offset;
2201 u64 length = obj_request->length;
2202 u64 img_end;
2203 u16 opcode;
2204
2205 if (op_type == OBJ_OP_DISCARD) {
2206 if (!offset && length == object_size &&
2207 (!img_request_layered_test(img_request) ||
2208 !obj_request_overlaps_parent(obj_request))) {
2209 opcode = CEPH_OSD_OP_DELETE;
2210 } else if ((offset + length == object_size)) {
2211 opcode = CEPH_OSD_OP_TRUNCATE;
2212 } else {
2213 down_read(&rbd_dev->header_rwsem);
2214 img_end = rbd_dev->header.image_size;
2215 up_read(&rbd_dev->header_rwsem);
2216
2217 if (obj_request->img_offset + length == img_end)
2218 opcode = CEPH_OSD_OP_TRUNCATE;
2219 else
2220 opcode = CEPH_OSD_OP_ZERO;
2221 }
2222 } else if (op_type == OBJ_OP_WRITE) {
2223 if (!offset && length == object_size)
2224 opcode = CEPH_OSD_OP_WRITEFULL;
2225 else
2226 opcode = CEPH_OSD_OP_WRITE;
2227 osd_req_op_alloc_hint_init(osd_request, num_ops,
2228 object_size, object_size);
2229 num_ops++;
2230 } else {
2231 opcode = CEPH_OSD_OP_READ;
2232 }
2233
2234 if (opcode == CEPH_OSD_OP_DELETE)
2235 osd_req_op_init(osd_request, num_ops, opcode, 0);
2236 else
2237 osd_req_op_extent_init(osd_request, num_ops, opcode,
2238 offset, length, 0, 0);
2239
2240 if (obj_request->type == OBJ_REQUEST_BIO)
2241 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2242 &obj_request->bio_pos, length);
2243 else if (obj_request->type == OBJ_REQUEST_BVECS)
2244 osd_req_op_extent_osd_data_bvec_pos(osd_request, num_ops,
2245 &obj_request->bvec_pos);
2246
2247 /* Discards are also writes */
2248 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2249 rbd_osd_req_format_write(obj_request);
2250 else
2251 rbd_osd_req_format_read(obj_request);
2252 }
2253
2254 /*
2255 * Split up an image request into one or more object requests, each
2256 * to a different object. The "type" parameter indicates whether
2257 * "data_desc" points to the bio iterator or the bvec iterator
2258 * describing the data. In either case this
2259 * function assumes data_desc describes memory sufficient to hold
2260 * all data described by the image request.
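 *
 * For example, with a 4 MiB object size and default striping, a 6 MiB
 * write at image offset 2 MiB is split into two object requests: one
 * for the last 2 MiB of the first object and one covering all 4 MiB
 * of the second.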
2261 */ 2262 static int rbd_img_request_fill(struct rbd_img_request *img_request, 2263 enum obj_request_type type, 2264 void *data_desc) 2265 { 2266 struct rbd_device *rbd_dev = img_request->rbd_dev; 2267 struct rbd_obj_request *obj_request = NULL; 2268 struct rbd_obj_request *next_obj_request; 2269 struct ceph_bio_iter bio_it; 2270 struct ceph_bvec_iter bvec_it; 2271 enum obj_operation_type op_type; 2272 u64 img_offset; 2273 u64 resid; 2274 2275 dout("%s: img %p type %d data_desc %p\n", __func__, img_request, 2276 (int)type, data_desc); 2277 2278 img_offset = img_request->offset; 2279 resid = img_request->length; 2280 rbd_assert(resid > 0); 2281 op_type = rbd_img_request_op_type(img_request); 2282 2283 if (type == OBJ_REQUEST_BIO) { 2284 bio_it = *(struct ceph_bio_iter *)data_desc; 2285 rbd_assert(img_offset == 2286 bio_it.iter.bi_sector << SECTOR_SHIFT); 2287 } else if (type == OBJ_REQUEST_BVECS) { 2288 bvec_it = *(struct ceph_bvec_iter *)data_desc; 2289 } 2290 2291 while (resid) { 2292 struct ceph_osd_request *osd_req; 2293 u64 object_no = img_offset >> rbd_dev->header.obj_order; 2294 u64 offset = rbd_segment_offset(rbd_dev, img_offset); 2295 u64 length = rbd_segment_length(rbd_dev, img_offset, resid); 2296 2297 obj_request = rbd_obj_request_create(type); 2298 if (!obj_request) 2299 goto out_unwind; 2300 2301 obj_request->object_no = object_no; 2302 obj_request->offset = offset; 2303 obj_request->length = length; 2304 2305 /* 2306 * set obj_request->img_request before creating the 2307 * osd_request so that it gets the right snapc 2308 */ 2309 rbd_img_obj_request_add(img_request, obj_request); 2310 2311 if (type == OBJ_REQUEST_BIO) { 2312 obj_request->bio_pos = bio_it; 2313 ceph_bio_iter_advance(&bio_it, length); 2314 } else if (type == OBJ_REQUEST_BVECS) { 2315 obj_request->bvec_pos = bvec_it; 2316 ceph_bvec_iter_shorten(&obj_request->bvec_pos, length); 2317 ceph_bvec_iter_advance(&bvec_it, length); 2318 } 2319 2320 osd_req = rbd_osd_req_create(rbd_dev, op_type, 2321 (op_type == OBJ_OP_WRITE) ? 2 : 1, 2322 obj_request); 2323 if (!osd_req) 2324 goto out_unwind; 2325 2326 obj_request->osd_req = osd_req; 2327 obj_request->callback = rbd_img_obj_callback; 2328 obj_request->img_offset = img_offset; 2329 2330 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0); 2331 2332 img_offset += length; 2333 resid -= length; 2334 } 2335 2336 return 0; 2337 2338 out_unwind: 2339 for_each_obj_request_safe(img_request, obj_request, next_obj_request) 2340 rbd_img_obj_request_del(img_request, obj_request); 2341 2342 return -ENOMEM; 2343 } 2344 2345 static void 2346 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request) 2347 { 2348 struct rbd_img_request *img_request; 2349 struct rbd_device *rbd_dev; 2350 2351 dout("%s: obj %p\n", __func__, obj_request); 2352 2353 rbd_assert(obj_request->type == OBJ_REQUEST_BIO || 2354 obj_request->type == OBJ_REQUEST_NODATA); 2355 rbd_assert(obj_request_img_data_test(obj_request)); 2356 img_request = obj_request->img_request; 2357 rbd_assert(img_request); 2358 2359 rbd_dev = img_request->rbd_dev; 2360 rbd_assert(rbd_dev); 2361 2362 /* 2363 * We want the transfer count to reflect the size of the 2364 * original write request. There is no such thing as a 2365 * successful short write, so if the request was successful 2366 * we can just set it to the originally-requested length. 
2367 */
2368 if (!obj_request->result)
2369 obj_request->xferred = obj_request->length;
2370
2371 obj_request_done_set(obj_request);
2372 }
2373
2374 static void
2375 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2376 {
2377 struct rbd_obj_request *orig_request;
2378 struct ceph_osd_request *osd_req;
2379 struct rbd_device *rbd_dev;
2380 enum obj_operation_type op_type;
2381 int img_result;
2382 u64 parent_length;
2383
2384 rbd_assert(img_request_child_test(img_request));
2385
2386 /* First get what we need from the image request */
2387
2388 orig_request = img_request->obj_request;
2389 rbd_assert(orig_request != NULL);
2390 rbd_assert(obj_request_type_valid(orig_request->type));
2391 img_result = img_request->result;
2392 parent_length = img_request->length;
2393 rbd_assert(img_result || parent_length == img_request->xferred);
2394 rbd_img_request_put(img_request);
2395
2396 rbd_assert(orig_request->img_request);
2397 rbd_dev = orig_request->img_request->rbd_dev;
2398 rbd_assert(rbd_dev);
2399
2400 /*
2401 * If the overlap has become 0 (most likely because the
2402 * image has been flattened) we need to free the pages
2403 * and re-submit the original write request.
2404 */
2405 if (!rbd_dev->parent_overlap) {
2406 rbd_obj_request_submit(orig_request);
2407 return;
2408 }
2409
2410 if (img_result)
2411 goto out_err;
2412
2413 /*
2414 * The original osd request is of no use to us any more.
2415 * We need a new one that can hold the three ops in a copyup
2416 * request. Allocate the new copyup osd request for the
2417 * original request, and release the old one.
2418 */
2419 img_result = -ENOMEM;
2420 osd_req = rbd_osd_req_create_copyup(orig_request);
2421 if (!osd_req)
2422 goto out_err;
2423 rbd_osd_req_destroy(orig_request->osd_req);
2424 orig_request->osd_req = osd_req;
2425
2426 /* Initialize the copyup op */
2427
2428 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2429 osd_req_op_cls_request_data_bvecs(osd_req, 0, orig_request->copyup_bvecs,
2430 parent_length);
2431
2432 /* Add the other op(s) */
2433
2434 op_type = rbd_img_request_op_type(orig_request->img_request);
2435 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2436
2437 /* All set, send it off. */
2438
2439 rbd_obj_request_submit(orig_request);
2440 return;
2441
2442 out_err:
2443 rbd_obj_request_error(orig_request, img_result);
2444 }
2445
2446 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap);
2447
2448 /*
2449 * Read from the parent image the range of data that covers the
2450 * entire target of the given object request. This is used for
2451 * satisfying a layered image write request when the target of an
2452 * object request from the image request does not exist.
2453 *
2454 * A bvec array big enough to hold the returned data is allocated
2455 * and supplied to rbd_img_request_fill() as the "data descriptor."
2456 * When the read completes, these bvecs provide the payload for the
2457 * copyup operation on the original object request.
2458 *
2459 * If an error occurs, it is recorded as the result of the original
2460 * object request in rbd_img_obj_exists_callback().
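 * The read itself is issued as a child image request against
 * rbd_dev->parent; see rbd_parent_request_create().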
2461 */
2462 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2463 {
2464 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
2465 struct rbd_img_request *parent_request = NULL;
2466 struct ceph_bvec_iter bvec_it = { 0 };
2467 u64 img_offset;
2468 u64 length;
2469 int result;
2470
2471 rbd_assert(rbd_dev->parent != NULL);
2472
2473 /*
2474 * Determine the byte range covered by the object in the
2475 * child image to which the original request was to be sent.
2476 */
2477 img_offset = obj_request->img_offset - obj_request->offset;
2478 length = rbd_obj_bytes(&rbd_dev->header);
2479
2480 /*
2481 * There is no defined parent data beyond the parent
2482 * overlap, so limit what we read at that boundary if
2483 * necessary.
2484 */
2485 if (img_offset + length > rbd_dev->parent_overlap) {
2486 rbd_assert(img_offset < rbd_dev->parent_overlap);
2487 length = rbd_dev->parent_overlap - img_offset;
2488 }
2489
2490 /*
2491 * Allocate a bvec array big enough to receive the data read
2492 * from the parent.
2493 */
2494 result = setup_copyup_bvecs(obj_request, length);
2495 if (result)
2496 goto out_err;
2497
2498 result = -ENOMEM;
2499 parent_request = rbd_parent_request_create(obj_request,
2500 img_offset, length);
2501 if (!parent_request)
2502 goto out_err;
2503
2504 bvec_it.bvecs = obj_request->copyup_bvecs;
2505 bvec_it.iter.bi_size = length;
2506 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_BVECS,
2507 &bvec_it);
2508 if (result)
2509 goto out_err;
2510
2511 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2512
2513 result = rbd_img_request_submit(parent_request);
2514 if (!result)
2515 return 0;
2516
2517 out_err:
2518 if (parent_request)
2519 rbd_img_request_put(parent_request);
2520 return result;
2521 }
2522
2523 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2524 {
2525 struct rbd_obj_request *orig_request;
2526 struct rbd_device *rbd_dev;
2527 int result;
2528
2529 rbd_assert(!obj_request_img_data_test(obj_request));
2530
2531 /*
2532 * All we need from the object request is the original
2533 * request and the result of the STAT op. Grab those, then
2534 * we're done with the request.
2535 */
2536 orig_request = obj_request->obj_request;
2537 obj_request->obj_request = NULL;
2538 rbd_obj_request_put(orig_request);
2539 rbd_assert(orig_request);
2540 rbd_assert(orig_request->img_request);
2541
2542 result = obj_request->result;
2543 obj_request->result = 0;
2544
2545 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2546 obj_request, orig_request, result,
2547 obj_request->xferred, obj_request->length);
2548 rbd_obj_request_put(obj_request);
2549
2550 /*
2551 * If the overlap has become 0 (most likely because the
2552 * image has been flattened) we need to re-submit the
2553 * original request.
2554 */
2555 rbd_dev = orig_request->img_request->rbd_dev;
2556 if (!rbd_dev->parent_overlap) {
2557 rbd_obj_request_submit(orig_request);
2558 return;
2559 }
2560
2561 /*
2562 * Our only purpose here is to determine whether the object
2563 * exists, and we don't want to treat the non-existence as
2564 * an error. If something else comes back, transfer the
2565 * error to the original request and complete it now.
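 * (A STAT result of 0 means the target object exists; -ENOENT means
 * it does not.)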
2566 */ 2567 if (!result) { 2568 obj_request_existence_set(orig_request, true); 2569 } else if (result == -ENOENT) { 2570 obj_request_existence_set(orig_request, false); 2571 } else { 2572 goto fail_orig_request; 2573 } 2574 2575 /* 2576 * Resubmit the original request now that we have recorded 2577 * whether the target object exists. 2578 */ 2579 result = rbd_img_obj_request_submit(orig_request); 2580 if (result) 2581 goto fail_orig_request; 2582 2583 return; 2584 2585 fail_orig_request: 2586 rbd_obj_request_error(orig_request, result); 2587 } 2588 2589 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) 2590 { 2591 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev; 2592 struct rbd_obj_request *stat_request; 2593 struct page **pages; 2594 int ret; 2595 2596 stat_request = rbd_obj_request_create(OBJ_REQUEST_NODATA); 2597 if (!stat_request) 2598 return -ENOMEM; 2599 2600 stat_request->object_no = obj_request->object_no; 2601 2602 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1, 2603 stat_request); 2604 if (!stat_request->osd_req) { 2605 ret = -ENOMEM; 2606 goto fail_stat_request; 2607 } 2608 2609 /* 2610 * The response data for a STAT call consists of: 2611 * le64 length; 2612 * struct { 2613 * le32 tv_sec; 2614 * le32 tv_nsec; 2615 * } mtime; 2616 */ 2617 pages = ceph_alloc_page_vector(1, GFP_NOIO); 2618 if (IS_ERR(pages)) { 2619 ret = PTR_ERR(pages); 2620 goto fail_stat_request; 2621 } 2622 2623 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0); 2624 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, 2625 8 + sizeof(struct ceph_timespec), 2626 0, false, true); 2627 2628 rbd_obj_request_get(obj_request); 2629 stat_request->obj_request = obj_request; 2630 stat_request->callback = rbd_img_obj_exists_callback; 2631 2632 rbd_obj_request_submit(stat_request); 2633 return 0; 2634 2635 fail_stat_request: 2636 rbd_obj_request_put(stat_request); 2637 return ret; 2638 } 2639 2640 static bool img_obj_request_simple(struct rbd_obj_request *obj_request) 2641 { 2642 struct rbd_img_request *img_request = obj_request->img_request; 2643 struct rbd_device *rbd_dev = img_request->rbd_dev; 2644 2645 /* Reads */ 2646 if (!img_request_write_test(img_request) && 2647 !img_request_discard_test(img_request)) 2648 return true; 2649 2650 /* Non-layered writes */ 2651 if (!img_request_layered_test(img_request)) 2652 return true; 2653 2654 /* 2655 * Layered writes outside of the parent overlap range don't 2656 * share any data with the parent. 2657 */ 2658 if (!obj_request_overlaps_parent(obj_request)) 2659 return true; 2660 2661 /* 2662 * Entire-object layered writes - we will overwrite whatever 2663 * parent data there is anyway. 2664 */ 2665 if (!obj_request->offset && 2666 obj_request->length == rbd_obj_bytes(&rbd_dev->header)) 2667 return true; 2668 2669 /* 2670 * If the object is known to already exist, its parent data has 2671 * already been copied. 2672 */ 2673 if (obj_request_known_test(obj_request) && 2674 obj_request_exists_test(obj_request)) 2675 return true; 2676 2677 return false; 2678 } 2679 2680 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request) 2681 { 2682 rbd_assert(obj_request_img_data_test(obj_request)); 2683 rbd_assert(obj_request_type_valid(obj_request->type)); 2684 rbd_assert(obj_request->img_request); 2685 2686 if (img_obj_request_simple(obj_request)) { 2687 rbd_obj_request_submit(obj_request); 2688 return 0; 2689 } 2690 2691 /* 2692 * It's a layered write. 
The target object might exist but 2693 * we may not know that yet. If we know it doesn't exist, 2694 * start by reading the data for the full target object from 2695 * the parent so we can use it for a copyup to the target. 2696 */ 2697 if (obj_request_known_test(obj_request)) 2698 return rbd_img_obj_parent_read_full(obj_request); 2699 2700 /* We don't know whether the target exists. Go find out. */ 2701 2702 return rbd_img_obj_exists_submit(obj_request); 2703 } 2704 2705 static int rbd_img_request_submit(struct rbd_img_request *img_request) 2706 { 2707 struct rbd_obj_request *obj_request; 2708 struct rbd_obj_request *next_obj_request; 2709 int ret = 0; 2710 2711 dout("%s: img %p\n", __func__, img_request); 2712 2713 rbd_img_request_get(img_request); 2714 for_each_obj_request_safe(img_request, obj_request, next_obj_request) { 2715 ret = rbd_img_obj_request_submit(obj_request); 2716 if (ret) 2717 goto out_put_ireq; 2718 } 2719 2720 out_put_ireq: 2721 rbd_img_request_put(img_request); 2722 return ret; 2723 } 2724 2725 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap) 2726 { 2727 u32 i; 2728 2729 rbd_assert(!obj_req->copyup_bvecs); 2730 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap); 2731 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count, 2732 sizeof(*obj_req->copyup_bvecs), 2733 GFP_NOIO); 2734 if (!obj_req->copyup_bvecs) 2735 return -ENOMEM; 2736 2737 for (i = 0; i < obj_req->copyup_bvec_count; i++) { 2738 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE); 2739 2740 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO); 2741 if (!obj_req->copyup_bvecs[i].bv_page) 2742 return -ENOMEM; 2743 2744 obj_req->copyup_bvecs[i].bv_offset = 0; 2745 obj_req->copyup_bvecs[i].bv_len = len; 2746 obj_overlap -= len; 2747 } 2748 2749 rbd_assert(!obj_overlap); 2750 return 0; 2751 } 2752 2753 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) 2754 { 2755 struct rbd_obj_request *obj_request; 2756 struct rbd_device *rbd_dev; 2757 u64 obj_end; 2758 u64 img_xferred; 2759 int img_result; 2760 2761 rbd_assert(img_request_child_test(img_request)); 2762 2763 /* First get what we need from the image request and release it */ 2764 2765 obj_request = img_request->obj_request; 2766 img_xferred = img_request->xferred; 2767 img_result = img_request->result; 2768 rbd_img_request_put(img_request); 2769 2770 /* 2771 * If the overlap has become 0 (most likely because the 2772 * image has been flattened) we need to re-submit the 2773 * original request. 2774 */ 2775 rbd_assert(obj_request); 2776 rbd_assert(obj_request->img_request); 2777 rbd_dev = obj_request->img_request->rbd_dev; 2778 if (!rbd_dev->parent_overlap) { 2779 rbd_obj_request_submit(obj_request); 2780 return; 2781 } 2782 2783 obj_request->result = img_result; 2784 if (obj_request->result) 2785 goto out; 2786 2787 /* 2788 * We need to zero anything beyond the parent overlap 2789 * boundary. Since rbd_img_obj_request_read_callback() 2790 * will zero anything beyond the end of a short read, an 2791 * easy way to do this is to pretend the data from the 2792 * parent came up short--ending at the overlap boundary. 
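 * For example, with a 4 MiB parent overlap, a 1 MiB read at image
 * offset 3.5 MiB is reported as having transferred only 0.5 MiB and
 * the read callback zero-fills the rest.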
2793 */ 2794 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); 2795 obj_end = obj_request->img_offset + obj_request->length; 2796 if (obj_end > rbd_dev->parent_overlap) { 2797 u64 xferred = 0; 2798 2799 if (obj_request->img_offset < rbd_dev->parent_overlap) 2800 xferred = rbd_dev->parent_overlap - 2801 obj_request->img_offset; 2802 2803 obj_request->xferred = min(img_xferred, xferred); 2804 } else { 2805 obj_request->xferred = img_xferred; 2806 } 2807 out: 2808 rbd_img_obj_request_read_callback(obj_request); 2809 rbd_obj_request_complete(obj_request); 2810 } 2811 2812 static void rbd_img_parent_read(struct rbd_obj_request *obj_request) 2813 { 2814 struct rbd_img_request *img_request; 2815 int result; 2816 2817 rbd_assert(obj_request_img_data_test(obj_request)); 2818 rbd_assert(obj_request->img_request != NULL); 2819 rbd_assert(obj_request->result == (s32) -ENOENT); 2820 rbd_assert(obj_request_type_valid(obj_request->type)); 2821 2822 /* rbd_read_finish(obj_request, obj_request->length); */ 2823 img_request = rbd_parent_request_create(obj_request, 2824 obj_request->img_offset, 2825 obj_request->length); 2826 result = -ENOMEM; 2827 if (!img_request) 2828 goto out_err; 2829 2830 if (obj_request->type == OBJ_REQUEST_BIO) 2831 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, 2832 &obj_request->bio_pos); 2833 else 2834 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BVECS, 2835 &obj_request->bvec_pos); 2836 if (result) 2837 goto out_err; 2838 2839 img_request->callback = rbd_img_parent_read_callback; 2840 result = rbd_img_request_submit(img_request); 2841 if (result) 2842 goto out_err; 2843 2844 return; 2845 out_err: 2846 if (img_request) 2847 rbd_img_request_put(img_request); 2848 obj_request->result = result; 2849 obj_request->xferred = 0; 2850 obj_request_done_set(obj_request); 2851 } 2852 2853 static const struct rbd_client_id rbd_empty_cid; 2854 2855 static bool rbd_cid_equal(const struct rbd_client_id *lhs, 2856 const struct rbd_client_id *rhs) 2857 { 2858 return lhs->gid == rhs->gid && lhs->handle == rhs->handle; 2859 } 2860 2861 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev) 2862 { 2863 struct rbd_client_id cid; 2864 2865 mutex_lock(&rbd_dev->watch_mutex); 2866 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client); 2867 cid.handle = rbd_dev->watch_cookie; 2868 mutex_unlock(&rbd_dev->watch_mutex); 2869 return cid; 2870 } 2871 2872 /* 2873 * lock_rwsem must be held for write 2874 */ 2875 static void rbd_set_owner_cid(struct rbd_device *rbd_dev, 2876 const struct rbd_client_id *cid) 2877 { 2878 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev, 2879 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle, 2880 cid->gid, cid->handle); 2881 rbd_dev->owner_cid = *cid; /* struct */ 2882 } 2883 2884 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf) 2885 { 2886 mutex_lock(&rbd_dev->watch_mutex); 2887 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie); 2888 mutex_unlock(&rbd_dev->watch_mutex); 2889 } 2890 2891 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie) 2892 { 2893 struct rbd_client_id cid = rbd_get_cid(rbd_dev); 2894 2895 strcpy(rbd_dev->lock_cookie, cookie); 2896 rbd_set_owner_cid(rbd_dev, &cid); 2897 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); 2898 } 2899 2900 /* 2901 * lock_rwsem must be held for write 2902 */ 2903 static int rbd_lock(struct rbd_device *rbd_dev) 2904 { 2905 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2906 
char cookie[32]; 2907 int ret; 2908 2909 WARN_ON(__rbd_is_lock_owner(rbd_dev) || 2910 rbd_dev->lock_cookie[0] != '\0'); 2911 2912 format_lock_cookie(rbd_dev, cookie); 2913 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 2914 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, 2915 RBD_LOCK_TAG, "", 0); 2916 if (ret) 2917 return ret; 2918 2919 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; 2920 __rbd_lock(rbd_dev, cookie); 2921 return 0; 2922 } 2923 2924 /* 2925 * lock_rwsem must be held for write 2926 */ 2927 static void rbd_unlock(struct rbd_device *rbd_dev) 2928 { 2929 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2930 int ret; 2931 2932 WARN_ON(!__rbd_is_lock_owner(rbd_dev) || 2933 rbd_dev->lock_cookie[0] == '\0'); 2934 2935 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 2936 RBD_LOCK_NAME, rbd_dev->lock_cookie); 2937 if (ret && ret != -ENOENT) 2938 rbd_warn(rbd_dev, "failed to unlock: %d", ret); 2939 2940 /* treat errors as the image is unlocked */ 2941 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED; 2942 rbd_dev->lock_cookie[0] = '\0'; 2943 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 2944 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work); 2945 } 2946 2947 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev, 2948 enum rbd_notify_op notify_op, 2949 struct page ***preply_pages, 2950 size_t *preply_len) 2951 { 2952 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2953 struct rbd_client_id cid = rbd_get_cid(rbd_dev); 2954 int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN; 2955 char buf[buf_size]; 2956 void *p = buf; 2957 2958 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op); 2959 2960 /* encode *LockPayload NotifyMessage (op + ClientId) */ 2961 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN); 2962 ceph_encode_32(&p, notify_op); 2963 ceph_encode_64(&p, cid.gid); 2964 ceph_encode_64(&p, cid.handle); 2965 2966 return ceph_osdc_notify(osdc, &rbd_dev->header_oid, 2967 &rbd_dev->header_oloc, buf, buf_size, 2968 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len); 2969 } 2970 2971 static void rbd_notify_op_lock(struct rbd_device *rbd_dev, 2972 enum rbd_notify_op notify_op) 2973 { 2974 struct page **reply_pages; 2975 size_t reply_len; 2976 2977 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len); 2978 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len)); 2979 } 2980 2981 static void rbd_notify_acquired_lock(struct work_struct *work) 2982 { 2983 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 2984 acquired_lock_work); 2985 2986 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK); 2987 } 2988 2989 static void rbd_notify_released_lock(struct work_struct *work) 2990 { 2991 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 2992 released_lock_work); 2993 2994 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK); 2995 } 2996 2997 static int rbd_request_lock(struct rbd_device *rbd_dev) 2998 { 2999 struct page **reply_pages; 3000 size_t reply_len; 3001 bool lock_owner_responded = false; 3002 int ret; 3003 3004 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3005 3006 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK, 3007 &reply_pages, &reply_len); 3008 if (ret && ret != -ETIMEDOUT) { 3009 rbd_warn(rbd_dev, "failed to request lock: %d", ret); 3010 goto out; 3011 } 3012 3013 if (reply_len > 0 && reply_len <= PAGE_SIZE) { 3014 void *p = page_address(reply_pages[0]); 3015 void *const end = p + 
reply_len; 3016 u32 n; 3017 3018 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */ 3019 while (n--) { 3020 u8 struct_v; 3021 u32 len; 3022 3023 ceph_decode_need(&p, end, 8 + 8, e_inval); 3024 p += 8 + 8; /* skip gid and cookie */ 3025 3026 ceph_decode_32_safe(&p, end, len, e_inval); 3027 if (!len) 3028 continue; 3029 3030 if (lock_owner_responded) { 3031 rbd_warn(rbd_dev, 3032 "duplicate lock owners detected"); 3033 ret = -EIO; 3034 goto out; 3035 } 3036 3037 lock_owner_responded = true; 3038 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage", 3039 &struct_v, &len); 3040 if (ret) { 3041 rbd_warn(rbd_dev, 3042 "failed to decode ResponseMessage: %d", 3043 ret); 3044 goto e_inval; 3045 } 3046 3047 ret = ceph_decode_32(&p); 3048 } 3049 } 3050 3051 if (!lock_owner_responded) { 3052 rbd_warn(rbd_dev, "no lock owners detected"); 3053 ret = -ETIMEDOUT; 3054 } 3055 3056 out: 3057 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len)); 3058 return ret; 3059 3060 e_inval: 3061 ret = -EINVAL; 3062 goto out; 3063 } 3064 3065 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all) 3066 { 3067 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all); 3068 3069 cancel_delayed_work(&rbd_dev->lock_dwork); 3070 if (wake_all) 3071 wake_up_all(&rbd_dev->lock_waitq); 3072 else 3073 wake_up(&rbd_dev->lock_waitq); 3074 } 3075 3076 static int get_lock_owner_info(struct rbd_device *rbd_dev, 3077 struct ceph_locker **lockers, u32 *num_lockers) 3078 { 3079 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3080 u8 lock_type; 3081 char *lock_tag; 3082 int ret; 3083 3084 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3085 3086 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid, 3087 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3088 &lock_type, &lock_tag, lockers, num_lockers); 3089 if (ret) 3090 return ret; 3091 3092 if (*num_lockers == 0) { 3093 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev); 3094 goto out; 3095 } 3096 3097 if (strcmp(lock_tag, RBD_LOCK_TAG)) { 3098 rbd_warn(rbd_dev, "locked by external mechanism, tag %s", 3099 lock_tag); 3100 ret = -EBUSY; 3101 goto out; 3102 } 3103 3104 if (lock_type == CEPH_CLS_LOCK_SHARED) { 3105 rbd_warn(rbd_dev, "shared lock type detected"); 3106 ret = -EBUSY; 3107 goto out; 3108 } 3109 3110 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, 3111 strlen(RBD_LOCK_COOKIE_PREFIX))) { 3112 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s", 3113 (*lockers)[0].id.cookie); 3114 ret = -EBUSY; 3115 goto out; 3116 } 3117 3118 out: 3119 kfree(lock_tag); 3120 return ret; 3121 } 3122 3123 static int find_watcher(struct rbd_device *rbd_dev, 3124 const struct ceph_locker *locker) 3125 { 3126 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3127 struct ceph_watch_item *watchers; 3128 u32 num_watchers; 3129 u64 cookie; 3130 int i; 3131 int ret; 3132 3133 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, 3134 &rbd_dev->header_oloc, &watchers, 3135 &num_watchers); 3136 if (ret) 3137 return ret; 3138 3139 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); 3140 for (i = 0; i < num_watchers; i++) { 3141 if (!memcmp(&watchers[i].addr, &locker->info.addr, 3142 sizeof(locker->info.addr)) && 3143 watchers[i].cookie == cookie) { 3144 struct rbd_client_id cid = { 3145 .gid = le64_to_cpu(watchers[i].name.num), 3146 .handle = cookie, 3147 }; 3148 3149 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__, 3150 rbd_dev, cid.gid, cid.handle); 3151 rbd_set_owner_cid(rbd_dev, &cid); 
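/* an active watch matching the lock cookie means the owner is alive */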
3152 ret = 1; 3153 goto out; 3154 } 3155 } 3156 3157 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev); 3158 ret = 0; 3159 out: 3160 kfree(watchers); 3161 return ret; 3162 } 3163 3164 /* 3165 * lock_rwsem must be held for write 3166 */ 3167 static int rbd_try_lock(struct rbd_device *rbd_dev) 3168 { 3169 struct ceph_client *client = rbd_dev->rbd_client->client; 3170 struct ceph_locker *lockers; 3171 u32 num_lockers; 3172 int ret; 3173 3174 for (;;) { 3175 ret = rbd_lock(rbd_dev); 3176 if (ret != -EBUSY) 3177 return ret; 3178 3179 /* determine if the current lock holder is still alive */ 3180 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers); 3181 if (ret) 3182 return ret; 3183 3184 if (num_lockers == 0) 3185 goto again; 3186 3187 ret = find_watcher(rbd_dev, lockers); 3188 if (ret) { 3189 if (ret > 0) 3190 ret = 0; /* have to request lock */ 3191 goto out; 3192 } 3193 3194 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock", 3195 ENTITY_NAME(lockers[0].id.name)); 3196 3197 ret = ceph_monc_blacklist_add(&client->monc, 3198 &lockers[0].info.addr); 3199 if (ret) { 3200 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d", 3201 ENTITY_NAME(lockers[0].id.name), ret); 3202 goto out; 3203 } 3204 3205 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid, 3206 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3207 lockers[0].id.cookie, 3208 &lockers[0].id.name); 3209 if (ret && ret != -ENOENT) 3210 goto out; 3211 3212 again: 3213 ceph_free_lockers(lockers, num_lockers); 3214 } 3215 3216 out: 3217 ceph_free_lockers(lockers, num_lockers); 3218 return ret; 3219 } 3220 3221 /* 3222 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED 3223 */ 3224 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev, 3225 int *pret) 3226 { 3227 enum rbd_lock_state lock_state; 3228 3229 down_read(&rbd_dev->lock_rwsem); 3230 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev, 3231 rbd_dev->lock_state); 3232 if (__rbd_is_lock_owner(rbd_dev)) { 3233 lock_state = rbd_dev->lock_state; 3234 up_read(&rbd_dev->lock_rwsem); 3235 return lock_state; 3236 } 3237 3238 up_read(&rbd_dev->lock_rwsem); 3239 down_write(&rbd_dev->lock_rwsem); 3240 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev, 3241 rbd_dev->lock_state); 3242 if (!__rbd_is_lock_owner(rbd_dev)) { 3243 *pret = rbd_try_lock(rbd_dev); 3244 if (*pret) 3245 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret); 3246 } 3247 3248 lock_state = rbd_dev->lock_state; 3249 up_write(&rbd_dev->lock_rwsem); 3250 return lock_state; 3251 } 3252 3253 static void rbd_acquire_lock(struct work_struct *work) 3254 { 3255 struct rbd_device *rbd_dev = container_of(to_delayed_work(work), 3256 struct rbd_device, lock_dwork); 3257 enum rbd_lock_state lock_state; 3258 int ret = 0; 3259 3260 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3261 again: 3262 lock_state = rbd_try_acquire_lock(rbd_dev, &ret); 3263 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) { 3264 if (lock_state == RBD_LOCK_STATE_LOCKED) 3265 wake_requests(rbd_dev, true); 3266 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__, 3267 rbd_dev, lock_state, ret); 3268 return; 3269 } 3270 3271 ret = rbd_request_lock(rbd_dev); 3272 if (ret == -ETIMEDOUT) { 3273 goto again; /* treat this as a dead client */ 3274 } else if (ret == -EROFS) { 3275 rbd_warn(rbd_dev, "peer will not release lock"); 3276 /* 3277 * If this is rbd_add_acquire_lock(), we want to fail 3278 * immediately -- reuse BLACKLISTED flag. Otherwise we 3279 * want to block. 
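 * (While "rbd map" is still in progress the disk has not been
 * activated yet, which is presumably what the GENHD_FL_UP check
 * below relies on.)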
3280 */ 3281 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) { 3282 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags); 3283 /* wake "rbd map --exclusive" process */ 3284 wake_requests(rbd_dev, false); 3285 } 3286 } else if (ret < 0) { 3287 rbd_warn(rbd_dev, "error requesting lock: %d", ret); 3288 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 3289 RBD_RETRY_DELAY); 3290 } else { 3291 /* 3292 * lock owner acked, but resend if we don't see them 3293 * release the lock 3294 */ 3295 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__, 3296 rbd_dev); 3297 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 3298 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC)); 3299 } 3300 } 3301 3302 /* 3303 * lock_rwsem must be held for write 3304 */ 3305 static bool rbd_release_lock(struct rbd_device *rbd_dev) 3306 { 3307 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev, 3308 rbd_dev->lock_state); 3309 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED) 3310 return false; 3311 3312 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING; 3313 downgrade_write(&rbd_dev->lock_rwsem); 3314 /* 3315 * Ensure that all in-flight IO is flushed. 3316 * 3317 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which 3318 * may be shared with other devices. 3319 */ 3320 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc); 3321 up_read(&rbd_dev->lock_rwsem); 3322 3323 down_write(&rbd_dev->lock_rwsem); 3324 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev, 3325 rbd_dev->lock_state); 3326 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) 3327 return false; 3328 3329 rbd_unlock(rbd_dev); 3330 /* 3331 * Give others a chance to grab the lock - we would re-acquire 3332 * almost immediately if we got new IO during ceph_osdc_sync() 3333 * otherwise. We need to ack our own notifications, so this 3334 * lock_dwork will be requeued from rbd_wait_state_locked() 3335 * after wake_requests() in rbd_handle_released_lock(). 
3336 */ 3337 cancel_delayed_work(&rbd_dev->lock_dwork); 3338 return true; 3339 } 3340 3341 static void rbd_release_lock_work(struct work_struct *work) 3342 { 3343 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 3344 unlock_work); 3345 3346 down_write(&rbd_dev->lock_rwsem); 3347 rbd_release_lock(rbd_dev); 3348 up_write(&rbd_dev->lock_rwsem); 3349 } 3350 3351 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v, 3352 void **p) 3353 { 3354 struct rbd_client_id cid = { 0 }; 3355 3356 if (struct_v >= 2) { 3357 cid.gid = ceph_decode_64(p); 3358 cid.handle = ceph_decode_64(p); 3359 } 3360 3361 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 3362 cid.handle); 3363 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { 3364 down_write(&rbd_dev->lock_rwsem); 3365 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { 3366 /* 3367 * we already know that the remote client is 3368 * the owner 3369 */ 3370 up_write(&rbd_dev->lock_rwsem); 3371 return; 3372 } 3373 3374 rbd_set_owner_cid(rbd_dev, &cid); 3375 downgrade_write(&rbd_dev->lock_rwsem); 3376 } else { 3377 down_read(&rbd_dev->lock_rwsem); 3378 } 3379 3380 if (!__rbd_is_lock_owner(rbd_dev)) 3381 wake_requests(rbd_dev, false); 3382 up_read(&rbd_dev->lock_rwsem); 3383 } 3384 3385 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v, 3386 void **p) 3387 { 3388 struct rbd_client_id cid = { 0 }; 3389 3390 if (struct_v >= 2) { 3391 cid.gid = ceph_decode_64(p); 3392 cid.handle = ceph_decode_64(p); 3393 } 3394 3395 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 3396 cid.handle); 3397 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { 3398 down_write(&rbd_dev->lock_rwsem); 3399 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { 3400 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n", 3401 __func__, rbd_dev, cid.gid, cid.handle, 3402 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle); 3403 up_write(&rbd_dev->lock_rwsem); 3404 return; 3405 } 3406 3407 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 3408 downgrade_write(&rbd_dev->lock_rwsem); 3409 } else { 3410 down_read(&rbd_dev->lock_rwsem); 3411 } 3412 3413 if (!__rbd_is_lock_owner(rbd_dev)) 3414 wake_requests(rbd_dev, false); 3415 up_read(&rbd_dev->lock_rwsem); 3416 } 3417 3418 /* 3419 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no 3420 * ResponseMessage is needed. 
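 * A 0 result tells the requester we will release the lock shortly;
 * -EROFS tells it we refuse because the image was mapped with the
 * exclusive option.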
3421 */ 3422 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v, 3423 void **p) 3424 { 3425 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev); 3426 struct rbd_client_id cid = { 0 }; 3427 int result = 1; 3428 3429 if (struct_v >= 2) { 3430 cid.gid = ceph_decode_64(p); 3431 cid.handle = ceph_decode_64(p); 3432 } 3433 3434 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 3435 cid.handle); 3436 if (rbd_cid_equal(&cid, &my_cid)) 3437 return result; 3438 3439 down_read(&rbd_dev->lock_rwsem); 3440 if (__rbd_is_lock_owner(rbd_dev)) { 3441 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED && 3442 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) 3443 goto out_unlock; 3444 3445 /* 3446 * encode ResponseMessage(0) so the peer can detect 3447 * a missing owner 3448 */ 3449 result = 0; 3450 3451 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) { 3452 if (!rbd_dev->opts->exclusive) { 3453 dout("%s rbd_dev %p queueing unlock_work\n", 3454 __func__, rbd_dev); 3455 queue_work(rbd_dev->task_wq, 3456 &rbd_dev->unlock_work); 3457 } else { 3458 /* refuse to release the lock */ 3459 result = -EROFS; 3460 } 3461 } 3462 } 3463 3464 out_unlock: 3465 up_read(&rbd_dev->lock_rwsem); 3466 return result; 3467 } 3468 3469 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev, 3470 u64 notify_id, u64 cookie, s32 *result) 3471 { 3472 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3473 int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN; 3474 char buf[buf_size]; 3475 int ret; 3476 3477 if (result) { 3478 void *p = buf; 3479 3480 /* encode ResponseMessage */ 3481 ceph_start_encoding(&p, 1, 1, 3482 buf_size - CEPH_ENCODING_START_BLK_LEN); 3483 ceph_encode_32(&p, *result); 3484 } else { 3485 buf_size = 0; 3486 } 3487 3488 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid, 3489 &rbd_dev->header_oloc, notify_id, cookie, 3490 buf, buf_size); 3491 if (ret) 3492 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret); 3493 } 3494 3495 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id, 3496 u64 cookie) 3497 { 3498 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3499 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL); 3500 } 3501 3502 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev, 3503 u64 notify_id, u64 cookie, s32 result) 3504 { 3505 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result); 3506 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result); 3507 } 3508 3509 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie, 3510 u64 notifier_id, void *data, size_t data_len) 3511 { 3512 struct rbd_device *rbd_dev = arg; 3513 void *p = data; 3514 void *const end = p + data_len; 3515 u8 struct_v = 0; 3516 u32 len; 3517 u32 notify_op; 3518 int ret; 3519 3520 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n", 3521 __func__, rbd_dev, cookie, notify_id, data_len); 3522 if (data_len) { 3523 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage", 3524 &struct_v, &len); 3525 if (ret) { 3526 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d", 3527 ret); 3528 return; 3529 } 3530 3531 notify_op = ceph_decode_32(&p); 3532 } else { 3533 /* legacy notification for header updates */ 3534 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE; 3535 len = 0; 3536 } 3537 3538 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op); 3539 switch (notify_op) { 3540 case RBD_NOTIFY_OP_ACQUIRED_LOCK: 3541 rbd_handle_acquired_lock(rbd_dev, struct_v, &p); 3542 rbd_acknowledge_notify(rbd_dev, notify_id, 
cookie); 3543 break; 3544 case RBD_NOTIFY_OP_RELEASED_LOCK: 3545 rbd_handle_released_lock(rbd_dev, struct_v, &p); 3546 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 3547 break; 3548 case RBD_NOTIFY_OP_REQUEST_LOCK: 3549 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p); 3550 if (ret <= 0) 3551 rbd_acknowledge_notify_result(rbd_dev, notify_id, 3552 cookie, ret); 3553 else 3554 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 3555 break; 3556 case RBD_NOTIFY_OP_HEADER_UPDATE: 3557 ret = rbd_dev_refresh(rbd_dev); 3558 if (ret) 3559 rbd_warn(rbd_dev, "refresh failed: %d", ret); 3560 3561 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 3562 break; 3563 default: 3564 if (rbd_is_lock_owner(rbd_dev)) 3565 rbd_acknowledge_notify_result(rbd_dev, notify_id, 3566 cookie, -EOPNOTSUPP); 3567 else 3568 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 3569 break; 3570 } 3571 } 3572 3573 static void __rbd_unregister_watch(struct rbd_device *rbd_dev); 3574 3575 static void rbd_watch_errcb(void *arg, u64 cookie, int err) 3576 { 3577 struct rbd_device *rbd_dev = arg; 3578 3579 rbd_warn(rbd_dev, "encountered watch error: %d", err); 3580 3581 down_write(&rbd_dev->lock_rwsem); 3582 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 3583 up_write(&rbd_dev->lock_rwsem); 3584 3585 mutex_lock(&rbd_dev->watch_mutex); 3586 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) { 3587 __rbd_unregister_watch(rbd_dev); 3588 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR; 3589 3590 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0); 3591 } 3592 mutex_unlock(&rbd_dev->watch_mutex); 3593 } 3594 3595 /* 3596 * watch_mutex must be locked 3597 */ 3598 static int __rbd_register_watch(struct rbd_device *rbd_dev) 3599 { 3600 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3601 struct ceph_osd_linger_request *handle; 3602 3603 rbd_assert(!rbd_dev->watch_handle); 3604 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3605 3606 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid, 3607 &rbd_dev->header_oloc, rbd_watch_cb, 3608 rbd_watch_errcb, rbd_dev); 3609 if (IS_ERR(handle)) 3610 return PTR_ERR(handle); 3611 3612 rbd_dev->watch_handle = handle; 3613 return 0; 3614 } 3615 3616 /* 3617 * watch_mutex must be locked 3618 */ 3619 static void __rbd_unregister_watch(struct rbd_device *rbd_dev) 3620 { 3621 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3622 int ret; 3623 3624 rbd_assert(rbd_dev->watch_handle); 3625 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3626 3627 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle); 3628 if (ret) 3629 rbd_warn(rbd_dev, "failed to unwatch: %d", ret); 3630 3631 rbd_dev->watch_handle = NULL; 3632 } 3633 3634 static int rbd_register_watch(struct rbd_device *rbd_dev) 3635 { 3636 int ret; 3637 3638 mutex_lock(&rbd_dev->watch_mutex); 3639 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED); 3640 ret = __rbd_register_watch(rbd_dev); 3641 if (ret) 3642 goto out; 3643 3644 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; 3645 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; 3646 3647 out: 3648 mutex_unlock(&rbd_dev->watch_mutex); 3649 return ret; 3650 } 3651 3652 static void cancel_tasks_sync(struct rbd_device *rbd_dev) 3653 { 3654 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3655 3656 cancel_delayed_work_sync(&rbd_dev->watch_dwork); 3657 cancel_work_sync(&rbd_dev->acquired_lock_work); 3658 cancel_work_sync(&rbd_dev->released_lock_work); 3659 cancel_delayed_work_sync(&rbd_dev->lock_dwork); 3660 
cancel_work_sync(&rbd_dev->unlock_work);
3661 }
3662
3663 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3664 {
3665 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3666 cancel_tasks_sync(rbd_dev);
3667
3668 mutex_lock(&rbd_dev->watch_mutex);
3669 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3670 __rbd_unregister_watch(rbd_dev);
3671 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3672 mutex_unlock(&rbd_dev->watch_mutex);
3673
3674 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3675 }
3676
3677 /*
3678 * lock_rwsem must be held for write
3679 */
3680 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3681 {
3682 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3683 char cookie[32];
3684 int ret;
3685
3686 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3687
3688 format_lock_cookie(rbd_dev, cookie);
3689 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
3690 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3691 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
3692 RBD_LOCK_TAG, cookie);
3693 if (ret) {
3694 if (ret != -EOPNOTSUPP)
3695 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
3696 ret);
3697
3698 /*
3699 * Lock cookie cannot be updated on older OSDs, so do
3700 * a manual release and queue an acquire.
3701 */
3702 if (rbd_release_lock(rbd_dev))
3703 queue_delayed_work(rbd_dev->task_wq,
3704 &rbd_dev->lock_dwork, 0);
3705 } else {
3706 __rbd_lock(rbd_dev, cookie);
3707 }
3708 }
3709
3710 static void rbd_reregister_watch(struct work_struct *work)
3711 {
3712 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3713 struct rbd_device, watch_dwork);
3714 int ret;
3715
3716 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3717
3718 mutex_lock(&rbd_dev->watch_mutex);
3719 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
3720 mutex_unlock(&rbd_dev->watch_mutex);
3721 return;
3722 }
3723
3724 ret = __rbd_register_watch(rbd_dev);
3725 if (ret) {
3726 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3727 if (ret == -EBLACKLISTED || ret == -ENOENT) {
3728 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3729 wake_requests(rbd_dev, true);
3730 } else {
3731 queue_delayed_work(rbd_dev->task_wq,
3732 &rbd_dev->watch_dwork,
3733 RBD_RETRY_DELAY);
3734 }
3735 mutex_unlock(&rbd_dev->watch_mutex);
3736 return;
3737 }
3738
3739 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3740 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3741 mutex_unlock(&rbd_dev->watch_mutex);
3742
3743 down_write(&rbd_dev->lock_rwsem);
3744 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3745 rbd_reacquire_lock(rbd_dev);
3746 up_write(&rbd_dev->lock_rwsem);
3747
3748 ret = rbd_dev_refresh(rbd_dev);
3749 if (ret)
3750 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3751 }
3752
3753 /*
3754 * Synchronous osd object method call. Returns the number of bytes
3755 * returned in the inbound buffer, or a negative error code.
3756 */
3757 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3758 struct ceph_object_id *oid,
3759 struct ceph_object_locator *oloc,
3760 const char *method_name,
3761 const void *outbound,
3762 size_t outbound_size,
3763 void *inbound,
3764 size_t inbound_size)
3765 {
3766 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3767 struct page *req_page = NULL;
3768 struct page *reply_page;
3769 int ret;
3770
3771 /*
3772 * Method calls are ultimately read operations. The result
3773 * should be placed into the inbound buffer provided.
They 3774 * also supply outbound data--parameters for the object 3775 * method. Currently if this is present it will be a 3776 * snapshot id. 3777 */ 3778 if (outbound) { 3779 if (outbound_size > PAGE_SIZE) 3780 return -E2BIG; 3781 3782 req_page = alloc_page(GFP_KERNEL); 3783 if (!req_page) 3784 return -ENOMEM; 3785 3786 memcpy(page_address(req_page), outbound, outbound_size); 3787 } 3788 3789 reply_page = alloc_page(GFP_KERNEL); 3790 if (!reply_page) { 3791 if (req_page) 3792 __free_page(req_page); 3793 return -ENOMEM; 3794 } 3795 3796 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name, 3797 CEPH_OSD_FLAG_READ, req_page, outbound_size, 3798 reply_page, &inbound_size); 3799 if (!ret) { 3800 memcpy(inbound, page_address(reply_page), inbound_size); 3801 ret = inbound_size; 3802 } 3803 3804 if (req_page) 3805 __free_page(req_page); 3806 __free_page(reply_page); 3807 return ret; 3808 } 3809 3810 /* 3811 * lock_rwsem must be held for read 3812 */ 3813 static void rbd_wait_state_locked(struct rbd_device *rbd_dev) 3814 { 3815 DEFINE_WAIT(wait); 3816 3817 do { 3818 /* 3819 * Note the use of mod_delayed_work() in rbd_acquire_lock() 3820 * and cancel_delayed_work() in wake_requests(). 3821 */ 3822 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev); 3823 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); 3824 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, 3825 TASK_UNINTERRUPTIBLE); 3826 up_read(&rbd_dev->lock_rwsem); 3827 schedule(); 3828 down_read(&rbd_dev->lock_rwsem); 3829 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 3830 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); 3831 3832 finish_wait(&rbd_dev->lock_waitq, &wait); 3833 } 3834 3835 static void rbd_queue_workfn(struct work_struct *work) 3836 { 3837 struct request *rq = blk_mq_rq_from_pdu(work); 3838 struct rbd_device *rbd_dev = rq->q->queuedata; 3839 struct rbd_img_request *img_request; 3840 struct ceph_snap_context *snapc = NULL; 3841 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; 3842 u64 length = blk_rq_bytes(rq); 3843 enum obj_operation_type op_type; 3844 u64 mapping_size; 3845 bool must_be_locked; 3846 int result; 3847 3848 switch (req_op(rq)) { 3849 case REQ_OP_DISCARD: 3850 case REQ_OP_WRITE_ZEROES: 3851 op_type = OBJ_OP_DISCARD; 3852 break; 3853 case REQ_OP_WRITE: 3854 op_type = OBJ_OP_WRITE; 3855 break; 3856 case REQ_OP_READ: 3857 op_type = OBJ_OP_READ; 3858 break; 3859 default: 3860 dout("%s: non-fs request type %d\n", __func__, req_op(rq)); 3861 result = -EIO; 3862 goto err; 3863 } 3864 3865 /* Ignore/skip any zero-length requests */ 3866 3867 if (!length) { 3868 dout("%s: zero-length request\n", __func__); 3869 result = 0; 3870 goto err_rq; 3871 } 3872 3873 rbd_assert(op_type == OBJ_OP_READ || 3874 rbd_dev->spec->snap_id == CEPH_NOSNAP); 3875 3876 /* 3877 * Quit early if the mapped snapshot no longer exists. It's 3878 * still possible the snapshot will have disappeared by the 3879 * time our request arrives at the osd, but there's no sense in 3880 * sending it if we already know. 
3881 */ 3882 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) { 3883 dout("request for non-existent snapshot"); 3884 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP); 3885 result = -ENXIO; 3886 goto err_rq; 3887 } 3888 3889 if (offset && length > U64_MAX - offset + 1) { 3890 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset, 3891 length); 3892 result = -EINVAL; 3893 goto err_rq; /* Shouldn't happen */ 3894 } 3895 3896 blk_mq_start_request(rq); 3897 3898 down_read(&rbd_dev->header_rwsem); 3899 mapping_size = rbd_dev->mapping.size; 3900 if (op_type != OBJ_OP_READ) { 3901 snapc = rbd_dev->header.snapc; 3902 ceph_get_snap_context(snapc); 3903 } 3904 up_read(&rbd_dev->header_rwsem); 3905 3906 if (offset + length > mapping_size) { 3907 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset, 3908 length, mapping_size); 3909 result = -EIO; 3910 goto err_rq; 3911 } 3912 3913 must_be_locked = 3914 (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) && 3915 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); 3916 if (must_be_locked) { 3917 down_read(&rbd_dev->lock_rwsem); 3918 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 3919 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 3920 if (rbd_dev->opts->exclusive) { 3921 rbd_warn(rbd_dev, "exclusive lock required"); 3922 result = -EROFS; 3923 goto err_unlock; 3924 } 3925 rbd_wait_state_locked(rbd_dev); 3926 } 3927 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 3928 result = -EBLACKLISTED; 3929 goto err_unlock; 3930 } 3931 } 3932 3933 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type, 3934 snapc); 3935 if (!img_request) { 3936 result = -ENOMEM; 3937 goto err_unlock; 3938 } 3939 img_request->rq = rq; 3940 snapc = NULL; /* img_request consumes a ref */ 3941 3942 if (op_type == OBJ_OP_DISCARD) 3943 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA, 3944 NULL); 3945 else { 3946 struct ceph_bio_iter bio_it = { .bio = rq->bio, 3947 .iter = rq->bio->bi_iter }; 3948 3949 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, 3950 &bio_it); 3951 } 3952 if (result) 3953 goto err_img_request; 3954 3955 result = rbd_img_request_submit(img_request); 3956 if (result) 3957 goto err_img_request; 3958 3959 if (must_be_locked) 3960 up_read(&rbd_dev->lock_rwsem); 3961 return; 3962 3963 err_img_request: 3964 rbd_img_request_put(img_request); 3965 err_unlock: 3966 if (must_be_locked) 3967 up_read(&rbd_dev->lock_rwsem); 3968 err_rq: 3969 if (result) 3970 rbd_warn(rbd_dev, "%s %llx at %llx result %d", 3971 obj_op_name(op_type), length, offset, result); 3972 ceph_put_snap_context(snapc); 3973 err: 3974 blk_mq_end_request(rq, errno_to_blk_status(result)); 3975 } 3976 3977 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx, 3978 const struct blk_mq_queue_data *bd) 3979 { 3980 struct request *rq = bd->rq; 3981 struct work_struct *work = blk_mq_rq_to_pdu(rq); 3982 3983 queue_work(rbd_wq, work); 3984 return BLK_STS_OK; 3985 } 3986 3987 static void rbd_free_disk(struct rbd_device *rbd_dev) 3988 { 3989 blk_cleanup_queue(rbd_dev->disk->queue); 3990 blk_mq_free_tag_set(&rbd_dev->tag_set); 3991 put_disk(rbd_dev->disk); 3992 rbd_dev->disk = NULL; 3993 } 3994 3995 static int rbd_obj_read_sync(struct rbd_device *rbd_dev, 3996 struct ceph_object_id *oid, 3997 struct ceph_object_locator *oloc, 3998 void *buf, int buf_len) 3999 4000 { 4001 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4002 struct ceph_osd_request *req; 4003 struct page **pages; 4004 int num_pages = 
calc_pages_for(0, buf_len); 4005 int ret; 4006 4007 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL); 4008 if (!req) 4009 return -ENOMEM; 4010 4011 ceph_oid_copy(&req->r_base_oid, oid); 4012 ceph_oloc_copy(&req->r_base_oloc, oloc); 4013 req->r_flags = CEPH_OSD_FLAG_READ; 4014 4015 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); 4016 if (ret) 4017 goto out_req; 4018 4019 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 4020 if (IS_ERR(pages)) { 4021 ret = PTR_ERR(pages); 4022 goto out_req; 4023 } 4024 4025 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0); 4026 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false, 4027 true); 4028 4029 ceph_osdc_start_request(osdc, req, false); 4030 ret = ceph_osdc_wait_request(osdc, req); 4031 if (ret >= 0) 4032 ceph_copy_from_page_vector(pages, buf, 0, ret); 4033 4034 out_req: 4035 ceph_osdc_put_request(req); 4036 return ret; 4037 } 4038 4039 /* 4040 * Read the complete header for the given rbd device. On successful 4041 * return, the rbd_dev->header field will contain up-to-date 4042 * information about the image. 4043 */ 4044 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) 4045 { 4046 struct rbd_image_header_ondisk *ondisk = NULL; 4047 u32 snap_count = 0; 4048 u64 names_size = 0; 4049 u32 want_count; 4050 int ret; 4051 4052 /* 4053 * The complete header will include an array of its 64-bit 4054 * snapshot ids, followed by the names of those snapshots as 4055 * a contiguous block of NUL-terminated strings. Note that 4056 * the number of snapshots could change by the time we read 4057 * it in, in which case we re-read it. 4058 */ 4059 do { 4060 size_t size; 4061 4062 kfree(ondisk); 4063 4064 size = sizeof (*ondisk); 4065 size += snap_count * sizeof (struct rbd_image_snap_ondisk); 4066 size += names_size; 4067 ondisk = kmalloc(size, GFP_KERNEL); 4068 if (!ondisk) 4069 return -ENOMEM; 4070 4071 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid, 4072 &rbd_dev->header_oloc, ondisk, size); 4073 if (ret < 0) 4074 goto out; 4075 if ((size_t)ret < size) { 4076 ret = -ENXIO; 4077 rbd_warn(rbd_dev, "short header read (want %zd got %d)", 4078 size, ret); 4079 goto out; 4080 } 4081 if (!rbd_dev_ondisk_valid(ondisk)) { 4082 ret = -ENXIO; 4083 rbd_warn(rbd_dev, "invalid header"); 4084 goto out; 4085 } 4086 4087 names_size = le64_to_cpu(ondisk->snap_names_len); 4088 want_count = snap_count; 4089 snap_count = le32_to_cpu(ondisk->snap_count); 4090 } while (snap_count != want_count); 4091 4092 ret = rbd_header_from_disk(rbd_dev, ondisk); 4093 out: 4094 kfree(ondisk); 4095 4096 return ret; 4097 } 4098 4099 /* 4100 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to 4101 * has disappeared from the (just updated) snapshot context. 4102 */ 4103 static void rbd_exists_validate(struct rbd_device *rbd_dev) 4104 { 4105 u64 snap_id; 4106 4107 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) 4108 return; 4109 4110 snap_id = rbd_dev->spec->snap_id; 4111 if (snap_id == CEPH_NOSNAP) 4112 return; 4113 4114 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX) 4115 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 4116 } 4117 4118 static void rbd_dev_update_size(struct rbd_device *rbd_dev) 4119 { 4120 sector_t size; 4121 4122 /* 4123 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't 4124 * try to update its size. If REMOVING is set, updating size 4125 * is just useless work since the device can't be opened. 
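*
* (mapping.size is in bytes while set_capacity() takes 512-byte
* sectors, hence the division below; e.g. a 1 GiB mapping becomes
* 1073741824 / 512 = 2097152 sectors.)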
4126 */ 4127 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) && 4128 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) { 4129 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 4130 dout("setting size to %llu sectors", (unsigned long long)size); 4131 set_capacity(rbd_dev->disk, size); 4132 revalidate_disk(rbd_dev->disk); 4133 } 4134 } 4135 4136 static int rbd_dev_refresh(struct rbd_device *rbd_dev) 4137 { 4138 u64 mapping_size; 4139 int ret; 4140 4141 down_write(&rbd_dev->header_rwsem); 4142 mapping_size = rbd_dev->mapping.size; 4143 4144 ret = rbd_dev_header_info(rbd_dev); 4145 if (ret) 4146 goto out; 4147 4148 /* 4149 * If there is a parent, see if it has disappeared due to the 4150 * mapped image getting flattened. 4151 */ 4152 if (rbd_dev->parent) { 4153 ret = rbd_dev_v2_parent_info(rbd_dev); 4154 if (ret) 4155 goto out; 4156 } 4157 4158 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) { 4159 rbd_dev->mapping.size = rbd_dev->header.image_size; 4160 } else { 4161 /* validate mapped snapshot's EXISTS flag */ 4162 rbd_exists_validate(rbd_dev); 4163 } 4164 4165 out: 4166 up_write(&rbd_dev->header_rwsem); 4167 if (!ret && mapping_size != rbd_dev->mapping.size) 4168 rbd_dev_update_size(rbd_dev); 4169 4170 return ret; 4171 } 4172 4173 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq, 4174 unsigned int hctx_idx, unsigned int numa_node) 4175 { 4176 struct work_struct *work = blk_mq_rq_to_pdu(rq); 4177 4178 INIT_WORK(work, rbd_queue_workfn); 4179 return 0; 4180 } 4181 4182 static const struct blk_mq_ops rbd_mq_ops = { 4183 .queue_rq = rbd_queue_rq, 4184 .init_request = rbd_init_request, 4185 }; 4186 4187 static int rbd_init_disk(struct rbd_device *rbd_dev) 4188 { 4189 struct gendisk *disk; 4190 struct request_queue *q; 4191 u64 segment_size; 4192 int err; 4193 4194 /* create gendisk info */ 4195 disk = alloc_disk(single_major ? 
4196 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) : 4197 RBD_MINORS_PER_MAJOR); 4198 if (!disk) 4199 return -ENOMEM; 4200 4201 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", 4202 rbd_dev->dev_id); 4203 disk->major = rbd_dev->major; 4204 disk->first_minor = rbd_dev->minor; 4205 if (single_major) 4206 disk->flags |= GENHD_FL_EXT_DEVT; 4207 disk->fops = &rbd_bd_ops; 4208 disk->private_data = rbd_dev; 4209 4210 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); 4211 rbd_dev->tag_set.ops = &rbd_mq_ops; 4212 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth; 4213 rbd_dev->tag_set.numa_node = NUMA_NO_NODE; 4214 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 4215 rbd_dev->tag_set.nr_hw_queues = 1; 4216 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct); 4217 4218 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set); 4219 if (err) 4220 goto out_disk; 4221 4222 q = blk_mq_init_queue(&rbd_dev->tag_set); 4223 if (IS_ERR(q)) { 4224 err = PTR_ERR(q); 4225 goto out_tag_set; 4226 } 4227 4228 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 4229 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ 4230 4231 /* set io sizes to object size */ 4232 segment_size = rbd_obj_bytes(&rbd_dev->header); 4233 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); 4234 q->limits.max_sectors = queue_max_hw_sectors(q); 4235 blk_queue_max_segments(q, USHRT_MAX); 4236 blk_queue_max_segment_size(q, UINT_MAX); 4237 blk_queue_io_min(q, segment_size); 4238 blk_queue_io_opt(q, segment_size); 4239 4240 /* enable the discard support */ 4241 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 4242 q->limits.discard_granularity = segment_size; 4243 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); 4244 blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); 4245 4246 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) 4247 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; 4248 4249 /* 4250 * disk_release() expects a queue ref from add_disk() and will 4251 * put it. Hold an extra ref until add_disk() is called. 4252 */ 4253 WARN_ON(!blk_get_queue(q)); 4254 disk->queue = q; 4255 q->queuedata = rbd_dev; 4256 4257 rbd_dev->disk = disk; 4258 4259 return 0; 4260 out_tag_set: 4261 blk_mq_free_tag_set(&rbd_dev->tag_set); 4262 out_disk: 4263 put_disk(disk); 4264 return err; 4265 } 4266 4267 /* 4268 sysfs 4269 */ 4270 4271 static struct rbd_device *dev_to_rbd_dev(struct device *dev) 4272 { 4273 return container_of(dev, struct rbd_device, dev); 4274 } 4275 4276 static ssize_t rbd_size_show(struct device *dev, 4277 struct device_attribute *attr, char *buf) 4278 { 4279 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4280 4281 return sprintf(buf, "%llu\n", 4282 (unsigned long long)rbd_dev->mapping.size); 4283 } 4284 4285 /* 4286 * Note this shows the features for whatever's mapped, which is not 4287 * necessarily the base image. 
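*
* Illustrative example (device id and feature values made up): a
* layered image with exclusive locking enabled would show
*
*   $ cat /sys/bus/rbd/devices/<id>/features
*   0x0000000000000005
*
* i.e. RBD_FEATURE_LAYERING | RBD_FEATURE_EXCLUSIVE_LOCK.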
4288 */ 4289 static ssize_t rbd_features_show(struct device *dev, 4290 struct device_attribute *attr, char *buf) 4291 { 4292 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4293 4294 return sprintf(buf, "0x%016llx\n", 4295 (unsigned long long)rbd_dev->mapping.features); 4296 } 4297 4298 static ssize_t rbd_major_show(struct device *dev, 4299 struct device_attribute *attr, char *buf) 4300 { 4301 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4302 4303 if (rbd_dev->major) 4304 return sprintf(buf, "%d\n", rbd_dev->major); 4305 4306 return sprintf(buf, "(none)\n"); 4307 } 4308 4309 static ssize_t rbd_minor_show(struct device *dev, 4310 struct device_attribute *attr, char *buf) 4311 { 4312 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4313 4314 return sprintf(buf, "%d\n", rbd_dev->minor); 4315 } 4316 4317 static ssize_t rbd_client_addr_show(struct device *dev, 4318 struct device_attribute *attr, char *buf) 4319 { 4320 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4321 struct ceph_entity_addr *client_addr = 4322 ceph_client_addr(rbd_dev->rbd_client->client); 4323 4324 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr, 4325 le32_to_cpu(client_addr->nonce)); 4326 } 4327 4328 static ssize_t rbd_client_id_show(struct device *dev, 4329 struct device_attribute *attr, char *buf) 4330 { 4331 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4332 4333 return sprintf(buf, "client%lld\n", 4334 ceph_client_gid(rbd_dev->rbd_client->client)); 4335 } 4336 4337 static ssize_t rbd_cluster_fsid_show(struct device *dev, 4338 struct device_attribute *attr, char *buf) 4339 { 4340 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4341 4342 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid); 4343 } 4344 4345 static ssize_t rbd_config_info_show(struct device *dev, 4346 struct device_attribute *attr, char *buf) 4347 { 4348 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4349 4350 return sprintf(buf, "%s\n", rbd_dev->config_info); 4351 } 4352 4353 static ssize_t rbd_pool_show(struct device *dev, 4354 struct device_attribute *attr, char *buf) 4355 { 4356 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4357 4358 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name); 4359 } 4360 4361 static ssize_t rbd_pool_id_show(struct device *dev, 4362 struct device_attribute *attr, char *buf) 4363 { 4364 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4365 4366 return sprintf(buf, "%llu\n", 4367 (unsigned long long) rbd_dev->spec->pool_id); 4368 } 4369 4370 static ssize_t rbd_name_show(struct device *dev, 4371 struct device_attribute *attr, char *buf) 4372 { 4373 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4374 4375 if (rbd_dev->spec->image_name) 4376 return sprintf(buf, "%s\n", rbd_dev->spec->image_name); 4377 4378 return sprintf(buf, "(unknown)\n"); 4379 } 4380 4381 static ssize_t rbd_image_id_show(struct device *dev, 4382 struct device_attribute *attr, char *buf) 4383 { 4384 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4385 4386 return sprintf(buf, "%s\n", rbd_dev->spec->image_id); 4387 } 4388 4389 /* 4390 * Shows the name of the currently-mapped snapshot (or 4391 * RBD_SNAP_HEAD_NAME for the base image). 
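*
* (E.g. a device mapped at a snapshot named "snap1" shows "snap1";
* a device mapped at the image head shows "-".)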
4392 */ 4393 static ssize_t rbd_snap_show(struct device *dev, 4394 struct device_attribute *attr, 4395 char *buf) 4396 { 4397 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4398 4399 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name); 4400 } 4401 4402 static ssize_t rbd_snap_id_show(struct device *dev, 4403 struct device_attribute *attr, char *buf) 4404 { 4405 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4406 4407 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id); 4408 } 4409 4410 /* 4411 * For a v2 image, shows the chain of parent images, separated by empty 4412 * lines. For v1 images or if there is no parent, shows "(no parent 4413 * image)". 4414 */ 4415 static ssize_t rbd_parent_show(struct device *dev, 4416 struct device_attribute *attr, 4417 char *buf) 4418 { 4419 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4420 ssize_t count = 0; 4421 4422 if (!rbd_dev->parent) 4423 return sprintf(buf, "(no parent image)\n"); 4424 4425 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) { 4426 struct rbd_spec *spec = rbd_dev->parent_spec; 4427 4428 count += sprintf(&buf[count], "%s" 4429 "pool_id %llu\npool_name %s\n" 4430 "image_id %s\nimage_name %s\n" 4431 "snap_id %llu\nsnap_name %s\n" 4432 "overlap %llu\n", 4433 !count ? "" : "\n", /* first? */ 4434 spec->pool_id, spec->pool_name, 4435 spec->image_id, spec->image_name ?: "(unknown)", 4436 spec->snap_id, spec->snap_name, 4437 rbd_dev->parent_overlap); 4438 } 4439 4440 return count; 4441 } 4442 4443 static ssize_t rbd_image_refresh(struct device *dev, 4444 struct device_attribute *attr, 4445 const char *buf, 4446 size_t size) 4447 { 4448 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4449 int ret; 4450 4451 ret = rbd_dev_refresh(rbd_dev); 4452 if (ret) 4453 return ret; 4454 4455 return size; 4456 } 4457 4458 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL); 4459 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL); 4460 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL); 4461 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL); 4462 static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL); 4463 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL); 4464 static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL); 4465 static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL); 4466 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL); 4467 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL); 4468 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); 4469 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL); 4470 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); 4471 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); 4472 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL); 4473 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL); 4474 4475 static struct attribute *rbd_attrs[] = { 4476 &dev_attr_size.attr, 4477 &dev_attr_features.attr, 4478 &dev_attr_major.attr, 4479 &dev_attr_minor.attr, 4480 &dev_attr_client_addr.attr, 4481 &dev_attr_client_id.attr, 4482 &dev_attr_cluster_fsid.attr, 4483 &dev_attr_config_info.attr, 4484 &dev_attr_pool.attr, 4485 &dev_attr_pool_id.attr, 4486 &dev_attr_name.attr, 4487 &dev_attr_image_id.attr, 4488 &dev_attr_current_snap.attr, 4489 &dev_attr_snap_id.attr, 4490 &dev_attr_parent.attr, 4491 &dev_attr_refresh.attr, 4492 NULL 4493 }; 4494 4495 static struct attribute_group rbd_attr_group = { 4496 .attrs = rbd_attrs, 4497 }; 4498 4499 static const struct 
attribute_group *rbd_attr_groups[] = {
4500 &rbd_attr_group,
4501 NULL
4502 };
4503 
4504 static void rbd_dev_release(struct device *dev);
4505 
4506 static const struct device_type rbd_device_type = {
4507 .name = "rbd",
4508 .groups = rbd_attr_groups,
4509 .release = rbd_dev_release,
4510 };
4511 
4512 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4513 {
4514 kref_get(&spec->kref);
4515 
4516 return spec;
4517 }
4518 
4519 static void rbd_spec_free(struct kref *kref);
4520 static void rbd_spec_put(struct rbd_spec *spec)
4521 {
4522 if (spec)
4523 kref_put(&spec->kref, rbd_spec_free);
4524 }
4525 
4526 static struct rbd_spec *rbd_spec_alloc(void)
4527 {
4528 struct rbd_spec *spec;
4529 
4530 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4531 if (!spec)
4532 return NULL;
4533 
4534 spec->pool_id = CEPH_NOPOOL;
4535 spec->snap_id = CEPH_NOSNAP;
4536 kref_init(&spec->kref);
4537 
4538 return spec;
4539 }
4540 
4541 static void rbd_spec_free(struct kref *kref)
4542 {
4543 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4544 
4545 kfree(spec->pool_name);
4546 kfree(spec->image_id);
4547 kfree(spec->image_name);
4548 kfree(spec->snap_name);
4549 kfree(spec);
4550 }
4551 
4552 static void rbd_dev_free(struct rbd_device *rbd_dev)
4553 {
4554 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
4555 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
4556 
4557 ceph_oid_destroy(&rbd_dev->header_oid);
4558 ceph_oloc_destroy(&rbd_dev->header_oloc);
4559 kfree(rbd_dev->config_info);
4560 
4561 rbd_put_client(rbd_dev->rbd_client);
4562 rbd_spec_put(rbd_dev->spec);
4563 kfree(rbd_dev->opts);
4564 kfree(rbd_dev);
4565 }
4566 
4567 static void rbd_dev_release(struct device *dev)
4568 {
4569 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4570 bool need_put = !!rbd_dev->opts;
4571 
4572 if (need_put) {
4573 destroy_workqueue(rbd_dev->task_wq);
4574 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4575 }
4576 
4577 rbd_dev_free(rbd_dev);
4578 
4579 /*
4580 * This is racy, but way better than doing the module_put() outside
4581 * of the release callback. The race window is pretty small, so
4582 * doing something similar to dm (dm-builtin.c) is overkill.
4583 */
4584 if (need_put)
4585 module_put(THIS_MODULE);
4586 }
4587 
4588 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4589 struct rbd_spec *spec)
4590 {
4591 struct rbd_device *rbd_dev;
4592 
4593 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4594 if (!rbd_dev)
4595 return NULL;
4596 
4597 spin_lock_init(&rbd_dev->lock);
4598 INIT_LIST_HEAD(&rbd_dev->node);
4599 init_rwsem(&rbd_dev->header_rwsem);
4600 
4601 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
4602 ceph_oid_init(&rbd_dev->header_oid);
4603 rbd_dev->header_oloc.pool = spec->pool_id;
4604 
4605 mutex_init(&rbd_dev->watch_mutex);
4606 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4607 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4608 
4609 init_rwsem(&rbd_dev->lock_rwsem);
4610 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4611 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4612 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4613 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4614 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4615 init_waitqueue_head(&rbd_dev->lock_waitq);
4616 
4617 rbd_dev->dev.bus = &rbd_bus_type;
4618 rbd_dev->dev.type = &rbd_device_type;
4619 rbd_dev->dev.parent = &rbd_root_dev;
4620 device_initialize(&rbd_dev->dev);
4621 
4622 rbd_dev->rbd_client = rbdc;
4623 rbd_dev->spec = spec;
4624 
4625 return rbd_dev;
4626 }
4627 
4628 /*
4629 * Create a mapping rbd_dev.
4630 */
4631 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4632 struct rbd_spec *spec,
4633 struct rbd_options *opts)
4634 {
4635 struct rbd_device *rbd_dev;
4636 
4637 rbd_dev = __rbd_dev_create(rbdc, spec);
4638 if (!rbd_dev)
4639 return NULL;
4640 
4641 rbd_dev->opts = opts;
4642 
4643 /* get an id and fill in device name */
4644 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4645 minor_to_rbd_dev_id(1 << MINORBITS),
4646 GFP_KERNEL);
4647 if (rbd_dev->dev_id < 0)
4648 goto fail_rbd_dev;
4649 
4650 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4651 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4652 rbd_dev->name);
4653 if (!rbd_dev->task_wq)
4654 goto fail_dev_id;
4655 
4656 /* we have a ref from do_rbd_add() */
4657 __module_get(THIS_MODULE);
4658 
4659 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4660 return rbd_dev;
4661 
4662 fail_dev_id:
4663 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4664 fail_rbd_dev:
4665 rbd_dev_free(rbd_dev);
4666 return NULL;
4667 }
4668 
4669 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4670 {
4671 if (rbd_dev)
4672 put_device(&rbd_dev->dev);
4673 }
4674 
4675 /*
4676 * Get the size and object order for an image snapshot, or if
4677 * snap_id is CEPH_NOSNAP, get this information for the base
4678 * image.
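*
* (The object order is the log2 of the backing object size; e.g. the
* common default of order 22 means 1 << 22 == 4 MiB objects.)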
4679 */ 4680 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 4681 u8 *order, u64 *snap_size) 4682 { 4683 __le64 snapid = cpu_to_le64(snap_id); 4684 int ret; 4685 struct { 4686 u8 order; 4687 __le64 size; 4688 } __attribute__ ((packed)) size_buf = { 0 }; 4689 4690 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4691 &rbd_dev->header_oloc, "get_size", 4692 &snapid, sizeof(snapid), 4693 &size_buf, sizeof(size_buf)); 4694 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4695 if (ret < 0) 4696 return ret; 4697 if (ret < sizeof (size_buf)) 4698 return -ERANGE; 4699 4700 if (order) { 4701 *order = size_buf.order; 4702 dout(" order %u", (unsigned int)*order); 4703 } 4704 *snap_size = le64_to_cpu(size_buf.size); 4705 4706 dout(" snap_id 0x%016llx snap_size = %llu\n", 4707 (unsigned long long)snap_id, 4708 (unsigned long long)*snap_size); 4709 4710 return 0; 4711 } 4712 4713 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev) 4714 { 4715 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, 4716 &rbd_dev->header.obj_order, 4717 &rbd_dev->header.image_size); 4718 } 4719 4720 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) 4721 { 4722 void *reply_buf; 4723 int ret; 4724 void *p; 4725 4726 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL); 4727 if (!reply_buf) 4728 return -ENOMEM; 4729 4730 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4731 &rbd_dev->header_oloc, "get_object_prefix", 4732 NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX); 4733 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4734 if (ret < 0) 4735 goto out; 4736 4737 p = reply_buf; 4738 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, 4739 p + ret, NULL, GFP_NOIO); 4740 ret = 0; 4741 4742 if (IS_ERR(rbd_dev->header.object_prefix)) { 4743 ret = PTR_ERR(rbd_dev->header.object_prefix); 4744 rbd_dev->header.object_prefix = NULL; 4745 } else { 4746 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); 4747 } 4748 out: 4749 kfree(reply_buf); 4750 4751 return ret; 4752 } 4753 4754 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 4755 u64 *snap_features) 4756 { 4757 __le64 snapid = cpu_to_le64(snap_id); 4758 struct { 4759 __le64 features; 4760 __le64 incompat; 4761 } __attribute__ ((packed)) features_buf = { 0 }; 4762 u64 unsup; 4763 int ret; 4764 4765 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4766 &rbd_dev->header_oloc, "get_features", 4767 &snapid, sizeof(snapid), 4768 &features_buf, sizeof(features_buf)); 4769 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4770 if (ret < 0) 4771 return ret; 4772 if (ret < sizeof (features_buf)) 4773 return -ERANGE; 4774 4775 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED; 4776 if (unsup) { 4777 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx", 4778 unsup); 4779 return -ENXIO; 4780 } 4781 4782 *snap_features = le64_to_cpu(features_buf.features); 4783 4784 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n", 4785 (unsigned long long)snap_id, 4786 (unsigned long long)*snap_features, 4787 (unsigned long long)le64_to_cpu(features_buf.incompat)); 4788 4789 return 0; 4790 } 4791 4792 static int rbd_dev_v2_features(struct rbd_device *rbd_dev) 4793 { 4794 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, 4795 &rbd_dev->header.features); 4796 } 4797 4798 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) 4799 { 4800 struct rbd_spec *parent_spec; 4801 size_t size; 4802 void 
*reply_buf = NULL;
4803 __le64 snapid;
4804 void *p;
4805 void *end;
4806 u64 pool_id;
4807 char *image_id;
4808 u64 snap_id;
4809 u64 overlap;
4810 int ret;
4811 
4812 parent_spec = rbd_spec_alloc();
4813 if (!parent_spec)
4814 return -ENOMEM;
4815 
4816 size = sizeof (__le64) + /* pool_id */
4817 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4818 sizeof (__le64) + /* snap_id */
4819 sizeof (__le64); /* overlap */
4820 reply_buf = kmalloc(size, GFP_KERNEL);
4821 if (!reply_buf) {
4822 ret = -ENOMEM;
4823 goto out_err;
4824 }
4825 
4826 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4827 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4828 &rbd_dev->header_oloc, "get_parent",
4829 &snapid, sizeof(snapid), reply_buf, size);
4830 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4831 if (ret < 0)
4832 goto out_err;
4833 
4834 p = reply_buf;
4835 end = reply_buf + ret;
4836 ret = -ERANGE;
4837 ceph_decode_64_safe(&p, end, pool_id, out_err);
4838 if (pool_id == CEPH_NOPOOL) {
4839 /*
4840 * Either the parent never existed, or we have a
4841 * record of it but the image got flattened so it no
4842 * longer has a parent. When the parent of a
4843 * layered image disappears we immediately set the
4844 * overlap to 0. The effect of this is that all new
4845 * requests will be treated as if the image had no
4846 * parent.
4847 */
4848 if (rbd_dev->parent_overlap) {
4849 rbd_dev->parent_overlap = 0;
4850 rbd_dev_parent_put(rbd_dev);
4851 pr_info("%s: clone image has been flattened\n",
4852 rbd_dev->disk->disk_name);
4853 }
4854 
4855 goto out; /* No parent? No problem. */
4856 }
4857 
4858 /* The ceph file layout needs to fit pool id in 32 bits */
4859 
4860 ret = -EIO;
4861 if (pool_id > (u64)U32_MAX) {
4862 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4863 (unsigned long long)pool_id, U32_MAX);
4864 goto out_err;
4865 }
4866 
4867 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4868 if (IS_ERR(image_id)) {
4869 ret = PTR_ERR(image_id);
4870 goto out_err;
4871 }
4872 ceph_decode_64_safe(&p, end, snap_id, out_err);
4873 ceph_decode_64_safe(&p, end, overlap, out_err);
4874 
4875 /*
4876 * The parent won't change (except when the clone is
4877 * flattened, which was handled above). So we only need to
4878 * record the parent spec if we have not already done so.
4879 */
4880 if (!rbd_dev->parent_spec) {
4881 parent_spec->pool_id = pool_id;
4882 parent_spec->image_id = image_id;
4883 parent_spec->snap_id = snap_id;
4884 rbd_dev->parent_spec = parent_spec;
4885 parent_spec = NULL; /* rbd_dev now owns this */
4886 } else {
4887 kfree(image_id);
4888 }
4889 
4890 /*
4891 * We always update the parent overlap. If it's zero we issue
4892 * a warning, as we will proceed as if there was no parent.
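*
* (The overlap is the number of bytes, starting from offset 0 of the
* child, for which reads may still need to be satisfied from the
* parent image.)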
4893 */ 4894 if (!overlap) { 4895 if (parent_spec) { 4896 /* refresh, careful to warn just once */ 4897 if (rbd_dev->parent_overlap) 4898 rbd_warn(rbd_dev, 4899 "clone now standalone (overlap became 0)"); 4900 } else { 4901 /* initial probe */ 4902 rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); 4903 } 4904 } 4905 rbd_dev->parent_overlap = overlap; 4906 4907 out: 4908 ret = 0; 4909 out_err: 4910 kfree(reply_buf); 4911 rbd_spec_put(parent_spec); 4912 4913 return ret; 4914 } 4915 4916 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) 4917 { 4918 struct { 4919 __le64 stripe_unit; 4920 __le64 stripe_count; 4921 } __attribute__ ((packed)) striping_info_buf = { 0 }; 4922 size_t size = sizeof (striping_info_buf); 4923 void *p; 4924 u64 obj_size; 4925 u64 stripe_unit; 4926 u64 stripe_count; 4927 int ret; 4928 4929 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4930 &rbd_dev->header_oloc, "get_stripe_unit_count", 4931 NULL, 0, &striping_info_buf, size); 4932 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 4933 if (ret < 0) 4934 return ret; 4935 if (ret < size) 4936 return -ERANGE; 4937 4938 /* 4939 * We don't actually support the "fancy striping" feature 4940 * (STRIPINGV2) yet, but if the striping sizes are the 4941 * defaults the behavior is the same as before. So find 4942 * out, and only fail if the image has non-default values. 4943 */ 4944 ret = -EINVAL; 4945 obj_size = rbd_obj_bytes(&rbd_dev->header); 4946 p = &striping_info_buf; 4947 stripe_unit = ceph_decode_64(&p); 4948 if (stripe_unit != obj_size) { 4949 rbd_warn(rbd_dev, "unsupported stripe unit " 4950 "(got %llu want %llu)", 4951 stripe_unit, obj_size); 4952 return -EINVAL; 4953 } 4954 stripe_count = ceph_decode_64(&p); 4955 if (stripe_count != 1) { 4956 rbd_warn(rbd_dev, "unsupported stripe count " 4957 "(got %llu want 1)", stripe_count); 4958 return -EINVAL; 4959 } 4960 rbd_dev->header.stripe_unit = stripe_unit; 4961 rbd_dev->header.stripe_count = stripe_count; 4962 4963 return 0; 4964 } 4965 4966 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev) 4967 { 4968 __le64 data_pool_id; 4969 int ret; 4970 4971 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4972 &rbd_dev->header_oloc, "get_data_pool", 4973 NULL, 0, &data_pool_id, sizeof(data_pool_id)); 4974 if (ret < 0) 4975 return ret; 4976 if (ret < sizeof(data_pool_id)) 4977 return -EBADMSG; 4978 4979 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id); 4980 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL); 4981 return 0; 4982 } 4983 4984 static char *rbd_dev_image_name(struct rbd_device *rbd_dev) 4985 { 4986 CEPH_DEFINE_OID_ONSTACK(oid); 4987 size_t image_id_size; 4988 char *image_id; 4989 void *p; 4990 void *end; 4991 size_t size; 4992 void *reply_buf = NULL; 4993 size_t len = 0; 4994 char *image_name = NULL; 4995 int ret; 4996 4997 rbd_assert(!rbd_dev->spec->image_name); 4998 4999 len = strlen(rbd_dev->spec->image_id); 5000 image_id_size = sizeof (__le32) + len; 5001 image_id = kmalloc(image_id_size, GFP_KERNEL); 5002 if (!image_id) 5003 return NULL; 5004 5005 p = image_id; 5006 end = image_id + image_id_size; 5007 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len); 5008 5009 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX; 5010 reply_buf = kmalloc(size, GFP_KERNEL); 5011 if (!reply_buf) 5012 goto out; 5013 5014 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY); 5015 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc, 5016 "dir_get_name", image_id, image_id_size, 5017 reply_buf, size); 5018 if (ret 
< 0) 5019 goto out; 5020 p = reply_buf; 5021 end = reply_buf + ret; 5022 5023 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL); 5024 if (IS_ERR(image_name)) 5025 image_name = NULL; 5026 else 5027 dout("%s: name is %s len is %zd\n", __func__, image_name, len); 5028 out: 5029 kfree(reply_buf); 5030 kfree(image_id); 5031 5032 return image_name; 5033 } 5034 5035 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5036 { 5037 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 5038 const char *snap_name; 5039 u32 which = 0; 5040 5041 /* Skip over names until we find the one we are looking for */ 5042 5043 snap_name = rbd_dev->header.snap_names; 5044 while (which < snapc->num_snaps) { 5045 if (!strcmp(name, snap_name)) 5046 return snapc->snaps[which]; 5047 snap_name += strlen(snap_name) + 1; 5048 which++; 5049 } 5050 return CEPH_NOSNAP; 5051 } 5052 5053 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5054 { 5055 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 5056 u32 which; 5057 bool found = false; 5058 u64 snap_id; 5059 5060 for (which = 0; !found && which < snapc->num_snaps; which++) { 5061 const char *snap_name; 5062 5063 snap_id = snapc->snaps[which]; 5064 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); 5065 if (IS_ERR(snap_name)) { 5066 /* ignore no-longer existing snapshots */ 5067 if (PTR_ERR(snap_name) == -ENOENT) 5068 continue; 5069 else 5070 break; 5071 } 5072 found = !strcmp(name, snap_name); 5073 kfree(snap_name); 5074 } 5075 return found ? snap_id : CEPH_NOSNAP; 5076 } 5077 5078 /* 5079 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if 5080 * no snapshot by that name is found, or if an error occurs. 5081 */ 5082 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5083 { 5084 if (rbd_dev->image_format == 1) 5085 return rbd_v1_snap_id_by_name(rbd_dev, name); 5086 5087 return rbd_v2_snap_id_by_name(rbd_dev, name); 5088 } 5089 5090 /* 5091 * An image being mapped will have everything but the snap id. 5092 */ 5093 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev) 5094 { 5095 struct rbd_spec *spec = rbd_dev->spec; 5096 5097 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name); 5098 rbd_assert(spec->image_id && spec->image_name); 5099 rbd_assert(spec->snap_name); 5100 5101 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) { 5102 u64 snap_id; 5103 5104 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name); 5105 if (snap_id == CEPH_NOSNAP) 5106 return -ENOENT; 5107 5108 spec->snap_id = snap_id; 5109 } else { 5110 spec->snap_id = CEPH_NOSNAP; 5111 } 5112 5113 return 0; 5114 } 5115 5116 /* 5117 * A parent image will have all ids but none of the names. 5118 * 5119 * All names in an rbd spec are dynamically allocated. It's OK if we 5120 * can't figure out the name for an image id. 
5121 */ 5122 static int rbd_spec_fill_names(struct rbd_device *rbd_dev) 5123 { 5124 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 5125 struct rbd_spec *spec = rbd_dev->spec; 5126 const char *pool_name; 5127 const char *image_name; 5128 const char *snap_name; 5129 int ret; 5130 5131 rbd_assert(spec->pool_id != CEPH_NOPOOL); 5132 rbd_assert(spec->image_id); 5133 rbd_assert(spec->snap_id != CEPH_NOSNAP); 5134 5135 /* Get the pool name; we have to make our own copy of this */ 5136 5137 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id); 5138 if (!pool_name) { 5139 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id); 5140 return -EIO; 5141 } 5142 pool_name = kstrdup(pool_name, GFP_KERNEL); 5143 if (!pool_name) 5144 return -ENOMEM; 5145 5146 /* Fetch the image name; tolerate failure here */ 5147 5148 image_name = rbd_dev_image_name(rbd_dev); 5149 if (!image_name) 5150 rbd_warn(rbd_dev, "unable to get image name"); 5151 5152 /* Fetch the snapshot name */ 5153 5154 snap_name = rbd_snap_name(rbd_dev, spec->snap_id); 5155 if (IS_ERR(snap_name)) { 5156 ret = PTR_ERR(snap_name); 5157 goto out_err; 5158 } 5159 5160 spec->pool_name = pool_name; 5161 spec->image_name = image_name; 5162 spec->snap_name = snap_name; 5163 5164 return 0; 5165 5166 out_err: 5167 kfree(image_name); 5168 kfree(pool_name); 5169 return ret; 5170 } 5171 5172 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) 5173 { 5174 size_t size; 5175 int ret; 5176 void *reply_buf; 5177 void *p; 5178 void *end; 5179 u64 seq; 5180 u32 snap_count; 5181 struct ceph_snap_context *snapc; 5182 u32 i; 5183 5184 /* 5185 * We'll need room for the seq value (maximum snapshot id), 5186 * snapshot count, and array of that many snapshot ids. 5187 * For now we have a fixed upper limit on the number we're 5188 * prepared to receive. 5189 */ 5190 size = sizeof (__le64) + sizeof (__le32) + 5191 RBD_MAX_SNAP_COUNT * sizeof (__le64); 5192 reply_buf = kzalloc(size, GFP_KERNEL); 5193 if (!reply_buf) 5194 return -ENOMEM; 5195 5196 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5197 &rbd_dev->header_oloc, "get_snapcontext", 5198 NULL, 0, reply_buf, size); 5199 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5200 if (ret < 0) 5201 goto out; 5202 5203 p = reply_buf; 5204 end = reply_buf + ret; 5205 ret = -ERANGE; 5206 ceph_decode_64_safe(&p, end, seq, out); 5207 ceph_decode_32_safe(&p, end, snap_count, out); 5208 5209 /* 5210 * Make sure the reported number of snapshot ids wouldn't go 5211 * beyond the end of our buffer. But before checking that, 5212 * make sure the computed size of the snapshot context we 5213 * allocate is representable in a size_t. 
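*
* (That is, require snap_count <= (SIZE_MAX - sizeof (struct
* ceph_snap_context)) / sizeof (u64), so that the allocation size
* sizeof (struct ceph_snap_context) + snap_count * sizeof (u64)
* cannot overflow a size_t.)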
5214 */ 5215 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context)) 5216 / sizeof (u64)) { 5217 ret = -EINVAL; 5218 goto out; 5219 } 5220 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64))) 5221 goto out; 5222 ret = 0; 5223 5224 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); 5225 if (!snapc) { 5226 ret = -ENOMEM; 5227 goto out; 5228 } 5229 snapc->seq = seq; 5230 for (i = 0; i < snap_count; i++) 5231 snapc->snaps[i] = ceph_decode_64(&p); 5232 5233 ceph_put_snap_context(rbd_dev->header.snapc); 5234 rbd_dev->header.snapc = snapc; 5235 5236 dout(" snap context seq = %llu, snap_count = %u\n", 5237 (unsigned long long)seq, (unsigned int)snap_count); 5238 out: 5239 kfree(reply_buf); 5240 5241 return ret; 5242 } 5243 5244 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, 5245 u64 snap_id) 5246 { 5247 size_t size; 5248 void *reply_buf; 5249 __le64 snapid; 5250 int ret; 5251 void *p; 5252 void *end; 5253 char *snap_name; 5254 5255 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN; 5256 reply_buf = kmalloc(size, GFP_KERNEL); 5257 if (!reply_buf) 5258 return ERR_PTR(-ENOMEM); 5259 5260 snapid = cpu_to_le64(snap_id); 5261 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5262 &rbd_dev->header_oloc, "get_snapshot_name", 5263 &snapid, sizeof(snapid), reply_buf, size); 5264 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5265 if (ret < 0) { 5266 snap_name = ERR_PTR(ret); 5267 goto out; 5268 } 5269 5270 p = reply_buf; 5271 end = reply_buf + ret; 5272 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 5273 if (IS_ERR(snap_name)) 5274 goto out; 5275 5276 dout(" snap_id 0x%016llx snap_name = %s\n", 5277 (unsigned long long)snap_id, snap_name); 5278 out: 5279 kfree(reply_buf); 5280 5281 return snap_name; 5282 } 5283 5284 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) 5285 { 5286 bool first_time = rbd_dev->header.object_prefix == NULL; 5287 int ret; 5288 5289 ret = rbd_dev_v2_image_size(rbd_dev); 5290 if (ret) 5291 return ret; 5292 5293 if (first_time) { 5294 ret = rbd_dev_v2_header_onetime(rbd_dev); 5295 if (ret) 5296 return ret; 5297 } 5298 5299 ret = rbd_dev_v2_snap_context(rbd_dev); 5300 if (ret && first_time) { 5301 kfree(rbd_dev->header.object_prefix); 5302 rbd_dev->header.object_prefix = NULL; 5303 } 5304 5305 return ret; 5306 } 5307 5308 static int rbd_dev_header_info(struct rbd_device *rbd_dev) 5309 { 5310 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 5311 5312 if (rbd_dev->image_format == 1) 5313 return rbd_dev_v1_header_info(rbd_dev); 5314 5315 return rbd_dev_v2_header_info(rbd_dev); 5316 } 5317 5318 /* 5319 * Skips over white space at *buf, and updates *buf to point to the 5320 * first found non-space character (if any). Returns the length of 5321 * the token (string of non-white space characters) found. Note 5322 * that *buf must be terminated with '\0'. 5323 */ 5324 static inline size_t next_token(const char **buf) 5325 { 5326 /* 5327 * These are the characters that produce nonzero for 5328 * isspace() in the "C" and "POSIX" locales. 5329 */ 5330 const char *spaces = " \f\n\r\t\v"; 5331 5332 *buf += strspn(*buf, spaces); /* Find start of token */ 5333 5334 return strcspn(*buf, spaces); /* Return token length */ 5335 } 5336 5337 /* 5338 * Finds the next token in *buf, dynamically allocates a buffer big 5339 * enough to hold a copy of it, and copies the token into the new 5340 * buffer. The copy is guaranteed to be terminated with '\0'. 
Note 5341 * that a duplicate buffer is created even for a zero-length token. 5342 * 5343 * Returns a pointer to the newly-allocated duplicate, or a null 5344 * pointer if memory for the duplicate was not available. If 5345 * the lenp argument is a non-null pointer, the length of the token 5346 * (not including the '\0') is returned in *lenp. 5347 * 5348 * If successful, the *buf pointer will be updated to point beyond 5349 * the end of the found token. 5350 * 5351 * Note: uses GFP_KERNEL for allocation. 5352 */ 5353 static inline char *dup_token(const char **buf, size_t *lenp) 5354 { 5355 char *dup; 5356 size_t len; 5357 5358 len = next_token(buf); 5359 dup = kmemdup(*buf, len + 1, GFP_KERNEL); 5360 if (!dup) 5361 return NULL; 5362 *(dup + len) = '\0'; 5363 *buf += len; 5364 5365 if (lenp) 5366 *lenp = len; 5367 5368 return dup; 5369 } 5370 5371 /* 5372 * Parse the options provided for an "rbd add" (i.e., rbd image 5373 * mapping) request. These arrive via a write to /sys/bus/rbd/add, 5374 * and the data written is passed here via a NUL-terminated buffer. 5375 * Returns 0 if successful or an error code otherwise. 5376 * 5377 * The information extracted from these options is recorded in 5378 * the other parameters which return dynamically-allocated 5379 * structures: 5380 * ceph_opts 5381 * The address of a pointer that will refer to a ceph options 5382 * structure. Caller must release the returned pointer using 5383 * ceph_destroy_options() when it is no longer needed. 5384 * rbd_opts 5385 * Address of an rbd options pointer. Fully initialized by 5386 * this function; caller must release with kfree(). 5387 * spec 5388 * Address of an rbd image specification pointer. Fully 5389 * initialized by this function based on parsed options. 5390 * Caller must release with rbd_spec_put(). 5391 * 5392 * The options passed take this form: 5393 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>] 5394 * where: 5395 * <mon_addrs> 5396 * A comma-separated list of one or more monitor addresses. 5397 * A monitor address is an ip address, optionally followed 5398 * by a port number (separated by a colon). 5399 * I.e.: ip1[:port1][,ip2[:port2]...] 5400 * <options> 5401 * A comma-separated list of ceph and/or rbd options. 5402 * <pool_name> 5403 * The name of the rados pool containing the rbd image. 5404 * <image_name> 5405 * The name of the image in that pool to map. 5406 * <snap_id> 5407 * An optional snapshot id. If provided, the mapping will 5408 * present data from the image at the time that snapshot was 5409 * created. The image head is used if no snapshot id is 5410 * provided. Snapshot mappings are always read-only. 
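*
* Illustrative example (monitor address, key and names made up):
*
*   $ echo "1.2.3.4:6789 name=admin,secret=<key> mypool myimage -" \
*       > /sys/bus/rbd/add
*
* maps the head of image "myimage" in pool "mypool".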
5411 */ 5412 static int rbd_add_parse_args(const char *buf, 5413 struct ceph_options **ceph_opts, 5414 struct rbd_options **opts, 5415 struct rbd_spec **rbd_spec) 5416 { 5417 size_t len; 5418 char *options; 5419 const char *mon_addrs; 5420 char *snap_name; 5421 size_t mon_addrs_size; 5422 struct rbd_spec *spec = NULL; 5423 struct rbd_options *rbd_opts = NULL; 5424 struct ceph_options *copts; 5425 int ret; 5426 5427 /* The first four tokens are required */ 5428 5429 len = next_token(&buf); 5430 if (!len) { 5431 rbd_warn(NULL, "no monitor address(es) provided"); 5432 return -EINVAL; 5433 } 5434 mon_addrs = buf; 5435 mon_addrs_size = len + 1; 5436 buf += len; 5437 5438 ret = -EINVAL; 5439 options = dup_token(&buf, NULL); 5440 if (!options) 5441 return -ENOMEM; 5442 if (!*options) { 5443 rbd_warn(NULL, "no options provided"); 5444 goto out_err; 5445 } 5446 5447 spec = rbd_spec_alloc(); 5448 if (!spec) 5449 goto out_mem; 5450 5451 spec->pool_name = dup_token(&buf, NULL); 5452 if (!spec->pool_name) 5453 goto out_mem; 5454 if (!*spec->pool_name) { 5455 rbd_warn(NULL, "no pool name provided"); 5456 goto out_err; 5457 } 5458 5459 spec->image_name = dup_token(&buf, NULL); 5460 if (!spec->image_name) 5461 goto out_mem; 5462 if (!*spec->image_name) { 5463 rbd_warn(NULL, "no image name provided"); 5464 goto out_err; 5465 } 5466 5467 /* 5468 * Snapshot name is optional; default is to use "-" 5469 * (indicating the head/no snapshot). 5470 */ 5471 len = next_token(&buf); 5472 if (!len) { 5473 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */ 5474 len = sizeof (RBD_SNAP_HEAD_NAME) - 1; 5475 } else if (len > RBD_MAX_SNAP_NAME_LEN) { 5476 ret = -ENAMETOOLONG; 5477 goto out_err; 5478 } 5479 snap_name = kmemdup(buf, len + 1, GFP_KERNEL); 5480 if (!snap_name) 5481 goto out_mem; 5482 *(snap_name + len) = '\0'; 5483 spec->snap_name = snap_name; 5484 5485 /* Initialize all rbd options to the defaults */ 5486 5487 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL); 5488 if (!rbd_opts) 5489 goto out_mem; 5490 5491 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; 5492 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; 5493 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; 5494 rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; 5495 5496 copts = ceph_parse_options(options, mon_addrs, 5497 mon_addrs + mon_addrs_size - 1, 5498 parse_rbd_opts_token, rbd_opts); 5499 if (IS_ERR(copts)) { 5500 ret = PTR_ERR(copts); 5501 goto out_err; 5502 } 5503 kfree(options); 5504 5505 *ceph_opts = copts; 5506 *opts = rbd_opts; 5507 *rbd_spec = spec; 5508 5509 return 0; 5510 out_mem: 5511 ret = -ENOMEM; 5512 out_err: 5513 kfree(rbd_opts); 5514 rbd_spec_put(spec); 5515 kfree(options); 5516 5517 return ret; 5518 } 5519 5520 /* 5521 * Return pool id (>= 0) or a negative error code. 
5522 */
5523 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
5524 {
5525 struct ceph_options *opts = rbdc->client->options;
5526 u64 newest_epoch;
5527 int tries = 0;
5528 int ret;
5529 
5530 again:
5531 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
5532 if (ret == -ENOENT && tries++ < 1) {
5533 ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
5534 &newest_epoch);
5535 if (ret < 0)
5536 return ret;
5537 
5538 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5539 ceph_osdc_maybe_request_map(&rbdc->client->osdc);
5540 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
5541 newest_epoch,
5542 opts->mount_timeout);
5543 goto again;
5544 } else {
5545 /* the osdmap we have is new enough */
5546 return -ENOENT;
5547 }
5548 }
5549 
5550 return ret;
5551 }
5552 
5553 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
5554 {
5555 down_write(&rbd_dev->lock_rwsem);
5556 if (__rbd_is_lock_owner(rbd_dev))
5557 rbd_unlock(rbd_dev);
5558 up_write(&rbd_dev->lock_rwsem);
5559 }
5560 
5561 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5562 {
5563 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
5564 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
5565 return -EINVAL;
5566 }
5567 
5568 /* FIXME: "rbd map --exclusive" should be interruptible */
5569 down_read(&rbd_dev->lock_rwsem);
5570 rbd_wait_state_locked(rbd_dev);
5571 up_read(&rbd_dev->lock_rwsem);
5572 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
5573 rbd_warn(rbd_dev, "failed to acquire exclusive lock");
5574 return -EROFS;
5575 }
5576 
5577 return 0;
5578 }
5579 
5580 /*
5581 * An rbd format 2 image has a unique identifier, distinct from the
5582 * name given to it by the user. Internally, that identifier is
5583 * what's used to specify the names of objects related to the image.
5584 *
5585 * A special "rbd id" object is used to map an rbd image name to its
5586 * id. If that object doesn't exist, then there is no v2 rbd image
5587 * with the supplied name.
5588 *
5589 * This function will record the given rbd_dev's image_id field if
5590 * it can be determined, and in that case will return 0. If any
5591 * errors occur a negative errno will be returned and the rbd_dev's
5592 * image_id field will be unchanged (and should be NULL).
5593 */
5594 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5595 {
5596 int ret;
5597 size_t size;
5598 CEPH_DEFINE_OID_ONSTACK(oid);
5599 void *response;
5600 char *image_id;
5601 
5602 /*
5603 * When probing a parent image, the image id is already
5604 * known (and the image name likely is not). There's no
5605 * need to fetch the image id again in this case. We
5606 * do still need to set the image format though.
5607 */
5608 if (rbd_dev->spec->image_id) {
5609 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5610 
5611 return 0;
5612 }
5613 
5614 /*
5615 * First, see if the format 2 image id file exists, and if
5616 * so, get the image's persistent id from it.
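*
* (For example, a format 2 image named "foo" would have an id object
* named RBD_ID_PREFIX + "foo"; its contents are the image id, which
* in turn names the image's other objects.)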
5617 */
5618 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
5619 rbd_dev->spec->image_name);
5620 if (ret)
5621 return ret;
5622 
5623 dout("rbd id object name is %s\n", oid.name);
5624 
5625 /* Response will be an encoded string, which includes a length */
5626 
5627 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5628 response = kzalloc(size, GFP_NOIO);
5629 if (!response) {
5630 ret = -ENOMEM;
5631 goto out;
5632 }
5633 
5634 /* If it doesn't exist we'll assume it's a format 1 image */
5635 
5636 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5637 "get_id", NULL, 0,
5638 response, RBD_IMAGE_ID_LEN_MAX);
5639 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5640 if (ret == -ENOENT) {
5641 image_id = kstrdup("", GFP_KERNEL);
5642 ret = image_id ? 0 : -ENOMEM;
5643 if (!ret)
5644 rbd_dev->image_format = 1;
5645 } else if (ret >= 0) {
5646 void *p = response;
5647 
5648 image_id = ceph_extract_encoded_string(&p, p + ret,
5649 NULL, GFP_NOIO);
5650 ret = PTR_ERR_OR_ZERO(image_id);
5651 if (!ret)
5652 rbd_dev->image_format = 2;
5653 }
5654 
5655 if (!ret) {
5656 rbd_dev->spec->image_id = image_id;
5657 dout("image_id is %s\n", image_id);
5658 }
5659 out:
5660 kfree(response);
5661 ceph_oid_destroy(&oid);
5662 return ret;
5663 }
5664 
5665 /*
5666 * Undo whatever state changes are made by the v1 or v2 header info
5667 * call.
5668 */
5669 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5670 {
5671 struct rbd_image_header *header;
5672 
5673 rbd_dev_parent_put(rbd_dev);
5674 
5675 /* Free dynamic fields from the header, then zero it out */
5676 
5677 header = &rbd_dev->header;
5678 ceph_put_snap_context(header->snapc);
5679 kfree(header->snap_sizes);
5680 kfree(header->snap_names);
5681 kfree(header->object_prefix);
5682 memset(header, 0, sizeof (*header));
5683 }
5684 
5685 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5686 {
5687 int ret;
5688 
5689 ret = rbd_dev_v2_object_prefix(rbd_dev);
5690 if (ret)
5691 goto out_err;
5692 
5693 /*
5694 * Get and check the features for the image. Currently the
5695 * features are assumed to never change.
5696 */
5697 ret = rbd_dev_v2_features(rbd_dev);
5698 if (ret)
5699 goto out_err;
5700 
5701 /* If the image supports fancy striping, get its parameters */
5702 
5703 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5704 ret = rbd_dev_v2_striping_info(rbd_dev);
5705 if (ret < 0)
5706 goto out_err;
5707 }
5708 
5709 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
5710 ret = rbd_dev_v2_data_pool(rbd_dev);
5711 if (ret)
5712 goto out_err;
5713 }
5714 
5715 rbd_init_layout(rbd_dev);
5716 return 0;
5717 
5718 out_err:
5719 rbd_dev->header.features = 0;
5720 kfree(rbd_dev->header.object_prefix);
5721 rbd_dev->header.object_prefix = NULL;
5722 return ret;
5723 }
5724 
5725 /*
5726 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5727 * rbd_dev_image_probe() recursion depth, which means it's also the
5728 * length of the already discovered part of the parent chain.
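*
* (E.g. mapping a clone of a clone probes the mapped image with
* depth 0, its parent with depth 1 and the grandparent with depth 2;
* the probe fails once the depth would exceed
* RBD_MAX_PARENT_CHAIN_LEN.)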
5729 */ 5730 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) 5731 { 5732 struct rbd_device *parent = NULL; 5733 int ret; 5734 5735 if (!rbd_dev->parent_spec) 5736 return 0; 5737 5738 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) { 5739 pr_info("parent chain is too long (%d)\n", depth); 5740 ret = -EINVAL; 5741 goto out_err; 5742 } 5743 5744 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec); 5745 if (!parent) { 5746 ret = -ENOMEM; 5747 goto out_err; 5748 } 5749 5750 /* 5751 * Images related by parent/child relationships always share 5752 * rbd_client and spec/parent_spec, so bump their refcounts. 5753 */ 5754 __rbd_get_client(rbd_dev->rbd_client); 5755 rbd_spec_get(rbd_dev->parent_spec); 5756 5757 ret = rbd_dev_image_probe(parent, depth); 5758 if (ret < 0) 5759 goto out_err; 5760 5761 rbd_dev->parent = parent; 5762 atomic_set(&rbd_dev->parent_ref, 1); 5763 return 0; 5764 5765 out_err: 5766 rbd_dev_unparent(rbd_dev); 5767 rbd_dev_destroy(parent); 5768 return ret; 5769 } 5770 5771 static void rbd_dev_device_release(struct rbd_device *rbd_dev) 5772 { 5773 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5774 rbd_dev_mapping_clear(rbd_dev); 5775 rbd_free_disk(rbd_dev); 5776 if (!single_major) 5777 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5778 } 5779 5780 /* 5781 * rbd_dev->header_rwsem must be locked for write and will be unlocked 5782 * upon return. 5783 */ 5784 static int rbd_dev_device_setup(struct rbd_device *rbd_dev) 5785 { 5786 int ret; 5787 5788 /* Record our major and minor device numbers. */ 5789 5790 if (!single_major) { 5791 ret = register_blkdev(0, rbd_dev->name); 5792 if (ret < 0) 5793 goto err_out_unlock; 5794 5795 rbd_dev->major = ret; 5796 rbd_dev->minor = 0; 5797 } else { 5798 rbd_dev->major = rbd_major; 5799 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id); 5800 } 5801 5802 /* Set up the blkdev mapping. */ 5803 5804 ret = rbd_init_disk(rbd_dev); 5805 if (ret) 5806 goto err_out_blkdev; 5807 5808 ret = rbd_dev_mapping_set(rbd_dev); 5809 if (ret) 5810 goto err_out_disk; 5811 5812 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); 5813 set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only); 5814 5815 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id); 5816 if (ret) 5817 goto err_out_mapping; 5818 5819 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5820 up_write(&rbd_dev->header_rwsem); 5821 return 0; 5822 5823 err_out_mapping: 5824 rbd_dev_mapping_clear(rbd_dev); 5825 err_out_disk: 5826 rbd_free_disk(rbd_dev); 5827 err_out_blkdev: 5828 if (!single_major) 5829 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5830 err_out_unlock: 5831 up_write(&rbd_dev->header_rwsem); 5832 return ret; 5833 } 5834 5835 static int rbd_dev_header_name(struct rbd_device *rbd_dev) 5836 { 5837 struct rbd_spec *spec = rbd_dev->spec; 5838 int ret; 5839 5840 /* Record the header object name for this rbd image. 
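* Format 1 images use "<image_name>" + RBD_SUFFIX; format 2 images
* use RBD_HEADER_PREFIX + "<image_id>", matching the two branches
* below.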
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}

static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	if (rbd_dev->opts)
		rbd_unregister_watch(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto err_out_format;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
	     rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_unregister_watch(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}
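/*
 * Illustrative usage, not driver code (a sketch; see
 * Documentation/ABI/testing/sysfs-bus-rbd for the authoritative
 * interface): mapping image "foo" from pool "rbd" might look like
 *
 *   # echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" \
 *         > /sys/bus/rbd/add
 *
 * On success a new /dev/rbd<id> block device appears.
 */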
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0) {
		up_write(&rbd_dev->header_rwsem);
		goto err_out_rbd_dev;
	}

	/* If we are mapping a snapshot it must be marked read-only */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		rbd_dev->opts->read_only = true;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	if (rbd_dev->opts->exclusive) {
		rc = rbd_add_acquire_lock(rbd_dev);
		if (rc)
			goto err_out_device_setup;
	}

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	add_disk(rbd_dev->disk);
	/* see rbd_init_disk() */
	blk_put_queue(rbd_dev->disk->queue);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
err_out_device_setup:
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Walk down the chain to the parent that has no
		 * grandparent and remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
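/*
 * Illustrative usage, not driver code: unmapping device id 2 via
 * sysfs might look like
 *
 *   # echo 2 > /sys/bus/rbd/remove
 *
 * and appending the "force" option (parsed below) unmaps even while
 * the device is still open:
 *
 *   # echo "2 force" > /sys/bus/rbd/remove
 */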
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool already = false;
	bool force = false;
	int ret;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
						   &rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_set_queue_dying(rbd_dev->disk->queue);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
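/*
 * Illustrative usage, not driver code (assumes the single_major flag
 * used throughout this file is exposed as a module parameter):
 *
 *   # modprobe rbd
 *
 * or, to serve every mapping from one shared block major:
 *
 *   # modprobe rbd single_major=Y
 */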
6268 */ 6269 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0); 6270 if (!rbd_wq) { 6271 rc = -ENOMEM; 6272 goto err_out_slab; 6273 } 6274 6275 if (single_major) { 6276 rbd_major = register_blkdev(0, RBD_DRV_NAME); 6277 if (rbd_major < 0) { 6278 rc = rbd_major; 6279 goto err_out_wq; 6280 } 6281 } 6282 6283 rc = rbd_sysfs_init(); 6284 if (rc) 6285 goto err_out_blkdev; 6286 6287 if (single_major) 6288 pr_info("loaded (major %d)\n", rbd_major); 6289 else 6290 pr_info("loaded\n"); 6291 6292 return 0; 6293 6294 err_out_blkdev: 6295 if (single_major) 6296 unregister_blkdev(rbd_major, RBD_DRV_NAME); 6297 err_out_wq: 6298 destroy_workqueue(rbd_wq); 6299 err_out_slab: 6300 rbd_slab_exit(); 6301 return rc; 6302 } 6303 6304 static void __exit rbd_exit(void) 6305 { 6306 ida_destroy(&rbd_dev_id_ida); 6307 rbd_sysfs_cleanup(); 6308 if (single_major) 6309 unregister_blkdev(rbd_major, RBD_DRV_NAME); 6310 destroy_workqueue(rbd_wq); 6311 rbd_slab_exit(); 6312 } 6313 6314 module_init(rbd_init); 6315 module_exit(rbd_exit); 6316 6317 MODULE_AUTHOR("Alex Elder <elder@inktank.com>"); 6318 MODULE_AUTHOR("Sage Weil <sage@newdream.net>"); 6319 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>"); 6320 /* following authorship retained from original osdblk.c */ 6321 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>"); 6322 6323 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver"); 6324 MODULE_LICENSE("GPL"); 6325