/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
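/*
 * Together these helpers form a saturating reference count: a counter
 * pinned at 0 stays at 0, and underflow/overflow is reported as -EINVAL
 * instead of wrapping.  rbd_dev_parent_get()/rbd_dev_parent_put() below
 * use them for parent_ref.  Illustrative sketch:
 *
 *	if (!rbd_dev_parent_get(rbd_dev))
 *		return;			(parent already torn down)
 *	...
 *	rbd_dev_parent_put(rbd_dev);
 */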
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)
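/*
 * Bit 6 is deliberately absent: in userspace librbd it is the
 * journaling feature, which this kernel client does not implement.
 */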
#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};

#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)

enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};
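/*
 * Reads move from RBD_OBJ_READ_START to _OBJECT (read from the object
 * itself) and, for a layered image whose object does not exist yet
 * (-ENOENT within the parent overlap), on to _PARENT to read the data
 * from the parent image instead.
 */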
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *            . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
 *            .                 |                                    .
 *            .                 v                                    .
 *            .    RBD_OBJ_WRITE_READ_FROM_PARENT. . .               .
 *            .                 |                    .               .
 *            .                 v                    v (deep-copyup  .
 *  (image    .   RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC   .  not needed)  .
 * flattened) .                 |                    .               .
 *            .                 v                    .               .
 *            . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . .      (copyup  .
 *                              |                        not needed) .
 *                              v                                    .
 *                            done . . . . . . . . . . . . . . . . . .
 *                              ^
 *                              |
 *                     RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};

enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};

struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
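/*
 * Illustrative sketch: iterating an image request's object requests,
 * e.g. to dump the object extents:
 *
 *	struct rbd_obj_request *obj_req;
 *
 *	for_each_obj_request(img_req, obj_req)
 *		dout("objno %llu %llu~%llu\n", obj_req->ex.oe_objno,
 *		     obj_req->ex.oe_off, obj_req->ex.oe_len);
 */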
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64 size;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	int			acquire_err;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* rbd_dev_device_setup() ran */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_READONLY,	/* -o ro or snapshot */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
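/*
 * With single_major, all devices share one major and the minor number
 * encodes the device id: minor = dev_id << RBD_SINGLE_MAJOR_PART_SHIFT,
 * leaving 1 << 4 = 16 minors per device (whole disk plus 15 partitions).
 * For example, rbd3 gets minors 48-63.  See rbd_dev_id_to_minor() and
 * minor_to_rbd_dev_id() below.
 */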
489 */ 490 static bool single_major = true; 491 module_param(single_major, bool, 0444); 492 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)"); 493 494 static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count); 495 static ssize_t remove_store(const struct bus_type *bus, const char *buf, 496 size_t count); 497 static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf, 498 size_t count); 499 static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf, 500 size_t count); 501 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth); 502 503 static int rbd_dev_id_to_minor(int dev_id) 504 { 505 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT; 506 } 507 508 static int minor_to_rbd_dev_id(int minor) 509 { 510 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT; 511 } 512 513 static bool rbd_is_ro(struct rbd_device *rbd_dev) 514 { 515 return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags); 516 } 517 518 static bool rbd_is_snap(struct rbd_device *rbd_dev) 519 { 520 return rbd_dev->spec->snap_id != CEPH_NOSNAP; 521 } 522 523 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev) 524 { 525 lockdep_assert_held(&rbd_dev->lock_rwsem); 526 527 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED || 528 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING; 529 } 530 531 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev) 532 { 533 bool is_lock_owner; 534 535 down_read(&rbd_dev->lock_rwsem); 536 is_lock_owner = __rbd_is_lock_owner(rbd_dev); 537 up_read(&rbd_dev->lock_rwsem); 538 return is_lock_owner; 539 } 540 541 static ssize_t supported_features_show(const struct bus_type *bus, char *buf) 542 { 543 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED); 544 } 545 546 static BUS_ATTR_WO(add); 547 static BUS_ATTR_WO(remove); 548 static BUS_ATTR_WO(add_single_major); 549 static BUS_ATTR_WO(remove_single_major); 550 static BUS_ATTR_RO(supported_features); 551 552 static struct attribute *rbd_bus_attrs[] = { 553 &bus_attr_add.attr, 554 &bus_attr_remove.attr, 555 &bus_attr_add_single_major.attr, 556 &bus_attr_remove_single_major.attr, 557 &bus_attr_supported_features.attr, 558 NULL, 559 }; 560 561 static umode_t rbd_bus_is_visible(struct kobject *kobj, 562 struct attribute *attr, int index) 563 { 564 if (!single_major && 565 (attr == &bus_attr_add_single_major.attr || 566 attr == &bus_attr_remove_single_major.attr)) 567 return 0; 568 569 return attr->mode; 570 } 571 572 static const struct attribute_group rbd_bus_group = { 573 .attrs = rbd_bus_attrs, 574 .is_visible = rbd_bus_is_visible, 575 }; 576 __ATTRIBUTE_GROUPS(rbd_bus); 577 578 static struct bus_type rbd_bus_type = { 579 .name = "rbd", 580 .bus_groups = rbd_bus_groups, 581 }; 582 583 static void rbd_root_dev_release(struct device *dev) 584 { 585 } 586 587 static struct device rbd_root_dev = { 588 .init_name = "rbd", 589 .release = rbd_root_dev_release, 590 }; 591 592 static __printf(2, 3) 593 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) 
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);

/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}
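/*
 * Illustrative sketch: the state machines below initialize
 * pending.num_pending to the number of in-flight sub-requests and
 * funnel every completion through pending_result_dec().  Only the
 * final completion sees "true" and acts on the first nonzero result:
 *
 *	if (pending_result_dec(&obj_req->pending, &result))
 *		...			(last completion: act on result)
 */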
704 */ 705 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) 706 { 707 struct rbd_client *rbdc; 708 int ret = -ENOMEM; 709 710 dout("%s:\n", __func__); 711 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL); 712 if (!rbdc) 713 goto out_opt; 714 715 kref_init(&rbdc->kref); 716 INIT_LIST_HEAD(&rbdc->node); 717 718 rbdc->client = ceph_create_client(ceph_opts, rbdc); 719 if (IS_ERR(rbdc->client)) 720 goto out_rbdc; 721 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */ 722 723 ret = ceph_open_session(rbdc->client); 724 if (ret < 0) 725 goto out_client; 726 727 spin_lock(&rbd_client_list_lock); 728 list_add_tail(&rbdc->node, &rbd_client_list); 729 spin_unlock(&rbd_client_list_lock); 730 731 dout("%s: rbdc %p\n", __func__, rbdc); 732 733 return rbdc; 734 out_client: 735 ceph_destroy_client(rbdc->client); 736 out_rbdc: 737 kfree(rbdc); 738 out_opt: 739 if (ceph_opts) 740 ceph_destroy_options(ceph_opts); 741 dout("%s: error %d\n", __func__, ret); 742 743 return ERR_PTR(ret); 744 } 745 746 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc) 747 { 748 kref_get(&rbdc->kref); 749 750 return rbdc; 751 } 752 753 /* 754 * Find a ceph client with specific addr and configuration. If 755 * found, bump its reference count. 756 */ 757 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) 758 { 759 struct rbd_client *rbdc = NULL, *iter; 760 761 if (ceph_opts->flags & CEPH_OPT_NOSHARE) 762 return NULL; 763 764 spin_lock(&rbd_client_list_lock); 765 list_for_each_entry(iter, &rbd_client_list, node) { 766 if (!ceph_compare_options(ceph_opts, iter->client)) { 767 __rbd_get_client(iter); 768 769 rbdc = iter; 770 break; 771 } 772 } 773 spin_unlock(&rbd_client_list_lock); 774 775 return rbdc; 776 } 777 778 /* 779 * (Per device) rbd map options 780 */ 781 enum { 782 Opt_queue_depth, 783 Opt_alloc_size, 784 Opt_lock_timeout, 785 /* int args above */ 786 Opt_pool_ns, 787 Opt_compression_hint, 788 /* string args above */ 789 Opt_read_only, 790 Opt_read_write, 791 Opt_lock_on_read, 792 Opt_exclusive, 793 Opt_notrim, 794 }; 795 796 enum { 797 Opt_compression_hint_none, 798 Opt_compression_hint_compressible, 799 Opt_compression_hint_incompressible, 800 }; 801 802 static const struct constant_table rbd_param_compression_hint[] = { 803 {"none", Opt_compression_hint_none}, 804 {"compressible", Opt_compression_hint_compressible}, 805 {"incompressible", Opt_compression_hint_incompressible}, 806 {} 807 }; 808 809 static const struct fs_parameter_spec rbd_parameters[] = { 810 fsparam_u32 ("alloc_size", Opt_alloc_size), 811 fsparam_enum ("compression_hint", Opt_compression_hint, 812 rbd_param_compression_hint), 813 fsparam_flag ("exclusive", Opt_exclusive), 814 fsparam_flag ("lock_on_read", Opt_lock_on_read), 815 fsparam_u32 ("lock_timeout", Opt_lock_timeout), 816 fsparam_flag ("notrim", Opt_notrim), 817 fsparam_string ("_pool_ns", Opt_pool_ns), 818 fsparam_u32 ("queue_depth", Opt_queue_depth), 819 fsparam_flag ("read_only", Opt_read_only), 820 fsparam_flag ("read_write", Opt_read_write), 821 fsparam_flag ("ro", Opt_read_only), 822 fsparam_flag ("rw", Opt_read_write), 823 {} 824 }; 825 826 struct rbd_options { 827 int queue_depth; 828 int alloc_size; 829 unsigned long lock_timeout; 830 bool read_only; 831 bool lock_on_read; 832 bool exclusive; 833 bool trim; 834 835 u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */ 836 }; 837 838 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ 839 #define RBD_ALLOC_SIZE_DEFAULT (64 * 1024) 840 #define 
struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;

	u32 alloc_hint_flags;  /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_DEFAULT_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct rbd_parse_opts_ctx {
	struct rbd_spec		*spec;
	struct ceph_options	*copts;
	struct rbd_options	*opts;
};

static char *obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client.  Removes the client from rbd_client_list,
 * taking rbd_client_list_lock itself.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node.  If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

/*
 * Get a ceph client with specific addr and configuration; if one does
 * not exist, create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
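/*
 * Format 1 is the original ("legacy") image format, with a header
 * object named "<image_name>.rbd" and no image id; format 2 images
 * are identified by id, with a "rbd_header.<image_id>" header object,
 * and are the only format that supports the feature bits above.
 */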
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
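/*
 * Worked example: obj_order is the log2 of the object size, so the
 * rbd default of order 22 gives 1U << 22 = 4 MiB objects, and a
 * non-striped image (stripe_count == 1) is laid out as consecutive
 * 4 MiB objects.
 */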
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
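/*
 * Worked example: with snapc->snaps = { 12, 7, 3 } (descending,
 * newest first), rbd_dev_snap_index(rbd_dev, 7) returns 1 and looking
 * up 5 returns BAD_SNAP_INDEX; the reversed comparator is what lets
 * bsearch() work on the descending array.
 */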
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
			 u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		memzero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		memzero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		BUG();
	}
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					   struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					   struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}

static void rbd_osd_submit(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
	     __func__, osd_req, obj_req, obj_req->ex.oe_objno,
	     obj_req->ex.oe_off, obj_req->ex.oe_len);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}
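/*
 * Worked example with 4 MiB objects: an extent 0~4M is "entire" (and
 * therefore also a tail), while 3M~1M is only a tail; whole-object
 * writes can use CEPH_OSD_OP_WRITEFULL and tail discards can truncate
 * instead of zeroing (see truncate_or_zero_opcode() below).
 */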
/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
{
	if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
		dout("%s %p objno %llu discard\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	if (!obj_req->num_img_extents) {
		dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	if (rbd_obj_is_entire(obj_req) &&
	    !obj_req->img_request->snapc->num_snaps) {
		dout("%s %p objno %llu entire\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	/*
	 * Writes aren't allowed to return a data payload.  In some
	 * guarded write cases (e.g. stat + zero on an empty object)
	 * a stat response makes it through, but we don't care.
	 */
	if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
		result = 0;
	else
		result = osd_req->r_result;

	rbd_obj_handle_request(obj_req, result);
}

static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
	struct ceph_options *opt = rbd_dev->rbd_client->client->options;

	osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}
static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
			  struct ceph_snap_context *snapc, int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
	int ret;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			       rbd_dev->header.object_prefix,
			       obj_req->ex.oe_objno);
	if (ret)
		return ERR_PTR(ret);

	return req;
}

static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
					 num_ops);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	INIT_LIST_HEAD(&obj_request->osd_reqs);
	mutex_init(&obj_request->state_mutex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_request *osd_req;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	while (!list_empty(&obj_request->osd_reqs)) {
		osd_req = list_first_entry(&obj_request->osd_reqs,
				    struct ceph_osd_request, r_private_item);
		list_del_init(&osd_req->r_private_item);
		ceph_osdc_put_request(osd_req);
	}

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		BUG();
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

static void rbd_img_request_init(struct rbd_img_request *img_request,
				 struct rbd_device *rbd_dev,
				 enum obj_operation_type op_type)
{
	memset(img_request, 0, sizeof(*img_request));

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;

	INIT_LIST_HEAD(&img_request->lock_item);
	INIT_LIST_HEAD(&img_request->object_extents);
	mutex_init(&img_request->state_mutex);
}

static void rbd_img_capture_header(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;

	lockdep_assert_held(&rbd_dev->header_rwsem);

	if (rbd_img_is_write(img_req))
		img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
	else
		img_req->snap_id = rbd_dev->spec->snap_id;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_req);
}

static void rbd_img_request_destroy(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);

	WARN_ON(!list_empty(&img_request->lock_item));
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request))
		rbd_dev_parent_put(img_request->rbd_dev);

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	if (test_bit(IMG_REQ_CHILD, &img_request->flags))
		kmem_cache_free(rbd_img_request_cache, img_request);
}

#define BITS_PER_OBJ	2
#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)

static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
				   u64 *index, u8 *shift)
{
	u32 off;

	rbd_assert(objno < rbd_dev->object_map_size);
	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}

static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u64 index;
	u8 shift;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}

static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
	u64 index;
	u8 shift;
	u8 *p;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	rbd_assert(!(val & ~OBJ_MASK));

	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	p = &rbd_dev->object_map[index];
	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}
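/*
 * Worked example: the object map packs four 2-bit states per byte,
 * most significant bits first.  For objno 5: index = 5 / 4 = 1,
 * off = 1, shift = (4 - 1 - 1) * 2 = 4, so object 5's state lives in
 * bits 5:4 of object_map[1].
 */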
static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	spin_unlock(&rbd_dev->object_map_lock);
	return state;
}

static bool use_object_map(struct rbd_device *rbd_dev)
{
	/*
	 * An image mapped read-only can't use the object map -- it isn't
	 * loaded because the header lock isn't acquired.  Someone else can
	 * write to the image and update the object map behind our back.
	 *
	 * A snapshot can't be written to, so using the object map is always
	 * safe.
	 */
	if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
		return false;

	return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
		!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
}

static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	/* fall back to default logic if object map is disabled or invalid */
	if (!use_object_map(rbd_dev))
		return true;

	state = rbd_object_map_get(rbd_dev, objno);
	return state != OBJECT_NONEXISTENT;
}

static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
				struct ceph_object_id *oid)
{
	if (snap_id == CEPH_NOSNAP)
		ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id);
	else
		ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id, snap_id);
}

static int rbd_object_map_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	u8 lock_type;
	char *lock_tag;
	struct ceph_locker *lockers;
	u32 num_lockers;
	bool broke_lock = false;
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

again:
	ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			    CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
	if (ret != -EBUSY || broke_lock) {
		if (ret == -EEXIST)
			ret = 0; /* already locked by myself */
		if (ret)
			rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
		return ret;
	}

	ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
				 RBD_LOCK_NAME, &lock_type, &lock_tag,
				 &lockers, &num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
		return ret;
	}

	kfree(lock_tag);
	if (num_lockers == 0)
		goto again;

	rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
		 ENTITY_NAME(lockers[0].id.name));

	ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
				  RBD_LOCK_NAME, lockers[0].id.cookie,
				  &lockers[0].id.name);
	ceph_free_lockers(lockers, num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
		return ret;
	}

	broke_lock = true;
	goto again;
}

static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

	ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			      "");
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
}
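/*
 * The OBJECT_* state values tested above and in update_needed() below
 * come from rbd_types.h: OBJECT_NONEXISTENT, OBJECT_EXISTS,
 * OBJECT_PENDING and OBJECT_EXISTS_CLEAN, matching the userspace
 * librbd object map encoding.
 */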
static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
{
	u8 struct_v;
	u32 struct_len;
	u32 header_len;
	void *header_end;
	int ret;

	ceph_decode_32_safe(p, end, header_len, e_inval);
	header_end = *p + header_len;

	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, *object_map_size, e_inval);

	*p = header_end;
	return 0;

e_inval:
	return -EINVAL;
}

static int __rbd_object_map_load(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	struct page **pages;
	void *p, *end;
	size_t reply_len;
	u64 num_objects;
	u64 object_map_bytes;
	u64 object_map_size;
	int num_pages;
	int ret;

	rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);

	num_objects = ceph_get_num_objects(&rbd_dev->layout,
					   rbd_dev->mapping.size);
	object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
					    BITS_PER_BYTE);
	num_pages = calc_pages_for(0, object_map_bytes) + 1;
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	reply_len = num_pages * PAGE_SIZE;
	rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
	ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
			     "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
			     NULL, 0, pages, &reply_len);
	if (ret)
		goto out;

	p = page_address(pages[0]);
	end = p + min(reply_len, (size_t)PAGE_SIZE);
	ret = decode_object_map_header(&p, end, &object_map_size);
	if (ret)
		goto out;

	if (object_map_size != num_objects) {
		rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
			 object_map_size, num_objects);
		ret = -EINVAL;
		goto out;
	}

	if (offset_in_page(p) + object_map_bytes > reply_len) {
		ret = -EINVAL;
		goto out;
	}

	rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
	if (!rbd_dev->object_map) {
		ret = -ENOMEM;
		goto out;
	}

	rbd_dev->object_map_size = object_map_size;
	ceph_copy_from_page_vector(pages, rbd_dev->object_map,
				   offset_in_page(p), object_map_bytes);

out:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}

static void rbd_object_map_free(struct rbd_device *rbd_dev)
{
	kvfree(rbd_dev->object_map);
	rbd_dev->object_map = NULL;
	rbd_dev->object_map_size = 0;
}

static int rbd_object_map_load(struct rbd_device *rbd_dev)
{
	int ret;

	ret = __rbd_object_map_load(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_v2_get_flags(rbd_dev);
	if (ret) {
		rbd_object_map_free(rbd_dev);
		return ret;
	}

	if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
		rbd_warn(rbd_dev, "object map is invalid");

	return 0;
}

static int rbd_object_map_open(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_object_map_lock(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_object_map_load(rbd_dev);
	if (ret) {
		rbd_object_map_unlock(rbd_dev);
		return ret;
	}

	return 0;
}

static void rbd_object_map_close(struct rbd_device *rbd_dev)
{
	rbd_object_map_free(rbd_dev);
	rbd_object_map_unlock(rbd_dev);
}
/*
 * This function needs snap_id (or more precisely just something to
 * distinguish between HEAD and snapshot object maps), new_state and
 * current_state that were passed to rbd_object_map_update().
 *
 * To avoid allocating and stashing a context we piggyback on the OSD
 * request.  A HEAD update has two ops (assert_locked).  For new_state
 * and current_state we decode our own object_map_update op, encoded in
 * rbd_cls_object_map_update().
 */
static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
					struct ceph_osd_request *osd_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_data *osd_data;
	u64 objno;
	u8 state, new_state, current_state;
	bool has_current_state;
	void *p;

	if (osd_req->r_result)
		return osd_req->r_result;

	/*
	 * Nothing to do for a snapshot object map.
	 */
	if (osd_req->r_num_ops == 1)
		return 0;

	/*
	 * Update in-memory HEAD object map.
	 */
	rbd_assert(osd_req->r_num_ops == 2);
	osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
	rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);

	p = page_address(osd_data->pages[0]);
	objno = ceph_decode_64(&p);
	rbd_assert(objno == obj_req->ex.oe_objno);
	rbd_assert(ceph_decode_64(&p) == objno + 1);
	new_state = ceph_decode_8(&p);
	has_current_state = ceph_decode_8(&p);
	if (has_current_state)
		current_state = ceph_decode_8(&p);

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	if (!has_current_state || current_state == state ||
	    (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
		__rbd_object_map_set(rbd_dev, objno, new_state);
	spin_unlock(&rbd_dev->object_map_lock);

	return 0;
}

static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	result = rbd_object_map_update_finish(obj_req, osd_req);
	rbd_obj_handle_request(obj_req, result);
}

static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
{
	u8 state = rbd_object_map_get(rbd_dev, objno);

	if (state == new_state ||
	    (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
	    (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
		return false;

	return true;
}

static int rbd_cls_object_map_update(struct ceph_osd_request *req,
				     int which, u64 objno, u8 new_state,
				     const u8 *current_state)
{
	struct page **pages;
	void *p, *start;
	int ret;

	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
	if (ret)
		return ret;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	p = start = page_address(pages[0]);
	ceph_encode_64(&p, objno);
	ceph_encode_64(&p, objno + 1);
	ceph_encode_8(&p, new_state);
	if (current_state) {
		ceph_encode_8(&p, 1);
		ceph_encode_8(&p, *current_state);
	} else {
		ceph_encode_8(&p, 0);
	}

	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
					  false, true);
	return 0;
}
object map update isn't needed 2046 * <0 - error 2047 */ 2048 static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id, 2049 u8 new_state, const u8 *current_state) 2050 { 2051 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2052 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2053 struct ceph_osd_request *req; 2054 int num_ops = 1; 2055 int which = 0; 2056 int ret; 2057 2058 if (snap_id == CEPH_NOSNAP) { 2059 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state)) 2060 return 1; 2061 2062 num_ops++; /* assert_locked */ 2063 } 2064 2065 req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO); 2066 if (!req) 2067 return -ENOMEM; 2068 2069 list_add_tail(&req->r_private_item, &obj_req->osd_reqs); 2070 req->r_callback = rbd_object_map_callback; 2071 req->r_priv = obj_req; 2072 2073 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid); 2074 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc); 2075 req->r_flags = CEPH_OSD_FLAG_WRITE; 2076 ktime_get_real_ts64(&req->r_mtime); 2077 2078 if (snap_id == CEPH_NOSNAP) { 2079 /* 2080 * Protect against possible race conditions during lock 2081 * ownership transitions. 2082 */ 2083 ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME, 2084 CEPH_CLS_LOCK_EXCLUSIVE, "", ""); 2085 if (ret) 2086 return ret; 2087 } 2088 2089 ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno, 2090 new_state, current_state); 2091 if (ret) 2092 return ret; 2093 2094 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 2095 if (ret) 2096 return ret; 2097 2098 ceph_osdc_start_request(osdc, req); 2099 return 0; 2100 } 2101 2102 static void prune_extents(struct ceph_file_extent *img_extents, 2103 u32 *num_img_extents, u64 overlap) 2104 { 2105 u32 cnt = *num_img_extents; 2106 2107 /* drop extents completely beyond the overlap */ 2108 while (cnt && img_extents[cnt - 1].fe_off >= overlap) 2109 cnt--; 2110 2111 if (cnt) { 2112 struct ceph_file_extent *ex = &img_extents[cnt - 1]; 2113 2114 /* trim final overlapping extent */ 2115 if (ex->fe_off + ex->fe_len > overlap) 2116 ex->fe_len = overlap - ex->fe_off; 2117 } 2118 2119 *num_img_extents = cnt; 2120 } 2121 2122 /* 2123 * Determine the byte range(s) covered by either just the object extent 2124 * or the entire object in the parent image. 2125 */ 2126 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req, 2127 bool entire) 2128 { 2129 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2130 int ret; 2131 2132 if (!rbd_dev->parent_overlap) 2133 return 0; 2134 2135 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno, 2136 entire ? 0 : obj_req->ex.oe_off, 2137 entire ? 
rbd_dev->layout.object_size : 2138 obj_req->ex.oe_len, 2139 &obj_req->img_extents, 2140 &obj_req->num_img_extents); 2141 if (ret) 2142 return ret; 2143 2144 prune_extents(obj_req->img_extents, &obj_req->num_img_extents, 2145 rbd_dev->parent_overlap); 2146 return 0; 2147 } 2148 2149 static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which) 2150 { 2151 struct rbd_obj_request *obj_req = osd_req->r_priv; 2152 2153 switch (obj_req->img_request->data_type) { 2154 case OBJ_REQUEST_BIO: 2155 osd_req_op_extent_osd_data_bio(osd_req, which, 2156 &obj_req->bio_pos, 2157 obj_req->ex.oe_len); 2158 break; 2159 case OBJ_REQUEST_BVECS: 2160 case OBJ_REQUEST_OWN_BVECS: 2161 rbd_assert(obj_req->bvec_pos.iter.bi_size == 2162 obj_req->ex.oe_len); 2163 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count); 2164 osd_req_op_extent_osd_data_bvec_pos(osd_req, which, 2165 &obj_req->bvec_pos); 2166 break; 2167 default: 2168 BUG(); 2169 } 2170 } 2171 2172 static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which) 2173 { 2174 struct page **pages; 2175 2176 /* 2177 * The response data for a STAT call consists of: 2178 * le64 length; 2179 * struct { 2180 * le32 tv_sec; 2181 * le32 tv_nsec; 2182 * } mtime; 2183 */ 2184 pages = ceph_alloc_page_vector(1, GFP_NOIO); 2185 if (IS_ERR(pages)) 2186 return PTR_ERR(pages); 2187 2188 osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0); 2189 osd_req_op_raw_data_in_pages(osd_req, which, pages, 2190 8 + sizeof(struct ceph_timespec), 2191 0, false, true); 2192 return 0; 2193 } 2194 2195 static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which, 2196 u32 bytes) 2197 { 2198 struct rbd_obj_request *obj_req = osd_req->r_priv; 2199 int ret; 2200 2201 ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup"); 2202 if (ret) 2203 return ret; 2204 2205 osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs, 2206 obj_req->copyup_bvec_count, bytes); 2207 return 0; 2208 } 2209 2210 static int rbd_obj_init_read(struct rbd_obj_request *obj_req) 2211 { 2212 obj_req->read_state = RBD_OBJ_READ_START; 2213 return 0; 2214 } 2215 2216 static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req, 2217 int which) 2218 { 2219 struct rbd_obj_request *obj_req = osd_req->r_priv; 2220 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2221 u16 opcode; 2222 2223 if (!use_object_map(rbd_dev) || 2224 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) { 2225 osd_req_op_alloc_hint_init(osd_req, which++, 2226 rbd_dev->layout.object_size, 2227 rbd_dev->layout.object_size, 2228 rbd_dev->opts->alloc_hint_flags); 2229 } 2230 2231 if (rbd_obj_is_entire(obj_req)) 2232 opcode = CEPH_OSD_OP_WRITEFULL; 2233 else 2234 opcode = CEPH_OSD_OP_WRITE; 2235 2236 osd_req_op_extent_init(osd_req, which, opcode, 2237 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0); 2238 rbd_osd_setup_data(osd_req, which); 2239 } 2240 2241 static int rbd_obj_init_write(struct rbd_obj_request *obj_req) 2242 { 2243 int ret; 2244 2245 /* reverse map the entire object onto the parent */ 2246 ret = rbd_obj_calc_img_extents(obj_req, true); 2247 if (ret) 2248 return ret; 2249 2250 obj_req->write_state = RBD_OBJ_WRITE_START; 2251 return 0; 2252 } 2253 2254 static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req) 2255 { 2256 return rbd_obj_is_tail(obj_req) ? 
CEPH_OSD_OP_TRUNCATE : 2257 CEPH_OSD_OP_ZERO; 2258 } 2259 2260 static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req, 2261 int which) 2262 { 2263 struct rbd_obj_request *obj_req = osd_req->r_priv; 2264 2265 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) { 2266 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION); 2267 osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0); 2268 } else { 2269 osd_req_op_extent_init(osd_req, which, 2270 truncate_or_zero_opcode(obj_req), 2271 obj_req->ex.oe_off, obj_req->ex.oe_len, 2272 0, 0); 2273 } 2274 } 2275 2276 static int rbd_obj_init_discard(struct rbd_obj_request *obj_req) 2277 { 2278 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2279 u64 off, next_off; 2280 int ret; 2281 2282 /* 2283 * Align the range to alloc_size boundary and punt on discards 2284 * that are too small to free up any space. 2285 * 2286 * alloc_size == object_size && is_tail() is a special case for 2287 * filestore with filestore_punch_hole = false, needed to allow 2288 * truncate (in addition to delete). 2289 */ 2290 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size || 2291 !rbd_obj_is_tail(obj_req)) { 2292 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size); 2293 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len, 2294 rbd_dev->opts->alloc_size); 2295 if (off >= next_off) 2296 return 1; 2297 2298 dout("%s %p %llu~%llu -> %llu~%llu\n", __func__, 2299 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len, 2300 off, next_off - off); 2301 obj_req->ex.oe_off = off; 2302 obj_req->ex.oe_len = next_off - off; 2303 } 2304 2305 /* reverse map the entire object onto the parent */ 2306 ret = rbd_obj_calc_img_extents(obj_req, true); 2307 if (ret) 2308 return ret; 2309 2310 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT; 2311 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) 2312 obj_req->flags |= RBD_OBJ_FLAG_DELETION; 2313 2314 obj_req->write_state = RBD_OBJ_WRITE_START; 2315 return 0; 2316 } 2317 2318 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req, 2319 int which) 2320 { 2321 struct rbd_obj_request *obj_req = osd_req->r_priv; 2322 u16 opcode; 2323 2324 if (rbd_obj_is_entire(obj_req)) { 2325 if (obj_req->num_img_extents) { 2326 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)) 2327 osd_req_op_init(osd_req, which++, 2328 CEPH_OSD_OP_CREATE, 0); 2329 opcode = CEPH_OSD_OP_TRUNCATE; 2330 } else { 2331 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION); 2332 osd_req_op_init(osd_req, which++, 2333 CEPH_OSD_OP_DELETE, 0); 2334 opcode = 0; 2335 } 2336 } else { 2337 opcode = truncate_or_zero_opcode(obj_req); 2338 } 2339 2340 if (opcode) 2341 osd_req_op_extent_init(osd_req, which, opcode, 2342 obj_req->ex.oe_off, obj_req->ex.oe_len, 2343 0, 0); 2344 } 2345 2346 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req) 2347 { 2348 int ret; 2349 2350 /* reverse map the entire object onto the parent */ 2351 ret = rbd_obj_calc_img_extents(obj_req, true); 2352 if (ret) 2353 return ret; 2354 2355 if (!obj_req->num_img_extents) { 2356 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT; 2357 if (rbd_obj_is_entire(obj_req)) 2358 obj_req->flags |= RBD_OBJ_FLAG_DELETION; 2359 } 2360 2361 obj_req->write_state = RBD_OBJ_WRITE_START; 2362 return 0; 2363 } 2364 2365 static int count_write_ops(struct rbd_obj_request *obj_req) 2366 { 2367 struct rbd_img_request *img_req = obj_req->img_request; 2368 2369 switch (img_req->op_type) { 2370 case OBJ_OP_WRITE: 2371 if (!use_object_map(img_req->rbd_dev) 
|| 2372 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) 2373 return 2; /* setallochint + write/writefull */ 2374 2375 return 1; /* write/writefull */ 2376 case OBJ_OP_DISCARD: 2377 return 1; /* delete/truncate/zero */ 2378 case OBJ_OP_ZEROOUT: 2379 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents && 2380 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)) 2381 return 2; /* create + truncate */ 2382 2383 return 1; /* delete/truncate/zero */ 2384 default: 2385 BUG(); 2386 } 2387 } 2388 2389 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req, 2390 int which) 2391 { 2392 struct rbd_obj_request *obj_req = osd_req->r_priv; 2393 2394 switch (obj_req->img_request->op_type) { 2395 case OBJ_OP_WRITE: 2396 __rbd_osd_setup_write_ops(osd_req, which); 2397 break; 2398 case OBJ_OP_DISCARD: 2399 __rbd_osd_setup_discard_ops(osd_req, which); 2400 break; 2401 case OBJ_OP_ZEROOUT: 2402 __rbd_osd_setup_zeroout_ops(osd_req, which); 2403 break; 2404 default: 2405 BUG(); 2406 } 2407 } 2408 2409 /* 2410 * Prune the list of object requests (adjust offset and/or length, drop 2411 * redundant requests). Prepare object request state machines and image 2412 * request state machine for execution. 2413 */ 2414 static int __rbd_img_fill_request(struct rbd_img_request *img_req) 2415 { 2416 struct rbd_obj_request *obj_req, *next_obj_req; 2417 int ret; 2418 2419 for_each_obj_request_safe(img_req, obj_req, next_obj_req) { 2420 switch (img_req->op_type) { 2421 case OBJ_OP_READ: 2422 ret = rbd_obj_init_read(obj_req); 2423 break; 2424 case OBJ_OP_WRITE: 2425 ret = rbd_obj_init_write(obj_req); 2426 break; 2427 case OBJ_OP_DISCARD: 2428 ret = rbd_obj_init_discard(obj_req); 2429 break; 2430 case OBJ_OP_ZEROOUT: 2431 ret = rbd_obj_init_zeroout(obj_req); 2432 break; 2433 default: 2434 BUG(); 2435 } 2436 if (ret < 0) 2437 return ret; 2438 if (ret > 0) { 2439 rbd_img_obj_request_del(img_req, obj_req); 2440 continue; 2441 } 2442 } 2443 2444 img_req->state = RBD_IMG_START; 2445 return 0; 2446 } 2447 2448 union rbd_img_fill_iter { 2449 struct ceph_bio_iter bio_iter; 2450 struct ceph_bvec_iter bvec_iter; 2451 }; 2452 2453 struct rbd_img_fill_ctx { 2454 enum obj_request_type pos_type; 2455 union rbd_img_fill_iter *pos; 2456 union rbd_img_fill_iter iter; 2457 ceph_object_extent_fn_t set_pos_fn; 2458 ceph_object_extent_fn_t count_fn; 2459 ceph_object_extent_fn_t copy_fn; 2460 }; 2461 2462 static struct ceph_object_extent *alloc_object_extent(void *arg) 2463 { 2464 struct rbd_img_request *img_req = arg; 2465 struct rbd_obj_request *obj_req; 2466 2467 obj_req = rbd_obj_request_create(); 2468 if (!obj_req) 2469 return NULL; 2470 2471 rbd_img_obj_request_add(img_req, obj_req); 2472 return &obj_req->ex; 2473 } 2474 2475 /* 2476 * While su != os && sc == 1 is technically not fancy (it's the same 2477 * layout as su == os && sc == 1), we can't use the nocopy path for it 2478 * because ->set_pos_fn() should be called only once per object. 2479 * ceph_file_to_extents() invokes action_fn once per stripe unit, so 2480 * treat su != os && sc == 1 as fancy. 
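 *
 * Illustrative example (hypothetical sizes, not from the driver): with
 * object_size = 4M, stripe_unit = 1M and stripe_count = 1, a single 4M
 * object extent is assembled from four 1M stripe-unit invocations of
 * action_fn, so the nocopy path's set_pos_fn would run four times on
 * the same object request -- hence the copy path must be taken.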
2481 */ 2482 static bool rbd_layout_is_fancy(struct ceph_file_layout *l) 2483 { 2484 return l->stripe_unit != l->object_size; 2485 } 2486 2487 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req, 2488 struct ceph_file_extent *img_extents, 2489 u32 num_img_extents, 2490 struct rbd_img_fill_ctx *fctx) 2491 { 2492 u32 i; 2493 int ret; 2494 2495 img_req->data_type = fctx->pos_type; 2496 2497 /* 2498 * Create object requests and set each object request's starting 2499 * position in the provided bio (list) or bio_vec array. 2500 */ 2501 fctx->iter = *fctx->pos; 2502 for (i = 0; i < num_img_extents; i++) { 2503 ret = ceph_file_to_extents(&img_req->rbd_dev->layout, 2504 img_extents[i].fe_off, 2505 img_extents[i].fe_len, 2506 &img_req->object_extents, 2507 alloc_object_extent, img_req, 2508 fctx->set_pos_fn, &fctx->iter); 2509 if (ret) 2510 return ret; 2511 } 2512 2513 return __rbd_img_fill_request(img_req); 2514 } 2515 2516 /* 2517 * Map a list of image extents to a list of object extents, create the 2518 * corresponding object requests (normally each to a different object, 2519 * but not always) and add them to @img_req. For each object request, 2520 * set up its data descriptor to point to the corresponding chunk(s) of 2521 * @fctx->pos data buffer. 2522 * 2523 * Because ceph_file_to_extents() will merge adjacent object extents 2524 * together, each object request's data descriptor may point to multiple 2525 * different chunks of @fctx->pos data buffer. 2526 * 2527 * @fctx->pos data buffer is assumed to be large enough. 2528 */ 2529 static int rbd_img_fill_request(struct rbd_img_request *img_req, 2530 struct ceph_file_extent *img_extents, 2531 u32 num_img_extents, 2532 struct rbd_img_fill_ctx *fctx) 2533 { 2534 struct rbd_device *rbd_dev = img_req->rbd_dev; 2535 struct rbd_obj_request *obj_req; 2536 u32 i; 2537 int ret; 2538 2539 if (fctx->pos_type == OBJ_REQUEST_NODATA || 2540 !rbd_layout_is_fancy(&rbd_dev->layout)) 2541 return rbd_img_fill_request_nocopy(img_req, img_extents, 2542 num_img_extents, fctx); 2543 2544 img_req->data_type = OBJ_REQUEST_OWN_BVECS; 2545 2546 /* 2547 * Create object requests and determine ->bvec_count for each object 2548 * request. Note that ->bvec_count sum over all object requests may 2549 * be greater than the number of bio_vecs in the provided bio (list) 2550 * or bio_vec array because when mapped, those bio_vecs can straddle 2551 * stripe unit boundaries. 2552 */ 2553 fctx->iter = *fctx->pos; 2554 for (i = 0; i < num_img_extents; i++) { 2555 ret = ceph_file_to_extents(&rbd_dev->layout, 2556 img_extents[i].fe_off, 2557 img_extents[i].fe_len, 2558 &img_req->object_extents, 2559 alloc_object_extent, img_req, 2560 fctx->count_fn, &fctx->iter); 2561 if (ret) 2562 return ret; 2563 } 2564 2565 for_each_obj_request(img_req, obj_req) { 2566 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count, 2567 sizeof(*obj_req->bvec_pos.bvecs), 2568 GFP_NOIO); 2569 if (!obj_req->bvec_pos.bvecs) 2570 return -ENOMEM; 2571 } 2572 2573 /* 2574 * Fill in each object request's private bio_vec array, splitting and 2575 * rearranging the provided bio_vecs in stripe unit chunks as needed. 
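 *
 * For instance (illustrative): an 8K source bio_vec that crosses a 4K
 * stripe unit boundary is recorded as two 4K entries, possibly in two
 * different object requests' bvec arrays -- which is why ->bvec_count
 * had to be computed per object request in the counting pass above.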
2576 */ 2577 fctx->iter = *fctx->pos; 2578 for (i = 0; i < num_img_extents; i++) { 2579 ret = ceph_iterate_extents(&rbd_dev->layout, 2580 img_extents[i].fe_off, 2581 img_extents[i].fe_len, 2582 &img_req->object_extents, 2583 fctx->copy_fn, &fctx->iter); 2584 if (ret) 2585 return ret; 2586 } 2587 2588 return __rbd_img_fill_request(img_req); 2589 } 2590 2591 static int rbd_img_fill_nodata(struct rbd_img_request *img_req, 2592 u64 off, u64 len) 2593 { 2594 struct ceph_file_extent ex = { off, len }; 2595 union rbd_img_fill_iter dummy = {}; 2596 struct rbd_img_fill_ctx fctx = { 2597 .pos_type = OBJ_REQUEST_NODATA, 2598 .pos = &dummy, 2599 }; 2600 2601 return rbd_img_fill_request(img_req, &ex, 1, &fctx); 2602 } 2603 2604 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg) 2605 { 2606 struct rbd_obj_request *obj_req = 2607 container_of(ex, struct rbd_obj_request, ex); 2608 struct ceph_bio_iter *it = arg; 2609 2610 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes); 2611 obj_req->bio_pos = *it; 2612 ceph_bio_iter_advance(it, bytes); 2613 } 2614 2615 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg) 2616 { 2617 struct rbd_obj_request *obj_req = 2618 container_of(ex, struct rbd_obj_request, ex); 2619 struct ceph_bio_iter *it = arg; 2620 2621 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes); 2622 ceph_bio_iter_advance_step(it, bytes, ({ 2623 obj_req->bvec_count++; 2624 })); 2625 2626 } 2627 2628 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg) 2629 { 2630 struct rbd_obj_request *obj_req = 2631 container_of(ex, struct rbd_obj_request, ex); 2632 struct ceph_bio_iter *it = arg; 2633 2634 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes); 2635 ceph_bio_iter_advance_step(it, bytes, ({ 2636 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv; 2637 obj_req->bvec_pos.iter.bi_size += bv.bv_len; 2638 })); 2639 } 2640 2641 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req, 2642 struct ceph_file_extent *img_extents, 2643 u32 num_img_extents, 2644 struct ceph_bio_iter *bio_pos) 2645 { 2646 struct rbd_img_fill_ctx fctx = { 2647 .pos_type = OBJ_REQUEST_BIO, 2648 .pos = (union rbd_img_fill_iter *)bio_pos, 2649 .set_pos_fn = set_bio_pos, 2650 .count_fn = count_bio_bvecs, 2651 .copy_fn = copy_bio_bvecs, 2652 }; 2653 2654 return rbd_img_fill_request(img_req, img_extents, num_img_extents, 2655 &fctx); 2656 } 2657 2658 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req, 2659 u64 off, u64 len, struct bio *bio) 2660 { 2661 struct ceph_file_extent ex = { off, len }; 2662 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter }; 2663 2664 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it); 2665 } 2666 2667 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg) 2668 { 2669 struct rbd_obj_request *obj_req = 2670 container_of(ex, struct rbd_obj_request, ex); 2671 struct ceph_bvec_iter *it = arg; 2672 2673 obj_req->bvec_pos = *it; 2674 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes); 2675 ceph_bvec_iter_advance(it, bytes); 2676 } 2677 2678 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg) 2679 { 2680 struct rbd_obj_request *obj_req = 2681 container_of(ex, struct rbd_obj_request, ex); 2682 struct ceph_bvec_iter *it = arg; 2683 2684 ceph_bvec_iter_advance_step(it, bytes, ({ 2685 obj_req->bvec_count++; 2686 })); 2687 } 2688 2689 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg) 2690 { 2691 
struct rbd_obj_request *obj_req = 2692 container_of(ex, struct rbd_obj_request, ex); 2693 struct ceph_bvec_iter *it = arg; 2694 2695 ceph_bvec_iter_advance_step(it, bytes, ({ 2696 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv; 2697 obj_req->bvec_pos.iter.bi_size += bv.bv_len; 2698 })); 2699 } 2700 2701 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req, 2702 struct ceph_file_extent *img_extents, 2703 u32 num_img_extents, 2704 struct ceph_bvec_iter *bvec_pos) 2705 { 2706 struct rbd_img_fill_ctx fctx = { 2707 .pos_type = OBJ_REQUEST_BVECS, 2708 .pos = (union rbd_img_fill_iter *)bvec_pos, 2709 .set_pos_fn = set_bvec_pos, 2710 .count_fn = count_bvecs, 2711 .copy_fn = copy_bvecs, 2712 }; 2713 2714 return rbd_img_fill_request(img_req, img_extents, num_img_extents, 2715 &fctx); 2716 } 2717 2718 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req, 2719 struct ceph_file_extent *img_extents, 2720 u32 num_img_extents, 2721 struct bio_vec *bvecs) 2722 { 2723 struct ceph_bvec_iter it = { 2724 .bvecs = bvecs, 2725 .iter = { .bi_size = ceph_file_extents_bytes(img_extents, 2726 num_img_extents) }, 2727 }; 2728 2729 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents, 2730 &it); 2731 } 2732 2733 static void rbd_img_handle_request_work(struct work_struct *work) 2734 { 2735 struct rbd_img_request *img_req = 2736 container_of(work, struct rbd_img_request, work); 2737 2738 rbd_img_handle_request(img_req, img_req->work_result); 2739 } 2740 2741 static void rbd_img_schedule(struct rbd_img_request *img_req, int result) 2742 { 2743 INIT_WORK(&img_req->work, rbd_img_handle_request_work); 2744 img_req->work_result = result; 2745 queue_work(rbd_wq, &img_req->work); 2746 } 2747 2748 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req) 2749 { 2750 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2751 2752 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) { 2753 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST; 2754 return true; 2755 } 2756 2757 dout("%s %p objno %llu assuming dne\n", __func__, obj_req, 2758 obj_req->ex.oe_objno); 2759 return false; 2760 } 2761 2762 static int rbd_obj_read_object(struct rbd_obj_request *obj_req) 2763 { 2764 struct ceph_osd_request *osd_req; 2765 int ret; 2766 2767 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1); 2768 if (IS_ERR(osd_req)) 2769 return PTR_ERR(osd_req); 2770 2771 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ, 2772 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0); 2773 rbd_osd_setup_data(osd_req, 0); 2774 rbd_osd_format_read(osd_req); 2775 2776 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO); 2777 if (ret) 2778 return ret; 2779 2780 rbd_osd_submit(osd_req); 2781 return 0; 2782 } 2783 2784 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req) 2785 { 2786 struct rbd_img_request *img_req = obj_req->img_request; 2787 struct rbd_device *parent = img_req->rbd_dev->parent; 2788 struct rbd_img_request *child_img_req; 2789 int ret; 2790 2791 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO); 2792 if (!child_img_req) 2793 return -ENOMEM; 2794 2795 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ); 2796 __set_bit(IMG_REQ_CHILD, &child_img_req->flags); 2797 child_img_req->obj_request = obj_req; 2798 2799 down_read(&parent->header_rwsem); 2800 rbd_img_capture_header(child_img_req); 2801 up_read(&parent->header_rwsem); 2802 2803 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req, 2804 obj_req); 2805 2806 if (!rbd_img_is_write(img_req)) { 2807 
switch (img_req->data_type) { 2808 case OBJ_REQUEST_BIO: 2809 ret = __rbd_img_fill_from_bio(child_img_req, 2810 obj_req->img_extents, 2811 obj_req->num_img_extents, 2812 &obj_req->bio_pos); 2813 break; 2814 case OBJ_REQUEST_BVECS: 2815 case OBJ_REQUEST_OWN_BVECS: 2816 ret = __rbd_img_fill_from_bvecs(child_img_req, 2817 obj_req->img_extents, 2818 obj_req->num_img_extents, 2819 &obj_req->bvec_pos); 2820 break; 2821 default: 2822 BUG(); 2823 } 2824 } else { 2825 ret = rbd_img_fill_from_bvecs(child_img_req, 2826 obj_req->img_extents, 2827 obj_req->num_img_extents, 2828 obj_req->copyup_bvecs); 2829 } 2830 if (ret) { 2831 rbd_img_request_destroy(child_img_req); 2832 return ret; 2833 } 2834 2835 /* avoid parent chain recursion */ 2836 rbd_img_schedule(child_img_req, 0); 2837 return 0; 2838 } 2839 2840 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result) 2841 { 2842 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2843 int ret; 2844 2845 again: 2846 switch (obj_req->read_state) { 2847 case RBD_OBJ_READ_START: 2848 rbd_assert(!*result); 2849 2850 if (!rbd_obj_may_exist(obj_req)) { 2851 *result = -ENOENT; 2852 obj_req->read_state = RBD_OBJ_READ_OBJECT; 2853 goto again; 2854 } 2855 2856 ret = rbd_obj_read_object(obj_req); 2857 if (ret) { 2858 *result = ret; 2859 return true; 2860 } 2861 obj_req->read_state = RBD_OBJ_READ_OBJECT; 2862 return false; 2863 case RBD_OBJ_READ_OBJECT: 2864 if (*result == -ENOENT && rbd_dev->parent_overlap) { 2865 /* reverse map this object extent onto the parent */ 2866 ret = rbd_obj_calc_img_extents(obj_req, false); 2867 if (ret) { 2868 *result = ret; 2869 return true; 2870 } 2871 if (obj_req->num_img_extents) { 2872 ret = rbd_obj_read_from_parent(obj_req); 2873 if (ret) { 2874 *result = ret; 2875 return true; 2876 } 2877 obj_req->read_state = RBD_OBJ_READ_PARENT; 2878 return false; 2879 } 2880 } 2881 2882 /* 2883 * -ENOENT means a hole in the image -- zero-fill the entire 2884 * length of the request. A short read also implies zero-fill 2885 * to the end of the request. 2886 */ 2887 if (*result == -ENOENT) { 2888 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len); 2889 *result = 0; 2890 } else if (*result >= 0) { 2891 if (*result < obj_req->ex.oe_len) 2892 rbd_obj_zero_range(obj_req, *result, 2893 obj_req->ex.oe_len - *result); 2894 else 2895 rbd_assert(*result == obj_req->ex.oe_len); 2896 *result = 0; 2897 } 2898 return true; 2899 case RBD_OBJ_READ_PARENT: 2900 /* 2901 * The parent image is read only up to the overlap -- zero-fill 2902 * from the overlap to the end of the request. 
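 *
 * Worked example (hypothetical sizes): with parent_overlap = 8M and a
 * child read covering 7.5M..8.5M of the image, the parent supplies only
 * the first 512K (obj_overlap); the remaining 512K past the overlap is
 * zero-filled below.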
2903 */ 2904 if (!*result) { 2905 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req); 2906 2907 if (obj_overlap < obj_req->ex.oe_len) 2908 rbd_obj_zero_range(obj_req, obj_overlap, 2909 obj_req->ex.oe_len - obj_overlap); 2910 } 2911 return true; 2912 default: 2913 BUG(); 2914 } 2915 } 2916 2917 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req) 2918 { 2919 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2920 2921 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) 2922 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST; 2923 2924 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) && 2925 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) { 2926 dout("%s %p noop for nonexistent\n", __func__, obj_req); 2927 return true; 2928 } 2929 2930 return false; 2931 } 2932 2933 /* 2934 * Return: 2935 * 0 - object map update sent 2936 * 1 - object map update isn't needed 2937 * <0 - error 2938 */ 2939 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req) 2940 { 2941 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2942 u8 new_state; 2943 2944 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) 2945 return 1; 2946 2947 if (obj_req->flags & RBD_OBJ_FLAG_DELETION) 2948 new_state = OBJECT_PENDING; 2949 else 2950 new_state = OBJECT_EXISTS; 2951 2952 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL); 2953 } 2954 2955 static int rbd_obj_write_object(struct rbd_obj_request *obj_req) 2956 { 2957 struct ceph_osd_request *osd_req; 2958 int num_ops = count_write_ops(obj_req); 2959 int which = 0; 2960 int ret; 2961 2962 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) 2963 num_ops++; /* stat */ 2964 2965 osd_req = rbd_obj_add_osd_request(obj_req, num_ops); 2966 if (IS_ERR(osd_req)) 2967 return PTR_ERR(osd_req); 2968 2969 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) { 2970 ret = rbd_osd_setup_stat(osd_req, which++); 2971 if (ret) 2972 return ret; 2973 } 2974 2975 rbd_osd_setup_write_ops(osd_req, which); 2976 rbd_osd_format_write(osd_req); 2977 2978 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO); 2979 if (ret) 2980 return ret; 2981 2982 rbd_osd_submit(osd_req); 2983 return 0; 2984 } 2985 2986 /* 2987 * copyup_bvecs pages are never highmem pages 2988 */ 2989 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes) 2990 { 2991 struct ceph_bvec_iter it = { 2992 .bvecs = bvecs, 2993 .iter = { .bi_size = bytes }, 2994 }; 2995 2996 ceph_bvec_iter_advance_step(&it, bytes, ({ 2997 if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len)) 2998 return false; 2999 })); 3000 return true; 3001 } 3002 3003 #define MODS_ONLY U32_MAX 3004 3005 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req, 3006 u32 bytes) 3007 { 3008 struct ceph_osd_request *osd_req; 3009 int ret; 3010 3011 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes); 3012 rbd_assert(bytes > 0 && bytes != MODS_ONLY); 3013 3014 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1); 3015 if (IS_ERR(osd_req)) 3016 return PTR_ERR(osd_req); 3017 3018 ret = rbd_osd_setup_copyup(osd_req, 0, bytes); 3019 if (ret) 3020 return ret; 3021 3022 rbd_osd_format_write(osd_req); 3023 3024 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO); 3025 if (ret) 3026 return ret; 3027 3028 rbd_osd_submit(osd_req); 3029 return 0; 3030 } 3031 3032 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req, 3033 u32 bytes) 3034 { 3035 struct ceph_osd_request *osd_req; 3036 int num_ops = count_write_ops(obj_req); 3037 int which = 0; 3038 int ret; 3039 3040 dout("%s obj_req 
%p bytes %u\n", __func__, obj_req, bytes); 3041 3042 if (bytes != MODS_ONLY) 3043 num_ops++; /* copyup */ 3044 3045 osd_req = rbd_obj_add_osd_request(obj_req, num_ops); 3046 if (IS_ERR(osd_req)) 3047 return PTR_ERR(osd_req); 3048 3049 if (bytes != MODS_ONLY) { 3050 ret = rbd_osd_setup_copyup(osd_req, which++, bytes); 3051 if (ret) 3052 return ret; 3053 } 3054 3055 rbd_osd_setup_write_ops(osd_req, which); 3056 rbd_osd_format_write(osd_req); 3057 3058 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO); 3059 if (ret) 3060 return ret; 3061 3062 rbd_osd_submit(osd_req); 3063 return 0; 3064 } 3065 3066 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap) 3067 { 3068 u32 i; 3069 3070 rbd_assert(!obj_req->copyup_bvecs); 3071 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap); 3072 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count, 3073 sizeof(*obj_req->copyup_bvecs), 3074 GFP_NOIO); 3075 if (!obj_req->copyup_bvecs) 3076 return -ENOMEM; 3077 3078 for (i = 0; i < obj_req->copyup_bvec_count; i++) { 3079 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE); 3080 struct page *page = alloc_page(GFP_NOIO); 3081 3082 if (!page) 3083 return -ENOMEM; 3084 3085 bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0); 3086 obj_overlap -= len; 3087 } 3088 3089 rbd_assert(!obj_overlap); 3090 return 0; 3091 } 3092 3093 /* 3094 * The target object doesn't exist. Read the data for the entire 3095 * target object up to the overlap point (if any) from the parent, 3096 * so we can use it for a copyup. 3097 */ 3098 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req) 3099 { 3100 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 3101 int ret; 3102 3103 rbd_assert(obj_req->num_img_extents); 3104 prune_extents(obj_req->img_extents, &obj_req->num_img_extents, 3105 rbd_dev->parent_overlap); 3106 if (!obj_req->num_img_extents) { 3107 /* 3108 * The overlap has become 0 (most likely because the 3109 * image has been flattened). Re-submit the original write 3110 * request -- pass MODS_ONLY since the copyup isn't needed 3111 * anymore. 
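 *
 * (MODS_ONLY makes rbd_obj_copyup_current_snapc() skip the copyup op
 * entirely and issue just the write ops counted by count_write_ops().)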
3112 */ 3113 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY); 3114 } 3115 3116 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req)); 3117 if (ret) 3118 return ret; 3119 3120 return rbd_obj_read_from_parent(obj_req); 3121 } 3122 3123 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req) 3124 { 3125 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 3126 struct ceph_snap_context *snapc = obj_req->img_request->snapc; 3127 u8 new_state; 3128 u32 i; 3129 int ret; 3130 3131 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending); 3132 3133 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) 3134 return; 3135 3136 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS) 3137 return; 3138 3139 for (i = 0; i < snapc->num_snaps; i++) { 3140 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) && 3141 i + 1 < snapc->num_snaps) 3142 new_state = OBJECT_EXISTS_CLEAN; 3143 else 3144 new_state = OBJECT_EXISTS; 3145 3146 ret = rbd_object_map_update(obj_req, snapc->snaps[i], 3147 new_state, NULL); 3148 if (ret < 0) { 3149 obj_req->pending.result = ret; 3150 return; 3151 } 3152 3153 rbd_assert(!ret); 3154 obj_req->pending.num_pending++; 3155 } 3156 } 3157 3158 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req) 3159 { 3160 u32 bytes = rbd_obj_img_extents_bytes(obj_req); 3161 int ret; 3162 3163 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending); 3164 3165 /* 3166 * Only send non-zero copyup data to save some I/O and network 3167 * bandwidth -- zero copyup data is equivalent to the object not 3168 * existing. 3169 */ 3170 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS) 3171 bytes = 0; 3172 3173 if (obj_req->img_request->snapc->num_snaps && bytes > 0) { 3174 /* 3175 * Send a copyup request with an empty snapshot context to 3176 * deep-copyup the object through all existing snapshots. 3177 * A second request with the current snapshot context will be 3178 * sent for the actual modification. 
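 *
 * Both requests are accounted for in obj_req->pending below; the second
 * one is sent with bytes = MODS_ONLY so that it carries only the
 * modification ops, not another copy of the parent data.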
3179 */ 3180 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes); 3181 if (ret) { 3182 obj_req->pending.result = ret; 3183 return; 3184 } 3185 3186 obj_req->pending.num_pending++; 3187 bytes = MODS_ONLY; 3188 } 3189 3190 ret = rbd_obj_copyup_current_snapc(obj_req, bytes); 3191 if (ret) { 3192 obj_req->pending.result = ret; 3193 return; 3194 } 3195 3196 obj_req->pending.num_pending++; 3197 } 3198 3199 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result) 3200 { 3201 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 3202 int ret; 3203 3204 again: 3205 switch (obj_req->copyup_state) { 3206 case RBD_OBJ_COPYUP_START: 3207 rbd_assert(!*result); 3208 3209 ret = rbd_obj_copyup_read_parent(obj_req); 3210 if (ret) { 3211 *result = ret; 3212 return true; 3213 } 3214 if (obj_req->num_img_extents) 3215 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT; 3216 else 3217 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT; 3218 return false; 3219 case RBD_OBJ_COPYUP_READ_PARENT: 3220 if (*result) 3221 return true; 3222 3223 if (is_zero_bvecs(obj_req->copyup_bvecs, 3224 rbd_obj_img_extents_bytes(obj_req))) { 3225 dout("%s %p detected zeros\n", __func__, obj_req); 3226 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS; 3227 } 3228 3229 rbd_obj_copyup_object_maps(obj_req); 3230 if (!obj_req->pending.num_pending) { 3231 *result = obj_req->pending.result; 3232 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS; 3233 goto again; 3234 } 3235 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS; 3236 return false; 3237 case __RBD_OBJ_COPYUP_OBJECT_MAPS: 3238 if (!pending_result_dec(&obj_req->pending, result)) 3239 return false; 3240 fallthrough; 3241 case RBD_OBJ_COPYUP_OBJECT_MAPS: 3242 if (*result) { 3243 rbd_warn(rbd_dev, "snap object map update failed: %d", 3244 *result); 3245 return true; 3246 } 3247 3248 rbd_obj_copyup_write_object(obj_req); 3249 if (!obj_req->pending.num_pending) { 3250 *result = obj_req->pending.result; 3251 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT; 3252 goto again; 3253 } 3254 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT; 3255 return false; 3256 case __RBD_OBJ_COPYUP_WRITE_OBJECT: 3257 if (!pending_result_dec(&obj_req->pending, result)) 3258 return false; 3259 fallthrough; 3260 case RBD_OBJ_COPYUP_WRITE_OBJECT: 3261 return true; 3262 default: 3263 BUG(); 3264 } 3265 } 3266 3267 /* 3268 * Return: 3269 * 0 - object map update sent 3270 * 1 - object map update isn't needed 3271 * <0 - error 3272 */ 3273 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req) 3274 { 3275 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 3276 u8 current_state = OBJECT_PENDING; 3277 3278 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) 3279 return 1; 3280 3281 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION)) 3282 return 1; 3283 3284 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT, 3285 &current_state); 3286 } 3287 3288 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result) 3289 { 3290 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 3291 int ret; 3292 3293 again: 3294 switch (obj_req->write_state) { 3295 case RBD_OBJ_WRITE_START: 3296 rbd_assert(!*result); 3297 3298 rbd_obj_set_copyup_enabled(obj_req); 3299 if (rbd_obj_write_is_noop(obj_req)) 3300 return true; 3301 3302 ret = rbd_obj_write_pre_object_map(obj_req); 3303 if (ret < 0) { 3304 *result = ret; 3305 return true; 3306 } 3307 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP; 3308 if (ret > 0) 3309 goto again; 3310
return false; 3311 case RBD_OBJ_WRITE_PRE_OBJECT_MAP: 3312 if (*result) { 3313 rbd_warn(rbd_dev, "pre object map update failed: %d", 3314 *result); 3315 return true; 3316 } 3317 ret = rbd_obj_write_object(obj_req); 3318 if (ret) { 3319 *result = ret; 3320 return true; 3321 } 3322 obj_req->write_state = RBD_OBJ_WRITE_OBJECT; 3323 return false; 3324 case RBD_OBJ_WRITE_OBJECT: 3325 if (*result == -ENOENT) { 3326 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) { 3327 *result = 0; 3328 obj_req->copyup_state = RBD_OBJ_COPYUP_START; 3329 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP; 3330 goto again; 3331 } 3332 /* 3333 * On a non-existent object: 3334 * delete - -ENOENT, truncate/zero - 0 3335 */ 3336 if (obj_req->flags & RBD_OBJ_FLAG_DELETION) 3337 *result = 0; 3338 } 3339 if (*result) 3340 return true; 3341 3342 obj_req->write_state = RBD_OBJ_WRITE_COPYUP; 3343 goto again; 3344 case __RBD_OBJ_WRITE_COPYUP: 3345 if (!rbd_obj_advance_copyup(obj_req, result)) 3346 return false; 3347 fallthrough; 3348 case RBD_OBJ_WRITE_COPYUP: 3349 if (*result) { 3350 rbd_warn(rbd_dev, "copyup failed: %d", *result); 3351 return true; 3352 } 3353 ret = rbd_obj_write_post_object_map(obj_req); 3354 if (ret < 0) { 3355 *result = ret; 3356 return true; 3357 } 3358 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP; 3359 if (ret > 0) 3360 goto again; 3361 return false; 3362 case RBD_OBJ_WRITE_POST_OBJECT_MAP: 3363 if (*result) 3364 rbd_warn(rbd_dev, "post object map update failed: %d", 3365 *result); 3366 return true; 3367 default: 3368 BUG(); 3369 } 3370 } 3371 3372 /* 3373 * Return true if @obj_req is completed. 3374 */ 3375 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req, 3376 int *result) 3377 { 3378 struct rbd_img_request *img_req = obj_req->img_request; 3379 struct rbd_device *rbd_dev = img_req->rbd_dev; 3380 bool done; 3381 3382 mutex_lock(&obj_req->state_mutex); 3383 if (!rbd_img_is_write(img_req)) 3384 done = rbd_obj_advance_read(obj_req, result); 3385 else 3386 done = rbd_obj_advance_write(obj_req, result); 3387 mutex_unlock(&obj_req->state_mutex); 3388 3389 if (done && *result) { 3390 rbd_assert(*result < 0); 3391 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d", 3392 obj_op_name(img_req->op_type), obj_req->ex.oe_objno, 3393 obj_req->ex.oe_off, obj_req->ex.oe_len, *result); 3394 } 3395 return done; 3396 } 3397 3398 /* 3399 * This is open-coded in rbd_img_handle_request() to avoid parent chain 3400 * recursion. 
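 *
 * Otherwise completing a child image request could recurse through
 * every level of a clone chain (child obj_req -> parent img_req ->
 * obj_req -> ...), and a deep enough chain could overflow the kernel
 * stack; the again: loop in rbd_img_handle_request() iterates instead.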
3401 */ 3402 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result) 3403 { 3404 if (__rbd_obj_handle_request(obj_req, &result)) 3405 rbd_img_handle_request(obj_req->img_request, result); 3406 } 3407 3408 static bool need_exclusive_lock(struct rbd_img_request *img_req) 3409 { 3410 struct rbd_device *rbd_dev = img_req->rbd_dev; 3411 3412 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) 3413 return false; 3414 3415 if (rbd_is_ro(rbd_dev)) 3416 return false; 3417 3418 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags)); 3419 if (rbd_dev->opts->lock_on_read || 3420 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) 3421 return true; 3422 3423 return rbd_img_is_write(img_req); 3424 } 3425 3426 static bool rbd_lock_add_request(struct rbd_img_request *img_req) 3427 { 3428 struct rbd_device *rbd_dev = img_req->rbd_dev; 3429 bool locked; 3430 3431 lockdep_assert_held(&rbd_dev->lock_rwsem); 3432 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED; 3433 spin_lock(&rbd_dev->lock_lists_lock); 3434 rbd_assert(list_empty(&img_req->lock_item)); 3435 if (!locked) 3436 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list); 3437 else 3438 list_add_tail(&img_req->lock_item, &rbd_dev->running_list); 3439 spin_unlock(&rbd_dev->lock_lists_lock); 3440 return locked; 3441 } 3442 3443 static void rbd_lock_del_request(struct rbd_img_request *img_req) 3444 { 3445 struct rbd_device *rbd_dev = img_req->rbd_dev; 3446 bool need_wakeup; 3447 3448 lockdep_assert_held(&rbd_dev->lock_rwsem); 3449 spin_lock(&rbd_dev->lock_lists_lock); 3450 rbd_assert(!list_empty(&img_req->lock_item)); 3451 list_del_init(&img_req->lock_item); 3452 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING && 3453 list_empty(&rbd_dev->running_list)); 3454 spin_unlock(&rbd_dev->lock_lists_lock); 3455 if (need_wakeup) 3456 complete(&rbd_dev->releasing_wait); 3457 } 3458 3459 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req) 3460 { 3461 struct rbd_device *rbd_dev = img_req->rbd_dev; 3462 3463 if (!need_exclusive_lock(img_req)) 3464 return 1; 3465 3466 if (rbd_lock_add_request(img_req)) 3467 return 1; 3468 3469 if (rbd_dev->opts->exclusive) { 3470 WARN_ON(1); /* lock got released? */ 3471 return -EROFS; 3472 } 3473 3474 /* 3475 * Note the use of mod_delayed_work() in rbd_acquire_lock() 3476 * and cancel_delayed_work() in wake_lock_waiters(). 
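 *
 * (queue_delayed_work() is a no-op if lock_dwork is already pending,
 * so concurrent image requests parked on acquiring_list trigger at
 * most one acquisition attempt at a time.)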
3477 */ 3478 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev); 3479 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); 3480 return 0; 3481 } 3482 3483 static void rbd_img_object_requests(struct rbd_img_request *img_req) 3484 { 3485 struct rbd_obj_request *obj_req; 3486 3487 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending); 3488 3489 for_each_obj_request(img_req, obj_req) { 3490 int result = 0; 3491 3492 if (__rbd_obj_handle_request(obj_req, &result)) { 3493 if (result) { 3494 img_req->pending.result = result; 3495 return; 3496 } 3497 } else { 3498 img_req->pending.num_pending++; 3499 } 3500 } 3501 } 3502 3503 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result) 3504 { 3505 struct rbd_device *rbd_dev = img_req->rbd_dev; 3506 int ret; 3507 3508 again: 3509 switch (img_req->state) { 3510 case RBD_IMG_START: 3511 rbd_assert(!*result); 3512 3513 ret = rbd_img_exclusive_lock(img_req); 3514 if (ret < 0) { 3515 *result = ret; 3516 return true; 3517 } 3518 img_req->state = RBD_IMG_EXCLUSIVE_LOCK; 3519 if (ret > 0) 3520 goto again; 3521 return false; 3522 case RBD_IMG_EXCLUSIVE_LOCK: 3523 if (*result) 3524 return true; 3525 3526 rbd_assert(!need_exclusive_lock(img_req) || 3527 __rbd_is_lock_owner(rbd_dev)); 3528 3529 rbd_img_object_requests(img_req); 3530 if (!img_req->pending.num_pending) { 3531 *result = img_req->pending.result; 3532 img_req->state = RBD_IMG_OBJECT_REQUESTS; 3533 goto again; 3534 } 3535 img_req->state = __RBD_IMG_OBJECT_REQUESTS; 3536 return false; 3537 case __RBD_IMG_OBJECT_REQUESTS: 3538 if (!pending_result_dec(&img_req->pending, result)) 3539 return false; 3540 fallthrough; 3541 case RBD_IMG_OBJECT_REQUESTS: 3542 return true; 3543 default: 3544 BUG(); 3545 } 3546 } 3547 3548 /* 3549 * Return true if @img_req is completed. 3550 */ 3551 static bool __rbd_img_handle_request(struct rbd_img_request *img_req, 3552 int *result) 3553 { 3554 struct rbd_device *rbd_dev = img_req->rbd_dev; 3555 bool done; 3556 3557 if (need_exclusive_lock(img_req)) { 3558 down_read(&rbd_dev->lock_rwsem); 3559 mutex_lock(&img_req->state_mutex); 3560 done = rbd_img_advance(img_req, result); 3561 if (done) 3562 rbd_lock_del_request(img_req); 3563 mutex_unlock(&img_req->state_mutex); 3564 up_read(&rbd_dev->lock_rwsem); 3565 } else { 3566 mutex_lock(&img_req->state_mutex); 3567 done = rbd_img_advance(img_req, result); 3568 mutex_unlock(&img_req->state_mutex); 3569 } 3570 3571 if (done && *result) { 3572 rbd_assert(*result < 0); 3573 rbd_warn(rbd_dev, "%s%s result %d", 3574 test_bit(IMG_REQ_CHILD, &img_req->flags) ? 
"child " : "", 3575 obj_op_name(img_req->op_type), *result); 3576 } 3577 return done; 3578 } 3579 3580 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result) 3581 { 3582 again: 3583 if (!__rbd_img_handle_request(img_req, &result)) 3584 return; 3585 3586 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) { 3587 struct rbd_obj_request *obj_req = img_req->obj_request; 3588 3589 rbd_img_request_destroy(img_req); 3590 if (__rbd_obj_handle_request(obj_req, &result)) { 3591 img_req = obj_req->img_request; 3592 goto again; 3593 } 3594 } else { 3595 struct request *rq = blk_mq_rq_from_pdu(img_req); 3596 3597 rbd_img_request_destroy(img_req); 3598 blk_mq_end_request(rq, errno_to_blk_status(result)); 3599 } 3600 } 3601 3602 static const struct rbd_client_id rbd_empty_cid; 3603 3604 static bool rbd_cid_equal(const struct rbd_client_id *lhs, 3605 const struct rbd_client_id *rhs) 3606 { 3607 return lhs->gid == rhs->gid && lhs->handle == rhs->handle; 3608 } 3609 3610 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev) 3611 { 3612 struct rbd_client_id cid; 3613 3614 mutex_lock(&rbd_dev->watch_mutex); 3615 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client); 3616 cid.handle = rbd_dev->watch_cookie; 3617 mutex_unlock(&rbd_dev->watch_mutex); 3618 return cid; 3619 } 3620 3621 /* 3622 * lock_rwsem must be held for write 3623 */ 3624 static void rbd_set_owner_cid(struct rbd_device *rbd_dev, 3625 const struct rbd_client_id *cid) 3626 { 3627 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev, 3628 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle, 3629 cid->gid, cid->handle); 3630 rbd_dev->owner_cid = *cid; /* struct */ 3631 } 3632 3633 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf) 3634 { 3635 mutex_lock(&rbd_dev->watch_mutex); 3636 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie); 3637 mutex_unlock(&rbd_dev->watch_mutex); 3638 } 3639 3640 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie) 3641 { 3642 struct rbd_client_id cid = rbd_get_cid(rbd_dev); 3643 3644 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; 3645 strcpy(rbd_dev->lock_cookie, cookie); 3646 rbd_set_owner_cid(rbd_dev, &cid); 3647 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); 3648 } 3649 3650 /* 3651 * lock_rwsem must be held for write 3652 */ 3653 static int rbd_lock(struct rbd_device *rbd_dev) 3654 { 3655 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3656 char cookie[32]; 3657 int ret; 3658 3659 WARN_ON(__rbd_is_lock_owner(rbd_dev) || 3660 rbd_dev->lock_cookie[0] != '\0'); 3661 3662 format_lock_cookie(rbd_dev, cookie); 3663 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 3664 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, 3665 RBD_LOCK_TAG, "", 0); 3666 if (ret) 3667 return ret; 3668 3669 __rbd_lock(rbd_dev, cookie); 3670 return 0; 3671 } 3672 3673 /* 3674 * lock_rwsem must be held for write 3675 */ 3676 static void rbd_unlock(struct rbd_device *rbd_dev) 3677 { 3678 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3679 int ret; 3680 3681 WARN_ON(!__rbd_is_lock_owner(rbd_dev) || 3682 rbd_dev->lock_cookie[0] == '\0'); 3683 3684 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 3685 RBD_LOCK_NAME, rbd_dev->lock_cookie); 3686 if (ret && ret != -ENOENT) 3687 rbd_warn(rbd_dev, "failed to unlock header: %d", ret); 3688 3689 /* treat errors as the image is unlocked */ 3690 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED; 3691 
rbd_dev->lock_cookie[0] = '\0'; 3692 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 3693 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work); 3694 } 3695 3696 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev, 3697 enum rbd_notify_op notify_op, 3698 struct page ***preply_pages, 3699 size_t *preply_len) 3700 { 3701 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3702 struct rbd_client_id cid = rbd_get_cid(rbd_dev); 3703 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN]; 3704 int buf_size = sizeof(buf); 3705 void *p = buf; 3706 3707 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op); 3708 3709 /* encode *LockPayload NotifyMessage (op + ClientId) */ 3710 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN); 3711 ceph_encode_32(&p, notify_op); 3712 ceph_encode_64(&p, cid.gid); 3713 ceph_encode_64(&p, cid.handle); 3714 3715 return ceph_osdc_notify(osdc, &rbd_dev->header_oid, 3716 &rbd_dev->header_oloc, buf, buf_size, 3717 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len); 3718 } 3719 3720 static void rbd_notify_op_lock(struct rbd_device *rbd_dev, 3721 enum rbd_notify_op notify_op) 3722 { 3723 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL); 3724 } 3725 3726 static void rbd_notify_acquired_lock(struct work_struct *work) 3727 { 3728 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 3729 acquired_lock_work); 3730 3731 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK); 3732 } 3733 3734 static void rbd_notify_released_lock(struct work_struct *work) 3735 { 3736 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 3737 released_lock_work); 3738 3739 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK); 3740 } 3741 3742 static int rbd_request_lock(struct rbd_device *rbd_dev) 3743 { 3744 struct page **reply_pages; 3745 size_t reply_len; 3746 bool lock_owner_responded = false; 3747 int ret; 3748 3749 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3750 3751 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK, 3752 &reply_pages, &reply_len); 3753 if (ret && ret != -ETIMEDOUT) { 3754 rbd_warn(rbd_dev, "failed to request lock: %d", ret); 3755 goto out; 3756 } 3757 3758 if (reply_len > 0 && reply_len <= PAGE_SIZE) { 3759 void *p = page_address(reply_pages[0]); 3760 void *const end = p + reply_len; 3761 u32 n; 3762 3763 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */ 3764 while (n--) { 3765 u8 struct_v; 3766 u32 len; 3767 3768 ceph_decode_need(&p, end, 8 + 8, e_inval); 3769 p += 8 + 8; /* skip gid and cookie */ 3770 3771 ceph_decode_32_safe(&p, end, len, e_inval); 3772 if (!len) 3773 continue; 3774 3775 if (lock_owner_responded) { 3776 rbd_warn(rbd_dev, 3777 "duplicate lock owners detected"); 3778 ret = -EIO; 3779 goto out; 3780 } 3781 3782 lock_owner_responded = true; 3783 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage", 3784 &struct_v, &len); 3785 if (ret) { 3786 rbd_warn(rbd_dev, 3787 "failed to decode ResponseMessage: %d", 3788 ret); 3789 goto e_inval; 3790 } 3791 3792 ret = ceph_decode_32(&p); 3793 } 3794 } 3795 3796 if (!lock_owner_responded) { 3797 rbd_warn(rbd_dev, "no lock owners detected"); 3798 ret = -ETIMEDOUT; 3799 } 3800 3801 out: 3802 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len)); 3803 return ret; 3804 3805 e_inval: 3806 ret = -EINVAL; 3807 goto out; 3808 } 3809 3810 /* 3811 * Either image request state machine(s) or rbd_add_acquire_lock() 3812 * (i.e. "rbd map"). 
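 *
 * That is: if "rbd map" is still blocked on acquire_wait, complete it
 * with the result; otherwise reschedule the image requests parked on
 * acquiring_list so their state machines can re-examine the lock.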
3813 */ 3814 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result) 3815 { 3816 struct rbd_img_request *img_req; 3817 3818 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result); 3819 lockdep_assert_held_write(&rbd_dev->lock_rwsem); 3820 3821 cancel_delayed_work(&rbd_dev->lock_dwork); 3822 if (!completion_done(&rbd_dev->acquire_wait)) { 3823 rbd_assert(list_empty(&rbd_dev->acquiring_list) && 3824 list_empty(&rbd_dev->running_list)); 3825 rbd_dev->acquire_err = result; 3826 complete_all(&rbd_dev->acquire_wait); 3827 return; 3828 } 3829 3830 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) { 3831 mutex_lock(&img_req->state_mutex); 3832 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK); 3833 rbd_img_schedule(img_req, result); 3834 mutex_unlock(&img_req->state_mutex); 3835 } 3836 3837 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list); 3838 } 3839 3840 static int get_lock_owner_info(struct rbd_device *rbd_dev, 3841 struct ceph_locker **lockers, u32 *num_lockers) 3842 { 3843 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3844 u8 lock_type; 3845 char *lock_tag; 3846 int ret; 3847 3848 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3849 3850 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid, 3851 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3852 &lock_type, &lock_tag, lockers, num_lockers); 3853 if (ret) 3854 return ret; 3855 3856 if (*num_lockers == 0) { 3857 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev); 3858 goto out; 3859 } 3860 3861 if (strcmp(lock_tag, RBD_LOCK_TAG)) { 3862 rbd_warn(rbd_dev, "locked by external mechanism, tag %s", 3863 lock_tag); 3864 ret = -EBUSY; 3865 goto out; 3866 } 3867 3868 if (lock_type == CEPH_CLS_LOCK_SHARED) { 3869 rbd_warn(rbd_dev, "shared lock type detected"); 3870 ret = -EBUSY; 3871 goto out; 3872 } 3873 3874 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, 3875 strlen(RBD_LOCK_COOKIE_PREFIX))) { 3876 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s", 3877 (*lockers)[0].id.cookie); 3878 ret = -EBUSY; 3879 goto out; 3880 } 3881 3882 out: 3883 kfree(lock_tag); 3884 return ret; 3885 } 3886 3887 static int find_watcher(struct rbd_device *rbd_dev, 3888 const struct ceph_locker *locker) 3889 { 3890 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3891 struct ceph_watch_item *watchers; 3892 u32 num_watchers; 3893 u64 cookie; 3894 int i; 3895 int ret; 3896 3897 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, 3898 &rbd_dev->header_oloc, &watchers, 3899 &num_watchers); 3900 if (ret) 3901 return ret; 3902 3903 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); 3904 for (i = 0; i < num_watchers; i++) { 3905 /* 3906 * Ignore addr->type while comparing. This mimics 3907 * entity_addr_t::get_legacy_str() + strcmp(). 
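 *
 * (The lock owner address comes from the lock info while the watcher
 * address comes from the watch list, and the two may carry different
 * type markers for the same endpoint, so a full comparison including
 * addr->type could miss a live lock owner.)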
3908 */ 3909 if (ceph_addr_equal_no_type(&watchers[i].addr, 3910 &locker->info.addr) && 3911 watchers[i].cookie == cookie) { 3912 struct rbd_client_id cid = { 3913 .gid = le64_to_cpu(watchers[i].name.num), 3914 .handle = cookie, 3915 }; 3916 3917 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__, 3918 rbd_dev, cid.gid, cid.handle); 3919 rbd_set_owner_cid(rbd_dev, &cid); 3920 ret = 1; 3921 goto out; 3922 } 3923 } 3924 3925 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev); 3926 ret = 0; 3927 out: 3928 kfree(watchers); 3929 return ret; 3930 } 3931 3932 /* 3933 * lock_rwsem must be held for write 3934 */ 3935 static int rbd_try_lock(struct rbd_device *rbd_dev) 3936 { 3937 struct ceph_client *client = rbd_dev->rbd_client->client; 3938 struct ceph_locker *lockers; 3939 u32 num_lockers; 3940 int ret; 3941 3942 for (;;) { 3943 ret = rbd_lock(rbd_dev); 3944 if (ret != -EBUSY) 3945 return ret; 3946 3947 /* determine if the current lock holder is still alive */ 3948 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers); 3949 if (ret) 3950 return ret; 3951 3952 if (num_lockers == 0) 3953 goto again; 3954 3955 ret = find_watcher(rbd_dev, lockers); 3956 if (ret) 3957 goto out; /* request lock or error */ 3958 3959 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu", 3960 ENTITY_NAME(lockers[0].id.name)); 3961 3962 ret = ceph_monc_blocklist_add(&client->monc, 3963 &lockers[0].info.addr); 3964 if (ret) { 3965 rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d", 3966 ENTITY_NAME(lockers[0].id.name), ret); 3967 goto out; 3968 } 3969 3970 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid, 3971 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3972 lockers[0].id.cookie, 3973 &lockers[0].id.name); 3974 if (ret && ret != -ENOENT) 3975 goto out; 3976 3977 again: 3978 ceph_free_lockers(lockers, num_lockers); 3979 } 3980 3981 out: 3982 ceph_free_lockers(lockers, num_lockers); 3983 return ret; 3984 } 3985 3986 static int rbd_post_acquire_action(struct rbd_device *rbd_dev) 3987 { 3988 int ret; 3989 3990 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) { 3991 ret = rbd_object_map_open(rbd_dev); 3992 if (ret) 3993 return ret; 3994 } 3995 3996 return 0; 3997 } 3998 3999 /* 4000 * Return: 4001 * 0 - lock acquired 4002 * 1 - caller should call rbd_request_lock() 4003 * <0 - error 4004 */ 4005 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev) 4006 { 4007 int ret; 4008 4009 down_read(&rbd_dev->lock_rwsem); 4010 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev, 4011 rbd_dev->lock_state); 4012 if (__rbd_is_lock_owner(rbd_dev)) { 4013 up_read(&rbd_dev->lock_rwsem); 4014 return 0; 4015 } 4016 4017 up_read(&rbd_dev->lock_rwsem); 4018 down_write(&rbd_dev->lock_rwsem); 4019 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev, 4020 rbd_dev->lock_state); 4021 if (__rbd_is_lock_owner(rbd_dev)) { 4022 up_write(&rbd_dev->lock_rwsem); 4023 return 0; 4024 } 4025 4026 ret = rbd_try_lock(rbd_dev); 4027 if (ret < 0) { 4028 rbd_warn(rbd_dev, "failed to lock header: %d", ret); 4029 if (ret == -EBLOCKLISTED) 4030 goto out; 4031 4032 ret = 1; /* request lock anyway */ 4033 } 4034 if (ret > 0) { 4035 up_write(&rbd_dev->lock_rwsem); 4036 return ret; 4037 } 4038 4039 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED); 4040 rbd_assert(list_empty(&rbd_dev->running_list)); 4041 4042 ret = rbd_post_acquire_action(rbd_dev); 4043 if (ret) { 4044 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret); 4045 /* 4046 * Can't stay in RBD_LOCK_STATE_LOCKED because 4047 * rbd_lock_add_request() 
would let the request through, 4048 * assuming that e.g. object map is locked and loaded. 4049 */ 4050 rbd_unlock(rbd_dev); 4051 } 4052 4053 out: 4054 wake_lock_waiters(rbd_dev, ret); 4055 up_write(&rbd_dev->lock_rwsem); 4056 return ret; 4057 } 4058 4059 static void rbd_acquire_lock(struct work_struct *work) 4060 { 4061 struct rbd_device *rbd_dev = container_of(to_delayed_work(work), 4062 struct rbd_device, lock_dwork); 4063 int ret; 4064 4065 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4066 again: 4067 ret = rbd_try_acquire_lock(rbd_dev); 4068 if (ret <= 0) { 4069 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret); 4070 return; 4071 } 4072 4073 ret = rbd_request_lock(rbd_dev); 4074 if (ret == -ETIMEDOUT) { 4075 goto again; /* treat this as a dead client */ 4076 } else if (ret == -EROFS) { 4077 rbd_warn(rbd_dev, "peer will not release lock"); 4078 down_write(&rbd_dev->lock_rwsem); 4079 wake_lock_waiters(rbd_dev, ret); 4080 up_write(&rbd_dev->lock_rwsem); 4081 } else if (ret < 0) { 4082 rbd_warn(rbd_dev, "error requesting lock: %d", ret); 4083 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 4084 RBD_RETRY_DELAY); 4085 } else { 4086 /* 4087 * lock owner acked, but resend if we don't see them 4088 * release the lock 4089 */ 4090 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__, 4091 rbd_dev); 4092 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 4093 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC)); 4094 } 4095 } 4096 4097 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) 4098 { 4099 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4100 lockdep_assert_held_write(&rbd_dev->lock_rwsem); 4101 4102 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED) 4103 return false; 4104 4105 /* 4106 * Ensure that all in-flight IO is flushed. 4107 */ 4108 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING; 4109 rbd_assert(!completion_done(&rbd_dev->releasing_wait)); 4110 if (list_empty(&rbd_dev->running_list)) 4111 return true; 4112 4113 up_write(&rbd_dev->lock_rwsem); 4114 wait_for_completion(&rbd_dev->releasing_wait); 4115 4116 down_write(&rbd_dev->lock_rwsem); 4117 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) 4118 return false; 4119 4120 rbd_assert(list_empty(&rbd_dev->running_list)); 4121 return true; 4122 } 4123 4124 static void rbd_pre_release_action(struct rbd_device *rbd_dev) 4125 { 4126 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) 4127 rbd_object_map_close(rbd_dev); 4128 } 4129 4130 static void __rbd_release_lock(struct rbd_device *rbd_dev) 4131 { 4132 rbd_assert(list_empty(&rbd_dev->running_list)); 4133 4134 rbd_pre_release_action(rbd_dev); 4135 rbd_unlock(rbd_dev); 4136 } 4137 4138 /* 4139 * lock_rwsem must be held for write 4140 */ 4141 static void rbd_release_lock(struct rbd_device *rbd_dev) 4142 { 4143 if (!rbd_quiesce_lock(rbd_dev)) 4144 return; 4145 4146 __rbd_release_lock(rbd_dev); 4147 4148 /* 4149 * Give others a chance to grab the lock - we would re-acquire 4150 * almost immediately if we got new IO while draining the running 4151 * list otherwise. We need to ack our own notifications, so this 4152 * lock_dwork will be requeued from rbd_handle_released_lock() by 4153 * way of maybe_kick_acquire(). 
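 *
 * Release round trip, sketched (assuming a well-behaved peer; the
 * RELEASED_LOCK notification itself is sent on the unlock path):
 *
 *	__rbd_release_lock()		releases the cls lock and notifies
 *	rbd_watch_cb()			sees our own RELEASED_LOCK notify
 *	rbd_handle_released_lock()	drops the recorded owner_cid
 *	maybe_kick_acquire()		requeues lock_dwork if requests wait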
4154 */ 4155 cancel_delayed_work(&rbd_dev->lock_dwork); 4156 } 4157 4158 static void rbd_release_lock_work(struct work_struct *work) 4159 { 4160 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 4161 unlock_work); 4162 4163 down_write(&rbd_dev->lock_rwsem); 4164 rbd_release_lock(rbd_dev); 4165 up_write(&rbd_dev->lock_rwsem); 4166 } 4167 4168 static void maybe_kick_acquire(struct rbd_device *rbd_dev) 4169 { 4170 bool have_requests; 4171 4172 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4173 if (__rbd_is_lock_owner(rbd_dev)) 4174 return; 4175 4176 spin_lock(&rbd_dev->lock_lists_lock); 4177 have_requests = !list_empty(&rbd_dev->acquiring_list); 4178 spin_unlock(&rbd_dev->lock_lists_lock); 4179 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) { 4180 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev); 4181 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); 4182 } 4183 } 4184 4185 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v, 4186 void **p) 4187 { 4188 struct rbd_client_id cid = { 0 }; 4189 4190 if (struct_v >= 2) { 4191 cid.gid = ceph_decode_64(p); 4192 cid.handle = ceph_decode_64(p); 4193 } 4194 4195 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 4196 cid.handle); 4197 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { 4198 down_write(&rbd_dev->lock_rwsem); 4199 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { 4200 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n", 4201 __func__, rbd_dev, cid.gid, cid.handle); 4202 } else { 4203 rbd_set_owner_cid(rbd_dev, &cid); 4204 } 4205 downgrade_write(&rbd_dev->lock_rwsem); 4206 } else { 4207 down_read(&rbd_dev->lock_rwsem); 4208 } 4209 4210 maybe_kick_acquire(rbd_dev); 4211 up_read(&rbd_dev->lock_rwsem); 4212 } 4213 4214 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v, 4215 void **p) 4216 { 4217 struct rbd_client_id cid = { 0 }; 4218 4219 if (struct_v >= 2) { 4220 cid.gid = ceph_decode_64(p); 4221 cid.handle = ceph_decode_64(p); 4222 } 4223 4224 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 4225 cid.handle); 4226 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { 4227 down_write(&rbd_dev->lock_rwsem); 4228 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { 4229 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n", 4230 __func__, rbd_dev, cid.gid, cid.handle, 4231 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle); 4232 } else { 4233 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 4234 } 4235 downgrade_write(&rbd_dev->lock_rwsem); 4236 } else { 4237 down_read(&rbd_dev->lock_rwsem); 4238 } 4239 4240 maybe_kick_acquire(rbd_dev); 4241 up_read(&rbd_dev->lock_rwsem); 4242 } 4243 4244 /* 4245 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no 4246 * ResponseMessage is needed. 
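 *
 * A <= 0 result is what goes on the wire as the ResponseMessage; the
 * encoding (done by __rbd_acknowledge_notify() below) is simply a
 * start_encoding header followed by the 32-bit result:
 *
 *	ceph_start_encoding(&p, 1, 1, sizeof(__le32));
 *	ceph_encode_32(&p, result);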
4247 */ 4248 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v, 4249 void **p) 4250 { 4251 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev); 4252 struct rbd_client_id cid = { 0 }; 4253 int result = 1; 4254 4255 if (struct_v >= 2) { 4256 cid.gid = ceph_decode_64(p); 4257 cid.handle = ceph_decode_64(p); 4258 } 4259 4260 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 4261 cid.handle); 4262 if (rbd_cid_equal(&cid, &my_cid)) 4263 return result; 4264 4265 down_read(&rbd_dev->lock_rwsem); 4266 if (__rbd_is_lock_owner(rbd_dev)) { 4267 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED && 4268 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) 4269 goto out_unlock; 4270 4271 /* 4272 * encode ResponseMessage(0) so the peer can detect 4273 * a missing owner 4274 */ 4275 result = 0; 4276 4277 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) { 4278 if (!rbd_dev->opts->exclusive) { 4279 dout("%s rbd_dev %p queueing unlock_work\n", 4280 __func__, rbd_dev); 4281 queue_work(rbd_dev->task_wq, 4282 &rbd_dev->unlock_work); 4283 } else { 4284 /* refuse to release the lock */ 4285 result = -EROFS; 4286 } 4287 } 4288 } 4289 4290 out_unlock: 4291 up_read(&rbd_dev->lock_rwsem); 4292 return result; 4293 } 4294 4295 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev, 4296 u64 notify_id, u64 cookie, s32 *result) 4297 { 4298 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4299 char buf[4 + CEPH_ENCODING_START_BLK_LEN]; 4300 int buf_size = sizeof(buf); 4301 int ret; 4302 4303 if (result) { 4304 void *p = buf; 4305 4306 /* encode ResponseMessage */ 4307 ceph_start_encoding(&p, 1, 1, 4308 buf_size - CEPH_ENCODING_START_BLK_LEN); 4309 ceph_encode_32(&p, *result); 4310 } else { 4311 buf_size = 0; 4312 } 4313 4314 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid, 4315 &rbd_dev->header_oloc, notify_id, cookie, 4316 buf, buf_size); 4317 if (ret) 4318 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret); 4319 } 4320 4321 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id, 4322 u64 cookie) 4323 { 4324 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4325 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL); 4326 } 4327 4328 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev, 4329 u64 notify_id, u64 cookie, s32 result) 4330 { 4331 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result); 4332 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result); 4333 } 4334 4335 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie, 4336 u64 notifier_id, void *data, size_t data_len) 4337 { 4338 struct rbd_device *rbd_dev = arg; 4339 void *p = data; 4340 void *const end = p + data_len; 4341 u8 struct_v = 0; 4342 u32 len; 4343 u32 notify_op; 4344 int ret; 4345 4346 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n", 4347 __func__, rbd_dev, cookie, notify_id, data_len); 4348 if (data_len) { 4349 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage", 4350 &struct_v, &len); 4351 if (ret) { 4352 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d", 4353 ret); 4354 return; 4355 } 4356 4357 notify_op = ceph_decode_32(&p); 4358 } else { 4359 /* legacy notification for header updates */ 4360 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE; 4361 len = 0; 4362 } 4363 4364 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op); 4365 switch (notify_op) { 4366 case RBD_NOTIFY_OP_ACQUIRED_LOCK: 4367 rbd_handle_acquired_lock(rbd_dev, struct_v, &p); 4368 rbd_acknowledge_notify(rbd_dev, notify_id, 
cookie); 4369 break; 4370 case RBD_NOTIFY_OP_RELEASED_LOCK: 4371 rbd_handle_released_lock(rbd_dev, struct_v, &p); 4372 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 4373 break; 4374 case RBD_NOTIFY_OP_REQUEST_LOCK: 4375 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p); 4376 if (ret <= 0) 4377 rbd_acknowledge_notify_result(rbd_dev, notify_id, 4378 cookie, ret); 4379 else 4380 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 4381 break; 4382 case RBD_NOTIFY_OP_HEADER_UPDATE: 4383 ret = rbd_dev_refresh(rbd_dev); 4384 if (ret) 4385 rbd_warn(rbd_dev, "refresh failed: %d", ret); 4386 4387 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 4388 break; 4389 default: 4390 if (rbd_is_lock_owner(rbd_dev)) 4391 rbd_acknowledge_notify_result(rbd_dev, notify_id, 4392 cookie, -EOPNOTSUPP); 4393 else 4394 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 4395 break; 4396 } 4397 } 4398 4399 static void __rbd_unregister_watch(struct rbd_device *rbd_dev); 4400 4401 static void rbd_watch_errcb(void *arg, u64 cookie, int err) 4402 { 4403 struct rbd_device *rbd_dev = arg; 4404 4405 rbd_warn(rbd_dev, "encountered watch error: %d", err); 4406 4407 down_write(&rbd_dev->lock_rwsem); 4408 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 4409 up_write(&rbd_dev->lock_rwsem); 4410 4411 mutex_lock(&rbd_dev->watch_mutex); 4412 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) { 4413 __rbd_unregister_watch(rbd_dev); 4414 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR; 4415 4416 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0); 4417 } 4418 mutex_unlock(&rbd_dev->watch_mutex); 4419 } 4420 4421 /* 4422 * watch_mutex must be locked 4423 */ 4424 static int __rbd_register_watch(struct rbd_device *rbd_dev) 4425 { 4426 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4427 struct ceph_osd_linger_request *handle; 4428 4429 rbd_assert(!rbd_dev->watch_handle); 4430 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4431 4432 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid, 4433 &rbd_dev->header_oloc, rbd_watch_cb, 4434 rbd_watch_errcb, rbd_dev); 4435 if (IS_ERR(handle)) 4436 return PTR_ERR(handle); 4437 4438 rbd_dev->watch_handle = handle; 4439 return 0; 4440 } 4441 4442 /* 4443 * watch_mutex must be locked 4444 */ 4445 static void __rbd_unregister_watch(struct rbd_device *rbd_dev) 4446 { 4447 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4448 int ret; 4449 4450 rbd_assert(rbd_dev->watch_handle); 4451 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4452 4453 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle); 4454 if (ret) 4455 rbd_warn(rbd_dev, "failed to unwatch: %d", ret); 4456 4457 rbd_dev->watch_handle = NULL; 4458 } 4459 4460 static int rbd_register_watch(struct rbd_device *rbd_dev) 4461 { 4462 int ret; 4463 4464 mutex_lock(&rbd_dev->watch_mutex); 4465 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED); 4466 ret = __rbd_register_watch(rbd_dev); 4467 if (ret) 4468 goto out; 4469 4470 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; 4471 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; 4472 4473 out: 4474 mutex_unlock(&rbd_dev->watch_mutex); 4475 return ret; 4476 } 4477 4478 static void cancel_tasks_sync(struct rbd_device *rbd_dev) 4479 { 4480 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4481 4482 cancel_work_sync(&rbd_dev->acquired_lock_work); 4483 cancel_work_sync(&rbd_dev->released_lock_work); 4484 cancel_delayed_work_sync(&rbd_dev->lock_dwork); 4485 cancel_work_sync(&rbd_dev->unlock_work); 4486 } 4487 4488 /* 4489 * header_rwsem must not be 
held to avoid a deadlock with
4490 * rbd_dev_refresh() when flushing notifies.
4491 */
4492 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4493 {
4494 cancel_tasks_sync(rbd_dev);
4495
4496 mutex_lock(&rbd_dev->watch_mutex);
4497 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4498 __rbd_unregister_watch(rbd_dev);
4499 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4500 mutex_unlock(&rbd_dev->watch_mutex);
4501
4502 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4503 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4504 }
4505
4506 /*
4507 * lock_rwsem must be held for write
4508 */
4509 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4510 {
4511 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4512 char cookie[32];
4513 int ret;
4514
4515 if (!rbd_quiesce_lock(rbd_dev))
4516 return;
4517
4518 format_lock_cookie(rbd_dev, cookie);
4519 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4520 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4521 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4522 RBD_LOCK_TAG, cookie);
4523 if (ret) {
4524 if (ret != -EOPNOTSUPP)
4525 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4526 ret);
4527
4528 /*
4529 * Lock cookie cannot be updated on older OSDs, so do
4530 * a manual release and queue an acquire.
4531 */
4532 __rbd_release_lock(rbd_dev);
4533 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4534 } else {
4535 __rbd_lock(rbd_dev, cookie);
4536 wake_lock_waiters(rbd_dev, 0);
4537 }
4538 }
4539
4540 static void rbd_reregister_watch(struct work_struct *work)
4541 {
4542 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4543 struct rbd_device, watch_dwork);
4544 int ret;
4545
4546 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4547
4548 mutex_lock(&rbd_dev->watch_mutex);
4549 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4550 mutex_unlock(&rbd_dev->watch_mutex);
4551 return;
4552 }
4553
4554 ret = __rbd_register_watch(rbd_dev);
4555 if (ret) {
4556 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4557 if (ret != -EBLOCKLISTED && ret != -ENOENT) {
4558 queue_delayed_work(rbd_dev->task_wq,
4559 &rbd_dev->watch_dwork,
4560 RBD_RETRY_DELAY);
4561 mutex_unlock(&rbd_dev->watch_mutex);
4562 return;
4563 }
4564
4565 mutex_unlock(&rbd_dev->watch_mutex);
4566 down_write(&rbd_dev->lock_rwsem);
4567 wake_lock_waiters(rbd_dev, ret);
4568 up_write(&rbd_dev->lock_rwsem);
4569 return;
4570 }
4571
4572 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4573 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4574 mutex_unlock(&rbd_dev->watch_mutex);
4575
4576 down_write(&rbd_dev->lock_rwsem);
4577 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4578 rbd_reacquire_lock(rbd_dev);
4579 up_write(&rbd_dev->lock_rwsem);
4580
4581 ret = rbd_dev_refresh(rbd_dev);
4582 if (ret)
4583 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4584 }
4585
4586 /*
4587 * Synchronous osd object method call. Returns the number of bytes
4588 * returned in the inbound buffer, or a negative error code.
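 *
 * Illustrative call (a sketch mirroring the "get_size" caller further
 * down in this file):
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *	int ret;
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 *	if (ret < 0)
 *		return ret;		(OSD call or class method failed)
 *	if (ret < sizeof(size_buf))
 *		return -ERANGE;		(short reply)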
4589 */
4590 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4591 struct ceph_object_id *oid,
4592 struct ceph_object_locator *oloc,
4593 const char *method_name,
4594 const void *outbound,
4595 size_t outbound_size,
4596 void *inbound,
4597 size_t inbound_size)
4598 {
4599 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4600 struct page *req_page = NULL;
4601 struct page *reply_page;
4602 int ret;
4603
4604 /*
4605 * Method calls are ultimately read operations. The result
4606 * should be placed into the inbound buffer provided. They
4607 * also supply outbound data--parameters for the object
4608 * method. Currently, if this is present, it will be a
4609 * snapshot id.
4610 */
4611 if (outbound) {
4612 if (outbound_size > PAGE_SIZE)
4613 return -E2BIG;
4614
4615 req_page = alloc_page(GFP_KERNEL);
4616 if (!req_page)
4617 return -ENOMEM;
4618
4619 memcpy(page_address(req_page), outbound, outbound_size);
4620 }
4621
4622 reply_page = alloc_page(GFP_KERNEL);
4623 if (!reply_page) {
4624 if (req_page)
4625 __free_page(req_page);
4626 return -ENOMEM;
4627 }
4628
4629 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4630 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4631 &reply_page, &inbound_size);
4632 if (!ret) {
4633 memcpy(inbound, page_address(reply_page), inbound_size);
4634 ret = inbound_size;
4635 }
4636
4637 if (req_page)
4638 __free_page(req_page);
4639 __free_page(reply_page);
4640 return ret;
4641 }
4642
4643 static void rbd_queue_workfn(struct work_struct *work)
4644 {
4645 struct rbd_img_request *img_request =
4646 container_of(work, struct rbd_img_request, work);
4647 struct rbd_device *rbd_dev = img_request->rbd_dev;
4648 enum obj_operation_type op_type = img_request->op_type;
4649 struct request *rq = blk_mq_rq_from_pdu(img_request);
4650 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4651 u64 length = blk_rq_bytes(rq);
4652 u64 mapping_size;
4653 int result;
4654
4655 /* Ignore/skip any zero-length requests */
4656 if (!length) {
4657 dout("%s: zero-length request\n", __func__);
4658 result = 0;
4659 goto err_img_request;
4660 }
4661
4662 blk_mq_start_request(rq);
4663
4664 down_read(&rbd_dev->header_rwsem);
4665 mapping_size = rbd_dev->mapping.size;
4666 rbd_img_capture_header(img_request);
4667 up_read(&rbd_dev->header_rwsem);
4668
4669 if (offset + length > mapping_size) {
4670 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4671 length, mapping_size);
4672 result = -EIO;
4673 goto err_img_request;
4674 }
4675
4676 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4677 img_request, obj_op_name(op_type), offset, length);
4678
4679 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4680 result = rbd_img_fill_nodata(img_request, offset, length);
4681 else
4682 result = rbd_img_fill_from_bio(img_request, offset, length,
4683 rq->bio);
4684 if (result)
4685 goto err_img_request;
4686
4687 rbd_img_handle_request(img_request, 0);
4688 return;
4689
4690 err_img_request:
4691 rbd_img_request_destroy(img_request);
4692 if (result)
4693 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4694 obj_op_name(op_type), length, offset, result);
4695 blk_mq_end_request(rq, errno_to_blk_status(result));
4696 }
4697
4698 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4699 const struct blk_mq_queue_data *bd)
4700 {
4701 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4702 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4703 enum obj_operation_type op_type;
4704
4705 switch
(req_op(bd->rq)) { 4706 case REQ_OP_DISCARD: 4707 op_type = OBJ_OP_DISCARD; 4708 break; 4709 case REQ_OP_WRITE_ZEROES: 4710 op_type = OBJ_OP_ZEROOUT; 4711 break; 4712 case REQ_OP_WRITE: 4713 op_type = OBJ_OP_WRITE; 4714 break; 4715 case REQ_OP_READ: 4716 op_type = OBJ_OP_READ; 4717 break; 4718 default: 4719 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq)); 4720 return BLK_STS_IOERR; 4721 } 4722 4723 rbd_img_request_init(img_req, rbd_dev, op_type); 4724 4725 if (rbd_img_is_write(img_req)) { 4726 if (rbd_is_ro(rbd_dev)) { 4727 rbd_warn(rbd_dev, "%s on read-only mapping", 4728 obj_op_name(img_req->op_type)); 4729 return BLK_STS_IOERR; 4730 } 4731 rbd_assert(!rbd_is_snap(rbd_dev)); 4732 } 4733 4734 INIT_WORK(&img_req->work, rbd_queue_workfn); 4735 queue_work(rbd_wq, &img_req->work); 4736 return BLK_STS_OK; 4737 } 4738 4739 static void rbd_free_disk(struct rbd_device *rbd_dev) 4740 { 4741 put_disk(rbd_dev->disk); 4742 blk_mq_free_tag_set(&rbd_dev->tag_set); 4743 rbd_dev->disk = NULL; 4744 } 4745 4746 static int rbd_obj_read_sync(struct rbd_device *rbd_dev, 4747 struct ceph_object_id *oid, 4748 struct ceph_object_locator *oloc, 4749 void *buf, int buf_len) 4750 4751 { 4752 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4753 struct ceph_osd_request *req; 4754 struct page **pages; 4755 int num_pages = calc_pages_for(0, buf_len); 4756 int ret; 4757 4758 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL); 4759 if (!req) 4760 return -ENOMEM; 4761 4762 ceph_oid_copy(&req->r_base_oid, oid); 4763 ceph_oloc_copy(&req->r_base_oloc, oloc); 4764 req->r_flags = CEPH_OSD_FLAG_READ; 4765 4766 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 4767 if (IS_ERR(pages)) { 4768 ret = PTR_ERR(pages); 4769 goto out_req; 4770 } 4771 4772 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0); 4773 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false, 4774 true); 4775 4776 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); 4777 if (ret) 4778 goto out_req; 4779 4780 ceph_osdc_start_request(osdc, req); 4781 ret = ceph_osdc_wait_request(osdc, req); 4782 if (ret >= 0) 4783 ceph_copy_from_page_vector(pages, buf, 0, ret); 4784 4785 out_req: 4786 ceph_osdc_put_request(req); 4787 return ret; 4788 } 4789 4790 /* 4791 * Read the complete header for the given rbd device. On successful 4792 * return, the rbd_dev->header field will contain up-to-date 4793 * information about the image. 4794 */ 4795 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) 4796 { 4797 struct rbd_image_header_ondisk *ondisk = NULL; 4798 u32 snap_count = 0; 4799 u64 names_size = 0; 4800 u32 want_count; 4801 int ret; 4802 4803 /* 4804 * The complete header will include an array of its 64-bit 4805 * snapshot ids, followed by the names of those snapshots as 4806 * a contiguous block of NUL-terminated strings. Note that 4807 * the number of snapshots could change by the time we read 4808 * it in, in which case we re-read it. 
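 *
 * The buffer sized in the loop below therefore covers, back to back
 * (a sketch of the layout implied by the size computation):
 *
 *	struct rbd_image_header_ondisk	ondisk;
 *	struct rbd_image_snap_ondisk	snaps[snap_count];
 *	char				snap_names[names_size];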
4809 */ 4810 do { 4811 size_t size; 4812 4813 kfree(ondisk); 4814 4815 size = sizeof (*ondisk); 4816 size += snap_count * sizeof (struct rbd_image_snap_ondisk); 4817 size += names_size; 4818 ondisk = kmalloc(size, GFP_KERNEL); 4819 if (!ondisk) 4820 return -ENOMEM; 4821 4822 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid, 4823 &rbd_dev->header_oloc, ondisk, size); 4824 if (ret < 0) 4825 goto out; 4826 if ((size_t)ret < size) { 4827 ret = -ENXIO; 4828 rbd_warn(rbd_dev, "short header read (want %zd got %d)", 4829 size, ret); 4830 goto out; 4831 } 4832 if (!rbd_dev_ondisk_valid(ondisk)) { 4833 ret = -ENXIO; 4834 rbd_warn(rbd_dev, "invalid header"); 4835 goto out; 4836 } 4837 4838 names_size = le64_to_cpu(ondisk->snap_names_len); 4839 want_count = snap_count; 4840 snap_count = le32_to_cpu(ondisk->snap_count); 4841 } while (snap_count != want_count); 4842 4843 ret = rbd_header_from_disk(rbd_dev, ondisk); 4844 out: 4845 kfree(ondisk); 4846 4847 return ret; 4848 } 4849 4850 static void rbd_dev_update_size(struct rbd_device *rbd_dev) 4851 { 4852 sector_t size; 4853 4854 /* 4855 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't 4856 * try to update its size. If REMOVING is set, updating size 4857 * is just useless work since the device can't be opened. 4858 */ 4859 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) && 4860 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) { 4861 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 4862 dout("setting size to %llu sectors", (unsigned long long)size); 4863 set_capacity_and_notify(rbd_dev->disk, size); 4864 } 4865 } 4866 4867 static int rbd_dev_refresh(struct rbd_device *rbd_dev) 4868 { 4869 u64 mapping_size; 4870 int ret; 4871 4872 down_write(&rbd_dev->header_rwsem); 4873 mapping_size = rbd_dev->mapping.size; 4874 4875 ret = rbd_dev_header_info(rbd_dev); 4876 if (ret) 4877 goto out; 4878 4879 /* 4880 * If there is a parent, see if it has disappeared due to the 4881 * mapped image getting flattened. 
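 *
 * (rbd_dev_v2_parent_info() handles the flattened case by forcing
 * parent_overlap to 0 and dropping the parent ref; see below.)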
4882 */ 4883 if (rbd_dev->parent) { 4884 ret = rbd_dev_v2_parent_info(rbd_dev); 4885 if (ret) 4886 goto out; 4887 } 4888 4889 rbd_assert(!rbd_is_snap(rbd_dev)); 4890 rbd_dev->mapping.size = rbd_dev->header.image_size; 4891 4892 out: 4893 up_write(&rbd_dev->header_rwsem); 4894 if (!ret && mapping_size != rbd_dev->mapping.size) 4895 rbd_dev_update_size(rbd_dev); 4896 4897 return ret; 4898 } 4899 4900 static const struct blk_mq_ops rbd_mq_ops = { 4901 .queue_rq = rbd_queue_rq, 4902 }; 4903 4904 static int rbd_init_disk(struct rbd_device *rbd_dev) 4905 { 4906 struct gendisk *disk; 4907 struct request_queue *q; 4908 unsigned int objset_bytes = 4909 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; 4910 int err; 4911 4912 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); 4913 rbd_dev->tag_set.ops = &rbd_mq_ops; 4914 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth; 4915 rbd_dev->tag_set.numa_node = NUMA_NO_NODE; 4916 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 4917 rbd_dev->tag_set.nr_hw_queues = num_present_cpus(); 4918 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request); 4919 4920 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set); 4921 if (err) 4922 return err; 4923 4924 disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev); 4925 if (IS_ERR(disk)) { 4926 err = PTR_ERR(disk); 4927 goto out_tag_set; 4928 } 4929 q = disk->queue; 4930 4931 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", 4932 rbd_dev->dev_id); 4933 disk->major = rbd_dev->major; 4934 disk->first_minor = rbd_dev->minor; 4935 if (single_major) 4936 disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT); 4937 else 4938 disk->minors = RBD_MINORS_PER_MAJOR; 4939 disk->fops = &rbd_bd_ops; 4940 disk->private_data = rbd_dev; 4941 4942 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 4943 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ 4944 4945 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); 4946 q->limits.max_sectors = queue_max_hw_sectors(q); 4947 blk_queue_max_segments(q, USHRT_MAX); 4948 blk_queue_max_segment_size(q, UINT_MAX); 4949 blk_queue_io_min(q, rbd_dev->opts->alloc_size); 4950 blk_queue_io_opt(q, rbd_dev->opts->alloc_size); 4951 4952 if (rbd_dev->opts->trim) { 4953 q->limits.discard_granularity = rbd_dev->opts->alloc_size; 4954 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); 4955 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); 4956 } 4957 4958 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) 4959 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); 4960 4961 rbd_dev->disk = disk; 4962 4963 return 0; 4964 out_tag_set: 4965 blk_mq_free_tag_set(&rbd_dev->tag_set); 4966 return err; 4967 } 4968 4969 /* 4970 sysfs 4971 */ 4972 4973 static struct rbd_device *dev_to_rbd_dev(struct device *dev) 4974 { 4975 return container_of(dev, struct rbd_device, dev); 4976 } 4977 4978 static ssize_t rbd_size_show(struct device *dev, 4979 struct device_attribute *attr, char *buf) 4980 { 4981 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4982 4983 return sprintf(buf, "%llu\n", 4984 (unsigned long long)rbd_dev->mapping.size); 4985 } 4986 4987 static ssize_t rbd_features_show(struct device *dev, 4988 struct device_attribute *attr, char *buf) 4989 { 4990 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4991 4992 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features); 4993 } 4994 4995 static ssize_t rbd_major_show(struct device *dev, 4996 struct device_attribute *attr, char *buf) 4997 { 4998 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4999 
5000 if (rbd_dev->major) 5001 return sprintf(buf, "%d\n", rbd_dev->major); 5002 5003 return sprintf(buf, "(none)\n"); 5004 } 5005 5006 static ssize_t rbd_minor_show(struct device *dev, 5007 struct device_attribute *attr, char *buf) 5008 { 5009 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5010 5011 return sprintf(buf, "%d\n", rbd_dev->minor); 5012 } 5013 5014 static ssize_t rbd_client_addr_show(struct device *dev, 5015 struct device_attribute *attr, char *buf) 5016 { 5017 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5018 struct ceph_entity_addr *client_addr = 5019 ceph_client_addr(rbd_dev->rbd_client->client); 5020 5021 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr, 5022 le32_to_cpu(client_addr->nonce)); 5023 } 5024 5025 static ssize_t rbd_client_id_show(struct device *dev, 5026 struct device_attribute *attr, char *buf) 5027 { 5028 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5029 5030 return sprintf(buf, "client%lld\n", 5031 ceph_client_gid(rbd_dev->rbd_client->client)); 5032 } 5033 5034 static ssize_t rbd_cluster_fsid_show(struct device *dev, 5035 struct device_attribute *attr, char *buf) 5036 { 5037 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5038 5039 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid); 5040 } 5041 5042 static ssize_t rbd_config_info_show(struct device *dev, 5043 struct device_attribute *attr, char *buf) 5044 { 5045 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5046 5047 if (!capable(CAP_SYS_ADMIN)) 5048 return -EPERM; 5049 5050 return sprintf(buf, "%s\n", rbd_dev->config_info); 5051 } 5052 5053 static ssize_t rbd_pool_show(struct device *dev, 5054 struct device_attribute *attr, char *buf) 5055 { 5056 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5057 5058 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name); 5059 } 5060 5061 static ssize_t rbd_pool_id_show(struct device *dev, 5062 struct device_attribute *attr, char *buf) 5063 { 5064 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5065 5066 return sprintf(buf, "%llu\n", 5067 (unsigned long long) rbd_dev->spec->pool_id); 5068 } 5069 5070 static ssize_t rbd_pool_ns_show(struct device *dev, 5071 struct device_attribute *attr, char *buf) 5072 { 5073 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5074 5075 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: ""); 5076 } 5077 5078 static ssize_t rbd_name_show(struct device *dev, 5079 struct device_attribute *attr, char *buf) 5080 { 5081 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5082 5083 if (rbd_dev->spec->image_name) 5084 return sprintf(buf, "%s\n", rbd_dev->spec->image_name); 5085 5086 return sprintf(buf, "(unknown)\n"); 5087 } 5088 5089 static ssize_t rbd_image_id_show(struct device *dev, 5090 struct device_attribute *attr, char *buf) 5091 { 5092 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5093 5094 return sprintf(buf, "%s\n", rbd_dev->spec->image_id); 5095 } 5096 5097 /* 5098 * Shows the name of the currently-mapped snapshot (or 5099 * RBD_SNAP_HEAD_NAME for the base image). 
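 *
 * e.g. (illustrative):
 *
 *	$ cat /sys/bus/rbd/devices/0/current_snap
 *	-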
5100 */ 5101 static ssize_t rbd_snap_show(struct device *dev, 5102 struct device_attribute *attr, 5103 char *buf) 5104 { 5105 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5106 5107 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name); 5108 } 5109 5110 static ssize_t rbd_snap_id_show(struct device *dev, 5111 struct device_attribute *attr, char *buf) 5112 { 5113 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5114 5115 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id); 5116 } 5117 5118 /* 5119 * For a v2 image, shows the chain of parent images, separated by empty 5120 * lines. For v1 images or if there is no parent, shows "(no parent 5121 * image)". 5122 */ 5123 static ssize_t rbd_parent_show(struct device *dev, 5124 struct device_attribute *attr, 5125 char *buf) 5126 { 5127 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5128 ssize_t count = 0; 5129 5130 if (!rbd_dev->parent) 5131 return sprintf(buf, "(no parent image)\n"); 5132 5133 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) { 5134 struct rbd_spec *spec = rbd_dev->parent_spec; 5135 5136 count += sprintf(&buf[count], "%s" 5137 "pool_id %llu\npool_name %s\n" 5138 "pool_ns %s\n" 5139 "image_id %s\nimage_name %s\n" 5140 "snap_id %llu\nsnap_name %s\n" 5141 "overlap %llu\n", 5142 !count ? "" : "\n", /* first? */ 5143 spec->pool_id, spec->pool_name, 5144 spec->pool_ns ?: "", 5145 spec->image_id, spec->image_name ?: "(unknown)", 5146 spec->snap_id, spec->snap_name, 5147 rbd_dev->parent_overlap); 5148 } 5149 5150 return count; 5151 } 5152 5153 static ssize_t rbd_image_refresh(struct device *dev, 5154 struct device_attribute *attr, 5155 const char *buf, 5156 size_t size) 5157 { 5158 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5159 int ret; 5160 5161 if (!capable(CAP_SYS_ADMIN)) 5162 return -EPERM; 5163 5164 ret = rbd_dev_refresh(rbd_dev); 5165 if (ret) 5166 return ret; 5167 5168 return size; 5169 } 5170 5171 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL); 5172 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL); 5173 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL); 5174 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL); 5175 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL); 5176 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL); 5177 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL); 5178 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL); 5179 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL); 5180 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL); 5181 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL); 5182 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL); 5183 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL); 5184 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh); 5185 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL); 5186 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL); 5187 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL); 5188 5189 static struct attribute *rbd_attrs[] = { 5190 &dev_attr_size.attr, 5191 &dev_attr_features.attr, 5192 &dev_attr_major.attr, 5193 &dev_attr_minor.attr, 5194 &dev_attr_client_addr.attr, 5195 &dev_attr_client_id.attr, 5196 &dev_attr_cluster_fsid.attr, 5197 &dev_attr_config_info.attr, 5198 &dev_attr_pool.attr, 5199 &dev_attr_pool_id.attr, 5200 &dev_attr_pool_ns.attr, 5201 &dev_attr_name.attr, 5202 &dev_attr_image_id.attr, 5203 &dev_attr_current_snap.attr, 5204 &dev_attr_snap_id.attr, 5205 &dev_attr_parent.attr, 5206 &dev_attr_refresh.attr, 
5207 NULL 5208 }; 5209 5210 static struct attribute_group rbd_attr_group = { 5211 .attrs = rbd_attrs, 5212 }; 5213 5214 static const struct attribute_group *rbd_attr_groups[] = { 5215 &rbd_attr_group, 5216 NULL 5217 }; 5218 5219 static void rbd_dev_release(struct device *dev); 5220 5221 static const struct device_type rbd_device_type = { 5222 .name = "rbd", 5223 .groups = rbd_attr_groups, 5224 .release = rbd_dev_release, 5225 }; 5226 5227 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec) 5228 { 5229 kref_get(&spec->kref); 5230 5231 return spec; 5232 } 5233 5234 static void rbd_spec_free(struct kref *kref); 5235 static void rbd_spec_put(struct rbd_spec *spec) 5236 { 5237 if (spec) 5238 kref_put(&spec->kref, rbd_spec_free); 5239 } 5240 5241 static struct rbd_spec *rbd_spec_alloc(void) 5242 { 5243 struct rbd_spec *spec; 5244 5245 spec = kzalloc(sizeof (*spec), GFP_KERNEL); 5246 if (!spec) 5247 return NULL; 5248 5249 spec->pool_id = CEPH_NOPOOL; 5250 spec->snap_id = CEPH_NOSNAP; 5251 kref_init(&spec->kref); 5252 5253 return spec; 5254 } 5255 5256 static void rbd_spec_free(struct kref *kref) 5257 { 5258 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref); 5259 5260 kfree(spec->pool_name); 5261 kfree(spec->pool_ns); 5262 kfree(spec->image_id); 5263 kfree(spec->image_name); 5264 kfree(spec->snap_name); 5265 kfree(spec); 5266 } 5267 5268 static void rbd_dev_free(struct rbd_device *rbd_dev) 5269 { 5270 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED); 5271 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED); 5272 5273 ceph_oid_destroy(&rbd_dev->header_oid); 5274 ceph_oloc_destroy(&rbd_dev->header_oloc); 5275 kfree(rbd_dev->config_info); 5276 5277 rbd_put_client(rbd_dev->rbd_client); 5278 rbd_spec_put(rbd_dev->spec); 5279 kfree(rbd_dev->opts); 5280 kfree(rbd_dev); 5281 } 5282 5283 static void rbd_dev_release(struct device *dev) 5284 { 5285 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5286 bool need_put = !!rbd_dev->opts; 5287 5288 if (need_put) { 5289 destroy_workqueue(rbd_dev->task_wq); 5290 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id); 5291 } 5292 5293 rbd_dev_free(rbd_dev); 5294 5295 /* 5296 * This is racy, but way better than putting module outside of 5297 * the release callback. The race window is pretty small, so 5298 * doing something similar to dm (dm-builtin.c) is overkill. 
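 *
 * (The race: module_put() is itself module code, so in principle the
 * module could be unloaded while this function is still returning.)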
5299 */ 5300 if (need_put) 5301 module_put(THIS_MODULE); 5302 } 5303 5304 static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec) 5305 { 5306 struct rbd_device *rbd_dev; 5307 5308 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL); 5309 if (!rbd_dev) 5310 return NULL; 5311 5312 spin_lock_init(&rbd_dev->lock); 5313 INIT_LIST_HEAD(&rbd_dev->node); 5314 init_rwsem(&rbd_dev->header_rwsem); 5315 5316 rbd_dev->header.data_pool_id = CEPH_NOPOOL; 5317 ceph_oid_init(&rbd_dev->header_oid); 5318 rbd_dev->header_oloc.pool = spec->pool_id; 5319 if (spec->pool_ns) { 5320 WARN_ON(!*spec->pool_ns); 5321 rbd_dev->header_oloc.pool_ns = 5322 ceph_find_or_create_string(spec->pool_ns, 5323 strlen(spec->pool_ns)); 5324 } 5325 5326 mutex_init(&rbd_dev->watch_mutex); 5327 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; 5328 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch); 5329 5330 init_rwsem(&rbd_dev->lock_rwsem); 5331 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED; 5332 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock); 5333 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock); 5334 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock); 5335 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work); 5336 spin_lock_init(&rbd_dev->lock_lists_lock); 5337 INIT_LIST_HEAD(&rbd_dev->acquiring_list); 5338 INIT_LIST_HEAD(&rbd_dev->running_list); 5339 init_completion(&rbd_dev->acquire_wait); 5340 init_completion(&rbd_dev->releasing_wait); 5341 5342 spin_lock_init(&rbd_dev->object_map_lock); 5343 5344 rbd_dev->dev.bus = &rbd_bus_type; 5345 rbd_dev->dev.type = &rbd_device_type; 5346 rbd_dev->dev.parent = &rbd_root_dev; 5347 device_initialize(&rbd_dev->dev); 5348 5349 return rbd_dev; 5350 } 5351 5352 /* 5353 * Create a mapping rbd_dev. 5354 */ 5355 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, 5356 struct rbd_spec *spec, 5357 struct rbd_options *opts) 5358 { 5359 struct rbd_device *rbd_dev; 5360 5361 rbd_dev = __rbd_dev_create(spec); 5362 if (!rbd_dev) 5363 return NULL; 5364 5365 /* get an id and fill in device name */ 5366 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0, 5367 minor_to_rbd_dev_id(1 << MINORBITS), 5368 GFP_KERNEL); 5369 if (rbd_dev->dev_id < 0) 5370 goto fail_rbd_dev; 5371 5372 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id); 5373 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM, 5374 rbd_dev->name); 5375 if (!rbd_dev->task_wq) 5376 goto fail_dev_id; 5377 5378 /* we have a ref from do_rbd_add() */ 5379 __module_get(THIS_MODULE); 5380 5381 rbd_dev->rbd_client = rbdc; 5382 rbd_dev->spec = spec; 5383 rbd_dev->opts = opts; 5384 5385 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id); 5386 return rbd_dev; 5387 5388 fail_dev_id: 5389 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id); 5390 fail_rbd_dev: 5391 rbd_dev_free(rbd_dev); 5392 return NULL; 5393 } 5394 5395 static void rbd_dev_destroy(struct rbd_device *rbd_dev) 5396 { 5397 if (rbd_dev) 5398 put_device(&rbd_dev->dev); 5399 } 5400 5401 /* 5402 * Get the size and object order for an image snapshot, or if 5403 * snap_id is CEPH_NOSNAP, gets this information for the base 5404 * image. 
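 *
 * Reply payload, as implied by size_buf below (a sketch; the size is
 * on-wire little-endian):
 *
 *	u8	order;		object size is 1 << order bytes
 *	__le64	size;		image/snapshot size in bytes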
5405 */ 5406 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 5407 u8 *order, u64 *snap_size) 5408 { 5409 __le64 snapid = cpu_to_le64(snap_id); 5410 int ret; 5411 struct { 5412 u8 order; 5413 __le64 size; 5414 } __attribute__ ((packed)) size_buf = { 0 }; 5415 5416 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5417 &rbd_dev->header_oloc, "get_size", 5418 &snapid, sizeof(snapid), 5419 &size_buf, sizeof(size_buf)); 5420 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5421 if (ret < 0) 5422 return ret; 5423 if (ret < sizeof (size_buf)) 5424 return -ERANGE; 5425 5426 if (order) { 5427 *order = size_buf.order; 5428 dout(" order %u", (unsigned int)*order); 5429 } 5430 *snap_size = le64_to_cpu(size_buf.size); 5431 5432 dout(" snap_id 0x%016llx snap_size = %llu\n", 5433 (unsigned long long)snap_id, 5434 (unsigned long long)*snap_size); 5435 5436 return 0; 5437 } 5438 5439 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev) 5440 { 5441 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, 5442 &rbd_dev->header.obj_order, 5443 &rbd_dev->header.image_size); 5444 } 5445 5446 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) 5447 { 5448 size_t size; 5449 void *reply_buf; 5450 int ret; 5451 void *p; 5452 5453 /* Response will be an encoded string, which includes a length */ 5454 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX; 5455 reply_buf = kzalloc(size, GFP_KERNEL); 5456 if (!reply_buf) 5457 return -ENOMEM; 5458 5459 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5460 &rbd_dev->header_oloc, "get_object_prefix", 5461 NULL, 0, reply_buf, size); 5462 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5463 if (ret < 0) 5464 goto out; 5465 5466 p = reply_buf; 5467 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, 5468 p + ret, NULL, GFP_NOIO); 5469 ret = 0; 5470 5471 if (IS_ERR(rbd_dev->header.object_prefix)) { 5472 ret = PTR_ERR(rbd_dev->header.object_prefix); 5473 rbd_dev->header.object_prefix = NULL; 5474 } else { 5475 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); 5476 } 5477 out: 5478 kfree(reply_buf); 5479 5480 return ret; 5481 } 5482 5483 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 5484 bool read_only, u64 *snap_features) 5485 { 5486 struct { 5487 __le64 snap_id; 5488 u8 read_only; 5489 } features_in; 5490 struct { 5491 __le64 features; 5492 __le64 incompat; 5493 } __attribute__ ((packed)) features_buf = { 0 }; 5494 u64 unsup; 5495 int ret; 5496 5497 features_in.snap_id = cpu_to_le64(snap_id); 5498 features_in.read_only = read_only; 5499 5500 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5501 &rbd_dev->header_oloc, "get_features", 5502 &features_in, sizeof(features_in), 5503 &features_buf, sizeof(features_buf)); 5504 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5505 if (ret < 0) 5506 return ret; 5507 if (ret < sizeof (features_buf)) 5508 return -ERANGE; 5509 5510 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED; 5511 if (unsup) { 5512 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx", 5513 unsup); 5514 return -ENXIO; 5515 } 5516 5517 *snap_features = le64_to_cpu(features_buf.features); 5518 5519 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n", 5520 (unsigned long long)snap_id, 5521 (unsigned long long)*snap_features, 5522 (unsigned long long)le64_to_cpu(features_buf.incompat)); 5523 5524 return 0; 5525 } 5526 5527 static int rbd_dev_v2_features(struct rbd_device 
*rbd_dev) 5528 { 5529 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, 5530 rbd_is_ro(rbd_dev), 5531 &rbd_dev->header.features); 5532 } 5533 5534 /* 5535 * These are generic image flags, but since they are used only for 5536 * object map, store them in rbd_dev->object_map_flags. 5537 * 5538 * For the same reason, this function is called only on object map 5539 * (re)load and not on header refresh. 5540 */ 5541 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev) 5542 { 5543 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id); 5544 __le64 flags; 5545 int ret; 5546 5547 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5548 &rbd_dev->header_oloc, "get_flags", 5549 &snapid, sizeof(snapid), 5550 &flags, sizeof(flags)); 5551 if (ret < 0) 5552 return ret; 5553 if (ret < sizeof(flags)) 5554 return -EBADMSG; 5555 5556 rbd_dev->object_map_flags = le64_to_cpu(flags); 5557 return 0; 5558 } 5559 5560 struct parent_image_info { 5561 u64 pool_id; 5562 const char *pool_ns; 5563 const char *image_id; 5564 u64 snap_id; 5565 5566 bool has_overlap; 5567 u64 overlap; 5568 }; 5569 5570 /* 5571 * The caller is responsible for @pii. 5572 */ 5573 static int decode_parent_image_spec(void **p, void *end, 5574 struct parent_image_info *pii) 5575 { 5576 u8 struct_v; 5577 u32 struct_len; 5578 int ret; 5579 5580 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec", 5581 &struct_v, &struct_len); 5582 if (ret) 5583 return ret; 5584 5585 ceph_decode_64_safe(p, end, pii->pool_id, e_inval); 5586 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); 5587 if (IS_ERR(pii->pool_ns)) { 5588 ret = PTR_ERR(pii->pool_ns); 5589 pii->pool_ns = NULL; 5590 return ret; 5591 } 5592 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); 5593 if (IS_ERR(pii->image_id)) { 5594 ret = PTR_ERR(pii->image_id); 5595 pii->image_id = NULL; 5596 return ret; 5597 } 5598 ceph_decode_64_safe(p, end, pii->snap_id, e_inval); 5599 return 0; 5600 5601 e_inval: 5602 return -EINVAL; 5603 } 5604 5605 static int __get_parent_info(struct rbd_device *rbd_dev, 5606 struct page *req_page, 5607 struct page *reply_page, 5608 struct parent_image_info *pii) 5609 { 5610 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 5611 size_t reply_len = PAGE_SIZE; 5612 void *p, *end; 5613 int ret; 5614 5615 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 5616 "rbd", "parent_get", CEPH_OSD_FLAG_READ, 5617 req_page, sizeof(u64), &reply_page, &reply_len); 5618 if (ret) 5619 return ret == -EOPNOTSUPP ? 1 : ret; 5620 5621 p = page_address(reply_page); 5622 end = p + reply_len; 5623 ret = decode_parent_image_spec(&p, end, pii); 5624 if (ret) 5625 return ret; 5626 5627 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 5628 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ, 5629 req_page, sizeof(u64), &reply_page, &reply_len); 5630 if (ret) 5631 return ret; 5632 5633 p = page_address(reply_page); 5634 end = p + reply_len; 5635 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval); 5636 if (pii->has_overlap) 5637 ceph_decode_64_safe(&p, end, pii->overlap, e_inval); 5638 5639 return 0; 5640 5641 e_inval: 5642 return -EINVAL; 5643 } 5644 5645 /* 5646 * The caller is responsible for @pii. 
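 *
 * Legacy "get_parent" reply layout, as decoded below (a sketch; a
 * "string" is a __le32 length followed by that many bytes):
 *
 *	__le64	pool_id;
 *	string	image_id;
 *	__le64	snap_id;
 *	__le64	overlap;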
5647 */ 5648 static int __get_parent_info_legacy(struct rbd_device *rbd_dev, 5649 struct page *req_page, 5650 struct page *reply_page, 5651 struct parent_image_info *pii) 5652 { 5653 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 5654 size_t reply_len = PAGE_SIZE; 5655 void *p, *end; 5656 int ret; 5657 5658 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 5659 "rbd", "get_parent", CEPH_OSD_FLAG_READ, 5660 req_page, sizeof(u64), &reply_page, &reply_len); 5661 if (ret) 5662 return ret; 5663 5664 p = page_address(reply_page); 5665 end = p + reply_len; 5666 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval); 5667 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 5668 if (IS_ERR(pii->image_id)) { 5669 ret = PTR_ERR(pii->image_id); 5670 pii->image_id = NULL; 5671 return ret; 5672 } 5673 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval); 5674 pii->has_overlap = true; 5675 ceph_decode_64_safe(&p, end, pii->overlap, e_inval); 5676 5677 return 0; 5678 5679 e_inval: 5680 return -EINVAL; 5681 } 5682 5683 static int get_parent_info(struct rbd_device *rbd_dev, 5684 struct parent_image_info *pii) 5685 { 5686 struct page *req_page, *reply_page; 5687 void *p; 5688 int ret; 5689 5690 req_page = alloc_page(GFP_KERNEL); 5691 if (!req_page) 5692 return -ENOMEM; 5693 5694 reply_page = alloc_page(GFP_KERNEL); 5695 if (!reply_page) { 5696 __free_page(req_page); 5697 return -ENOMEM; 5698 } 5699 5700 p = page_address(req_page); 5701 ceph_encode_64(&p, rbd_dev->spec->snap_id); 5702 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii); 5703 if (ret > 0) 5704 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page, 5705 pii); 5706 5707 __free_page(req_page); 5708 __free_page(reply_page); 5709 return ret; 5710 } 5711 5712 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) 5713 { 5714 struct rbd_spec *parent_spec; 5715 struct parent_image_info pii = { 0 }; 5716 int ret; 5717 5718 parent_spec = rbd_spec_alloc(); 5719 if (!parent_spec) 5720 return -ENOMEM; 5721 5722 ret = get_parent_info(rbd_dev, &pii); 5723 if (ret) 5724 goto out_err; 5725 5726 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", 5727 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id, 5728 pii.has_overlap, pii.overlap); 5729 5730 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) { 5731 /* 5732 * Either the parent never existed, or we have 5733 * record of it but the image got flattened so it no 5734 * longer has a parent. When the parent of a 5735 * layered image disappears we immediately set the 5736 * overlap to 0. The effect of this is that all new 5737 * requests will be treated as if the image had no 5738 * parent. 5739 * 5740 * If !pii.has_overlap, the parent image spec is not 5741 * applicable. It's there to avoid duplication in each 5742 * snapshot record. 5743 */ 5744 if (rbd_dev->parent_overlap) { 5745 rbd_dev->parent_overlap = 0; 5746 rbd_dev_parent_put(rbd_dev); 5747 pr_info("%s: clone image has been flattened\n", 5748 rbd_dev->disk->disk_name); 5749 } 5750 5751 goto out; /* No parent? No problem. */ 5752 } 5753 5754 /* The ceph file layout needs to fit pool id in 32 bits */ 5755 5756 ret = -EIO; 5757 if (pii.pool_id > (u64)U32_MAX) { 5758 rbd_warn(NULL, "parent pool id too large (%llu > %u)", 5759 (unsigned long long)pii.pool_id, U32_MAX); 5760 goto out_err; 5761 } 5762 5763 /* 5764 * The parent won't change (except when the clone is 5765 * flattened, already handled that). 
So we only need to
5766 * record the parent spec if we have not already done so.
5767 */
5768 if (!rbd_dev->parent_spec) {
5769 parent_spec->pool_id = pii.pool_id;
5770 if (pii.pool_ns && *pii.pool_ns) {
5771 parent_spec->pool_ns = pii.pool_ns;
5772 pii.pool_ns = NULL;
5773 }
5774 parent_spec->image_id = pii.image_id;
5775 pii.image_id = NULL;
5776 parent_spec->snap_id = pii.snap_id;
5777
5778 rbd_dev->parent_spec = parent_spec;
5779 parent_spec = NULL; /* rbd_dev now owns this */
5780 }
5781
5782 /*
5783 * We always update the parent overlap. If it's zero we issue
5784 * a warning, as we will proceed as if there was no parent.
5785 */
5786 if (!pii.overlap) {
5787 if (parent_spec) {
5788 /* refresh, careful to warn just once */
5789 if (rbd_dev->parent_overlap)
5790 rbd_warn(rbd_dev,
5791 "clone now standalone (overlap became 0)");
5792 } else {
5793 /* initial probe */
5794 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5795 }
5796 }
5797 rbd_dev->parent_overlap = pii.overlap;
5798
5799 out:
5800 ret = 0;
5801 out_err:
5802 kfree(pii.pool_ns);
5803 kfree(pii.image_id);
5804 rbd_spec_put(parent_spec);
5805 return ret;
5806 }
5807
5808 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5809 {
5810 struct {
5811 __le64 stripe_unit;
5812 __le64 stripe_count;
5813 } __attribute__ ((packed)) striping_info_buf = { 0 };
5814 size_t size = sizeof (striping_info_buf);
5815 void *p;
5816 int ret;
5817
5818 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5819 &rbd_dev->header_oloc, "get_stripe_unit_count",
5820 NULL, 0, &striping_info_buf, size);
5821 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5822 if (ret < 0)
5823 return ret;
5824 if (ret < size)
5825 return -ERANGE;
5826
5827 p = &striping_info_buf;
5828 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
5829 rbd_dev->header.stripe_count = ceph_decode_64(&p);
5830 return 0;
5831 }
5832
5833 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
5834 {
5835 __le64 data_pool_id;
5836 int ret;
5837
5838 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5839 &rbd_dev->header_oloc, "get_data_pool",
5840 NULL, 0, &data_pool_id, sizeof(data_pool_id));
5841 if (ret < 0)
5842 return ret;
5843 if (ret < sizeof(data_pool_id))
5844 return -EBADMSG;
5845
5846 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
5847 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
5848 return 0;
5849 }
5850
5851 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5852 {
5853 CEPH_DEFINE_OID_ONSTACK(oid);
5854 size_t image_id_size;
5855 char *image_id;
5856 void *p;
5857 void *end;
5858 size_t size;
5859 void *reply_buf = NULL;
5860 size_t len = 0;
5861 char *image_name = NULL;
5862 int ret;
5863
5864 rbd_assert(!rbd_dev->spec->image_name);
5865
5866 len = strlen(rbd_dev->spec->image_id);
5867 image_id_size = sizeof (__le32) + len;
5868 image_id = kmalloc(image_id_size, GFP_KERNEL);
5869 if (!image_id)
5870 return NULL;
5871
5872 p = image_id;
5873 end = image_id + image_id_size;
5874 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5875
5876 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5877 reply_buf = kmalloc(size, GFP_KERNEL);
5878 if (!reply_buf)
5879 goto out;
5880
5881 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5882 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5883 "dir_get_name", image_id, image_id_size,
5884 reply_buf, size);
5885 if (ret < 0)
5886 goto out;
5887 p = reply_buf;
5888 end = reply_buf + ret;
5889
5890 image_name =
ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL); 5891 if (IS_ERR(image_name)) 5892 image_name = NULL; 5893 else 5894 dout("%s: name is %s len is %zd\n", __func__, image_name, len); 5895 out: 5896 kfree(reply_buf); 5897 kfree(image_id); 5898 5899 return image_name; 5900 } 5901 5902 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5903 { 5904 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 5905 const char *snap_name; 5906 u32 which = 0; 5907 5908 /* Skip over names until we find the one we are looking for */ 5909 5910 snap_name = rbd_dev->header.snap_names; 5911 while (which < snapc->num_snaps) { 5912 if (!strcmp(name, snap_name)) 5913 return snapc->snaps[which]; 5914 snap_name += strlen(snap_name) + 1; 5915 which++; 5916 } 5917 return CEPH_NOSNAP; 5918 } 5919 5920 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5921 { 5922 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 5923 u32 which; 5924 bool found = false; 5925 u64 snap_id; 5926 5927 for (which = 0; !found && which < snapc->num_snaps; which++) { 5928 const char *snap_name; 5929 5930 snap_id = snapc->snaps[which]; 5931 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); 5932 if (IS_ERR(snap_name)) { 5933 /* ignore no-longer existing snapshots */ 5934 if (PTR_ERR(snap_name) == -ENOENT) 5935 continue; 5936 else 5937 break; 5938 } 5939 found = !strcmp(name, snap_name); 5940 kfree(snap_name); 5941 } 5942 return found ? snap_id : CEPH_NOSNAP; 5943 } 5944 5945 /* 5946 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if 5947 * no snapshot by that name is found, or if an error occurs. 5948 */ 5949 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5950 { 5951 if (rbd_dev->image_format == 1) 5952 return rbd_v1_snap_id_by_name(rbd_dev, name); 5953 5954 return rbd_v2_snap_id_by_name(rbd_dev, name); 5955 } 5956 5957 /* 5958 * An image being mapped will have everything but the snap id. 5959 */ 5960 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev) 5961 { 5962 struct rbd_spec *spec = rbd_dev->spec; 5963 5964 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name); 5965 rbd_assert(spec->image_id && spec->image_name); 5966 rbd_assert(spec->snap_name); 5967 5968 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) { 5969 u64 snap_id; 5970 5971 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name); 5972 if (snap_id == CEPH_NOSNAP) 5973 return -ENOENT; 5974 5975 spec->snap_id = snap_id; 5976 } else { 5977 spec->snap_id = CEPH_NOSNAP; 5978 } 5979 5980 return 0; 5981 } 5982 5983 /* 5984 * A parent image will have all ids but none of the names. 5985 * 5986 * All names in an rbd spec are dynamically allocated. It's OK if we 5987 * can't figure out the name for an image id. 
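 *
 * (A NULL image_name is tolerated below and surfaces to userspace as
 * "(unknown)" via rbd_name_show().)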
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}

static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapcontext",
				  NULL, 0, reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}

static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapshot_name",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	static const char spaces[] = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

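/*
 * For example (illustrative only): with buf pointing at
 * "  1.2.3.4:6789 name=admin rbd foo", next_token() advances buf past
 * the leading spaces and returns 12, the length of "1.2.3.4:6789".
 * dup_token() below additionally duplicates the token and advances
 * buf beyond it, ready for the next call.
 */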
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

static int rbd_parse_param(struct fs_parameter *param,
			   struct rbd_parse_opts_ctx *pctx)
{
	struct rbd_options *opt = pctx->opts;
	struct fs_parse_result result;
	struct p_log log = {.prefix = "rbd"};
	int token, ret;

	ret = ceph_parse_param(param, pctx->copts, NULL);
	if (ret != -ENOPARAM)
		return ret;

	token = __fs_parse(&log, rbd_parameters, param, &result);
	dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
	if (token < 0) {
		if (token == -ENOPARAM)
			return inval_plog(&log, "Unknown parameter '%s'",
					  param->key);
		return token;
	}

	switch (token) {
	case Opt_queue_depth:
		if (result.uint_32 < 1)
			goto out_of_range;
		opt->queue_depth = result.uint_32;
		break;
	case Opt_alloc_size:
		if (result.uint_32 < SECTOR_SIZE)
			goto out_of_range;
		if (!is_power_of_2(result.uint_32))
			return inval_plog(&log, "alloc_size must be a power of 2");
		opt->alloc_size = result.uint_32;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (result.uint_32 > INT_MAX / 1000)
			goto out_of_range;
		opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = param->string;
		param->string = NULL;
		break;
	case Opt_compression_hint:
		switch (result.uint_32) {
		case Opt_compression_hint_none:
			opt->alloc_hint_flags &=
			    ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
			      CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
			break;
		case Opt_compression_hint_compressible:
			opt->alloc_hint_flags |=
			    CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
			opt->alloc_hint_flags &=
			    ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
			break;
		case Opt_compression_hint_incompressible:
			opt->alloc_hint_flags |=
			    CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
			opt->alloc_hint_flags &=
			    ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
			break;
		default:
			BUG();
		}
		break;
	case Opt_read_only:
		opt->read_only = true;
		break;
	case Opt_read_write:
		opt->read_only = false;
		break;
	case Opt_lock_on_read:
		opt->lock_on_read = true;
		break;
	case Opt_exclusive:
		opt->exclusive = true;
		break;
	case Opt_notrim:
		opt->trim = false;
		break;
	default:
		BUG();
	}

	return 0;

out_of_range:
	return inval_plog(&log, "%s out of range", param->key);
}

/*
 * This duplicates most of generic_parse_monolithic(), untying it from
 * fs_context and skipping standard superblock and security options.
 */
static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
{
	char *key;
	int ret = 0;

	dout("%s '%s'\n", __func__, options);
	while ((key = strsep(&options, ",")) != NULL) {
		if (*key) {
			struct fs_parameter param = {
				.key	= key,
				.type	= fs_value_is_flag,
			};
			char *value = strchr(key, '=');
			size_t v_len = 0;

			if (value) {
				if (value == key)
					continue;
				*value++ = 0;
				v_len = strlen(value);
				param.string = kmemdup_nul(value, v_len,
							   GFP_KERNEL);
				if (!param.string)
					return -ENOMEM;
				param.type = fs_value_is_string;
			}
			param.size = v_len;

			ret = rbd_parse_param(&param, pctx);
			kfree(param.string);
			if (ret)
				break;
		}
	}

	return ret;
}

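/*
 * For example (illustrative only), an options string such as
 * "name=admin,queue_depth=128,alloc_size=65536,read_only" is split on
 * commas above; each key[=value] pair is first offered to
 * ceph_parse_param() (libceph options such as "name=admin") and only
 * then matched against rbd's own parameter table in rbd_parse_param().
 * A complete write to /sys/bus/rbd/add might then look like:
 *
 *   $ echo "1.2.3.4:6789 name=admin,queue_depth=128 rbd foo" \
 *       > /sys/bus/rbd/add
 *
 * (monitor address, options, pool name and image name, per the
 * comment below).
 */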
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *	The address of a pointer that will refer to a ceph options
 *	structure.  Caller must release the returned pointer using
 *	ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *	A comma-separated list of one or more monitor addresses.
 *	A monitor address is an ip address, optionally followed
 *	by a port number (separated by a colon).
 *	  I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *	A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *	The name of the rados pool containing the rbd image.
 *  <image_name>
 *	The name of the image in that pool to map.
 *  <snap_name>
 *	An optional snapshot name.  If provided, the mapping will
 *	present data from the image at the time that snapshot was
 *	created.  The image head is used if no snapshot name is
 *	provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
			      struct ceph_options **ceph_opts,
			      struct rbd_options **opts,
			      struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_parse_opts_ctx pctx = { 0 };
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	pctx.spec = rbd_spec_alloc();
	if (!pctx.spec)
		goto out_mem;

	pctx.spec->pool_name = dup_token(&buf, NULL);
	if (!pctx.spec->pool_name)
		goto out_mem;
	if (!*pctx.spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	pctx.spec->image_name = dup_token(&buf, NULL);
	if (!pctx.spec->image_name)
		goto out_mem;
	if (!*pctx.spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	pctx.spec->snap_name = snap_name;

	pctx.copts = ceph_alloc_options();
	if (!pctx.copts)
		goto out_mem;

	/* Initialize all rbd options to the defaults */

	pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
	if (!pctx.opts)
		goto out_mem;

	pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
	pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
	pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
	pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
	pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
	pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
	pctx.opts->trim = RBD_TRIM_DEFAULT;

	ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL,
				 ',');
	if (ret)
		goto out_err;

	ret = rbd_parse_options(options, &pctx);
	if (ret)
		goto out_err;

	*ceph_opts = pctx.copts;
	*opts = pctx.opts;
	*rbd_spec = pctx.spec;
	kfree(options);
	return 0;

out_mem:
	ret = -ENOMEM;
out_err:
	kfree(pctx.opts);
	ceph_destroy_options(pctx.copts);
	rbd_spec_put(pctx.spec);
	kfree(options);
	return ret;
}

static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
{
	down_write(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev))
		__rbd_release_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}

/*
 * If the wait is interrupted, an error is returned even if the lock
 * was successfully acquired.  rbd_dev_image_unlock() will release it
 * if needed.
 */
static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
	long ret;

	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
		if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
			return 0;

		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
		return -EINVAL;
	}

	if (rbd_is_ro(rbd_dev))
		return 0;

	rbd_assert(!rbd_is_lock_owner(rbd_dev));
	queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
			    ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
	if (ret > 0) {
		ret = rbd_dev->acquire_err;
	} else {
		cancel_delayed_work_sync(&rbd_dev->lock_dwork);
		if (!ret)
			ret = -ETIMEDOUT;
	}

	if (ret) {
		rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
		return ret;
	}

	/*
	 * The lock may have been released by now, unless automatic lock
	 * transitions are disabled.
	 */
	rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
	return 0;
}

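/*
 * Illustrative note (not from the original source): with the
 * "exclusive" map option the lock is held for as long as the device
 * is mapped and is not handed over to other clients, which is what
 * the rbd_assert() above relies on; a "lock_timeout=10" option would
 * bound the wait above to 10 seconds, after which the map attempt
 * fails with -ETIMEDOUT.
 */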
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	CEPH_DEFINE_OID_ONSTACK(oid);
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
			       rbd_dev->spec->image_name);
	if (ret)
		return ret;

	dout("rbd id object name is %s\n", oid.name);

	/* Response will be an encoded string, which includes a length */
	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "get_id", NULL, 0,
				  response, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						       NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	ceph_oid_destroy(&oid);
	return ret;
}

/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);
	rbd_object_map_free(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
		ret = rbd_dev_v2_data_pool(rbd_dev);
		if (ret)
			goto out_err;
	}

	rbd_init_layout(rbd_dev);
	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}

/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
	parent->spec = rbd_spec_get(rbd_dev->parent_spec);

	__set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}

static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_free_disk(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}

/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));

	ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	if (ret)
		goto err_out_disk;

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);
	return 0;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}

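/*
 * Illustrative examples (assuming the usual rbd_types.h definitions
 * of RBD_SUFFIX and RBD_HEADER_PREFIX): a format 1 image named "foo"
 * keeps its header in the object "foo.rbd", while a format 2 image
 * with id "1234abcd" keeps it in "rbd_header.1234abcd".
 */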
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}

static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
{
	if (!is_snap) {
		pr_info("image %s/%s%s%s does not exist\n",
			rbd_dev->spec->pool_name,
			rbd_dev->spec->pool_ns ?: "",
			rbd_dev->spec->pool_ns ? "/" : "",
			rbd_dev->spec->image_name);
	} else {
		pr_info("snap %s/%s%s%s@%s does not exist\n",
			rbd_dev->spec->pool_name,
			rbd_dev->spec->pool_ns ?: "",
			rbd_dev->spec->pool_ns ? "/" : "",
			rbd_dev->spec->image_name,
			rbd_dev->spec->snap_name);
	}
}

static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	if (!rbd_is_ro(rbd_dev))
		rbd_unregister_watch(rbd_dev);

	rbd_dev_unprobe(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 *
 * On success, returns with header_rwsem held for write if called
 * with @depth == 0.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	bool need_watch = !rbd_is_ro(rbd_dev);
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (need_watch) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				rbd_print_dne(rbd_dev, false);
			goto err_out_format;
		}
	}

	if (!depth)
		down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_header_info(rbd_dev);
	if (ret) {
		if (ret == -ENOENT && !need_watch)
			rbd_print_dne(rbd_dev, false);
		goto err_out_probe;
	}

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			rbd_print_dne(rbd_dev, true);
		goto err_out_probe;
	}

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_probe;

	if (rbd_is_snap(rbd_dev) &&
	    (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
		ret = rbd_object_map_load(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	if (!depth)
		up_write(&rbd_dev->header_rwsem);
	if (need_watch)
		rbd_unregister_watch(rbd_dev);
	rbd_dev_unprobe(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

static ssize_t do_rbd_add(const char *buf, size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	/* if we are mapping a snapshot it will be a read-only mapping */
	if (rbd_dev->opts->read_only ||
	    strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
		__set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
		rbd_warn(rbd_dev, "alloc_size adjusted to %u",
			 rbd_dev->layout.object_size);
		rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
	}

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	rc = rbd_add_acquire_lock(rbd_dev);
	if (rc)
		goto err_out_image_lock;

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
	if (rc)
		goto err_out_cleanup_disk;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_cleanup_disk:
	rbd_free_disk(rbd_dev);
err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(buf, count);
}

static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
				      size_t count)
{
	return do_rbd_add(buf, count);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

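/*
 * For example (illustrative only): "echo 0 > /sys/bus/rbd/remove"
 * unmaps /dev/rbd0, while "echo '0 force' > /sys/bus/rbd/remove"
 * unmaps it even while it is still open, failing all in-flight and
 * future I/O instead of returning -EBUSY.
 */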
static ssize_t do_rbd_remove(const char *buf, size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool force = false;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
					  &rbd_dev->flags))
			ret = -EINPROGRESS;
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_mark_disk_dead(rbd_dev->disk);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}

static ssize_t remove_store(const struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(buf, count);
}

static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
					 size_t count)
{
	return do_rbd_remove(buf, count);
}

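/*
 * The *_store() handlers above back /sys/bus/rbd/add,
 * /sys/bus/rbd/remove and their *_single_major variants.  When the
 * driver is loaded with single_major=Y, writes to the plain add and
 * remove files are rejected with -EINVAL and the single-major files
 * must be used instead.
 */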
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0) {
		put_device(&rbd_root_dev);
		return ret;
	}

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int __init rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");