1 2 /* 3 rbd.c -- Export ceph rados objects as a Linux block device 4 5 6 based on drivers/block/osdblk.c: 7 8 Copyright 2009 Red Hat, Inc. 9 10 This program is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation. 13 14 This program is distributed in the hope that it will be useful, 15 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 GNU General Public License for more details. 18 19 You should have received a copy of the GNU General Public License 20 along with this program; see the file COPYING. If not, write to 21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 22 23 24 25 For usage instructions, please refer to: 26 27 Documentation/ABI/testing/sysfs-bus-rbd 28 29 */ 30 31 #include <linux/ceph/libceph.h> 32 #include <linux/ceph/osd_client.h> 33 #include <linux/ceph/mon_client.h> 34 #include <linux/ceph/cls_lock_client.h> 35 #include <linux/ceph/striper.h> 36 #include <linux/ceph/decode.h> 37 #include <linux/fs_parser.h> 38 #include <linux/bsearch.h> 39 40 #include <linux/kernel.h> 41 #include <linux/device.h> 42 #include <linux/module.h> 43 #include <linux/blk-mq.h> 44 #include <linux/fs.h> 45 #include <linux/blkdev.h> 46 #include <linux/slab.h> 47 #include <linux/idr.h> 48 #include <linux/workqueue.h> 49 50 #include "rbd_types.h" 51 52 #define RBD_DEBUG /* Activate rbd_assert() calls */ 53 54 /* 55 * Increment the given counter and return its updated value. 56 * If the counter is already 0 it will not be incremented. 57 * If the counter is already at its maximum value returns 58 * -EINVAL without updating it. 59 */ 60 static int atomic_inc_return_safe(atomic_t *v) 61 { 62 unsigned int counter; 63 64 counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0); 65 if (counter <= (unsigned int)INT_MAX) 66 return (int)counter; 67 68 atomic_dec(v); 69 70 return -EINVAL; 71 } 72 73 /* Decrement the counter. 
Return the resulting value, or -EINVAL */ 74 static int atomic_dec_return_safe(atomic_t *v) 75 { 76 int counter; 77 78 counter = atomic_dec_return(v); 79 if (counter >= 0) 80 return counter; 81 82 atomic_inc(v); 83 84 return -EINVAL; 85 } 86 87 #define RBD_DRV_NAME "rbd" 88 89 #define RBD_MINORS_PER_MAJOR 256 90 #define RBD_SINGLE_MAJOR_PART_SHIFT 4 91 92 #define RBD_MAX_PARENT_CHAIN_LEN 16 93 94 #define RBD_SNAP_DEV_NAME_PREFIX "snap_" 95 #define RBD_MAX_SNAP_NAME_LEN \ 96 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1)) 97 98 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */ 99 100 #define RBD_SNAP_HEAD_NAME "-" 101 102 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */ 103 104 /* This allows a single page to hold an image name sent by OSD */ 105 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1) 106 #define RBD_IMAGE_ID_LEN_MAX 64 107 108 #define RBD_OBJ_PREFIX_LEN_MAX 64 109 110 #define RBD_NOTIFY_TIMEOUT 5 /* seconds */ 111 #define RBD_RETRY_DELAY msecs_to_jiffies(1000) 112 113 /* Feature bits */ 114 115 #define RBD_FEATURE_LAYERING (1ULL<<0) 116 #define RBD_FEATURE_STRIPINGV2 (1ULL<<1) 117 #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) 118 #define RBD_FEATURE_OBJECT_MAP (1ULL<<3) 119 #define RBD_FEATURE_FAST_DIFF (1ULL<<4) 120 #define RBD_FEATURE_DEEP_FLATTEN (1ULL<<5) 121 #define RBD_FEATURE_DATA_POOL (1ULL<<7) 122 #define RBD_FEATURE_OPERATIONS (1ULL<<8) 123 124 #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ 125 RBD_FEATURE_STRIPINGV2 | \ 126 RBD_FEATURE_EXCLUSIVE_LOCK | \ 127 RBD_FEATURE_OBJECT_MAP | \ 128 RBD_FEATURE_FAST_DIFF | \ 129 RBD_FEATURE_DEEP_FLATTEN | \ 130 RBD_FEATURE_DATA_POOL | \ 131 RBD_FEATURE_OPERATIONS) 132 133 /* Features supported by this (client software) implementation. */ 134 135 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL) 136 137 /* 138 * An RBD device name will be "rbd#", where the "rbd" comes from 139 * RBD_DRV_NAME above, and # is a unique integer identifier. 140 */ 141 #define DEV_NAME_LEN 32 142 143 /* 144 * block device image metadata (in-memory version) 145 */ 146 struct rbd_image_header { 147 /* These six fields never change for a given rbd image */ 148 char *object_prefix; 149 __u8 obj_order; 150 u64 stripe_unit; 151 u64 stripe_count; 152 s64 data_pool_id; 153 u64 features; /* Might be changeable someday? */ 154 155 /* The remaining fields need to be updated occasionally */ 156 u64 image_size; 157 struct ceph_snap_context *snapc; 158 char *snap_names; /* format 1 only */ 159 u64 *snap_sizes; /* format 1 only */ 160 }; 161 162 /* 163 * An rbd image specification. 164 * 165 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely 166 * identify an image. Each rbd_dev structure includes a pointer to 167 * an rbd_spec structure that encapsulates this identity. 168 * 169 * Each of the id's in an rbd_spec has an associated name. For a 170 * user-mapped image, the names are supplied and the id's associated 171 * with them are looked up. For a layered image, a parent image is 172 * defined by the tuple, and the names are looked up. 173 * 174 * An rbd_dev structure contains a parent_spec pointer which is 175 * non-null if the image it represents is a child in a layered 176 * image. This pointer will refer to the rbd_spec structure used 177 * by the parent rbd_dev for its own identity (i.e., the structure 178 * is shared between the parent and child). 
179 * 180 * Since these structures are populated once, during the discovery 181 * phase of image construction, they are effectively immutable so 182 * we make no effort to synchronize access to them. 183 * 184 * Note that code herein does not assume the image name is known (it 185 * could be a null pointer). 186 */ 187 struct rbd_spec { 188 u64 pool_id; 189 const char *pool_name; 190 const char *pool_ns; /* NULL if default, never "" */ 191 192 const char *image_id; 193 const char *image_name; 194 195 u64 snap_id; 196 const char *snap_name; 197 198 struct kref kref; 199 }; 200 201 /* 202 * an instance of the client. multiple devices may share an rbd client. 203 */ 204 struct rbd_client { 205 struct ceph_client *client; 206 struct kref kref; 207 struct list_head node; 208 }; 209 210 struct pending_result { 211 int result; /* first nonzero result */ 212 int num_pending; 213 }; 214 215 struct rbd_img_request; 216 217 enum obj_request_type { 218 OBJ_REQUEST_NODATA = 1, 219 OBJ_REQUEST_BIO, /* pointer into provided bio (list) */ 220 OBJ_REQUEST_BVECS, /* pointer into provided bio_vec array */ 221 OBJ_REQUEST_OWN_BVECS, /* private bio_vec array, doesn't own pages */ 222 }; 223 224 enum obj_operation_type { 225 OBJ_OP_READ = 1, 226 OBJ_OP_WRITE, 227 OBJ_OP_DISCARD, 228 OBJ_OP_ZEROOUT, 229 }; 230 231 #define RBD_OBJ_FLAG_DELETION (1U << 0) 232 #define RBD_OBJ_FLAG_COPYUP_ENABLED (1U << 1) 233 #define RBD_OBJ_FLAG_COPYUP_ZEROS (1U << 2) 234 #define RBD_OBJ_FLAG_MAY_EXIST (1U << 3) 235 #define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT (1U << 4) 236 237 enum rbd_obj_read_state { 238 RBD_OBJ_READ_START = 1, 239 RBD_OBJ_READ_OBJECT, 240 RBD_OBJ_READ_PARENT, 241 }; 242 243 /* 244 * Writes go through the following state machine to deal with 245 * layering: 246 * 247 * . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . . 248 * . | . 249 * . v . 250 * . RBD_OBJ_WRITE_READ_FROM_PARENT. . . . 251 * . | . . 252 * . v v (deep-copyup . 253 * (image . RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC . not needed) . 254 * flattened) v | . . 255 * . v . . 256 * . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . . (copyup . 257 * | not needed) v 258 * v . 259 * done . . . . . . . . . . . . . . . . . . 260 * ^ 261 * | 262 * RBD_OBJ_WRITE_FLAT 263 * 264 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether 265 * assert_exists guard is needed or not (in some cases it's not needed 266 * even if there is a parent). 
267 */ 268 enum rbd_obj_write_state { 269 RBD_OBJ_WRITE_START = 1, 270 RBD_OBJ_WRITE_PRE_OBJECT_MAP, 271 RBD_OBJ_WRITE_OBJECT, 272 __RBD_OBJ_WRITE_COPYUP, 273 RBD_OBJ_WRITE_COPYUP, 274 RBD_OBJ_WRITE_POST_OBJECT_MAP, 275 }; 276 277 enum rbd_obj_copyup_state { 278 RBD_OBJ_COPYUP_START = 1, 279 RBD_OBJ_COPYUP_READ_PARENT, 280 __RBD_OBJ_COPYUP_OBJECT_MAPS, 281 RBD_OBJ_COPYUP_OBJECT_MAPS, 282 __RBD_OBJ_COPYUP_WRITE_OBJECT, 283 RBD_OBJ_COPYUP_WRITE_OBJECT, 284 }; 285 286 struct rbd_obj_request { 287 struct ceph_object_extent ex; 288 unsigned int flags; /* RBD_OBJ_FLAG_* */ 289 union { 290 enum rbd_obj_read_state read_state; /* for reads */ 291 enum rbd_obj_write_state write_state; /* for writes */ 292 }; 293 294 struct rbd_img_request *img_request; 295 struct ceph_file_extent *img_extents; 296 u32 num_img_extents; 297 298 union { 299 struct ceph_bio_iter bio_pos; 300 struct { 301 struct ceph_bvec_iter bvec_pos; 302 u32 bvec_count; 303 u32 bvec_idx; 304 }; 305 }; 306 307 enum rbd_obj_copyup_state copyup_state; 308 struct bio_vec *copyup_bvecs; 309 u32 copyup_bvec_count; 310 311 struct list_head osd_reqs; /* w/ r_private_item */ 312 313 struct mutex state_mutex; 314 struct pending_result pending; 315 struct kref kref; 316 }; 317 318 enum img_req_flags { 319 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */ 320 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */ 321 }; 322 323 enum rbd_img_state { 324 RBD_IMG_START = 1, 325 RBD_IMG_EXCLUSIVE_LOCK, 326 __RBD_IMG_OBJECT_REQUESTS, 327 RBD_IMG_OBJECT_REQUESTS, 328 }; 329 330 struct rbd_img_request { 331 struct rbd_device *rbd_dev; 332 enum obj_operation_type op_type; 333 enum obj_request_type data_type; 334 unsigned long flags; 335 enum rbd_img_state state; 336 union { 337 u64 snap_id; /* for reads */ 338 struct ceph_snap_context *snapc; /* for writes */ 339 }; 340 struct rbd_obj_request *obj_request; /* obj req initiator */ 341 342 struct list_head lock_item; 343 struct list_head object_extents; /* obj_req.ex structs */ 344 345 struct mutex state_mutex; 346 struct pending_result pending; 347 struct work_struct work; 348 int work_result; 349 }; 350 351 #define for_each_obj_request(ireq, oreq) \ 352 list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item) 353 #define for_each_obj_request_safe(ireq, oreq, n) \ 354 list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item) 355 356 enum rbd_watch_state { 357 RBD_WATCH_STATE_UNREGISTERED, 358 RBD_WATCH_STATE_REGISTERED, 359 RBD_WATCH_STATE_ERROR, 360 }; 361 362 enum rbd_lock_state { 363 RBD_LOCK_STATE_UNLOCKED, 364 RBD_LOCK_STATE_LOCKED, 365 RBD_LOCK_STATE_RELEASING, 366 }; 367 368 /* WatchNotify::ClientId */ 369 struct rbd_client_id { 370 u64 gid; 371 u64 handle; 372 }; 373 374 struct rbd_mapping { 375 u64 size; 376 }; 377 378 /* 379 * a single device 380 */ 381 struct rbd_device { 382 int dev_id; /* blkdev unique id */ 383 384 int major; /* blkdev assigned major */ 385 int minor; 386 struct gendisk *disk; /* blkdev's gendisk and rq */ 387 388 u32 image_format; /* Either 1 or 2 */ 389 struct rbd_client *rbd_client; 390 391 char name[DEV_NAME_LEN]; /* blkdev name, e.g. 
rbd3 */ 392 393 spinlock_t lock; /* queue, flags, open_count */ 394 395 struct rbd_image_header header; 396 unsigned long flags; /* possibly lock protected */ 397 struct rbd_spec *spec; 398 struct rbd_options *opts; 399 char *config_info; /* add{,_single_major} string */ 400 401 struct ceph_object_id header_oid; 402 struct ceph_object_locator header_oloc; 403 404 struct ceph_file_layout layout; /* used for all rbd requests */ 405 406 struct mutex watch_mutex; 407 enum rbd_watch_state watch_state; 408 struct ceph_osd_linger_request *watch_handle; 409 u64 watch_cookie; 410 struct delayed_work watch_dwork; 411 412 struct rw_semaphore lock_rwsem; 413 enum rbd_lock_state lock_state; 414 char lock_cookie[32]; 415 struct rbd_client_id owner_cid; 416 struct work_struct acquired_lock_work; 417 struct work_struct released_lock_work; 418 struct delayed_work lock_dwork; 419 struct work_struct unlock_work; 420 spinlock_t lock_lists_lock; 421 struct list_head acquiring_list; 422 struct list_head running_list; 423 struct completion acquire_wait; 424 int acquire_err; 425 struct completion releasing_wait; 426 427 spinlock_t object_map_lock; 428 u8 *object_map; 429 u64 object_map_size; /* in objects */ 430 u64 object_map_flags; 431 432 struct workqueue_struct *task_wq; 433 434 struct rbd_spec *parent_spec; 435 u64 parent_overlap; 436 atomic_t parent_ref; 437 struct rbd_device *parent; 438 439 /* Block layer tags. */ 440 struct blk_mq_tag_set tag_set; 441 442 /* protects updating the header */ 443 struct rw_semaphore header_rwsem; 444 445 struct rbd_mapping mapping; 446 447 struct list_head node; 448 449 /* sysfs related */ 450 struct device dev; 451 unsigned long open_count; /* protected by lock */ 452 }; 453 454 /* 455 * Flag bits for rbd_dev->flags: 456 * - REMOVING (which is coupled with rbd_dev->open_count) is protected 457 * by rbd_dev->lock 458 */ 459 enum rbd_dev_flags { 460 RBD_DEV_FLAG_EXISTS, /* rbd_dev_device_setup() ran */ 461 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */ 462 RBD_DEV_FLAG_READONLY, /* -o ro or snapshot */ 463 }; 464 465 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */ 466 467 static LIST_HEAD(rbd_dev_list); /* devices */ 468 static DEFINE_SPINLOCK(rbd_dev_list_lock); 469 470 static LIST_HEAD(rbd_client_list); /* clients */ 471 static DEFINE_SPINLOCK(rbd_client_list_lock); 472 473 /* Slab caches for frequently-allocated structures */ 474 475 static struct kmem_cache *rbd_img_request_cache; 476 static struct kmem_cache *rbd_obj_request_cache; 477 478 static int rbd_major; 479 static DEFINE_IDA(rbd_dev_id_ida); 480 481 static struct workqueue_struct *rbd_wq; 482 483 static struct ceph_snap_context rbd_empty_snapc = { 484 .nref = REFCOUNT_INIT(1), 485 }; 486 487 /* 488 * single-major requires >= 0.75 version of userspace rbd utility. 
489 */ 490 static bool single_major = true; 491 module_param(single_major, bool, 0444); 492 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)"); 493 494 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count); 495 static ssize_t remove_store(struct bus_type *bus, const char *buf, 496 size_t count); 497 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf, 498 size_t count); 499 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf, 500 size_t count); 501 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth); 502 503 static int rbd_dev_id_to_minor(int dev_id) 504 { 505 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT; 506 } 507 508 static int minor_to_rbd_dev_id(int minor) 509 { 510 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT; 511 } 512 513 static bool rbd_is_ro(struct rbd_device *rbd_dev) 514 { 515 return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags); 516 } 517 518 static bool rbd_is_snap(struct rbd_device *rbd_dev) 519 { 520 return rbd_dev->spec->snap_id != CEPH_NOSNAP; 521 } 522 523 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev) 524 { 525 lockdep_assert_held(&rbd_dev->lock_rwsem); 526 527 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED || 528 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING; 529 } 530 531 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev) 532 { 533 bool is_lock_owner; 534 535 down_read(&rbd_dev->lock_rwsem); 536 is_lock_owner = __rbd_is_lock_owner(rbd_dev); 537 up_read(&rbd_dev->lock_rwsem); 538 return is_lock_owner; 539 } 540 541 static ssize_t supported_features_show(struct bus_type *bus, char *buf) 542 { 543 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED); 544 } 545 546 static BUS_ATTR_WO(add); 547 static BUS_ATTR_WO(remove); 548 static BUS_ATTR_WO(add_single_major); 549 static BUS_ATTR_WO(remove_single_major); 550 static BUS_ATTR_RO(supported_features); 551 552 static struct attribute *rbd_bus_attrs[] = { 553 &bus_attr_add.attr, 554 &bus_attr_remove.attr, 555 &bus_attr_add_single_major.attr, 556 &bus_attr_remove_single_major.attr, 557 &bus_attr_supported_features.attr, 558 NULL, 559 }; 560 561 static umode_t rbd_bus_is_visible(struct kobject *kobj, 562 struct attribute *attr, int index) 563 { 564 if (!single_major && 565 (attr == &bus_attr_add_single_major.attr || 566 attr == &bus_attr_remove_single_major.attr)) 567 return 0; 568 569 return attr->mode; 570 } 571 572 static const struct attribute_group rbd_bus_group = { 573 .attrs = rbd_bus_attrs, 574 .is_visible = rbd_bus_is_visible, 575 }; 576 __ATTRIBUTE_GROUPS(rbd_bus); 577 578 static struct bus_type rbd_bus_type = { 579 .name = "rbd", 580 .bus_groups = rbd_bus_groups, 581 }; 582 583 static void rbd_root_dev_release(struct device *dev) 584 { 585 } 586 587 static struct device rbd_root_dev = { 588 .init_name = "rbd", 589 .release = rbd_root_dev_release, 590 }; 591 592 static __printf(2, 3) 593 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) 
594 { 595 struct va_format vaf; 596 va_list args; 597 598 va_start(args, fmt); 599 vaf.fmt = fmt; 600 vaf.va = &args; 601 602 if (!rbd_dev) 603 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf); 604 else if (rbd_dev->disk) 605 printk(KERN_WARNING "%s: %s: %pV\n", 606 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf); 607 else if (rbd_dev->spec && rbd_dev->spec->image_name) 608 printk(KERN_WARNING "%s: image %s: %pV\n", 609 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf); 610 else if (rbd_dev->spec && rbd_dev->spec->image_id) 611 printk(KERN_WARNING "%s: id %s: %pV\n", 612 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf); 613 else /* punt */ 614 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n", 615 RBD_DRV_NAME, rbd_dev, &vaf); 616 va_end(args); 617 } 618 619 #ifdef RBD_DEBUG 620 #define rbd_assert(expr) \ 621 if (unlikely(!(expr))) { \ 622 printk(KERN_ERR "\nAssertion failure in %s() " \ 623 "at line %d:\n\n" \ 624 "\trbd_assert(%s);\n\n", \ 625 __func__, __LINE__, #expr); \ 626 BUG(); \ 627 } 628 #else /* !RBD_DEBUG */ 629 # define rbd_assert(expr) ((void) 0) 630 #endif /* !RBD_DEBUG */ 631 632 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); 633 634 static int rbd_dev_refresh(struct rbd_device *rbd_dev); 635 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev); 636 static int rbd_dev_header_info(struct rbd_device *rbd_dev); 637 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev); 638 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, 639 u64 snap_id); 640 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 641 u8 *order, u64 *snap_size); 642 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev); 643 644 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result); 645 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result); 646 647 /* 648 * Return true if nothing else is pending. 649 */ 650 static bool pending_result_dec(struct pending_result *pending, int *result) 651 { 652 rbd_assert(pending->num_pending > 0); 653 654 if (*result && !pending->result) 655 pending->result = *result; 656 if (--pending->num_pending) 657 return false; 658 659 *result = pending->result; 660 return true; 661 } 662 663 static int rbd_open(struct block_device *bdev, fmode_t mode) 664 { 665 struct rbd_device *rbd_dev = bdev->bd_disk->private_data; 666 bool removing = false; 667 668 spin_lock_irq(&rbd_dev->lock); 669 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) 670 removing = true; 671 else 672 rbd_dev->open_count++; 673 spin_unlock_irq(&rbd_dev->lock); 674 if (removing) 675 return -ENOENT; 676 677 (void) get_device(&rbd_dev->dev); 678 679 return 0; 680 } 681 682 static void rbd_release(struct gendisk *disk, fmode_t mode) 683 { 684 struct rbd_device *rbd_dev = disk->private_data; 685 unsigned long open_count_before; 686 687 spin_lock_irq(&rbd_dev->lock); 688 open_count_before = rbd_dev->open_count--; 689 spin_unlock_irq(&rbd_dev->lock); 690 rbd_assert(open_count_before > 0); 691 692 put_device(&rbd_dev->dev); 693 } 694 695 static const struct block_device_operations rbd_bd_ops = { 696 .owner = THIS_MODULE, 697 .open = rbd_open, 698 .release = rbd_release, 699 }; 700 701 /* 702 * Initialize an rbd client instance. Success or not, this function 703 * consumes ceph_opts. Caller holds client_mutex. 
704 */ 705 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) 706 { 707 struct rbd_client *rbdc; 708 int ret = -ENOMEM; 709 710 dout("%s:\n", __func__); 711 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL); 712 if (!rbdc) 713 goto out_opt; 714 715 kref_init(&rbdc->kref); 716 INIT_LIST_HEAD(&rbdc->node); 717 718 rbdc->client = ceph_create_client(ceph_opts, rbdc); 719 if (IS_ERR(rbdc->client)) 720 goto out_rbdc; 721 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */ 722 723 ret = ceph_open_session(rbdc->client); 724 if (ret < 0) 725 goto out_client; 726 727 spin_lock(&rbd_client_list_lock); 728 list_add_tail(&rbdc->node, &rbd_client_list); 729 spin_unlock(&rbd_client_list_lock); 730 731 dout("%s: rbdc %p\n", __func__, rbdc); 732 733 return rbdc; 734 out_client: 735 ceph_destroy_client(rbdc->client); 736 out_rbdc: 737 kfree(rbdc); 738 out_opt: 739 if (ceph_opts) 740 ceph_destroy_options(ceph_opts); 741 dout("%s: error %d\n", __func__, ret); 742 743 return ERR_PTR(ret); 744 } 745 746 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc) 747 { 748 kref_get(&rbdc->kref); 749 750 return rbdc; 751 } 752 753 /* 754 * Find a ceph client with specific addr and configuration. If 755 * found, bump its reference count. 756 */ 757 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) 758 { 759 struct rbd_client *rbdc = NULL, *iter; 760 761 if (ceph_opts->flags & CEPH_OPT_NOSHARE) 762 return NULL; 763 764 spin_lock(&rbd_client_list_lock); 765 list_for_each_entry(iter, &rbd_client_list, node) { 766 if (!ceph_compare_options(ceph_opts, iter->client)) { 767 __rbd_get_client(iter); 768 769 rbdc = iter; 770 break; 771 } 772 } 773 spin_unlock(&rbd_client_list_lock); 774 775 return rbdc; 776 } 777 778 /* 779 * (Per device) rbd map options 780 */ 781 enum { 782 Opt_queue_depth, 783 Opt_alloc_size, 784 Opt_lock_timeout, 785 /* int args above */ 786 Opt_pool_ns, 787 Opt_compression_hint, 788 /* string args above */ 789 Opt_read_only, 790 Opt_read_write, 791 Opt_lock_on_read, 792 Opt_exclusive, 793 Opt_notrim, 794 }; 795 796 enum { 797 Opt_compression_hint_none, 798 Opt_compression_hint_compressible, 799 Opt_compression_hint_incompressible, 800 }; 801 802 static const struct constant_table rbd_param_compression_hint[] = { 803 {"none", Opt_compression_hint_none}, 804 {"compressible", Opt_compression_hint_compressible}, 805 {"incompressible", Opt_compression_hint_incompressible}, 806 {} 807 }; 808 809 static const struct fs_parameter_spec rbd_parameters[] = { 810 fsparam_u32 ("alloc_size", Opt_alloc_size), 811 fsparam_enum ("compression_hint", Opt_compression_hint, 812 rbd_param_compression_hint), 813 fsparam_flag ("exclusive", Opt_exclusive), 814 fsparam_flag ("lock_on_read", Opt_lock_on_read), 815 fsparam_u32 ("lock_timeout", Opt_lock_timeout), 816 fsparam_flag ("notrim", Opt_notrim), 817 fsparam_string ("_pool_ns", Opt_pool_ns), 818 fsparam_u32 ("queue_depth", Opt_queue_depth), 819 fsparam_flag ("read_only", Opt_read_only), 820 fsparam_flag ("read_write", Opt_read_write), 821 fsparam_flag ("ro", Opt_read_only), 822 fsparam_flag ("rw", Opt_read_write), 823 {} 824 }; 825 826 struct rbd_options { 827 int queue_depth; 828 int alloc_size; 829 unsigned long lock_timeout; 830 bool read_only; 831 bool lock_on_read; 832 bool exclusive; 833 bool trim; 834 835 u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */ 836 }; 837 838 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ 839 #define RBD_ALLOC_SIZE_DEFAULT (64 * 1024) 840 #define 
RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */ 841 #define RBD_READ_ONLY_DEFAULT false 842 #define RBD_LOCK_ON_READ_DEFAULT false 843 #define RBD_EXCLUSIVE_DEFAULT false 844 #define RBD_TRIM_DEFAULT true 845 846 struct rbd_parse_opts_ctx { 847 struct rbd_spec *spec; 848 struct ceph_options *copts; 849 struct rbd_options *opts; 850 }; 851 852 static char* obj_op_name(enum obj_operation_type op_type) 853 { 854 switch (op_type) { 855 case OBJ_OP_READ: 856 return "read"; 857 case OBJ_OP_WRITE: 858 return "write"; 859 case OBJ_OP_DISCARD: 860 return "discard"; 861 case OBJ_OP_ZEROOUT: 862 return "zeroout"; 863 default: 864 return "???"; 865 } 866 } 867 868 /* 869 * Destroy ceph client 870 * 871 * Caller must hold rbd_client_list_lock. 872 */ 873 static void rbd_client_release(struct kref *kref) 874 { 875 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref); 876 877 dout("%s: rbdc %p\n", __func__, rbdc); 878 spin_lock(&rbd_client_list_lock); 879 list_del(&rbdc->node); 880 spin_unlock(&rbd_client_list_lock); 881 882 ceph_destroy_client(rbdc->client); 883 kfree(rbdc); 884 } 885 886 /* 887 * Drop reference to ceph client node. If it's not referenced anymore, release 888 * it. 889 */ 890 static void rbd_put_client(struct rbd_client *rbdc) 891 { 892 if (rbdc) 893 kref_put(&rbdc->kref, rbd_client_release); 894 } 895 896 /* 897 * Get a ceph client with specific addr and configuration, if one does 898 * not exist create it. Either way, ceph_opts is consumed by this 899 * function. 900 */ 901 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) 902 { 903 struct rbd_client *rbdc; 904 int ret; 905 906 mutex_lock(&client_mutex); 907 rbdc = rbd_client_find(ceph_opts); 908 if (rbdc) { 909 ceph_destroy_options(ceph_opts); 910 911 /* 912 * Using an existing client. Make sure ->pg_pools is up to 913 * date before we look up the pool id in do_rbd_add(). 914 */ 915 ret = ceph_wait_for_latest_osdmap(rbdc->client, 916 rbdc->client->options->mount_timeout); 917 if (ret) { 918 rbd_warn(NULL, "failed to get latest osdmap: %d", ret); 919 rbd_put_client(rbdc); 920 rbdc = ERR_PTR(ret); 921 } 922 } else { 923 rbdc = rbd_client_create(ceph_opts); 924 } 925 mutex_unlock(&client_mutex); 926 927 return rbdc; 928 } 929 930 static bool rbd_image_format_valid(u32 image_format) 931 { 932 return image_format == 1 || image_format == 2; 933 } 934 935 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) 936 { 937 size_t size; 938 u32 snap_count; 939 940 /* The header has to start with the magic rbd header text */ 941 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT))) 942 return false; 943 944 /* The bio layer requires at least sector-sized I/O */ 945 946 if (ondisk->options.order < SECTOR_SHIFT) 947 return false; 948 949 /* If we use u64 in a few spots we may be able to loosen this */ 950 951 if (ondisk->options.order > 8 * sizeof (int) - 1) 952 return false; 953 954 /* 955 * The size of a snapshot header has to fit in a size_t, and 956 * that limits the number of snapshots. 957 */ 958 snap_count = le32_to_cpu(ondisk->snap_count); 959 size = SIZE_MAX - sizeof (struct ceph_snap_context); 960 if (snap_count > size / sizeof (__le64)) 961 return false; 962 963 /* 964 * Not only that, but the size of the entire the snapshot 965 * header must also be representable in a size_t. 
966 */ 967 size -= snap_count * sizeof (__le64); 968 if ((u64) size < le64_to_cpu(ondisk->snap_names_len)) 969 return false; 970 971 return true; 972 } 973 974 /* 975 * returns the size of an object in the image 976 */ 977 static u32 rbd_obj_bytes(struct rbd_image_header *header) 978 { 979 return 1U << header->obj_order; 980 } 981 982 static void rbd_init_layout(struct rbd_device *rbd_dev) 983 { 984 if (rbd_dev->header.stripe_unit == 0 || 985 rbd_dev->header.stripe_count == 0) { 986 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header); 987 rbd_dev->header.stripe_count = 1; 988 } 989 990 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit; 991 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count; 992 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header); 993 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ? 994 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id; 995 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL); 996 } 997 998 /* 999 * Fill an rbd image header with information from the given format 1 1000 * on-disk header. 1001 */ 1002 static int rbd_header_from_disk(struct rbd_device *rbd_dev, 1003 struct rbd_image_header_ondisk *ondisk) 1004 { 1005 struct rbd_image_header *header = &rbd_dev->header; 1006 bool first_time = header->object_prefix == NULL; 1007 struct ceph_snap_context *snapc; 1008 char *object_prefix = NULL; 1009 char *snap_names = NULL; 1010 u64 *snap_sizes = NULL; 1011 u32 snap_count; 1012 int ret = -ENOMEM; 1013 u32 i; 1014 1015 /* Allocate this now to avoid having to handle failure below */ 1016 1017 if (first_time) { 1018 object_prefix = kstrndup(ondisk->object_prefix, 1019 sizeof(ondisk->object_prefix), 1020 GFP_KERNEL); 1021 if (!object_prefix) 1022 return -ENOMEM; 1023 } 1024 1025 /* Allocate the snapshot context and fill it in */ 1026 1027 snap_count = le32_to_cpu(ondisk->snap_count); 1028 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); 1029 if (!snapc) 1030 goto out_err; 1031 snapc->seq = le64_to_cpu(ondisk->snap_seq); 1032 if (snap_count) { 1033 struct rbd_image_snap_ondisk *snaps; 1034 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); 1035 1036 /* We'll keep a copy of the snapshot names... */ 1037 1038 if (snap_names_len > (u64)SIZE_MAX) 1039 goto out_2big; 1040 snap_names = kmalloc(snap_names_len, GFP_KERNEL); 1041 if (!snap_names) 1042 goto out_err; 1043 1044 /* ...as well as the array of their sizes. */ 1045 snap_sizes = kmalloc_array(snap_count, 1046 sizeof(*header->snap_sizes), 1047 GFP_KERNEL); 1048 if (!snap_sizes) 1049 goto out_err; 1050 1051 /* 1052 * Copy the names, and fill in each snapshot's id 1053 * and size. 1054 * 1055 * Note that rbd_dev_v1_header_info() guarantees the 1056 * ondisk buffer we're working with has 1057 * snap_names_len bytes beyond the end of the 1058 * snapshot id array, this memcpy() is safe. 
1059 */ 1060 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len); 1061 snaps = ondisk->snaps; 1062 for (i = 0; i < snap_count; i++) { 1063 snapc->snaps[i] = le64_to_cpu(snaps[i].id); 1064 snap_sizes[i] = le64_to_cpu(snaps[i].image_size); 1065 } 1066 } 1067 1068 /* We won't fail any more, fill in the header */ 1069 1070 if (first_time) { 1071 header->object_prefix = object_prefix; 1072 header->obj_order = ondisk->options.order; 1073 rbd_init_layout(rbd_dev); 1074 } else { 1075 ceph_put_snap_context(header->snapc); 1076 kfree(header->snap_names); 1077 kfree(header->snap_sizes); 1078 } 1079 1080 /* The remaining fields always get updated (when we refresh) */ 1081 1082 header->image_size = le64_to_cpu(ondisk->image_size); 1083 header->snapc = snapc; 1084 header->snap_names = snap_names; 1085 header->snap_sizes = snap_sizes; 1086 1087 return 0; 1088 out_2big: 1089 ret = -EIO; 1090 out_err: 1091 kfree(snap_sizes); 1092 kfree(snap_names); 1093 ceph_put_snap_context(snapc); 1094 kfree(object_prefix); 1095 1096 return ret; 1097 } 1098 1099 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) 1100 { 1101 const char *snap_name; 1102 1103 rbd_assert(which < rbd_dev->header.snapc->num_snaps); 1104 1105 /* Skip over names until we find the one we are looking for */ 1106 1107 snap_name = rbd_dev->header.snap_names; 1108 while (which--) 1109 snap_name += strlen(snap_name) + 1; 1110 1111 return kstrdup(snap_name, GFP_KERNEL); 1112 } 1113 1114 /* 1115 * Snapshot id comparison function for use with qsort()/bsearch(). 1116 * Note that result is for snapshots in *descending* order. 1117 */ 1118 static int snapid_compare_reverse(const void *s1, const void *s2) 1119 { 1120 u64 snap_id1 = *(u64 *)s1; 1121 u64 snap_id2 = *(u64 *)s2; 1122 1123 if (snap_id1 < snap_id2) 1124 return 1; 1125 return snap_id1 == snap_id2 ? 0 : -1; 1126 } 1127 1128 /* 1129 * Search a snapshot context to see if the given snapshot id is 1130 * present. 1131 * 1132 * Returns the position of the snapshot id in the array if it's found, 1133 * or BAD_SNAP_INDEX otherwise. 1134 * 1135 * Note: The snapshot array is in kept sorted (by the osd) in 1136 * reverse order, highest snapshot id first. 1137 */ 1138 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id) 1139 { 1140 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 1141 u64 *found; 1142 1143 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps, 1144 sizeof (snap_id), snapid_compare_reverse); 1145 1146 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX; 1147 } 1148 1149 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, 1150 u64 snap_id) 1151 { 1152 u32 which; 1153 const char *snap_name; 1154 1155 which = rbd_dev_snap_index(rbd_dev, snap_id); 1156 if (which == BAD_SNAP_INDEX) 1157 return ERR_PTR(-ENOENT); 1158 1159 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which); 1160 return snap_name ? 
snap_name : ERR_PTR(-ENOMEM); 1161 } 1162 1163 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) 1164 { 1165 if (snap_id == CEPH_NOSNAP) 1166 return RBD_SNAP_HEAD_NAME; 1167 1168 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 1169 if (rbd_dev->image_format == 1) 1170 return rbd_dev_v1_snap_name(rbd_dev, snap_id); 1171 1172 return rbd_dev_v2_snap_name(rbd_dev, snap_id); 1173 } 1174 1175 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 1176 u64 *snap_size) 1177 { 1178 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 1179 if (snap_id == CEPH_NOSNAP) { 1180 *snap_size = rbd_dev->header.image_size; 1181 } else if (rbd_dev->image_format == 1) { 1182 u32 which; 1183 1184 which = rbd_dev_snap_index(rbd_dev, snap_id); 1185 if (which == BAD_SNAP_INDEX) 1186 return -ENOENT; 1187 1188 *snap_size = rbd_dev->header.snap_sizes[which]; 1189 } else { 1190 u64 size = 0; 1191 int ret; 1192 1193 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size); 1194 if (ret) 1195 return ret; 1196 1197 *snap_size = size; 1198 } 1199 return 0; 1200 } 1201 1202 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) 1203 { 1204 u64 snap_id = rbd_dev->spec->snap_id; 1205 u64 size = 0; 1206 int ret; 1207 1208 ret = rbd_snap_size(rbd_dev, snap_id, &size); 1209 if (ret) 1210 return ret; 1211 1212 rbd_dev->mapping.size = size; 1213 return 0; 1214 } 1215 1216 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) 1217 { 1218 rbd_dev->mapping.size = 0; 1219 } 1220 1221 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes) 1222 { 1223 struct ceph_bio_iter it = *bio_pos; 1224 1225 ceph_bio_iter_advance(&it, off); 1226 ceph_bio_iter_advance_step(&it, bytes, ({ 1227 memzero_bvec(&bv); 1228 })); 1229 } 1230 1231 static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes) 1232 { 1233 struct ceph_bvec_iter it = *bvec_pos; 1234 1235 ceph_bvec_iter_advance(&it, off); 1236 ceph_bvec_iter_advance_step(&it, bytes, ({ 1237 memzero_bvec(&bv); 1238 })); 1239 } 1240 1241 /* 1242 * Zero a range in @obj_req data buffer defined by a bio (list) or 1243 * (private) bio_vec array. 1244 * 1245 * @off is relative to the start of the data buffer. 
1246 */ 1247 static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off, 1248 u32 bytes) 1249 { 1250 dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes); 1251 1252 switch (obj_req->img_request->data_type) { 1253 case OBJ_REQUEST_BIO: 1254 zero_bios(&obj_req->bio_pos, off, bytes); 1255 break; 1256 case OBJ_REQUEST_BVECS: 1257 case OBJ_REQUEST_OWN_BVECS: 1258 zero_bvecs(&obj_req->bvec_pos, off, bytes); 1259 break; 1260 default: 1261 BUG(); 1262 } 1263 } 1264 1265 static void rbd_obj_request_destroy(struct kref *kref); 1266 static void rbd_obj_request_put(struct rbd_obj_request *obj_request) 1267 { 1268 rbd_assert(obj_request != NULL); 1269 dout("%s: obj %p (was %d)\n", __func__, obj_request, 1270 kref_read(&obj_request->kref)); 1271 kref_put(&obj_request->kref, rbd_obj_request_destroy); 1272 } 1273 1274 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, 1275 struct rbd_obj_request *obj_request) 1276 { 1277 rbd_assert(obj_request->img_request == NULL); 1278 1279 /* Image request now owns object's original reference */ 1280 obj_request->img_request = img_request; 1281 dout("%s: img %p obj %p\n", __func__, img_request, obj_request); 1282 } 1283 1284 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request, 1285 struct rbd_obj_request *obj_request) 1286 { 1287 dout("%s: img %p obj %p\n", __func__, img_request, obj_request); 1288 list_del(&obj_request->ex.oe_item); 1289 rbd_assert(obj_request->img_request == img_request); 1290 rbd_obj_request_put(obj_request); 1291 } 1292 1293 static void rbd_osd_submit(struct ceph_osd_request *osd_req) 1294 { 1295 struct rbd_obj_request *obj_req = osd_req->r_priv; 1296 1297 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n", 1298 __func__, osd_req, obj_req, obj_req->ex.oe_objno, 1299 obj_req->ex.oe_off, obj_req->ex.oe_len); 1300 ceph_osdc_start_request(osd_req->r_osdc, osd_req); 1301 } 1302 1303 /* 1304 * The default/initial value for all image request flags is 0. Each 1305 * is conditionally set to 1 at image request initialization time 1306 * and currently never change thereafter. 1307 */ 1308 static void img_request_layered_set(struct rbd_img_request *img_request) 1309 { 1310 set_bit(IMG_REQ_LAYERED, &img_request->flags); 1311 } 1312 1313 static bool img_request_layered_test(struct rbd_img_request *img_request) 1314 { 1315 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0; 1316 } 1317 1318 static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req) 1319 { 1320 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 1321 1322 return !obj_req->ex.oe_off && 1323 obj_req->ex.oe_len == rbd_dev->layout.object_size; 1324 } 1325 1326 static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req) 1327 { 1328 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 1329 1330 return obj_req->ex.oe_off + obj_req->ex.oe_len == 1331 rbd_dev->layout.object_size; 1332 } 1333 1334 /* 1335 * Must be called after rbd_obj_calc_img_extents(). 
1336 */ 1337 static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req) 1338 { 1339 if (!obj_req->num_img_extents || 1340 (rbd_obj_is_entire(obj_req) && 1341 !obj_req->img_request->snapc->num_snaps)) 1342 return false; 1343 1344 return true; 1345 } 1346 1347 static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req) 1348 { 1349 return ceph_file_extents_bytes(obj_req->img_extents, 1350 obj_req->num_img_extents); 1351 } 1352 1353 static bool rbd_img_is_write(struct rbd_img_request *img_req) 1354 { 1355 switch (img_req->op_type) { 1356 case OBJ_OP_READ: 1357 return false; 1358 case OBJ_OP_WRITE: 1359 case OBJ_OP_DISCARD: 1360 case OBJ_OP_ZEROOUT: 1361 return true; 1362 default: 1363 BUG(); 1364 } 1365 } 1366 1367 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req) 1368 { 1369 struct rbd_obj_request *obj_req = osd_req->r_priv; 1370 int result; 1371 1372 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req, 1373 osd_req->r_result, obj_req); 1374 1375 /* 1376 * Writes aren't allowed to return a data payload. In some 1377 * guarded write cases (e.g. stat + zero on an empty object) 1378 * a stat response makes it through, but we don't care. 1379 */ 1380 if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request)) 1381 result = 0; 1382 else 1383 result = osd_req->r_result; 1384 1385 rbd_obj_handle_request(obj_req, result); 1386 } 1387 1388 static void rbd_osd_format_read(struct ceph_osd_request *osd_req) 1389 { 1390 struct rbd_obj_request *obj_request = osd_req->r_priv; 1391 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev; 1392 struct ceph_options *opt = rbd_dev->rbd_client->client->options; 1393 1394 osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica; 1395 osd_req->r_snapid = obj_request->img_request->snap_id; 1396 } 1397 1398 static void rbd_osd_format_write(struct ceph_osd_request *osd_req) 1399 { 1400 struct rbd_obj_request *obj_request = osd_req->r_priv; 1401 1402 osd_req->r_flags = CEPH_OSD_FLAG_WRITE; 1403 ktime_get_real_ts64(&osd_req->r_mtime); 1404 osd_req->r_data_offset = obj_request->ex.oe_off; 1405 } 1406 1407 static struct ceph_osd_request * 1408 __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, 1409 struct ceph_snap_context *snapc, int num_ops) 1410 { 1411 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 1412 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 1413 struct ceph_osd_request *req; 1414 const char *name_format = rbd_dev->image_format == 1 ? 1415 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT; 1416 int ret; 1417 1418 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO); 1419 if (!req) 1420 return ERR_PTR(-ENOMEM); 1421 1422 list_add_tail(&req->r_private_item, &obj_req->osd_reqs); 1423 req->r_callback = rbd_osd_req_callback; 1424 req->r_priv = obj_req; 1425 1426 /* 1427 * Data objects may be stored in a separate pool, but always in 1428 * the same namespace in that pool as the header in its pool. 
1429 */ 1430 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc); 1431 req->r_base_oloc.pool = rbd_dev->layout.pool_id; 1432 1433 ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format, 1434 rbd_dev->header.object_prefix, 1435 obj_req->ex.oe_objno); 1436 if (ret) 1437 return ERR_PTR(ret); 1438 1439 return req; 1440 } 1441 1442 static struct ceph_osd_request * 1443 rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops) 1444 { 1445 return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc, 1446 num_ops); 1447 } 1448 1449 static struct rbd_obj_request *rbd_obj_request_create(void) 1450 { 1451 struct rbd_obj_request *obj_request; 1452 1453 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO); 1454 if (!obj_request) 1455 return NULL; 1456 1457 ceph_object_extent_init(&obj_request->ex); 1458 INIT_LIST_HEAD(&obj_request->osd_reqs); 1459 mutex_init(&obj_request->state_mutex); 1460 kref_init(&obj_request->kref); 1461 1462 dout("%s %p\n", __func__, obj_request); 1463 return obj_request; 1464 } 1465 1466 static void rbd_obj_request_destroy(struct kref *kref) 1467 { 1468 struct rbd_obj_request *obj_request; 1469 struct ceph_osd_request *osd_req; 1470 u32 i; 1471 1472 obj_request = container_of(kref, struct rbd_obj_request, kref); 1473 1474 dout("%s: obj %p\n", __func__, obj_request); 1475 1476 while (!list_empty(&obj_request->osd_reqs)) { 1477 osd_req = list_first_entry(&obj_request->osd_reqs, 1478 struct ceph_osd_request, r_private_item); 1479 list_del_init(&osd_req->r_private_item); 1480 ceph_osdc_put_request(osd_req); 1481 } 1482 1483 switch (obj_request->img_request->data_type) { 1484 case OBJ_REQUEST_NODATA: 1485 case OBJ_REQUEST_BIO: 1486 case OBJ_REQUEST_BVECS: 1487 break; /* Nothing to do */ 1488 case OBJ_REQUEST_OWN_BVECS: 1489 kfree(obj_request->bvec_pos.bvecs); 1490 break; 1491 default: 1492 BUG(); 1493 } 1494 1495 kfree(obj_request->img_extents); 1496 if (obj_request->copyup_bvecs) { 1497 for (i = 0; i < obj_request->copyup_bvec_count; i++) { 1498 if (obj_request->copyup_bvecs[i].bv_page) 1499 __free_page(obj_request->copyup_bvecs[i].bv_page); 1500 } 1501 kfree(obj_request->copyup_bvecs); 1502 } 1503 1504 kmem_cache_free(rbd_obj_request_cache, obj_request); 1505 } 1506 1507 /* It's OK to call this for a device with no parent */ 1508 1509 static void rbd_spec_put(struct rbd_spec *spec); 1510 static void rbd_dev_unparent(struct rbd_device *rbd_dev) 1511 { 1512 rbd_dev_remove_parent(rbd_dev); 1513 rbd_spec_put(rbd_dev->parent_spec); 1514 rbd_dev->parent_spec = NULL; 1515 rbd_dev->parent_overlap = 0; 1516 } 1517 1518 /* 1519 * Parent image reference counting is used to determine when an 1520 * image's parent fields can be safely torn down--after there are no 1521 * more in-flight requests to the parent image. When the last 1522 * reference is dropped, cleaning them up is safe. 1523 */ 1524 static void rbd_dev_parent_put(struct rbd_device *rbd_dev) 1525 { 1526 int counter; 1527 1528 if (!rbd_dev->parent_spec) 1529 return; 1530 1531 counter = atomic_dec_return_safe(&rbd_dev->parent_ref); 1532 if (counter > 0) 1533 return; 1534 1535 /* Last reference; clean up parent data structures */ 1536 1537 if (!counter) 1538 rbd_dev_unparent(rbd_dev); 1539 else 1540 rbd_warn(rbd_dev, "parent reference underflow"); 1541 } 1542 1543 /* 1544 * If an image has a non-zero parent overlap, get a reference to its 1545 * parent. 
1546 * 1547 * Returns true if the rbd device has a parent with a non-zero 1548 * overlap and a reference for it was successfully taken, or 1549 * false otherwise. 1550 */ 1551 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) 1552 { 1553 int counter = 0; 1554 1555 if (!rbd_dev->parent_spec) 1556 return false; 1557 1558 if (rbd_dev->parent_overlap) 1559 counter = atomic_inc_return_safe(&rbd_dev->parent_ref); 1560 1561 if (counter < 0) 1562 rbd_warn(rbd_dev, "parent reference overflow"); 1563 1564 return counter > 0; 1565 } 1566 1567 static void rbd_img_request_init(struct rbd_img_request *img_request, 1568 struct rbd_device *rbd_dev, 1569 enum obj_operation_type op_type) 1570 { 1571 memset(img_request, 0, sizeof(*img_request)); 1572 1573 img_request->rbd_dev = rbd_dev; 1574 img_request->op_type = op_type; 1575 1576 INIT_LIST_HEAD(&img_request->lock_item); 1577 INIT_LIST_HEAD(&img_request->object_extents); 1578 mutex_init(&img_request->state_mutex); 1579 } 1580 1581 static void rbd_img_capture_header(struct rbd_img_request *img_req) 1582 { 1583 struct rbd_device *rbd_dev = img_req->rbd_dev; 1584 1585 lockdep_assert_held(&rbd_dev->header_rwsem); 1586 1587 if (rbd_img_is_write(img_req)) 1588 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); 1589 else 1590 img_req->snap_id = rbd_dev->spec->snap_id; 1591 1592 if (rbd_dev_parent_get(rbd_dev)) 1593 img_request_layered_set(img_req); 1594 } 1595 1596 static void rbd_img_request_destroy(struct rbd_img_request *img_request) 1597 { 1598 struct rbd_obj_request *obj_request; 1599 struct rbd_obj_request *next_obj_request; 1600 1601 dout("%s: img %p\n", __func__, img_request); 1602 1603 WARN_ON(!list_empty(&img_request->lock_item)); 1604 for_each_obj_request_safe(img_request, obj_request, next_obj_request) 1605 rbd_img_obj_request_del(img_request, obj_request); 1606 1607 if (img_request_layered_test(img_request)) 1608 rbd_dev_parent_put(img_request->rbd_dev); 1609 1610 if (rbd_img_is_write(img_request)) 1611 ceph_put_snap_context(img_request->snapc); 1612 1613 if (test_bit(IMG_REQ_CHILD, &img_request->flags)) 1614 kmem_cache_free(rbd_img_request_cache, img_request); 1615 } 1616 1617 #define BITS_PER_OBJ 2 1618 #define OBJS_PER_BYTE (BITS_PER_BYTE / BITS_PER_OBJ) 1619 #define OBJ_MASK ((1 << BITS_PER_OBJ) - 1) 1620 1621 static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno, 1622 u64 *index, u8 *shift) 1623 { 1624 u32 off; 1625 1626 rbd_assert(objno < rbd_dev->object_map_size); 1627 *index = div_u64_rem(objno, OBJS_PER_BYTE, &off); 1628 *shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ; 1629 } 1630 1631 static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno) 1632 { 1633 u64 index; 1634 u8 shift; 1635 1636 lockdep_assert_held(&rbd_dev->object_map_lock); 1637 __rbd_object_map_index(rbd_dev, objno, &index, &shift); 1638 return (rbd_dev->object_map[index] >> shift) & OBJ_MASK; 1639 } 1640 1641 static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val) 1642 { 1643 u64 index; 1644 u8 shift; 1645 u8 *p; 1646 1647 lockdep_assert_held(&rbd_dev->object_map_lock); 1648 rbd_assert(!(val & ~OBJ_MASK)); 1649 1650 __rbd_object_map_index(rbd_dev, objno, &index, &shift); 1651 p = &rbd_dev->object_map[index]; 1652 *p = (*p & ~(OBJ_MASK << shift)) | (val << shift); 1653 } 1654 1655 static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno) 1656 { 1657 u8 state; 1658 1659 spin_lock(&rbd_dev->object_map_lock); 1660 state = __rbd_object_map_get(rbd_dev, objno); 1661 
spin_unlock(&rbd_dev->object_map_lock); 1662 return state; 1663 } 1664 1665 static bool use_object_map(struct rbd_device *rbd_dev) 1666 { 1667 /* 1668 * An image mapped read-only can't use the object map -- it isn't 1669 * loaded because the header lock isn't acquired. Someone else can 1670 * write to the image and update the object map behind our back. 1671 * 1672 * A snapshot can't be written to, so using the object map is always 1673 * safe. 1674 */ 1675 if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev)) 1676 return false; 1677 1678 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) && 1679 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)); 1680 } 1681 1682 static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno) 1683 { 1684 u8 state; 1685 1686 /* fall back to default logic if object map is disabled or invalid */ 1687 if (!use_object_map(rbd_dev)) 1688 return true; 1689 1690 state = rbd_object_map_get(rbd_dev, objno); 1691 return state != OBJECT_NONEXISTENT; 1692 } 1693 1694 static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id, 1695 struct ceph_object_id *oid) 1696 { 1697 if (snap_id == CEPH_NOSNAP) 1698 ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX, 1699 rbd_dev->spec->image_id); 1700 else 1701 ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX, 1702 rbd_dev->spec->image_id, snap_id); 1703 } 1704 1705 static int rbd_object_map_lock(struct rbd_device *rbd_dev) 1706 { 1707 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 1708 CEPH_DEFINE_OID_ONSTACK(oid); 1709 u8 lock_type; 1710 char *lock_tag; 1711 struct ceph_locker *lockers; 1712 u32 num_lockers; 1713 bool broke_lock = false; 1714 int ret; 1715 1716 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid); 1717 1718 again: 1719 ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, 1720 CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0); 1721 if (ret != -EBUSY || broke_lock) { 1722 if (ret == -EEXIST) 1723 ret = 0; /* already locked by myself */ 1724 if (ret) 1725 rbd_warn(rbd_dev, "failed to lock object map: %d", ret); 1726 return ret; 1727 } 1728 1729 ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc, 1730 RBD_LOCK_NAME, &lock_type, &lock_tag, 1731 &lockers, &num_lockers); 1732 if (ret) { 1733 if (ret == -ENOENT) 1734 goto again; 1735 1736 rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret); 1737 return ret; 1738 } 1739 1740 kfree(lock_tag); 1741 if (num_lockers == 0) 1742 goto again; 1743 1744 rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu", 1745 ENTITY_NAME(lockers[0].id.name)); 1746 1747 ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc, 1748 RBD_LOCK_NAME, lockers[0].id.cookie, 1749 &lockers[0].id.name); 1750 ceph_free_lockers(lockers, num_lockers); 1751 if (ret) { 1752 if (ret == -ENOENT) 1753 goto again; 1754 1755 rbd_warn(rbd_dev, "failed to break object map lock: %d", ret); 1756 return ret; 1757 } 1758 1759 broke_lock = true; 1760 goto again; 1761 } 1762 1763 static void rbd_object_map_unlock(struct rbd_device *rbd_dev) 1764 { 1765 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 1766 CEPH_DEFINE_OID_ONSTACK(oid); 1767 int ret; 1768 1769 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid); 1770 1771 ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, 1772 ""); 1773 if (ret && ret != -ENOENT) 1774 rbd_warn(rbd_dev, "failed to unlock object map: %d", ret); 1775 } 1776 1777 static int decode_object_map_header(void **p, void *end, u64 *object_map_size) 1778 { 1779 u8 
struct_v; 1780 u32 struct_len; 1781 u32 header_len; 1782 void *header_end; 1783 int ret; 1784 1785 ceph_decode_32_safe(p, end, header_len, e_inval); 1786 header_end = *p + header_len; 1787 1788 ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v, 1789 &struct_len); 1790 if (ret) 1791 return ret; 1792 1793 ceph_decode_64_safe(p, end, *object_map_size, e_inval); 1794 1795 *p = header_end; 1796 return 0; 1797 1798 e_inval: 1799 return -EINVAL; 1800 } 1801 1802 static int __rbd_object_map_load(struct rbd_device *rbd_dev) 1803 { 1804 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 1805 CEPH_DEFINE_OID_ONSTACK(oid); 1806 struct page **pages; 1807 void *p, *end; 1808 size_t reply_len; 1809 u64 num_objects; 1810 u64 object_map_bytes; 1811 u64 object_map_size; 1812 int num_pages; 1813 int ret; 1814 1815 rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size); 1816 1817 num_objects = ceph_get_num_objects(&rbd_dev->layout, 1818 rbd_dev->mapping.size); 1819 object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ, 1820 BITS_PER_BYTE); 1821 num_pages = calc_pages_for(0, object_map_bytes) + 1; 1822 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 1823 if (IS_ERR(pages)) 1824 return PTR_ERR(pages); 1825 1826 reply_len = num_pages * PAGE_SIZE; 1827 rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid); 1828 ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc, 1829 "rbd", "object_map_load", CEPH_OSD_FLAG_READ, 1830 NULL, 0, pages, &reply_len); 1831 if (ret) 1832 goto out; 1833 1834 p = page_address(pages[0]); 1835 end = p + min(reply_len, (size_t)PAGE_SIZE); 1836 ret = decode_object_map_header(&p, end, &object_map_size); 1837 if (ret) 1838 goto out; 1839 1840 if (object_map_size != num_objects) { 1841 rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu", 1842 object_map_size, num_objects); 1843 ret = -EINVAL; 1844 goto out; 1845 } 1846 1847 if (offset_in_page(p) + object_map_bytes > reply_len) { 1848 ret = -EINVAL; 1849 goto out; 1850 } 1851 1852 rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL); 1853 if (!rbd_dev->object_map) { 1854 ret = -ENOMEM; 1855 goto out; 1856 } 1857 1858 rbd_dev->object_map_size = object_map_size; 1859 ceph_copy_from_page_vector(pages, rbd_dev->object_map, 1860 offset_in_page(p), object_map_bytes); 1861 1862 out: 1863 ceph_release_page_vector(pages, num_pages); 1864 return ret; 1865 } 1866 1867 static void rbd_object_map_free(struct rbd_device *rbd_dev) 1868 { 1869 kvfree(rbd_dev->object_map); 1870 rbd_dev->object_map = NULL; 1871 rbd_dev->object_map_size = 0; 1872 } 1873 1874 static int rbd_object_map_load(struct rbd_device *rbd_dev) 1875 { 1876 int ret; 1877 1878 ret = __rbd_object_map_load(rbd_dev); 1879 if (ret) 1880 return ret; 1881 1882 ret = rbd_dev_v2_get_flags(rbd_dev); 1883 if (ret) { 1884 rbd_object_map_free(rbd_dev); 1885 return ret; 1886 } 1887 1888 if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID) 1889 rbd_warn(rbd_dev, "object map is invalid"); 1890 1891 return 0; 1892 } 1893 1894 static int rbd_object_map_open(struct rbd_device *rbd_dev) 1895 { 1896 int ret; 1897 1898 ret = rbd_object_map_lock(rbd_dev); 1899 if (ret) 1900 return ret; 1901 1902 ret = rbd_object_map_load(rbd_dev); 1903 if (ret) { 1904 rbd_object_map_unlock(rbd_dev); 1905 return ret; 1906 } 1907 1908 return 0; 1909 } 1910 1911 static void rbd_object_map_close(struct rbd_device *rbd_dev) 1912 { 1913 rbd_object_map_free(rbd_dev); 1914 rbd_object_map_unlock(rbd_dev); 1915 } 1916 1917 /* 1918 * This function needs 
snap_id (or more precisely just something to 1919 * distinguish between HEAD and snapshot object maps), new_state and 1920 * current_state that were passed to rbd_object_map_update(). 1921 * 1922 * To avoid allocating and stashing a context we piggyback on the OSD 1923 * request. A HEAD update has two ops (assert_locked). For new_state 1924 * and current_state we decode our own object_map_update op, encoded in 1925 * rbd_cls_object_map_update(). 1926 */ 1927 static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req, 1928 struct ceph_osd_request *osd_req) 1929 { 1930 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 1931 struct ceph_osd_data *osd_data; 1932 u64 objno; 1933 u8 state, new_state, current_state; 1934 bool has_current_state; 1935 void *p; 1936 1937 if (osd_req->r_result) 1938 return osd_req->r_result; 1939 1940 /* 1941 * Nothing to do for a snapshot object map. 1942 */ 1943 if (osd_req->r_num_ops == 1) 1944 return 0; 1945 1946 /* 1947 * Update in-memory HEAD object map. 1948 */ 1949 rbd_assert(osd_req->r_num_ops == 2); 1950 osd_data = osd_req_op_data(osd_req, 1, cls, request_data); 1951 rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES); 1952 1953 p = page_address(osd_data->pages[0]); 1954 objno = ceph_decode_64(&p); 1955 rbd_assert(objno == obj_req->ex.oe_objno); 1956 rbd_assert(ceph_decode_64(&p) == objno + 1); 1957 new_state = ceph_decode_8(&p); 1958 has_current_state = ceph_decode_8(&p); 1959 if (has_current_state) 1960 current_state = ceph_decode_8(&p); 1961 1962 spin_lock(&rbd_dev->object_map_lock); 1963 state = __rbd_object_map_get(rbd_dev, objno); 1964 if (!has_current_state || current_state == state || 1965 (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN)) 1966 __rbd_object_map_set(rbd_dev, objno, new_state); 1967 spin_unlock(&rbd_dev->object_map_lock); 1968 1969 return 0; 1970 } 1971 1972 static void rbd_object_map_callback(struct ceph_osd_request *osd_req) 1973 { 1974 struct rbd_obj_request *obj_req = osd_req->r_priv; 1975 int result; 1976 1977 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req, 1978 osd_req->r_result, obj_req); 1979 1980 result = rbd_object_map_update_finish(obj_req, osd_req); 1981 rbd_obj_handle_request(obj_req, result); 1982 } 1983 1984 static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state) 1985 { 1986 u8 state = rbd_object_map_get(rbd_dev, objno); 1987 1988 if (state == new_state || 1989 (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) || 1990 (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING)) 1991 return false; 1992 1993 return true; 1994 } 1995 1996 static int rbd_cls_object_map_update(struct ceph_osd_request *req, 1997 int which, u64 objno, u8 new_state, 1998 const u8 *current_state) 1999 { 2000 struct page **pages; 2001 void *p, *start; 2002 int ret; 2003 2004 ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update"); 2005 if (ret) 2006 return ret; 2007 2008 pages = ceph_alloc_page_vector(1, GFP_NOIO); 2009 if (IS_ERR(pages)) 2010 return PTR_ERR(pages); 2011 2012 p = start = page_address(pages[0]); 2013 ceph_encode_64(&p, objno); 2014 ceph_encode_64(&p, objno + 1); 2015 ceph_encode_8(&p, new_state); 2016 if (current_state) { 2017 ceph_encode_8(&p, 1); 2018 ceph_encode_8(&p, *current_state); 2019 } else { 2020 ceph_encode_8(&p, 0); 2021 } 2022 2023 osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0, 2024 false, true); 2025 return 0; 2026 } 2027 2028 /* 2029 * Return: 2030 * 0 - object map update sent 2031 * 1 - 
object map update isn't needed 2032 * <0 - error 2033 */ 2034 static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id, 2035 u8 new_state, const u8 *current_state) 2036 { 2037 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2038 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2039 struct ceph_osd_request *req; 2040 int num_ops = 1; 2041 int which = 0; 2042 int ret; 2043 2044 if (snap_id == CEPH_NOSNAP) { 2045 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state)) 2046 return 1; 2047 2048 num_ops++; /* assert_locked */ 2049 } 2050 2051 req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO); 2052 if (!req) 2053 return -ENOMEM; 2054 2055 list_add_tail(&req->r_private_item, &obj_req->osd_reqs); 2056 req->r_callback = rbd_object_map_callback; 2057 req->r_priv = obj_req; 2058 2059 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid); 2060 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc); 2061 req->r_flags = CEPH_OSD_FLAG_WRITE; 2062 ktime_get_real_ts64(&req->r_mtime); 2063 2064 if (snap_id == CEPH_NOSNAP) { 2065 /* 2066 * Protect against possible race conditions during lock 2067 * ownership transitions. 2068 */ 2069 ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME, 2070 CEPH_CLS_LOCK_EXCLUSIVE, "", ""); 2071 if (ret) 2072 return ret; 2073 } 2074 2075 ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno, 2076 new_state, current_state); 2077 if (ret) 2078 return ret; 2079 2080 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 2081 if (ret) 2082 return ret; 2083 2084 ceph_osdc_start_request(osdc, req); 2085 return 0; 2086 } 2087 2088 static void prune_extents(struct ceph_file_extent *img_extents, 2089 u32 *num_img_extents, u64 overlap) 2090 { 2091 u32 cnt = *num_img_extents; 2092 2093 /* drop extents completely beyond the overlap */ 2094 while (cnt && img_extents[cnt - 1].fe_off >= overlap) 2095 cnt--; 2096 2097 if (cnt) { 2098 struct ceph_file_extent *ex = &img_extents[cnt - 1]; 2099 2100 /* trim final overlapping extent */ 2101 if (ex->fe_off + ex->fe_len > overlap) 2102 ex->fe_len = overlap - ex->fe_off; 2103 } 2104 2105 *num_img_extents = cnt; 2106 } 2107 2108 /* 2109 * Determine the byte range(s) covered by either just the object extent 2110 * or the entire object in the parent image. 2111 */ 2112 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req, 2113 bool entire) 2114 { 2115 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2116 int ret; 2117 2118 if (!rbd_dev->parent_overlap) 2119 return 0; 2120 2121 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno, 2122 entire ? 0 : obj_req->ex.oe_off, 2123 entire ? 
rbd_dev->layout.object_size : 2124 obj_req->ex.oe_len, 2125 &obj_req->img_extents, 2126 &obj_req->num_img_extents); 2127 if (ret) 2128 return ret; 2129 2130 prune_extents(obj_req->img_extents, &obj_req->num_img_extents, 2131 rbd_dev->parent_overlap); 2132 return 0; 2133 } 2134 2135 static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which) 2136 { 2137 struct rbd_obj_request *obj_req = osd_req->r_priv; 2138 2139 switch (obj_req->img_request->data_type) { 2140 case OBJ_REQUEST_BIO: 2141 osd_req_op_extent_osd_data_bio(osd_req, which, 2142 &obj_req->bio_pos, 2143 obj_req->ex.oe_len); 2144 break; 2145 case OBJ_REQUEST_BVECS: 2146 case OBJ_REQUEST_OWN_BVECS: 2147 rbd_assert(obj_req->bvec_pos.iter.bi_size == 2148 obj_req->ex.oe_len); 2149 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count); 2150 osd_req_op_extent_osd_data_bvec_pos(osd_req, which, 2151 &obj_req->bvec_pos); 2152 break; 2153 default: 2154 BUG(); 2155 } 2156 } 2157 2158 static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which) 2159 { 2160 struct page **pages; 2161 2162 /* 2163 * The response data for a STAT call consists of: 2164 * le64 length; 2165 * struct { 2166 * le32 tv_sec; 2167 * le32 tv_nsec; 2168 * } mtime; 2169 */ 2170 pages = ceph_alloc_page_vector(1, GFP_NOIO); 2171 if (IS_ERR(pages)) 2172 return PTR_ERR(pages); 2173 2174 osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0); 2175 osd_req_op_raw_data_in_pages(osd_req, which, pages, 2176 8 + sizeof(struct ceph_timespec), 2177 0, false, true); 2178 return 0; 2179 } 2180 2181 static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which, 2182 u32 bytes) 2183 { 2184 struct rbd_obj_request *obj_req = osd_req->r_priv; 2185 int ret; 2186 2187 ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup"); 2188 if (ret) 2189 return ret; 2190 2191 osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs, 2192 obj_req->copyup_bvec_count, bytes); 2193 return 0; 2194 } 2195 2196 static int rbd_obj_init_read(struct rbd_obj_request *obj_req) 2197 { 2198 obj_req->read_state = RBD_OBJ_READ_START; 2199 return 0; 2200 } 2201 2202 static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req, 2203 int which) 2204 { 2205 struct rbd_obj_request *obj_req = osd_req->r_priv; 2206 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2207 u16 opcode; 2208 2209 if (!use_object_map(rbd_dev) || 2210 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) { 2211 osd_req_op_alloc_hint_init(osd_req, which++, 2212 rbd_dev->layout.object_size, 2213 rbd_dev->layout.object_size, 2214 rbd_dev->opts->alloc_hint_flags); 2215 } 2216 2217 if (rbd_obj_is_entire(obj_req)) 2218 opcode = CEPH_OSD_OP_WRITEFULL; 2219 else 2220 opcode = CEPH_OSD_OP_WRITE; 2221 2222 osd_req_op_extent_init(osd_req, which, opcode, 2223 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0); 2224 rbd_osd_setup_data(osd_req, which); 2225 } 2226 2227 static int rbd_obj_init_write(struct rbd_obj_request *obj_req) 2228 { 2229 int ret; 2230 2231 /* reverse map the entire object onto the parent */ 2232 ret = rbd_obj_calc_img_extents(obj_req, true); 2233 if (ret) 2234 return ret; 2235 2236 if (rbd_obj_copyup_enabled(obj_req)) 2237 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; 2238 2239 obj_req->write_state = RBD_OBJ_WRITE_START; 2240 return 0; 2241 } 2242 2243 static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req) 2244 { 2245 return rbd_obj_is_tail(obj_req) ? 
CEPH_OSD_OP_TRUNCATE : 2246 CEPH_OSD_OP_ZERO; 2247 } 2248 2249 static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req, 2250 int which) 2251 { 2252 struct rbd_obj_request *obj_req = osd_req->r_priv; 2253 2254 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) { 2255 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION); 2256 osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0); 2257 } else { 2258 osd_req_op_extent_init(osd_req, which, 2259 truncate_or_zero_opcode(obj_req), 2260 obj_req->ex.oe_off, obj_req->ex.oe_len, 2261 0, 0); 2262 } 2263 } 2264 2265 static int rbd_obj_init_discard(struct rbd_obj_request *obj_req) 2266 { 2267 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2268 u64 off, next_off; 2269 int ret; 2270 2271 /* 2272 * Align the range to alloc_size boundary and punt on discards 2273 * that are too small to free up any space. 2274 * 2275 * alloc_size == object_size && is_tail() is a special case for 2276 * filestore with filestore_punch_hole = false, needed to allow 2277 * truncate (in addition to delete). 2278 */ 2279 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size || 2280 !rbd_obj_is_tail(obj_req)) { 2281 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size); 2282 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len, 2283 rbd_dev->opts->alloc_size); 2284 if (off >= next_off) 2285 return 1; 2286 2287 dout("%s %p %llu~%llu -> %llu~%llu\n", __func__, 2288 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len, 2289 off, next_off - off); 2290 obj_req->ex.oe_off = off; 2291 obj_req->ex.oe_len = next_off - off; 2292 } 2293 2294 /* reverse map the entire object onto the parent */ 2295 ret = rbd_obj_calc_img_extents(obj_req, true); 2296 if (ret) 2297 return ret; 2298 2299 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT; 2300 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) 2301 obj_req->flags |= RBD_OBJ_FLAG_DELETION; 2302 2303 obj_req->write_state = RBD_OBJ_WRITE_START; 2304 return 0; 2305 } 2306 2307 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req, 2308 int which) 2309 { 2310 struct rbd_obj_request *obj_req = osd_req->r_priv; 2311 u16 opcode; 2312 2313 if (rbd_obj_is_entire(obj_req)) { 2314 if (obj_req->num_img_extents) { 2315 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)) 2316 osd_req_op_init(osd_req, which++, 2317 CEPH_OSD_OP_CREATE, 0); 2318 opcode = CEPH_OSD_OP_TRUNCATE; 2319 } else { 2320 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION); 2321 osd_req_op_init(osd_req, which++, 2322 CEPH_OSD_OP_DELETE, 0); 2323 opcode = 0; 2324 } 2325 } else { 2326 opcode = truncate_or_zero_opcode(obj_req); 2327 } 2328 2329 if (opcode) 2330 osd_req_op_extent_init(osd_req, which, opcode, 2331 obj_req->ex.oe_off, obj_req->ex.oe_len, 2332 0, 0); 2333 } 2334 2335 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req) 2336 { 2337 int ret; 2338 2339 /* reverse map the entire object onto the parent */ 2340 ret = rbd_obj_calc_img_extents(obj_req, true); 2341 if (ret) 2342 return ret; 2343 2344 if (rbd_obj_copyup_enabled(obj_req)) 2345 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; 2346 if (!obj_req->num_img_extents) { 2347 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT; 2348 if (rbd_obj_is_entire(obj_req)) 2349 obj_req->flags |= RBD_OBJ_FLAG_DELETION; 2350 } 2351 2352 obj_req->write_state = RBD_OBJ_WRITE_START; 2353 return 0; 2354 } 2355 2356 static int count_write_ops(struct rbd_obj_request *obj_req) 2357 { 2358 struct rbd_img_request *img_req = obj_req->img_request; 2359 2360 
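/*
 * Informal note (not from the original source): the count returned here
 * has to agree with the ops that rbd_osd_setup_write_ops() will emit for
 * the same request.  For example, a write without an object map gets
 * setallochint + write/writefull (2 ops), while a write to an object the
 * object map says may already exist skips the alloc hint (1 op) and a
 * discard is a single delete/truncate/zero op.
 */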
switch (img_req->op_type) { 2361 case OBJ_OP_WRITE: 2362 if (!use_object_map(img_req->rbd_dev) || 2363 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) 2364 return 2; /* setallochint + write/writefull */ 2365 2366 return 1; /* write/writefull */ 2367 case OBJ_OP_DISCARD: 2368 return 1; /* delete/truncate/zero */ 2369 case OBJ_OP_ZEROOUT: 2370 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents && 2371 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)) 2372 return 2; /* create + truncate */ 2373 2374 return 1; /* delete/truncate/zero */ 2375 default: 2376 BUG(); 2377 } 2378 } 2379 2380 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req, 2381 int which) 2382 { 2383 struct rbd_obj_request *obj_req = osd_req->r_priv; 2384 2385 switch (obj_req->img_request->op_type) { 2386 case OBJ_OP_WRITE: 2387 __rbd_osd_setup_write_ops(osd_req, which); 2388 break; 2389 case OBJ_OP_DISCARD: 2390 __rbd_osd_setup_discard_ops(osd_req, which); 2391 break; 2392 case OBJ_OP_ZEROOUT: 2393 __rbd_osd_setup_zeroout_ops(osd_req, which); 2394 break; 2395 default: 2396 BUG(); 2397 } 2398 } 2399 2400 /* 2401 * Prune the list of object requests (adjust offset and/or length, drop 2402 * redundant requests). Prepare object request state machines and image 2403 * request state machine for execution. 2404 */ 2405 static int __rbd_img_fill_request(struct rbd_img_request *img_req) 2406 { 2407 struct rbd_obj_request *obj_req, *next_obj_req; 2408 int ret; 2409 2410 for_each_obj_request_safe(img_req, obj_req, next_obj_req) { 2411 switch (img_req->op_type) { 2412 case OBJ_OP_READ: 2413 ret = rbd_obj_init_read(obj_req); 2414 break; 2415 case OBJ_OP_WRITE: 2416 ret = rbd_obj_init_write(obj_req); 2417 break; 2418 case OBJ_OP_DISCARD: 2419 ret = rbd_obj_init_discard(obj_req); 2420 break; 2421 case OBJ_OP_ZEROOUT: 2422 ret = rbd_obj_init_zeroout(obj_req); 2423 break; 2424 default: 2425 BUG(); 2426 } 2427 if (ret < 0) 2428 return ret; 2429 if (ret > 0) { 2430 rbd_img_obj_request_del(img_req, obj_req); 2431 continue; 2432 } 2433 } 2434 2435 img_req->state = RBD_IMG_START; 2436 return 0; 2437 } 2438 2439 union rbd_img_fill_iter { 2440 struct ceph_bio_iter bio_iter; 2441 struct ceph_bvec_iter bvec_iter; 2442 }; 2443 2444 struct rbd_img_fill_ctx { 2445 enum obj_request_type pos_type; 2446 union rbd_img_fill_iter *pos; 2447 union rbd_img_fill_iter iter; 2448 ceph_object_extent_fn_t set_pos_fn; 2449 ceph_object_extent_fn_t count_fn; 2450 ceph_object_extent_fn_t copy_fn; 2451 }; 2452 2453 static struct ceph_object_extent *alloc_object_extent(void *arg) 2454 { 2455 struct rbd_img_request *img_req = arg; 2456 struct rbd_obj_request *obj_req; 2457 2458 obj_req = rbd_obj_request_create(); 2459 if (!obj_req) 2460 return NULL; 2461 2462 rbd_img_obj_request_add(img_req, obj_req); 2463 return &obj_req->ex; 2464 } 2465 2466 /* 2467 * While su != os && sc == 1 is technically not fancy (it's the same 2468 * layout as su == os && sc == 1), we can't use the nocopy path for it 2469 * because ->set_pos_fn() should be called only once per object. 2470 * ceph_file_to_extents() invokes action_fn once per stripe unit, so 2471 * treat su != os && sc == 1 as fancy. 
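 *
 * A couple of illustrative layouts (example values only):
 *
 *   su == os == 4M, sc == 1   -> not fancy, nocopy path
 *   su == 64K, os == 4M       -> fancy, bvecs are counted and copied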
2472 */ 2473 static bool rbd_layout_is_fancy(struct ceph_file_layout *l) 2474 { 2475 return l->stripe_unit != l->object_size; 2476 } 2477 2478 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req, 2479 struct ceph_file_extent *img_extents, 2480 u32 num_img_extents, 2481 struct rbd_img_fill_ctx *fctx) 2482 { 2483 u32 i; 2484 int ret; 2485 2486 img_req->data_type = fctx->pos_type; 2487 2488 /* 2489 * Create object requests and set each object request's starting 2490 * position in the provided bio (list) or bio_vec array. 2491 */ 2492 fctx->iter = *fctx->pos; 2493 for (i = 0; i < num_img_extents; i++) { 2494 ret = ceph_file_to_extents(&img_req->rbd_dev->layout, 2495 img_extents[i].fe_off, 2496 img_extents[i].fe_len, 2497 &img_req->object_extents, 2498 alloc_object_extent, img_req, 2499 fctx->set_pos_fn, &fctx->iter); 2500 if (ret) 2501 return ret; 2502 } 2503 2504 return __rbd_img_fill_request(img_req); 2505 } 2506 2507 /* 2508 * Map a list of image extents to a list of object extents, create the 2509 * corresponding object requests (normally each to a different object, 2510 * but not always) and add them to @img_req. For each object request, 2511 * set up its data descriptor to point to the corresponding chunk(s) of 2512 * @fctx->pos data buffer. 2513 * 2514 * Because ceph_file_to_extents() will merge adjacent object extents 2515 * together, each object request's data descriptor may point to multiple 2516 * different chunks of @fctx->pos data buffer. 2517 * 2518 * @fctx->pos data buffer is assumed to be large enough. 2519 */ 2520 static int rbd_img_fill_request(struct rbd_img_request *img_req, 2521 struct ceph_file_extent *img_extents, 2522 u32 num_img_extents, 2523 struct rbd_img_fill_ctx *fctx) 2524 { 2525 struct rbd_device *rbd_dev = img_req->rbd_dev; 2526 struct rbd_obj_request *obj_req; 2527 u32 i; 2528 int ret; 2529 2530 if (fctx->pos_type == OBJ_REQUEST_NODATA || 2531 !rbd_layout_is_fancy(&rbd_dev->layout)) 2532 return rbd_img_fill_request_nocopy(img_req, img_extents, 2533 num_img_extents, fctx); 2534 2535 img_req->data_type = OBJ_REQUEST_OWN_BVECS; 2536 2537 /* 2538 * Create object requests and determine ->bvec_count for each object 2539 * request. Note that ->bvec_count sum over all object requests may 2540 * be greater than the number of bio_vecs in the provided bio (list) 2541 * or bio_vec array because when mapped, those bio_vecs can straddle 2542 * stripe unit boundaries. 2543 */ 2544 fctx->iter = *fctx->pos; 2545 for (i = 0; i < num_img_extents; i++) { 2546 ret = ceph_file_to_extents(&rbd_dev->layout, 2547 img_extents[i].fe_off, 2548 img_extents[i].fe_len, 2549 &img_req->object_extents, 2550 alloc_object_extent, img_req, 2551 fctx->count_fn, &fctx->iter); 2552 if (ret) 2553 return ret; 2554 } 2555 2556 for_each_obj_request(img_req, obj_req) { 2557 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count, 2558 sizeof(*obj_req->bvec_pos.bvecs), 2559 GFP_NOIO); 2560 if (!obj_req->bvec_pos.bvecs) 2561 return -ENOMEM; 2562 } 2563 2564 /* 2565 * Fill in each object request's private bio_vec array, splitting and 2566 * rearranging the provided bio_vecs in stripe unit chunks as needed. 
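 * (Informal note: this is the ->copy_fn pass, i.e. copy_bio_bvecs() or
 * copy_bvecs() below, which bumps ->bvec_idx and ->bvec_pos.iter.bi_size
 * as each chunk is copied.)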
2567 */ 2568 fctx->iter = *fctx->pos; 2569 for (i = 0; i < num_img_extents; i++) { 2570 ret = ceph_iterate_extents(&rbd_dev->layout, 2571 img_extents[i].fe_off, 2572 img_extents[i].fe_len, 2573 &img_req->object_extents, 2574 fctx->copy_fn, &fctx->iter); 2575 if (ret) 2576 return ret; 2577 } 2578 2579 return __rbd_img_fill_request(img_req); 2580 } 2581 2582 static int rbd_img_fill_nodata(struct rbd_img_request *img_req, 2583 u64 off, u64 len) 2584 { 2585 struct ceph_file_extent ex = { off, len }; 2586 union rbd_img_fill_iter dummy = {}; 2587 struct rbd_img_fill_ctx fctx = { 2588 .pos_type = OBJ_REQUEST_NODATA, 2589 .pos = &dummy, 2590 }; 2591 2592 return rbd_img_fill_request(img_req, &ex, 1, &fctx); 2593 } 2594 2595 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg) 2596 { 2597 struct rbd_obj_request *obj_req = 2598 container_of(ex, struct rbd_obj_request, ex); 2599 struct ceph_bio_iter *it = arg; 2600 2601 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes); 2602 obj_req->bio_pos = *it; 2603 ceph_bio_iter_advance(it, bytes); 2604 } 2605 2606 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg) 2607 { 2608 struct rbd_obj_request *obj_req = 2609 container_of(ex, struct rbd_obj_request, ex); 2610 struct ceph_bio_iter *it = arg; 2611 2612 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes); 2613 ceph_bio_iter_advance_step(it, bytes, ({ 2614 obj_req->bvec_count++; 2615 })); 2616 2617 } 2618 2619 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg) 2620 { 2621 struct rbd_obj_request *obj_req = 2622 container_of(ex, struct rbd_obj_request, ex); 2623 struct ceph_bio_iter *it = arg; 2624 2625 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes); 2626 ceph_bio_iter_advance_step(it, bytes, ({ 2627 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv; 2628 obj_req->bvec_pos.iter.bi_size += bv.bv_len; 2629 })); 2630 } 2631 2632 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req, 2633 struct ceph_file_extent *img_extents, 2634 u32 num_img_extents, 2635 struct ceph_bio_iter *bio_pos) 2636 { 2637 struct rbd_img_fill_ctx fctx = { 2638 .pos_type = OBJ_REQUEST_BIO, 2639 .pos = (union rbd_img_fill_iter *)bio_pos, 2640 .set_pos_fn = set_bio_pos, 2641 .count_fn = count_bio_bvecs, 2642 .copy_fn = copy_bio_bvecs, 2643 }; 2644 2645 return rbd_img_fill_request(img_req, img_extents, num_img_extents, 2646 &fctx); 2647 } 2648 2649 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req, 2650 u64 off, u64 len, struct bio *bio) 2651 { 2652 struct ceph_file_extent ex = { off, len }; 2653 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter }; 2654 2655 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it); 2656 } 2657 2658 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg) 2659 { 2660 struct rbd_obj_request *obj_req = 2661 container_of(ex, struct rbd_obj_request, ex); 2662 struct ceph_bvec_iter *it = arg; 2663 2664 obj_req->bvec_pos = *it; 2665 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes); 2666 ceph_bvec_iter_advance(it, bytes); 2667 } 2668 2669 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg) 2670 { 2671 struct rbd_obj_request *obj_req = 2672 container_of(ex, struct rbd_obj_request, ex); 2673 struct ceph_bvec_iter *it = arg; 2674 2675 ceph_bvec_iter_advance_step(it, bytes, ({ 2676 obj_req->bvec_count++; 2677 })); 2678 } 2679 2680 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg) 2681 { 2682 
struct rbd_obj_request *obj_req = 2683 container_of(ex, struct rbd_obj_request, ex); 2684 struct ceph_bvec_iter *it = arg; 2685 2686 ceph_bvec_iter_advance_step(it, bytes, ({ 2687 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv; 2688 obj_req->bvec_pos.iter.bi_size += bv.bv_len; 2689 })); 2690 } 2691 2692 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req, 2693 struct ceph_file_extent *img_extents, 2694 u32 num_img_extents, 2695 struct ceph_bvec_iter *bvec_pos) 2696 { 2697 struct rbd_img_fill_ctx fctx = { 2698 .pos_type = OBJ_REQUEST_BVECS, 2699 .pos = (union rbd_img_fill_iter *)bvec_pos, 2700 .set_pos_fn = set_bvec_pos, 2701 .count_fn = count_bvecs, 2702 .copy_fn = copy_bvecs, 2703 }; 2704 2705 return rbd_img_fill_request(img_req, img_extents, num_img_extents, 2706 &fctx); 2707 } 2708 2709 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req, 2710 struct ceph_file_extent *img_extents, 2711 u32 num_img_extents, 2712 struct bio_vec *bvecs) 2713 { 2714 struct ceph_bvec_iter it = { 2715 .bvecs = bvecs, 2716 .iter = { .bi_size = ceph_file_extents_bytes(img_extents, 2717 num_img_extents) }, 2718 }; 2719 2720 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents, 2721 &it); 2722 } 2723 2724 static void rbd_img_handle_request_work(struct work_struct *work) 2725 { 2726 struct rbd_img_request *img_req = 2727 container_of(work, struct rbd_img_request, work); 2728 2729 rbd_img_handle_request(img_req, img_req->work_result); 2730 } 2731 2732 static void rbd_img_schedule(struct rbd_img_request *img_req, int result) 2733 { 2734 INIT_WORK(&img_req->work, rbd_img_handle_request_work); 2735 img_req->work_result = result; 2736 queue_work(rbd_wq, &img_req->work); 2737 } 2738 2739 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req) 2740 { 2741 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2742 2743 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) { 2744 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST; 2745 return true; 2746 } 2747 2748 dout("%s %p objno %llu assuming dne\n", __func__, obj_req, 2749 obj_req->ex.oe_objno); 2750 return false; 2751 } 2752 2753 static int rbd_obj_read_object(struct rbd_obj_request *obj_req) 2754 { 2755 struct ceph_osd_request *osd_req; 2756 int ret; 2757 2758 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1); 2759 if (IS_ERR(osd_req)) 2760 return PTR_ERR(osd_req); 2761 2762 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ, 2763 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0); 2764 rbd_osd_setup_data(osd_req, 0); 2765 rbd_osd_format_read(osd_req); 2766 2767 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO); 2768 if (ret) 2769 return ret; 2770 2771 rbd_osd_submit(osd_req); 2772 return 0; 2773 } 2774 2775 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req) 2776 { 2777 struct rbd_img_request *img_req = obj_req->img_request; 2778 struct rbd_device *parent = img_req->rbd_dev->parent; 2779 struct rbd_img_request *child_img_req; 2780 int ret; 2781 2782 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO); 2783 if (!child_img_req) 2784 return -ENOMEM; 2785 2786 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ); 2787 __set_bit(IMG_REQ_CHILD, &child_img_req->flags); 2788 child_img_req->obj_request = obj_req; 2789 2790 down_read(&parent->header_rwsem); 2791 rbd_img_capture_header(child_img_req); 2792 up_read(&parent->header_rwsem); 2793 2794 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req, 2795 obj_req); 2796 2797 if (!rbd_img_is_write(img_req)) { 2798 
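		/*
		 * Informal note: on the plain read path the child request
		 * reuses the original data buffer (bio or bvecs), so parent
		 * data lands directly where the caller expects it.  The write
		 * path in the else branch instead reads into
		 * obj_req->copyup_bvecs for a later copyup.
		 */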
switch (img_req->data_type) { 2799 case OBJ_REQUEST_BIO: 2800 ret = __rbd_img_fill_from_bio(child_img_req, 2801 obj_req->img_extents, 2802 obj_req->num_img_extents, 2803 &obj_req->bio_pos); 2804 break; 2805 case OBJ_REQUEST_BVECS: 2806 case OBJ_REQUEST_OWN_BVECS: 2807 ret = __rbd_img_fill_from_bvecs(child_img_req, 2808 obj_req->img_extents, 2809 obj_req->num_img_extents, 2810 &obj_req->bvec_pos); 2811 break; 2812 default: 2813 BUG(); 2814 } 2815 } else { 2816 ret = rbd_img_fill_from_bvecs(child_img_req, 2817 obj_req->img_extents, 2818 obj_req->num_img_extents, 2819 obj_req->copyup_bvecs); 2820 } 2821 if (ret) { 2822 rbd_img_request_destroy(child_img_req); 2823 return ret; 2824 } 2825 2826 /* avoid parent chain recursion */ 2827 rbd_img_schedule(child_img_req, 0); 2828 return 0; 2829 } 2830 2831 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result) 2832 { 2833 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2834 int ret; 2835 2836 again: 2837 switch (obj_req->read_state) { 2838 case RBD_OBJ_READ_START: 2839 rbd_assert(!*result); 2840 2841 if (!rbd_obj_may_exist(obj_req)) { 2842 *result = -ENOENT; 2843 obj_req->read_state = RBD_OBJ_READ_OBJECT; 2844 goto again; 2845 } 2846 2847 ret = rbd_obj_read_object(obj_req); 2848 if (ret) { 2849 *result = ret; 2850 return true; 2851 } 2852 obj_req->read_state = RBD_OBJ_READ_OBJECT; 2853 return false; 2854 case RBD_OBJ_READ_OBJECT: 2855 if (*result == -ENOENT && rbd_dev->parent_overlap) { 2856 /* reverse map this object extent onto the parent */ 2857 ret = rbd_obj_calc_img_extents(obj_req, false); 2858 if (ret) { 2859 *result = ret; 2860 return true; 2861 } 2862 if (obj_req->num_img_extents) { 2863 ret = rbd_obj_read_from_parent(obj_req); 2864 if (ret) { 2865 *result = ret; 2866 return true; 2867 } 2868 obj_req->read_state = RBD_OBJ_READ_PARENT; 2869 return false; 2870 } 2871 } 2872 2873 /* 2874 * -ENOENT means a hole in the image -- zero-fill the entire 2875 * length of the request. A short read also implies zero-fill 2876 * to the end of the request. 2877 */ 2878 if (*result == -ENOENT) { 2879 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len); 2880 *result = 0; 2881 } else if (*result >= 0) { 2882 if (*result < obj_req->ex.oe_len) 2883 rbd_obj_zero_range(obj_req, *result, 2884 obj_req->ex.oe_len - *result); 2885 else 2886 rbd_assert(*result == obj_req->ex.oe_len); 2887 *result = 0; 2888 } 2889 return true; 2890 case RBD_OBJ_READ_PARENT: 2891 /* 2892 * The parent image is read only up to the overlap -- zero-fill 2893 * from the overlap to the end of the request. 
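		 * For example (hypothetical numbers): if this object extent is
		 * 4M long but only its first 1M lies below the parent overlap,
		 * rbd_obj_img_extents_bytes() comes back as 1M and the
		 * remaining 3M are zero-filled below.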
2894 */ 2895 if (!*result) { 2896 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req); 2897 2898 if (obj_overlap < obj_req->ex.oe_len) 2899 rbd_obj_zero_range(obj_req, obj_overlap, 2900 obj_req->ex.oe_len - obj_overlap); 2901 } 2902 return true; 2903 default: 2904 BUG(); 2905 } 2906 } 2907 2908 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req) 2909 { 2910 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2911 2912 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) 2913 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST; 2914 2915 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) && 2916 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) { 2917 dout("%s %p noop for nonexistent\n", __func__, obj_req); 2918 return true; 2919 } 2920 2921 return false; 2922 } 2923 2924 /* 2925 * Return: 2926 * 0 - object map update sent 2927 * 1 - object map update isn't needed 2928 * <0 - error 2929 */ 2930 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req) 2931 { 2932 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 2933 u8 new_state; 2934 2935 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) 2936 return 1; 2937 2938 if (obj_req->flags & RBD_OBJ_FLAG_DELETION) 2939 new_state = OBJECT_PENDING; 2940 else 2941 new_state = OBJECT_EXISTS; 2942 2943 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL); 2944 } 2945 2946 static int rbd_obj_write_object(struct rbd_obj_request *obj_req) 2947 { 2948 struct ceph_osd_request *osd_req; 2949 int num_ops = count_write_ops(obj_req); 2950 int which = 0; 2951 int ret; 2952 2953 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) 2954 num_ops++; /* stat */ 2955 2956 osd_req = rbd_obj_add_osd_request(obj_req, num_ops); 2957 if (IS_ERR(osd_req)) 2958 return PTR_ERR(osd_req); 2959 2960 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) { 2961 ret = rbd_osd_setup_stat(osd_req, which++); 2962 if (ret) 2963 return ret; 2964 } 2965 2966 rbd_osd_setup_write_ops(osd_req, which); 2967 rbd_osd_format_write(osd_req); 2968 2969 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO); 2970 if (ret) 2971 return ret; 2972 2973 rbd_osd_submit(osd_req); 2974 return 0; 2975 } 2976 2977 /* 2978 * copyup_bvecs pages are never highmem pages 2979 */ 2980 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes) 2981 { 2982 struct ceph_bvec_iter it = { 2983 .bvecs = bvecs, 2984 .iter = { .bi_size = bytes }, 2985 }; 2986 2987 ceph_bvec_iter_advance_step(&it, bytes, ({ 2988 if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len)) 2989 return false; 2990 })); 2991 return true; 2992 } 2993 2994 #define MODS_ONLY U32_MAX 2995 2996 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req, 2997 u32 bytes) 2998 { 2999 struct ceph_osd_request *osd_req; 3000 int ret; 3001 3002 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes); 3003 rbd_assert(bytes > 0 && bytes != MODS_ONLY); 3004 3005 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1); 3006 if (IS_ERR(osd_req)) 3007 return PTR_ERR(osd_req); 3008 3009 ret = rbd_osd_setup_copyup(osd_req, 0, bytes); 3010 if (ret) 3011 return ret; 3012 3013 rbd_osd_format_write(osd_req); 3014 3015 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO); 3016 if (ret) 3017 return ret; 3018 3019 rbd_osd_submit(osd_req); 3020 return 0; 3021 } 3022 3023 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req, 3024 u32 bytes) 3025 { 3026 struct ceph_osd_request *osd_req; 3027 int num_ops = count_write_ops(obj_req); 3028 int which = 0; 3029 int ret; 3030 3031 dout("%s obj_req 
%p bytes %u\n", __func__, obj_req, bytes); 3032 3033 if (bytes != MODS_ONLY) 3034 num_ops++; /* copyup */ 3035 3036 osd_req = rbd_obj_add_osd_request(obj_req, num_ops); 3037 if (IS_ERR(osd_req)) 3038 return PTR_ERR(osd_req); 3039 3040 if (bytes != MODS_ONLY) { 3041 ret = rbd_osd_setup_copyup(osd_req, which++, bytes); 3042 if (ret) 3043 return ret; 3044 } 3045 3046 rbd_osd_setup_write_ops(osd_req, which); 3047 rbd_osd_format_write(osd_req); 3048 3049 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO); 3050 if (ret) 3051 return ret; 3052 3053 rbd_osd_submit(osd_req); 3054 return 0; 3055 } 3056 3057 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap) 3058 { 3059 u32 i; 3060 3061 rbd_assert(!obj_req->copyup_bvecs); 3062 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap); 3063 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count, 3064 sizeof(*obj_req->copyup_bvecs), 3065 GFP_NOIO); 3066 if (!obj_req->copyup_bvecs) 3067 return -ENOMEM; 3068 3069 for (i = 0; i < obj_req->copyup_bvec_count; i++) { 3070 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE); 3071 struct page *page = alloc_page(GFP_NOIO); 3072 3073 if (!page) 3074 return -ENOMEM; 3075 3076 bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0); 3077 obj_overlap -= len; 3078 } 3079 3080 rbd_assert(!obj_overlap); 3081 return 0; 3082 } 3083 3084 /* 3085 * The target object doesn't exist. Read the data for the entire 3086 * target object up to the overlap point (if any) from the parent, 3087 * so we can use it for a copyup. 3088 */ 3089 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req) 3090 { 3091 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 3092 int ret; 3093 3094 rbd_assert(obj_req->num_img_extents); 3095 prune_extents(obj_req->img_extents, &obj_req->num_img_extents, 3096 rbd_dev->parent_overlap); 3097 if (!obj_req->num_img_extents) { 3098 /* 3099 * The overlap has become 0 (most likely because the 3100 * image has been flattened). Re-submit the original write 3101 * request -- pass MODS_ONLY since the copyup isn't needed 3102 * anymore. 
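	 * (MODS_ONLY is the U32_MAX sentinel defined above; it makes
	 * rbd_obj_copyup_current_snapc() skip the copyup op and send only
	 * the modification ops.)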
3103 */ 3104 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY); 3105 } 3106 3107 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req)); 3108 if (ret) 3109 return ret; 3110 3111 return rbd_obj_read_from_parent(obj_req); 3112 } 3113 3114 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req) 3115 { 3116 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; 3117 struct ceph_snap_context *snapc = obj_req->img_request->snapc; 3118 u8 new_state; 3119 u32 i; 3120 int ret; 3121 3122 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending); 3123 3124 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) 3125 return; 3126 3127 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS) 3128 return; 3129 3130 for (i = 0; i < snapc->num_snaps; i++) { 3131 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) && 3132 i + 1 < snapc->num_snaps) 3133 new_state = OBJECT_EXISTS_CLEAN; 3134 else 3135 new_state = OBJECT_EXISTS; 3136 3137 ret = rbd_object_map_update(obj_req, snapc->snaps[i], 3138 new_state, NULL); 3139 if (ret < 0) { 3140 obj_req->pending.result = ret; 3141 return; 3142 } 3143 3144 rbd_assert(!ret); 3145 obj_req->pending.num_pending++; 3146 } 3147 } 3148 3149 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req) 3150 { 3151 u32 bytes = rbd_obj_img_extents_bytes(obj_req); 3152 int ret; 3153 3154 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending); 3155 3156 /* 3157 * Only send non-zero copyup data to save some I/O and network 3158 * bandwidth -- zero copyup data is equivalent to the object not 3159 * existing. 3160 */ 3161 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS) 3162 bytes = 0; 3163 3164 if (obj_req->img_request->snapc->num_snaps && bytes > 0) { 3165 /* 3166 * Send a copyup request with an empty snapshot context to 3167 * deep-copyup the object through all existing snapshots. 3168 * A second request with the current snapshot context will be 3169 * sent for the actual modification. 
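	 *
	 * Rough sequence for that case (sketch only):
	 *
	 *   rbd_obj_copyup_empty_snapc(obj_req, bytes);        deep-copyup
	 *   rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);  actual write ops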
3170 */
3171 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3172 if (ret) {
3173 obj_req->pending.result = ret;
3174 return;
3175 }
3176
3177 obj_req->pending.num_pending++;
3178 bytes = MODS_ONLY;
3179 }
3180
3181 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3182 if (ret) {
3183 obj_req->pending.result = ret;
3184 return;
3185 }
3186
3187 obj_req->pending.num_pending++;
3188 }
3189
3190 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3191 {
3192 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3193 int ret;
3194
3195 again:
3196 switch (obj_req->copyup_state) {
3197 case RBD_OBJ_COPYUP_START:
3198 rbd_assert(!*result);
3199
3200 ret = rbd_obj_copyup_read_parent(obj_req);
3201 if (ret) {
3202 *result = ret;
3203 return true;
3204 }
3205 if (obj_req->num_img_extents)
3206 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3207 else
3208 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3209 return false;
3210 case RBD_OBJ_COPYUP_READ_PARENT:
3211 if (*result)
3212 return true;
3213
3214 if (is_zero_bvecs(obj_req->copyup_bvecs,
3215 rbd_obj_img_extents_bytes(obj_req))) {
3216 dout("%s %p detected zeros\n", __func__, obj_req);
3217 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3218 }
3219
3220 rbd_obj_copyup_object_maps(obj_req);
3221 if (!obj_req->pending.num_pending) {
3222 *result = obj_req->pending.result;
3223 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3224 goto again;
3225 }
3226 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3227 return false;
3228 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3229 if (!pending_result_dec(&obj_req->pending, result))
3230 return false;
3231 fallthrough;
3232 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3233 if (*result) {
3234 rbd_warn(rbd_dev, "snap object map update failed: %d",
3235 *result);
3236 return true;
3237 }
3238
3239 rbd_obj_copyup_write_object(obj_req);
3240 if (!obj_req->pending.num_pending) {
3241 *result = obj_req->pending.result;
3242 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3243 goto again;
3244 }
3245 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3246 return false;
3247 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3248 if (!pending_result_dec(&obj_req->pending, result))
3249 return false;
3250 fallthrough;
3251 case RBD_OBJ_COPYUP_WRITE_OBJECT:
3252 return true;
3253 default:
3254 BUG();
3255 }
3256 }
3257
3258 /*
3259 * Return:
3260 * 0 - object map update sent
3261 * 1 - object map update isn't needed
3262 * <0 - error
3263 */
3264 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3265 {
3266 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3267 u8 current_state = OBJECT_PENDING;
3268
3269 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3270 return 1;
3271
3272 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3273 return 1;
3274
3275 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3276 &current_state);
3277 }
3278
3279 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3280 {
3281 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3282 int ret;
3283
3284 again:
3285 switch (obj_req->write_state) {
3286 case RBD_OBJ_WRITE_START:
3287 rbd_assert(!*result);
3288
3289 if (rbd_obj_write_is_noop(obj_req))
3290 return true;
3291
3292 ret = rbd_obj_write_pre_object_map(obj_req);
3293 if (ret < 0) {
3294 *result = ret;
3295 return true;
3296 }
3297 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3298 if (ret > 0)
3299 goto again;
3300 return false;
3301 case
RBD_OBJ_WRITE_PRE_OBJECT_MAP: 3302 if (*result) { 3303 rbd_warn(rbd_dev, "pre object map update failed: %d", 3304 *result); 3305 return true; 3306 } 3307 ret = rbd_obj_write_object(obj_req); 3308 if (ret) { 3309 *result = ret; 3310 return true; 3311 } 3312 obj_req->write_state = RBD_OBJ_WRITE_OBJECT; 3313 return false; 3314 case RBD_OBJ_WRITE_OBJECT: 3315 if (*result == -ENOENT) { 3316 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) { 3317 *result = 0; 3318 obj_req->copyup_state = RBD_OBJ_COPYUP_START; 3319 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP; 3320 goto again; 3321 } 3322 /* 3323 * On a non-existent object: 3324 * delete - -ENOENT, truncate/zero - 0 3325 */ 3326 if (obj_req->flags & RBD_OBJ_FLAG_DELETION) 3327 *result = 0; 3328 } 3329 if (*result) 3330 return true; 3331 3332 obj_req->write_state = RBD_OBJ_WRITE_COPYUP; 3333 goto again; 3334 case __RBD_OBJ_WRITE_COPYUP: 3335 if (!rbd_obj_advance_copyup(obj_req, result)) 3336 return false; 3337 fallthrough; 3338 case RBD_OBJ_WRITE_COPYUP: 3339 if (*result) { 3340 rbd_warn(rbd_dev, "copyup failed: %d", *result); 3341 return true; 3342 } 3343 ret = rbd_obj_write_post_object_map(obj_req); 3344 if (ret < 0) { 3345 *result = ret; 3346 return true; 3347 } 3348 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP; 3349 if (ret > 0) 3350 goto again; 3351 return false; 3352 case RBD_OBJ_WRITE_POST_OBJECT_MAP: 3353 if (*result) 3354 rbd_warn(rbd_dev, "post object map update failed: %d", 3355 *result); 3356 return true; 3357 default: 3358 BUG(); 3359 } 3360 } 3361 3362 /* 3363 * Return true if @obj_req is completed. 3364 */ 3365 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req, 3366 int *result) 3367 { 3368 struct rbd_img_request *img_req = obj_req->img_request; 3369 struct rbd_device *rbd_dev = img_req->rbd_dev; 3370 bool done; 3371 3372 mutex_lock(&obj_req->state_mutex); 3373 if (!rbd_img_is_write(img_req)) 3374 done = rbd_obj_advance_read(obj_req, result); 3375 else 3376 done = rbd_obj_advance_write(obj_req, result); 3377 mutex_unlock(&obj_req->state_mutex); 3378 3379 if (done && *result) { 3380 rbd_assert(*result < 0); 3381 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d", 3382 obj_op_name(img_req->op_type), obj_req->ex.oe_objno, 3383 obj_req->ex.oe_off, obj_req->ex.oe_len, *result); 3384 } 3385 return done; 3386 } 3387 3388 /* 3389 * This is open-coded in rbd_img_handle_request() to avoid parent chain 3390 * recursion. 
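 * (rbd_img_handle_request() instead loops with a local "goto again"
 * when a child image request completes -- see its IMG_REQ_CHILD branch.)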
3391 */ 3392 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result) 3393 { 3394 if (__rbd_obj_handle_request(obj_req, &result)) 3395 rbd_img_handle_request(obj_req->img_request, result); 3396 } 3397 3398 static bool need_exclusive_lock(struct rbd_img_request *img_req) 3399 { 3400 struct rbd_device *rbd_dev = img_req->rbd_dev; 3401 3402 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) 3403 return false; 3404 3405 if (rbd_is_ro(rbd_dev)) 3406 return false; 3407 3408 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags)); 3409 if (rbd_dev->opts->lock_on_read || 3410 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) 3411 return true; 3412 3413 return rbd_img_is_write(img_req); 3414 } 3415 3416 static bool rbd_lock_add_request(struct rbd_img_request *img_req) 3417 { 3418 struct rbd_device *rbd_dev = img_req->rbd_dev; 3419 bool locked; 3420 3421 lockdep_assert_held(&rbd_dev->lock_rwsem); 3422 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED; 3423 spin_lock(&rbd_dev->lock_lists_lock); 3424 rbd_assert(list_empty(&img_req->lock_item)); 3425 if (!locked) 3426 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list); 3427 else 3428 list_add_tail(&img_req->lock_item, &rbd_dev->running_list); 3429 spin_unlock(&rbd_dev->lock_lists_lock); 3430 return locked; 3431 } 3432 3433 static void rbd_lock_del_request(struct rbd_img_request *img_req) 3434 { 3435 struct rbd_device *rbd_dev = img_req->rbd_dev; 3436 bool need_wakeup; 3437 3438 lockdep_assert_held(&rbd_dev->lock_rwsem); 3439 spin_lock(&rbd_dev->lock_lists_lock); 3440 rbd_assert(!list_empty(&img_req->lock_item)); 3441 list_del_init(&img_req->lock_item); 3442 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING && 3443 list_empty(&rbd_dev->running_list)); 3444 spin_unlock(&rbd_dev->lock_lists_lock); 3445 if (need_wakeup) 3446 complete(&rbd_dev->releasing_wait); 3447 } 3448 3449 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req) 3450 { 3451 struct rbd_device *rbd_dev = img_req->rbd_dev; 3452 3453 if (!need_exclusive_lock(img_req)) 3454 return 1; 3455 3456 if (rbd_lock_add_request(img_req)) 3457 return 1; 3458 3459 if (rbd_dev->opts->exclusive) { 3460 WARN_ON(1); /* lock got released? */ 3461 return -EROFS; 3462 } 3463 3464 /* 3465 * Note the use of mod_delayed_work() in rbd_acquire_lock() 3466 * and cancel_delayed_work() in wake_lock_waiters(). 
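 * Queueing lock_dwork with a 0 delay below kicks it immediately; the
 * delayed-work form is what lets those two call sites reschedule or
 * cancel the same work item.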
3467 */ 3468 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev); 3469 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); 3470 return 0; 3471 } 3472 3473 static void rbd_img_object_requests(struct rbd_img_request *img_req) 3474 { 3475 struct rbd_obj_request *obj_req; 3476 3477 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending); 3478 3479 for_each_obj_request(img_req, obj_req) { 3480 int result = 0; 3481 3482 if (__rbd_obj_handle_request(obj_req, &result)) { 3483 if (result) { 3484 img_req->pending.result = result; 3485 return; 3486 } 3487 } else { 3488 img_req->pending.num_pending++; 3489 } 3490 } 3491 } 3492 3493 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result) 3494 { 3495 struct rbd_device *rbd_dev = img_req->rbd_dev; 3496 int ret; 3497 3498 again: 3499 switch (img_req->state) { 3500 case RBD_IMG_START: 3501 rbd_assert(!*result); 3502 3503 ret = rbd_img_exclusive_lock(img_req); 3504 if (ret < 0) { 3505 *result = ret; 3506 return true; 3507 } 3508 img_req->state = RBD_IMG_EXCLUSIVE_LOCK; 3509 if (ret > 0) 3510 goto again; 3511 return false; 3512 case RBD_IMG_EXCLUSIVE_LOCK: 3513 if (*result) 3514 return true; 3515 3516 rbd_assert(!need_exclusive_lock(img_req) || 3517 __rbd_is_lock_owner(rbd_dev)); 3518 3519 rbd_img_object_requests(img_req); 3520 if (!img_req->pending.num_pending) { 3521 *result = img_req->pending.result; 3522 img_req->state = RBD_IMG_OBJECT_REQUESTS; 3523 goto again; 3524 } 3525 img_req->state = __RBD_IMG_OBJECT_REQUESTS; 3526 return false; 3527 case __RBD_IMG_OBJECT_REQUESTS: 3528 if (!pending_result_dec(&img_req->pending, result)) 3529 return false; 3530 fallthrough; 3531 case RBD_IMG_OBJECT_REQUESTS: 3532 return true; 3533 default: 3534 BUG(); 3535 } 3536 } 3537 3538 /* 3539 * Return true if @img_req is completed. 3540 */ 3541 static bool __rbd_img_handle_request(struct rbd_img_request *img_req, 3542 int *result) 3543 { 3544 struct rbd_device *rbd_dev = img_req->rbd_dev; 3545 bool done; 3546 3547 if (need_exclusive_lock(img_req)) { 3548 down_read(&rbd_dev->lock_rwsem); 3549 mutex_lock(&img_req->state_mutex); 3550 done = rbd_img_advance(img_req, result); 3551 if (done) 3552 rbd_lock_del_request(img_req); 3553 mutex_unlock(&img_req->state_mutex); 3554 up_read(&rbd_dev->lock_rwsem); 3555 } else { 3556 mutex_lock(&img_req->state_mutex); 3557 done = rbd_img_advance(img_req, result); 3558 mutex_unlock(&img_req->state_mutex); 3559 } 3560 3561 if (done && *result) { 3562 rbd_assert(*result < 0); 3563 rbd_warn(rbd_dev, "%s%s result %d", 3564 test_bit(IMG_REQ_CHILD, &img_req->flags) ? 
"child " : "", 3565 obj_op_name(img_req->op_type), *result); 3566 } 3567 return done; 3568 } 3569 3570 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result) 3571 { 3572 again: 3573 if (!__rbd_img_handle_request(img_req, &result)) 3574 return; 3575 3576 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) { 3577 struct rbd_obj_request *obj_req = img_req->obj_request; 3578 3579 rbd_img_request_destroy(img_req); 3580 if (__rbd_obj_handle_request(obj_req, &result)) { 3581 img_req = obj_req->img_request; 3582 goto again; 3583 } 3584 } else { 3585 struct request *rq = blk_mq_rq_from_pdu(img_req); 3586 3587 rbd_img_request_destroy(img_req); 3588 blk_mq_end_request(rq, errno_to_blk_status(result)); 3589 } 3590 } 3591 3592 static const struct rbd_client_id rbd_empty_cid; 3593 3594 static bool rbd_cid_equal(const struct rbd_client_id *lhs, 3595 const struct rbd_client_id *rhs) 3596 { 3597 return lhs->gid == rhs->gid && lhs->handle == rhs->handle; 3598 } 3599 3600 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev) 3601 { 3602 struct rbd_client_id cid; 3603 3604 mutex_lock(&rbd_dev->watch_mutex); 3605 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client); 3606 cid.handle = rbd_dev->watch_cookie; 3607 mutex_unlock(&rbd_dev->watch_mutex); 3608 return cid; 3609 } 3610 3611 /* 3612 * lock_rwsem must be held for write 3613 */ 3614 static void rbd_set_owner_cid(struct rbd_device *rbd_dev, 3615 const struct rbd_client_id *cid) 3616 { 3617 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev, 3618 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle, 3619 cid->gid, cid->handle); 3620 rbd_dev->owner_cid = *cid; /* struct */ 3621 } 3622 3623 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf) 3624 { 3625 mutex_lock(&rbd_dev->watch_mutex); 3626 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie); 3627 mutex_unlock(&rbd_dev->watch_mutex); 3628 } 3629 3630 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie) 3631 { 3632 struct rbd_client_id cid = rbd_get_cid(rbd_dev); 3633 3634 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; 3635 strcpy(rbd_dev->lock_cookie, cookie); 3636 rbd_set_owner_cid(rbd_dev, &cid); 3637 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); 3638 } 3639 3640 /* 3641 * lock_rwsem must be held for write 3642 */ 3643 static int rbd_lock(struct rbd_device *rbd_dev) 3644 { 3645 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3646 char cookie[32]; 3647 int ret; 3648 3649 WARN_ON(__rbd_is_lock_owner(rbd_dev) || 3650 rbd_dev->lock_cookie[0] != '\0'); 3651 3652 format_lock_cookie(rbd_dev, cookie); 3653 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 3654 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, 3655 RBD_LOCK_TAG, "", 0); 3656 if (ret) 3657 return ret; 3658 3659 __rbd_lock(rbd_dev, cookie); 3660 return 0; 3661 } 3662 3663 /* 3664 * lock_rwsem must be held for write 3665 */ 3666 static void rbd_unlock(struct rbd_device *rbd_dev) 3667 { 3668 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3669 int ret; 3670 3671 WARN_ON(!__rbd_is_lock_owner(rbd_dev) || 3672 rbd_dev->lock_cookie[0] == '\0'); 3673 3674 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 3675 RBD_LOCK_NAME, rbd_dev->lock_cookie); 3676 if (ret && ret != -ENOENT) 3677 rbd_warn(rbd_dev, "failed to unlock header: %d", ret); 3678 3679 /* treat errors as the image is unlocked */ 3680 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED; 3681 
rbd_dev->lock_cookie[0] = '\0'; 3682 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 3683 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work); 3684 } 3685 3686 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev, 3687 enum rbd_notify_op notify_op, 3688 struct page ***preply_pages, 3689 size_t *preply_len) 3690 { 3691 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3692 struct rbd_client_id cid = rbd_get_cid(rbd_dev); 3693 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN]; 3694 int buf_size = sizeof(buf); 3695 void *p = buf; 3696 3697 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op); 3698 3699 /* encode *LockPayload NotifyMessage (op + ClientId) */ 3700 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN); 3701 ceph_encode_32(&p, notify_op); 3702 ceph_encode_64(&p, cid.gid); 3703 ceph_encode_64(&p, cid.handle); 3704 3705 return ceph_osdc_notify(osdc, &rbd_dev->header_oid, 3706 &rbd_dev->header_oloc, buf, buf_size, 3707 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len); 3708 } 3709 3710 static void rbd_notify_op_lock(struct rbd_device *rbd_dev, 3711 enum rbd_notify_op notify_op) 3712 { 3713 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL); 3714 } 3715 3716 static void rbd_notify_acquired_lock(struct work_struct *work) 3717 { 3718 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 3719 acquired_lock_work); 3720 3721 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK); 3722 } 3723 3724 static void rbd_notify_released_lock(struct work_struct *work) 3725 { 3726 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 3727 released_lock_work); 3728 3729 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK); 3730 } 3731 3732 static int rbd_request_lock(struct rbd_device *rbd_dev) 3733 { 3734 struct page **reply_pages; 3735 size_t reply_len; 3736 bool lock_owner_responded = false; 3737 int ret; 3738 3739 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3740 3741 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK, 3742 &reply_pages, &reply_len); 3743 if (ret && ret != -ETIMEDOUT) { 3744 rbd_warn(rbd_dev, "failed to request lock: %d", ret); 3745 goto out; 3746 } 3747 3748 if (reply_len > 0 && reply_len <= PAGE_SIZE) { 3749 void *p = page_address(reply_pages[0]); 3750 void *const end = p + reply_len; 3751 u32 n; 3752 3753 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */ 3754 while (n--) { 3755 u8 struct_v; 3756 u32 len; 3757 3758 ceph_decode_need(&p, end, 8 + 8, e_inval); 3759 p += 8 + 8; /* skip gid and cookie */ 3760 3761 ceph_decode_32_safe(&p, end, len, e_inval); 3762 if (!len) 3763 continue; 3764 3765 if (lock_owner_responded) { 3766 rbd_warn(rbd_dev, 3767 "duplicate lock owners detected"); 3768 ret = -EIO; 3769 goto out; 3770 } 3771 3772 lock_owner_responded = true; 3773 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage", 3774 &struct_v, &len); 3775 if (ret) { 3776 rbd_warn(rbd_dev, 3777 "failed to decode ResponseMessage: %d", 3778 ret); 3779 goto e_inval; 3780 } 3781 3782 ret = ceph_decode_32(&p); 3783 } 3784 } 3785 3786 if (!lock_owner_responded) { 3787 rbd_warn(rbd_dev, "no lock owners detected"); 3788 ret = -ETIMEDOUT; 3789 } 3790 3791 out: 3792 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len)); 3793 return ret; 3794 3795 e_inval: 3796 ret = -EINVAL; 3797 goto out; 3798 } 3799 3800 /* 3801 * Either image request state machine(s) or rbd_add_acquire_lock() 3802 * (i.e. "rbd map"). 
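 * Whichever of the two is waiting for the lock result is dealt with
 * here: "rbd map" is woken via acquire_wait, queued image requests are
 * rescheduled with the acquire result.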
3803 */ 3804 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result) 3805 { 3806 struct rbd_img_request *img_req; 3807 3808 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result); 3809 lockdep_assert_held_write(&rbd_dev->lock_rwsem); 3810 3811 cancel_delayed_work(&rbd_dev->lock_dwork); 3812 if (!completion_done(&rbd_dev->acquire_wait)) { 3813 rbd_assert(list_empty(&rbd_dev->acquiring_list) && 3814 list_empty(&rbd_dev->running_list)); 3815 rbd_dev->acquire_err = result; 3816 complete_all(&rbd_dev->acquire_wait); 3817 return; 3818 } 3819 3820 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) { 3821 mutex_lock(&img_req->state_mutex); 3822 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK); 3823 rbd_img_schedule(img_req, result); 3824 mutex_unlock(&img_req->state_mutex); 3825 } 3826 3827 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list); 3828 } 3829 3830 static int get_lock_owner_info(struct rbd_device *rbd_dev, 3831 struct ceph_locker **lockers, u32 *num_lockers) 3832 { 3833 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3834 u8 lock_type; 3835 char *lock_tag; 3836 int ret; 3837 3838 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3839 3840 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid, 3841 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3842 &lock_type, &lock_tag, lockers, num_lockers); 3843 if (ret) 3844 return ret; 3845 3846 if (*num_lockers == 0) { 3847 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev); 3848 goto out; 3849 } 3850 3851 if (strcmp(lock_tag, RBD_LOCK_TAG)) { 3852 rbd_warn(rbd_dev, "locked by external mechanism, tag %s", 3853 lock_tag); 3854 ret = -EBUSY; 3855 goto out; 3856 } 3857 3858 if (lock_type == CEPH_CLS_LOCK_SHARED) { 3859 rbd_warn(rbd_dev, "shared lock type detected"); 3860 ret = -EBUSY; 3861 goto out; 3862 } 3863 3864 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, 3865 strlen(RBD_LOCK_COOKIE_PREFIX))) { 3866 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s", 3867 (*lockers)[0].id.cookie); 3868 ret = -EBUSY; 3869 goto out; 3870 } 3871 3872 out: 3873 kfree(lock_tag); 3874 return ret; 3875 } 3876 3877 static int find_watcher(struct rbd_device *rbd_dev, 3878 const struct ceph_locker *locker) 3879 { 3880 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3881 struct ceph_watch_item *watchers; 3882 u32 num_watchers; 3883 u64 cookie; 3884 int i; 3885 int ret; 3886 3887 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, 3888 &rbd_dev->header_oloc, &watchers, 3889 &num_watchers); 3890 if (ret) 3891 return ret; 3892 3893 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); 3894 for (i = 0; i < num_watchers; i++) { 3895 /* 3896 * Ignore addr->type while comparing. This mimics 3897 * entity_addr_t::get_legacy_str() + strcmp(). 
3898 */ 3899 if (ceph_addr_equal_no_type(&watchers[i].addr, 3900 &locker->info.addr) && 3901 watchers[i].cookie == cookie) { 3902 struct rbd_client_id cid = { 3903 .gid = le64_to_cpu(watchers[i].name.num), 3904 .handle = cookie, 3905 }; 3906 3907 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__, 3908 rbd_dev, cid.gid, cid.handle); 3909 rbd_set_owner_cid(rbd_dev, &cid); 3910 ret = 1; 3911 goto out; 3912 } 3913 } 3914 3915 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev); 3916 ret = 0; 3917 out: 3918 kfree(watchers); 3919 return ret; 3920 } 3921 3922 /* 3923 * lock_rwsem must be held for write 3924 */ 3925 static int rbd_try_lock(struct rbd_device *rbd_dev) 3926 { 3927 struct ceph_client *client = rbd_dev->rbd_client->client; 3928 struct ceph_locker *lockers; 3929 u32 num_lockers; 3930 int ret; 3931 3932 for (;;) { 3933 ret = rbd_lock(rbd_dev); 3934 if (ret != -EBUSY) 3935 return ret; 3936 3937 /* determine if the current lock holder is still alive */ 3938 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers); 3939 if (ret) 3940 return ret; 3941 3942 if (num_lockers == 0) 3943 goto again; 3944 3945 ret = find_watcher(rbd_dev, lockers); 3946 if (ret) 3947 goto out; /* request lock or error */ 3948 3949 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu", 3950 ENTITY_NAME(lockers[0].id.name)); 3951 3952 ret = ceph_monc_blocklist_add(&client->monc, 3953 &lockers[0].info.addr); 3954 if (ret) { 3955 rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d", 3956 ENTITY_NAME(lockers[0].id.name), ret); 3957 goto out; 3958 } 3959 3960 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid, 3961 &rbd_dev->header_oloc, RBD_LOCK_NAME, 3962 lockers[0].id.cookie, 3963 &lockers[0].id.name); 3964 if (ret && ret != -ENOENT) 3965 goto out; 3966 3967 again: 3968 ceph_free_lockers(lockers, num_lockers); 3969 } 3970 3971 out: 3972 ceph_free_lockers(lockers, num_lockers); 3973 return ret; 3974 } 3975 3976 static int rbd_post_acquire_action(struct rbd_device *rbd_dev) 3977 { 3978 int ret; 3979 3980 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) { 3981 ret = rbd_object_map_open(rbd_dev); 3982 if (ret) 3983 return ret; 3984 } 3985 3986 return 0; 3987 } 3988 3989 /* 3990 * Return: 3991 * 0 - lock acquired 3992 * 1 - caller should call rbd_request_lock() 3993 * <0 - error 3994 */ 3995 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev) 3996 { 3997 int ret; 3998 3999 down_read(&rbd_dev->lock_rwsem); 4000 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev, 4001 rbd_dev->lock_state); 4002 if (__rbd_is_lock_owner(rbd_dev)) { 4003 up_read(&rbd_dev->lock_rwsem); 4004 return 0; 4005 } 4006 4007 up_read(&rbd_dev->lock_rwsem); 4008 down_write(&rbd_dev->lock_rwsem); 4009 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev, 4010 rbd_dev->lock_state); 4011 if (__rbd_is_lock_owner(rbd_dev)) { 4012 up_write(&rbd_dev->lock_rwsem); 4013 return 0; 4014 } 4015 4016 ret = rbd_try_lock(rbd_dev); 4017 if (ret < 0) { 4018 rbd_warn(rbd_dev, "failed to lock header: %d", ret); 4019 if (ret == -EBLOCKLISTED) 4020 goto out; 4021 4022 ret = 1; /* request lock anyway */ 4023 } 4024 if (ret > 0) { 4025 up_write(&rbd_dev->lock_rwsem); 4026 return ret; 4027 } 4028 4029 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED); 4030 rbd_assert(list_empty(&rbd_dev->running_list)); 4031 4032 ret = rbd_post_acquire_action(rbd_dev); 4033 if (ret) { 4034 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret); 4035 /* 4036 * Can't stay in RBD_LOCK_STATE_LOCKED because 4037 * rbd_lock_add_request() 
would let the request through, 4038 * assuming that e.g. object map is locked and loaded. 4039 */ 4040 rbd_unlock(rbd_dev); 4041 } 4042 4043 out: 4044 wake_lock_waiters(rbd_dev, ret); 4045 up_write(&rbd_dev->lock_rwsem); 4046 return ret; 4047 } 4048 4049 static void rbd_acquire_lock(struct work_struct *work) 4050 { 4051 struct rbd_device *rbd_dev = container_of(to_delayed_work(work), 4052 struct rbd_device, lock_dwork); 4053 int ret; 4054 4055 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4056 again: 4057 ret = rbd_try_acquire_lock(rbd_dev); 4058 if (ret <= 0) { 4059 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret); 4060 return; 4061 } 4062 4063 ret = rbd_request_lock(rbd_dev); 4064 if (ret == -ETIMEDOUT) { 4065 goto again; /* treat this as a dead client */ 4066 } else if (ret == -EROFS) { 4067 rbd_warn(rbd_dev, "peer will not release lock"); 4068 down_write(&rbd_dev->lock_rwsem); 4069 wake_lock_waiters(rbd_dev, ret); 4070 up_write(&rbd_dev->lock_rwsem); 4071 } else if (ret < 0) { 4072 rbd_warn(rbd_dev, "error requesting lock: %d", ret); 4073 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 4074 RBD_RETRY_DELAY); 4075 } else { 4076 /* 4077 * lock owner acked, but resend if we don't see them 4078 * release the lock 4079 */ 4080 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__, 4081 rbd_dev); 4082 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 4083 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC)); 4084 } 4085 } 4086 4087 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) 4088 { 4089 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4090 lockdep_assert_held_write(&rbd_dev->lock_rwsem); 4091 4092 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED) 4093 return false; 4094 4095 /* 4096 * Ensure that all in-flight IO is flushed. 4097 */ 4098 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING; 4099 rbd_assert(!completion_done(&rbd_dev->releasing_wait)); 4100 if (list_empty(&rbd_dev->running_list)) 4101 return true; 4102 4103 up_write(&rbd_dev->lock_rwsem); 4104 wait_for_completion(&rbd_dev->releasing_wait); 4105 4106 down_write(&rbd_dev->lock_rwsem); 4107 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) 4108 return false; 4109 4110 rbd_assert(list_empty(&rbd_dev->running_list)); 4111 return true; 4112 } 4113 4114 static void rbd_pre_release_action(struct rbd_device *rbd_dev) 4115 { 4116 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) 4117 rbd_object_map_close(rbd_dev); 4118 } 4119 4120 static void __rbd_release_lock(struct rbd_device *rbd_dev) 4121 { 4122 rbd_assert(list_empty(&rbd_dev->running_list)); 4123 4124 rbd_pre_release_action(rbd_dev); 4125 rbd_unlock(rbd_dev); 4126 } 4127 4128 /* 4129 * lock_rwsem must be held for write 4130 */ 4131 static void rbd_release_lock(struct rbd_device *rbd_dev) 4132 { 4133 if (!rbd_quiesce_lock(rbd_dev)) 4134 return; 4135 4136 __rbd_release_lock(rbd_dev); 4137 4138 /* 4139 * Give others a chance to grab the lock - we would re-acquire 4140 * almost immediately if we got new IO while draining the running 4141 * list otherwise. We need to ack our own notifications, so this 4142 * lock_dwork will be requeued from rbd_handle_released_lock() by 4143 * way of maybe_kick_acquire(). 
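* cancel_delayed_work() below therefore only drops a pending
* immediate re-acquire; if new requests queue up in the meantime,
* our own RELEASED_LOCK notification kicks lock_dwork again.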
4144 */ 4145 cancel_delayed_work(&rbd_dev->lock_dwork); 4146 } 4147 4148 static void rbd_release_lock_work(struct work_struct *work) 4149 { 4150 struct rbd_device *rbd_dev = container_of(work, struct rbd_device, 4151 unlock_work); 4152 4153 down_write(&rbd_dev->lock_rwsem); 4154 rbd_release_lock(rbd_dev); 4155 up_write(&rbd_dev->lock_rwsem); 4156 } 4157 4158 static void maybe_kick_acquire(struct rbd_device *rbd_dev) 4159 { 4160 bool have_requests; 4161 4162 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4163 if (__rbd_is_lock_owner(rbd_dev)) 4164 return; 4165 4166 spin_lock(&rbd_dev->lock_lists_lock); 4167 have_requests = !list_empty(&rbd_dev->acquiring_list); 4168 spin_unlock(&rbd_dev->lock_lists_lock); 4169 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) { 4170 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev); 4171 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); 4172 } 4173 } 4174 4175 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v, 4176 void **p) 4177 { 4178 struct rbd_client_id cid = { 0 }; 4179 4180 if (struct_v >= 2) { 4181 cid.gid = ceph_decode_64(p); 4182 cid.handle = ceph_decode_64(p); 4183 } 4184 4185 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 4186 cid.handle); 4187 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { 4188 down_write(&rbd_dev->lock_rwsem); 4189 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { 4190 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n", 4191 __func__, rbd_dev, cid.gid, cid.handle); 4192 } else { 4193 rbd_set_owner_cid(rbd_dev, &cid); 4194 } 4195 downgrade_write(&rbd_dev->lock_rwsem); 4196 } else { 4197 down_read(&rbd_dev->lock_rwsem); 4198 } 4199 4200 maybe_kick_acquire(rbd_dev); 4201 up_read(&rbd_dev->lock_rwsem); 4202 } 4203 4204 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v, 4205 void **p) 4206 { 4207 struct rbd_client_id cid = { 0 }; 4208 4209 if (struct_v >= 2) { 4210 cid.gid = ceph_decode_64(p); 4211 cid.handle = ceph_decode_64(p); 4212 } 4213 4214 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 4215 cid.handle); 4216 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { 4217 down_write(&rbd_dev->lock_rwsem); 4218 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { 4219 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n", 4220 __func__, rbd_dev, cid.gid, cid.handle, 4221 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle); 4222 } else { 4223 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 4224 } 4225 downgrade_write(&rbd_dev->lock_rwsem); 4226 } else { 4227 down_read(&rbd_dev->lock_rwsem); 4228 } 4229 4230 maybe_kick_acquire(rbd_dev); 4231 up_read(&rbd_dev->lock_rwsem); 4232 } 4233 4234 /* 4235 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no 4236 * ResponseMessage is needed. 
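* Roughly: 1 means no answer is needed (the request came from
* ourselves or we do not own the lock); 0 acknowledges the request
* and, unless the mapping is exclusive, an unlock is queued;
* -EROFS tells the peer that the lock will not be released.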
4237 */ 4238 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v, 4239 void **p) 4240 { 4241 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev); 4242 struct rbd_client_id cid = { 0 }; 4243 int result = 1; 4244 4245 if (struct_v >= 2) { 4246 cid.gid = ceph_decode_64(p); 4247 cid.handle = ceph_decode_64(p); 4248 } 4249 4250 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid, 4251 cid.handle); 4252 if (rbd_cid_equal(&cid, &my_cid)) 4253 return result; 4254 4255 down_read(&rbd_dev->lock_rwsem); 4256 if (__rbd_is_lock_owner(rbd_dev)) { 4257 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED && 4258 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) 4259 goto out_unlock; 4260 4261 /* 4262 * encode ResponseMessage(0) so the peer can detect 4263 * a missing owner 4264 */ 4265 result = 0; 4266 4267 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) { 4268 if (!rbd_dev->opts->exclusive) { 4269 dout("%s rbd_dev %p queueing unlock_work\n", 4270 __func__, rbd_dev); 4271 queue_work(rbd_dev->task_wq, 4272 &rbd_dev->unlock_work); 4273 } else { 4274 /* refuse to release the lock */ 4275 result = -EROFS; 4276 } 4277 } 4278 } 4279 4280 out_unlock: 4281 up_read(&rbd_dev->lock_rwsem); 4282 return result; 4283 } 4284 4285 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev, 4286 u64 notify_id, u64 cookie, s32 *result) 4287 { 4288 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4289 char buf[4 + CEPH_ENCODING_START_BLK_LEN]; 4290 int buf_size = sizeof(buf); 4291 int ret; 4292 4293 if (result) { 4294 void *p = buf; 4295 4296 /* encode ResponseMessage */ 4297 ceph_start_encoding(&p, 1, 1, 4298 buf_size - CEPH_ENCODING_START_BLK_LEN); 4299 ceph_encode_32(&p, *result); 4300 } else { 4301 buf_size = 0; 4302 } 4303 4304 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid, 4305 &rbd_dev->header_oloc, notify_id, cookie, 4306 buf, buf_size); 4307 if (ret) 4308 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret); 4309 } 4310 4311 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id, 4312 u64 cookie) 4313 { 4314 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4315 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL); 4316 } 4317 4318 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev, 4319 u64 notify_id, u64 cookie, s32 result) 4320 { 4321 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result); 4322 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result); 4323 } 4324 4325 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie, 4326 u64 notifier_id, void *data, size_t data_len) 4327 { 4328 struct rbd_device *rbd_dev = arg; 4329 void *p = data; 4330 void *const end = p + data_len; 4331 u8 struct_v = 0; 4332 u32 len; 4333 u32 notify_op; 4334 int ret; 4335 4336 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n", 4337 __func__, rbd_dev, cookie, notify_id, data_len); 4338 if (data_len) { 4339 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage", 4340 &struct_v, &len); 4341 if (ret) { 4342 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d", 4343 ret); 4344 return; 4345 } 4346 4347 notify_op = ceph_decode_32(&p); 4348 } else { 4349 /* legacy notification for header updates */ 4350 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE; 4351 len = 0; 4352 } 4353 4354 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op); 4355 switch (notify_op) { 4356 case RBD_NOTIFY_OP_ACQUIRED_LOCK: 4357 rbd_handle_acquired_lock(rbd_dev, struct_v, &p); 4358 rbd_acknowledge_notify(rbd_dev, notify_id, 
cookie); 4359 break; 4360 case RBD_NOTIFY_OP_RELEASED_LOCK: 4361 rbd_handle_released_lock(rbd_dev, struct_v, &p); 4362 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 4363 break; 4364 case RBD_NOTIFY_OP_REQUEST_LOCK: 4365 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p); 4366 if (ret <= 0) 4367 rbd_acknowledge_notify_result(rbd_dev, notify_id, 4368 cookie, ret); 4369 else 4370 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 4371 break; 4372 case RBD_NOTIFY_OP_HEADER_UPDATE: 4373 ret = rbd_dev_refresh(rbd_dev); 4374 if (ret) 4375 rbd_warn(rbd_dev, "refresh failed: %d", ret); 4376 4377 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 4378 break; 4379 default: 4380 if (rbd_is_lock_owner(rbd_dev)) 4381 rbd_acknowledge_notify_result(rbd_dev, notify_id, 4382 cookie, -EOPNOTSUPP); 4383 else 4384 rbd_acknowledge_notify(rbd_dev, notify_id, cookie); 4385 break; 4386 } 4387 } 4388 4389 static void __rbd_unregister_watch(struct rbd_device *rbd_dev); 4390 4391 static void rbd_watch_errcb(void *arg, u64 cookie, int err) 4392 { 4393 struct rbd_device *rbd_dev = arg; 4394 4395 rbd_warn(rbd_dev, "encountered watch error: %d", err); 4396 4397 down_write(&rbd_dev->lock_rwsem); 4398 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); 4399 up_write(&rbd_dev->lock_rwsem); 4400 4401 mutex_lock(&rbd_dev->watch_mutex); 4402 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) { 4403 __rbd_unregister_watch(rbd_dev); 4404 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR; 4405 4406 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0); 4407 } 4408 mutex_unlock(&rbd_dev->watch_mutex); 4409 } 4410 4411 /* 4412 * watch_mutex must be locked 4413 */ 4414 static int __rbd_register_watch(struct rbd_device *rbd_dev) 4415 { 4416 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4417 struct ceph_osd_linger_request *handle; 4418 4419 rbd_assert(!rbd_dev->watch_handle); 4420 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4421 4422 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid, 4423 &rbd_dev->header_oloc, rbd_watch_cb, 4424 rbd_watch_errcb, rbd_dev); 4425 if (IS_ERR(handle)) 4426 return PTR_ERR(handle); 4427 4428 rbd_dev->watch_handle = handle; 4429 return 0; 4430 } 4431 4432 /* 4433 * watch_mutex must be locked 4434 */ 4435 static void __rbd_unregister_watch(struct rbd_device *rbd_dev) 4436 { 4437 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4438 int ret; 4439 4440 rbd_assert(rbd_dev->watch_handle); 4441 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4442 4443 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle); 4444 if (ret) 4445 rbd_warn(rbd_dev, "failed to unwatch: %d", ret); 4446 4447 rbd_dev->watch_handle = NULL; 4448 } 4449 4450 static int rbd_register_watch(struct rbd_device *rbd_dev) 4451 { 4452 int ret; 4453 4454 mutex_lock(&rbd_dev->watch_mutex); 4455 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED); 4456 ret = __rbd_register_watch(rbd_dev); 4457 if (ret) 4458 goto out; 4459 4460 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; 4461 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; 4462 4463 out: 4464 mutex_unlock(&rbd_dev->watch_mutex); 4465 return ret; 4466 } 4467 4468 static void cancel_tasks_sync(struct rbd_device *rbd_dev) 4469 { 4470 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4471 4472 cancel_work_sync(&rbd_dev->acquired_lock_work); 4473 cancel_work_sync(&rbd_dev->released_lock_work); 4474 cancel_delayed_work_sync(&rbd_dev->lock_dwork); 4475 cancel_work_sync(&rbd_dev->unlock_work); 4476 } 4477 4478 /* 4479 * header_rwsem must not be 
held to avoid a deadlock with 4480 * rbd_dev_refresh() when flushing notifies. 4481 */ 4482 static void rbd_unregister_watch(struct rbd_device *rbd_dev) 4483 { 4484 cancel_tasks_sync(rbd_dev); 4485 4486 mutex_lock(&rbd_dev->watch_mutex); 4487 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) 4488 __rbd_unregister_watch(rbd_dev); 4489 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; 4490 mutex_unlock(&rbd_dev->watch_mutex); 4491 4492 cancel_delayed_work_sync(&rbd_dev->watch_dwork); 4493 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); 4494 } 4495 4496 /* 4497 * lock_rwsem must be held for write 4498 */ 4499 static void rbd_reacquire_lock(struct rbd_device *rbd_dev) 4500 { 4501 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4502 char cookie[32]; 4503 int ret; 4504 4505 if (!rbd_quiesce_lock(rbd_dev)) 4506 return; 4507 4508 format_lock_cookie(rbd_dev, cookie); 4509 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid, 4510 &rbd_dev->header_oloc, RBD_LOCK_NAME, 4511 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie, 4512 RBD_LOCK_TAG, cookie); 4513 if (ret) { 4514 if (ret != -EOPNOTSUPP) 4515 rbd_warn(rbd_dev, "failed to update lock cookie: %d", 4516 ret); 4517 4518 /* 4519 * Lock cookie cannot be updated on older OSDs, so do 4520 * a manual release and queue an acquire. 4521 */ 4522 __rbd_release_lock(rbd_dev); 4523 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); 4524 } else { 4525 __rbd_lock(rbd_dev, cookie); 4526 wake_lock_waiters(rbd_dev, 0); 4527 } 4528 } 4529 4530 static void rbd_reregister_watch(struct work_struct *work) 4531 { 4532 struct rbd_device *rbd_dev = container_of(to_delayed_work(work), 4533 struct rbd_device, watch_dwork); 4534 int ret; 4535 4536 dout("%s rbd_dev %p\n", __func__, rbd_dev); 4537 4538 mutex_lock(&rbd_dev->watch_mutex); 4539 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) { 4540 mutex_unlock(&rbd_dev->watch_mutex); 4541 return; 4542 } 4543 4544 ret = __rbd_register_watch(rbd_dev); 4545 if (ret) { 4546 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); 4547 if (ret != -EBLOCKLISTED && ret != -ENOENT) { 4548 queue_delayed_work(rbd_dev->task_wq, 4549 &rbd_dev->watch_dwork, 4550 RBD_RETRY_DELAY); 4551 mutex_unlock(&rbd_dev->watch_mutex); 4552 return; 4553 } 4554 4555 mutex_unlock(&rbd_dev->watch_mutex); 4556 down_write(&rbd_dev->lock_rwsem); 4557 wake_lock_waiters(rbd_dev, ret); 4558 up_write(&rbd_dev->lock_rwsem); 4559 return; 4560 } 4561 4562 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; 4563 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; 4564 mutex_unlock(&rbd_dev->watch_mutex); 4565 4566 down_write(&rbd_dev->lock_rwsem); 4567 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) 4568 rbd_reacquire_lock(rbd_dev); 4569 up_write(&rbd_dev->lock_rwsem); 4570 4571 ret = rbd_dev_refresh(rbd_dev); 4572 if (ret) 4573 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret); 4574 } 4575 4576 /* 4577 * Synchronous osd object method call. Returns the number of bytes 4578 * returned in the outbound buffer, or a negative error code. 
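* (In the terms used by the parameters below, that count is the
* amount of reply data copied into the "inbound" buffer; "outbound"
* carries the request parameters sent to the OSD.)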
4579 */ 4580 static int rbd_obj_method_sync(struct rbd_device *rbd_dev, 4581 struct ceph_object_id *oid, 4582 struct ceph_object_locator *oloc, 4583 const char *method_name, 4584 const void *outbound, 4585 size_t outbound_size, 4586 void *inbound, 4587 size_t inbound_size) 4588 { 4589 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4590 struct page *req_page = NULL; 4591 struct page *reply_page; 4592 int ret; 4593 4594 /* 4595 * Method calls are ultimately read operations. The result 4596 * should be placed into the inbound buffer provided. They 4597 * also supply outbound data--parameters for the object 4598 * method. Currently if this is present it will be a 4599 * snapshot id. 4600 */ 4601 if (outbound) { 4602 if (outbound_size > PAGE_SIZE) 4603 return -E2BIG; 4604 4605 req_page = alloc_page(GFP_KERNEL); 4606 if (!req_page) 4607 return -ENOMEM; 4608 4609 memcpy(page_address(req_page), outbound, outbound_size); 4610 } 4611 4612 reply_page = alloc_page(GFP_KERNEL); 4613 if (!reply_page) { 4614 if (req_page) 4615 __free_page(req_page); 4616 return -ENOMEM; 4617 } 4618 4619 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name, 4620 CEPH_OSD_FLAG_READ, req_page, outbound_size, 4621 &reply_page, &inbound_size); 4622 if (!ret) { 4623 memcpy(inbound, page_address(reply_page), inbound_size); 4624 ret = inbound_size; 4625 } 4626 4627 if (req_page) 4628 __free_page(req_page); 4629 __free_page(reply_page); 4630 return ret; 4631 } 4632 4633 static void rbd_queue_workfn(struct work_struct *work) 4634 { 4635 struct rbd_img_request *img_request = 4636 container_of(work, struct rbd_img_request, work); 4637 struct rbd_device *rbd_dev = img_request->rbd_dev; 4638 enum obj_operation_type op_type = img_request->op_type; 4639 struct request *rq = blk_mq_rq_from_pdu(img_request); 4640 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; 4641 u64 length = blk_rq_bytes(rq); 4642 u64 mapping_size; 4643 int result; 4644 4645 /* Ignore/skip any zero-length requests */ 4646 if (!length) { 4647 dout("%s: zero-length request\n", __func__); 4648 result = 0; 4649 goto err_img_request; 4650 } 4651 4652 blk_mq_start_request(rq); 4653 4654 down_read(&rbd_dev->header_rwsem); 4655 mapping_size = rbd_dev->mapping.size; 4656 rbd_img_capture_header(img_request); 4657 up_read(&rbd_dev->header_rwsem); 4658 4659 if (offset + length > mapping_size) { 4660 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset, 4661 length, mapping_size); 4662 result = -EIO; 4663 goto err_img_request; 4664 } 4665 4666 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev, 4667 img_request, obj_op_name(op_type), offset, length); 4668 4669 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT) 4670 result = rbd_img_fill_nodata(img_request, offset, length); 4671 else 4672 result = rbd_img_fill_from_bio(img_request, offset, length, 4673 rq->bio); 4674 if (result) 4675 goto err_img_request; 4676 4677 rbd_img_handle_request(img_request, 0); 4678 return; 4679 4680 err_img_request: 4681 rbd_img_request_destroy(img_request); 4682 if (result) 4683 rbd_warn(rbd_dev, "%s %llx at %llx result %d", 4684 obj_op_name(op_type), length, offset, result); 4685 blk_mq_end_request(rq, errno_to_blk_status(result)); 4686 } 4687 4688 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx, 4689 const struct blk_mq_queue_data *bd) 4690 { 4691 struct rbd_device *rbd_dev = hctx->queue->queuedata; 4692 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq); 4693 enum obj_operation_type op_type; 4694 4695 switch
(req_op(bd->rq)) { 4696 case REQ_OP_DISCARD: 4697 op_type = OBJ_OP_DISCARD; 4698 break; 4699 case REQ_OP_WRITE_ZEROES: 4700 op_type = OBJ_OP_ZEROOUT; 4701 break; 4702 case REQ_OP_WRITE: 4703 op_type = OBJ_OP_WRITE; 4704 break; 4705 case REQ_OP_READ: 4706 op_type = OBJ_OP_READ; 4707 break; 4708 default: 4709 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq)); 4710 return BLK_STS_IOERR; 4711 } 4712 4713 rbd_img_request_init(img_req, rbd_dev, op_type); 4714 4715 if (rbd_img_is_write(img_req)) { 4716 if (rbd_is_ro(rbd_dev)) { 4717 rbd_warn(rbd_dev, "%s on read-only mapping", 4718 obj_op_name(img_req->op_type)); 4719 return BLK_STS_IOERR; 4720 } 4721 rbd_assert(!rbd_is_snap(rbd_dev)); 4722 } 4723 4724 INIT_WORK(&img_req->work, rbd_queue_workfn); 4725 queue_work(rbd_wq, &img_req->work); 4726 return BLK_STS_OK; 4727 } 4728 4729 static void rbd_free_disk(struct rbd_device *rbd_dev) 4730 { 4731 put_disk(rbd_dev->disk); 4732 blk_mq_free_tag_set(&rbd_dev->tag_set); 4733 rbd_dev->disk = NULL; 4734 } 4735 4736 static int rbd_obj_read_sync(struct rbd_device *rbd_dev, 4737 struct ceph_object_id *oid, 4738 struct ceph_object_locator *oloc, 4739 void *buf, int buf_len) 4740 4741 { 4742 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 4743 struct ceph_osd_request *req; 4744 struct page **pages; 4745 int num_pages = calc_pages_for(0, buf_len); 4746 int ret; 4747 4748 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL); 4749 if (!req) 4750 return -ENOMEM; 4751 4752 ceph_oid_copy(&req->r_base_oid, oid); 4753 ceph_oloc_copy(&req->r_base_oloc, oloc); 4754 req->r_flags = CEPH_OSD_FLAG_READ; 4755 4756 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); 4757 if (IS_ERR(pages)) { 4758 ret = PTR_ERR(pages); 4759 goto out_req; 4760 } 4761 4762 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0); 4763 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false, 4764 true); 4765 4766 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); 4767 if (ret) 4768 goto out_req; 4769 4770 ceph_osdc_start_request(osdc, req); 4771 ret = ceph_osdc_wait_request(osdc, req); 4772 if (ret >= 0) 4773 ceph_copy_from_page_vector(pages, buf, 0, ret); 4774 4775 out_req: 4776 ceph_osdc_put_request(req); 4777 return ret; 4778 } 4779 4780 /* 4781 * Read the complete header for the given rbd device. On successful 4782 * return, the rbd_dev->header field will contain up-to-date 4783 * information about the image. 4784 */ 4785 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) 4786 { 4787 struct rbd_image_header_ondisk *ondisk = NULL; 4788 u32 snap_count = 0; 4789 u64 names_size = 0; 4790 u32 want_count; 4791 int ret; 4792 4793 /* 4794 * The complete header will include an array of its 64-bit 4795 * snapshot ids, followed by the names of those snapshots as 4796 * a contiguous block of NUL-terminated strings. Note that 4797 * the number of snapshots could change by the time we read 4798 * it in, in which case we re-read it. 
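* The buffer allocated below is therefore sized from what was seen
* on the previous pass:
*
*   sizeof(*ondisk)
*     + snap_count * sizeof(struct rbd_image_snap_ondisk)
*     + names_size
*
* and the loop repeats until the snapshot count stops changing.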
4799 */ 4800 do { 4801 size_t size; 4802 4803 kfree(ondisk); 4804 4805 size = sizeof (*ondisk); 4806 size += snap_count * sizeof (struct rbd_image_snap_ondisk); 4807 size += names_size; 4808 ondisk = kmalloc(size, GFP_KERNEL); 4809 if (!ondisk) 4810 return -ENOMEM; 4811 4812 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid, 4813 &rbd_dev->header_oloc, ondisk, size); 4814 if (ret < 0) 4815 goto out; 4816 if ((size_t)ret < size) { 4817 ret = -ENXIO; 4818 rbd_warn(rbd_dev, "short header read (want %zd got %d)", 4819 size, ret); 4820 goto out; 4821 } 4822 if (!rbd_dev_ondisk_valid(ondisk)) { 4823 ret = -ENXIO; 4824 rbd_warn(rbd_dev, "invalid header"); 4825 goto out; 4826 } 4827 4828 names_size = le64_to_cpu(ondisk->snap_names_len); 4829 want_count = snap_count; 4830 snap_count = le32_to_cpu(ondisk->snap_count); 4831 } while (snap_count != want_count); 4832 4833 ret = rbd_header_from_disk(rbd_dev, ondisk); 4834 out: 4835 kfree(ondisk); 4836 4837 return ret; 4838 } 4839 4840 static void rbd_dev_update_size(struct rbd_device *rbd_dev) 4841 { 4842 sector_t size; 4843 4844 /* 4845 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't 4846 * try to update its size. If REMOVING is set, updating size 4847 * is just useless work since the device can't be opened. 4848 */ 4849 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) && 4850 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) { 4851 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 4852 dout("setting size to %llu sectors", (unsigned long long)size); 4853 set_capacity_and_notify(rbd_dev->disk, size); 4854 } 4855 } 4856 4857 static int rbd_dev_refresh(struct rbd_device *rbd_dev) 4858 { 4859 u64 mapping_size; 4860 int ret; 4861 4862 down_write(&rbd_dev->header_rwsem); 4863 mapping_size = rbd_dev->mapping.size; 4864 4865 ret = rbd_dev_header_info(rbd_dev); 4866 if (ret) 4867 goto out; 4868 4869 /* 4870 * If there is a parent, see if it has disappeared due to the 4871 * mapped image getting flattened. 
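* (rbd_dev_v2_parent_info() zeroes parent_overlap and drops the
* parent reference in that case, so new requests are no longer
* routed to the former parent.)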
4872 */ 4873 if (rbd_dev->parent) { 4874 ret = rbd_dev_v2_parent_info(rbd_dev); 4875 if (ret) 4876 goto out; 4877 } 4878 4879 rbd_assert(!rbd_is_snap(rbd_dev)); 4880 rbd_dev->mapping.size = rbd_dev->header.image_size; 4881 4882 out: 4883 up_write(&rbd_dev->header_rwsem); 4884 if (!ret && mapping_size != rbd_dev->mapping.size) 4885 rbd_dev_update_size(rbd_dev); 4886 4887 return ret; 4888 } 4889 4890 static const struct blk_mq_ops rbd_mq_ops = { 4891 .queue_rq = rbd_queue_rq, 4892 }; 4893 4894 static int rbd_init_disk(struct rbd_device *rbd_dev) 4895 { 4896 struct gendisk *disk; 4897 struct request_queue *q; 4898 unsigned int objset_bytes = 4899 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; 4900 int err; 4901 4902 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); 4903 rbd_dev->tag_set.ops = &rbd_mq_ops; 4904 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth; 4905 rbd_dev->tag_set.numa_node = NUMA_NO_NODE; 4906 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 4907 rbd_dev->tag_set.nr_hw_queues = num_present_cpus(); 4908 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request); 4909 4910 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set); 4911 if (err) 4912 return err; 4913 4914 disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev); 4915 if (IS_ERR(disk)) { 4916 err = PTR_ERR(disk); 4917 goto out_tag_set; 4918 } 4919 q = disk->queue; 4920 4921 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", 4922 rbd_dev->dev_id); 4923 disk->major = rbd_dev->major; 4924 disk->first_minor = rbd_dev->minor; 4925 if (single_major) 4926 disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT); 4927 else 4928 disk->minors = RBD_MINORS_PER_MAJOR; 4929 disk->fops = &rbd_bd_ops; 4930 disk->private_data = rbd_dev; 4931 4932 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 4933 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ 4934 4935 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); 4936 q->limits.max_sectors = queue_max_hw_sectors(q); 4937 blk_queue_max_segments(q, USHRT_MAX); 4938 blk_queue_max_segment_size(q, UINT_MAX); 4939 blk_queue_io_min(q, rbd_dev->opts->alloc_size); 4940 blk_queue_io_opt(q, rbd_dev->opts->alloc_size); 4941 4942 if (rbd_dev->opts->trim) { 4943 q->limits.discard_granularity = rbd_dev->opts->alloc_size; 4944 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); 4945 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); 4946 } 4947 4948 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) 4949 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); 4950 4951 rbd_dev->disk = disk; 4952 4953 return 0; 4954 out_tag_set: 4955 blk_mq_free_tag_set(&rbd_dev->tag_set); 4956 return err; 4957 } 4958 4959 /* 4960 sysfs 4961 */ 4962 4963 static struct rbd_device *dev_to_rbd_dev(struct device *dev) 4964 { 4965 return container_of(dev, struct rbd_device, dev); 4966 } 4967 4968 static ssize_t rbd_size_show(struct device *dev, 4969 struct device_attribute *attr, char *buf) 4970 { 4971 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4972 4973 return sprintf(buf, "%llu\n", 4974 (unsigned long long)rbd_dev->mapping.size); 4975 } 4976 4977 static ssize_t rbd_features_show(struct device *dev, 4978 struct device_attribute *attr, char *buf) 4979 { 4980 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4981 4982 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features); 4983 } 4984 4985 static ssize_t rbd_major_show(struct device *dev, 4986 struct device_attribute *attr, char *buf) 4987 { 4988 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 4989 
4990 if (rbd_dev->major) 4991 return sprintf(buf, "%d\n", rbd_dev->major); 4992 4993 return sprintf(buf, "(none)\n"); 4994 } 4995 4996 static ssize_t rbd_minor_show(struct device *dev, 4997 struct device_attribute *attr, char *buf) 4998 { 4999 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5000 5001 return sprintf(buf, "%d\n", rbd_dev->minor); 5002 } 5003 5004 static ssize_t rbd_client_addr_show(struct device *dev, 5005 struct device_attribute *attr, char *buf) 5006 { 5007 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5008 struct ceph_entity_addr *client_addr = 5009 ceph_client_addr(rbd_dev->rbd_client->client); 5010 5011 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr, 5012 le32_to_cpu(client_addr->nonce)); 5013 } 5014 5015 static ssize_t rbd_client_id_show(struct device *dev, 5016 struct device_attribute *attr, char *buf) 5017 { 5018 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5019 5020 return sprintf(buf, "client%lld\n", 5021 ceph_client_gid(rbd_dev->rbd_client->client)); 5022 } 5023 5024 static ssize_t rbd_cluster_fsid_show(struct device *dev, 5025 struct device_attribute *attr, char *buf) 5026 { 5027 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5028 5029 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid); 5030 } 5031 5032 static ssize_t rbd_config_info_show(struct device *dev, 5033 struct device_attribute *attr, char *buf) 5034 { 5035 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5036 5037 if (!capable(CAP_SYS_ADMIN)) 5038 return -EPERM; 5039 5040 return sprintf(buf, "%s\n", rbd_dev->config_info); 5041 } 5042 5043 static ssize_t rbd_pool_show(struct device *dev, 5044 struct device_attribute *attr, char *buf) 5045 { 5046 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5047 5048 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name); 5049 } 5050 5051 static ssize_t rbd_pool_id_show(struct device *dev, 5052 struct device_attribute *attr, char *buf) 5053 { 5054 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5055 5056 return sprintf(buf, "%llu\n", 5057 (unsigned long long) rbd_dev->spec->pool_id); 5058 } 5059 5060 static ssize_t rbd_pool_ns_show(struct device *dev, 5061 struct device_attribute *attr, char *buf) 5062 { 5063 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5064 5065 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: ""); 5066 } 5067 5068 static ssize_t rbd_name_show(struct device *dev, 5069 struct device_attribute *attr, char *buf) 5070 { 5071 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5072 5073 if (rbd_dev->spec->image_name) 5074 return sprintf(buf, "%s\n", rbd_dev->spec->image_name); 5075 5076 return sprintf(buf, "(unknown)\n"); 5077 } 5078 5079 static ssize_t rbd_image_id_show(struct device *dev, 5080 struct device_attribute *attr, char *buf) 5081 { 5082 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5083 5084 return sprintf(buf, "%s\n", rbd_dev->spec->image_id); 5085 } 5086 5087 /* 5088 * Shows the name of the currently-mapped snapshot (or 5089 * RBD_SNAP_HEAD_NAME for the base image). 
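* e.g. reading the current_snap attribute (typically
* /sys/bus/rbd/devices/<id>/current_snap) is expected to print "-"
* when the base image rather than a snapshot is mapped.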
5090 */ 5091 static ssize_t rbd_snap_show(struct device *dev, 5092 struct device_attribute *attr, 5093 char *buf) 5094 { 5095 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5096 5097 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name); 5098 } 5099 5100 static ssize_t rbd_snap_id_show(struct device *dev, 5101 struct device_attribute *attr, char *buf) 5102 { 5103 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5104 5105 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id); 5106 } 5107 5108 /* 5109 * For a v2 image, shows the chain of parent images, separated by empty 5110 * lines. For v1 images or if there is no parent, shows "(no parent 5111 * image)". 5112 */ 5113 static ssize_t rbd_parent_show(struct device *dev, 5114 struct device_attribute *attr, 5115 char *buf) 5116 { 5117 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5118 ssize_t count = 0; 5119 5120 if (!rbd_dev->parent) 5121 return sprintf(buf, "(no parent image)\n"); 5122 5123 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) { 5124 struct rbd_spec *spec = rbd_dev->parent_spec; 5125 5126 count += sprintf(&buf[count], "%s" 5127 "pool_id %llu\npool_name %s\n" 5128 "pool_ns %s\n" 5129 "image_id %s\nimage_name %s\n" 5130 "snap_id %llu\nsnap_name %s\n" 5131 "overlap %llu\n", 5132 !count ? "" : "\n", /* first? */ 5133 spec->pool_id, spec->pool_name, 5134 spec->pool_ns ?: "", 5135 spec->image_id, spec->image_name ?: "(unknown)", 5136 spec->snap_id, spec->snap_name, 5137 rbd_dev->parent_overlap); 5138 } 5139 5140 return count; 5141 } 5142 5143 static ssize_t rbd_image_refresh(struct device *dev, 5144 struct device_attribute *attr, 5145 const char *buf, 5146 size_t size) 5147 { 5148 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5149 int ret; 5150 5151 if (!capable(CAP_SYS_ADMIN)) 5152 return -EPERM; 5153 5154 ret = rbd_dev_refresh(rbd_dev); 5155 if (ret) 5156 return ret; 5157 5158 return size; 5159 } 5160 5161 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL); 5162 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL); 5163 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL); 5164 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL); 5165 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL); 5166 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL); 5167 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL); 5168 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL); 5169 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL); 5170 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL); 5171 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL); 5172 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL); 5173 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL); 5174 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh); 5175 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL); 5176 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL); 5177 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL); 5178 5179 static struct attribute *rbd_attrs[] = { 5180 &dev_attr_size.attr, 5181 &dev_attr_features.attr, 5182 &dev_attr_major.attr, 5183 &dev_attr_minor.attr, 5184 &dev_attr_client_addr.attr, 5185 &dev_attr_client_id.attr, 5186 &dev_attr_cluster_fsid.attr, 5187 &dev_attr_config_info.attr, 5188 &dev_attr_pool.attr, 5189 &dev_attr_pool_id.attr, 5190 &dev_attr_pool_ns.attr, 5191 &dev_attr_name.attr, 5192 &dev_attr_image_id.attr, 5193 &dev_attr_current_snap.attr, 5194 &dev_attr_snap_id.attr, 5195 &dev_attr_parent.attr, 5196 &dev_attr_refresh.attr, 
5197 NULL 5198 }; 5199 5200 static struct attribute_group rbd_attr_group = { 5201 .attrs = rbd_attrs, 5202 }; 5203 5204 static const struct attribute_group *rbd_attr_groups[] = { 5205 &rbd_attr_group, 5206 NULL 5207 }; 5208 5209 static void rbd_dev_release(struct device *dev); 5210 5211 static const struct device_type rbd_device_type = { 5212 .name = "rbd", 5213 .groups = rbd_attr_groups, 5214 .release = rbd_dev_release, 5215 }; 5216 5217 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec) 5218 { 5219 kref_get(&spec->kref); 5220 5221 return spec; 5222 } 5223 5224 static void rbd_spec_free(struct kref *kref); 5225 static void rbd_spec_put(struct rbd_spec *spec) 5226 { 5227 if (spec) 5228 kref_put(&spec->kref, rbd_spec_free); 5229 } 5230 5231 static struct rbd_spec *rbd_spec_alloc(void) 5232 { 5233 struct rbd_spec *spec; 5234 5235 spec = kzalloc(sizeof (*spec), GFP_KERNEL); 5236 if (!spec) 5237 return NULL; 5238 5239 spec->pool_id = CEPH_NOPOOL; 5240 spec->snap_id = CEPH_NOSNAP; 5241 kref_init(&spec->kref); 5242 5243 return spec; 5244 } 5245 5246 static void rbd_spec_free(struct kref *kref) 5247 { 5248 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref); 5249 5250 kfree(spec->pool_name); 5251 kfree(spec->pool_ns); 5252 kfree(spec->image_id); 5253 kfree(spec->image_name); 5254 kfree(spec->snap_name); 5255 kfree(spec); 5256 } 5257 5258 static void rbd_dev_free(struct rbd_device *rbd_dev) 5259 { 5260 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED); 5261 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED); 5262 5263 ceph_oid_destroy(&rbd_dev->header_oid); 5264 ceph_oloc_destroy(&rbd_dev->header_oloc); 5265 kfree(rbd_dev->config_info); 5266 5267 rbd_put_client(rbd_dev->rbd_client); 5268 rbd_spec_put(rbd_dev->spec); 5269 kfree(rbd_dev->opts); 5270 kfree(rbd_dev); 5271 } 5272 5273 static void rbd_dev_release(struct device *dev) 5274 { 5275 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5276 bool need_put = !!rbd_dev->opts; 5277 5278 if (need_put) { 5279 destroy_workqueue(rbd_dev->task_wq); 5280 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id); 5281 } 5282 5283 rbd_dev_free(rbd_dev); 5284 5285 /* 5286 * This is racy, but way better than putting module outside of 5287 * the release callback. The race window is pretty small, so 5288 * doing something similar to dm (dm-builtin.c) is overkill. 
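* (The race being that module_put() can let the module be unloaded
* while this release callback has not quite finished running.)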
5289 */ 5290 if (need_put) 5291 module_put(THIS_MODULE); 5292 } 5293 5294 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, 5295 struct rbd_spec *spec) 5296 { 5297 struct rbd_device *rbd_dev; 5298 5299 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL); 5300 if (!rbd_dev) 5301 return NULL; 5302 5303 spin_lock_init(&rbd_dev->lock); 5304 INIT_LIST_HEAD(&rbd_dev->node); 5305 init_rwsem(&rbd_dev->header_rwsem); 5306 5307 rbd_dev->header.data_pool_id = CEPH_NOPOOL; 5308 ceph_oid_init(&rbd_dev->header_oid); 5309 rbd_dev->header_oloc.pool = spec->pool_id; 5310 if (spec->pool_ns) { 5311 WARN_ON(!*spec->pool_ns); 5312 rbd_dev->header_oloc.pool_ns = 5313 ceph_find_or_create_string(spec->pool_ns, 5314 strlen(spec->pool_ns)); 5315 } 5316 5317 mutex_init(&rbd_dev->watch_mutex); 5318 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; 5319 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch); 5320 5321 init_rwsem(&rbd_dev->lock_rwsem); 5322 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED; 5323 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock); 5324 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock); 5325 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock); 5326 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work); 5327 spin_lock_init(&rbd_dev->lock_lists_lock); 5328 INIT_LIST_HEAD(&rbd_dev->acquiring_list); 5329 INIT_LIST_HEAD(&rbd_dev->running_list); 5330 init_completion(&rbd_dev->acquire_wait); 5331 init_completion(&rbd_dev->releasing_wait); 5332 5333 spin_lock_init(&rbd_dev->object_map_lock); 5334 5335 rbd_dev->dev.bus = &rbd_bus_type; 5336 rbd_dev->dev.type = &rbd_device_type; 5337 rbd_dev->dev.parent = &rbd_root_dev; 5338 device_initialize(&rbd_dev->dev); 5339 5340 rbd_dev->rbd_client = rbdc; 5341 rbd_dev->spec = spec; 5342 5343 return rbd_dev; 5344 } 5345 5346 /* 5347 * Create a mapping rbd_dev. 5348 */ 5349 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, 5350 struct rbd_spec *spec, 5351 struct rbd_options *opts) 5352 { 5353 struct rbd_device *rbd_dev; 5354 5355 rbd_dev = __rbd_dev_create(rbdc, spec); 5356 if (!rbd_dev) 5357 return NULL; 5358 5359 rbd_dev->opts = opts; 5360 5361 /* get an id and fill in device name */ 5362 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0, 5363 minor_to_rbd_dev_id(1 << MINORBITS), 5364 GFP_KERNEL); 5365 if (rbd_dev->dev_id < 0) 5366 goto fail_rbd_dev; 5367 5368 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id); 5369 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM, 5370 rbd_dev->name); 5371 if (!rbd_dev->task_wq) 5372 goto fail_dev_id; 5373 5374 /* we have a ref from do_rbd_add() */ 5375 __module_get(THIS_MODULE); 5376 5377 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id); 5378 return rbd_dev; 5379 5380 fail_dev_id: 5381 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id); 5382 fail_rbd_dev: 5383 rbd_dev_free(rbd_dev); 5384 return NULL; 5385 } 5386 5387 static void rbd_dev_destroy(struct rbd_device *rbd_dev) 5388 { 5389 if (rbd_dev) 5390 put_device(&rbd_dev->dev); 5391 } 5392 5393 /* 5394 * Get the size and object order for an image snapshot, or if 5395 * snap_id is CEPH_NOSNAP, gets this information for the base 5396 * image. 
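* The "get_size" class method replies with a packed (u8 order,
* le64 size) pair, which is decoded into size_buf below.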
5397 */ 5398 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, 5399 u8 *order, u64 *snap_size) 5400 { 5401 __le64 snapid = cpu_to_le64(snap_id); 5402 int ret; 5403 struct { 5404 u8 order; 5405 __le64 size; 5406 } __attribute__ ((packed)) size_buf = { 0 }; 5407 5408 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5409 &rbd_dev->header_oloc, "get_size", 5410 &snapid, sizeof(snapid), 5411 &size_buf, sizeof(size_buf)); 5412 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5413 if (ret < 0) 5414 return ret; 5415 if (ret < sizeof (size_buf)) 5416 return -ERANGE; 5417 5418 if (order) { 5419 *order = size_buf.order; 5420 dout(" order %u", (unsigned int)*order); 5421 } 5422 *snap_size = le64_to_cpu(size_buf.size); 5423 5424 dout(" snap_id 0x%016llx snap_size = %llu\n", 5425 (unsigned long long)snap_id, 5426 (unsigned long long)*snap_size); 5427 5428 return 0; 5429 } 5430 5431 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev) 5432 { 5433 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, 5434 &rbd_dev->header.obj_order, 5435 &rbd_dev->header.image_size); 5436 } 5437 5438 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) 5439 { 5440 size_t size; 5441 void *reply_buf; 5442 int ret; 5443 void *p; 5444 5445 /* Response will be an encoded string, which includes a length */ 5446 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX; 5447 reply_buf = kzalloc(size, GFP_KERNEL); 5448 if (!reply_buf) 5449 return -ENOMEM; 5450 5451 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5452 &rbd_dev->header_oloc, "get_object_prefix", 5453 NULL, 0, reply_buf, size); 5454 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5455 if (ret < 0) 5456 goto out; 5457 5458 p = reply_buf; 5459 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, 5460 p + ret, NULL, GFP_NOIO); 5461 ret = 0; 5462 5463 if (IS_ERR(rbd_dev->header.object_prefix)) { 5464 ret = PTR_ERR(rbd_dev->header.object_prefix); 5465 rbd_dev->header.object_prefix = NULL; 5466 } else { 5467 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); 5468 } 5469 out: 5470 kfree(reply_buf); 5471 5472 return ret; 5473 } 5474 5475 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 5476 bool read_only, u64 *snap_features) 5477 { 5478 struct { 5479 __le64 snap_id; 5480 u8 read_only; 5481 } features_in; 5482 struct { 5483 __le64 features; 5484 __le64 incompat; 5485 } __attribute__ ((packed)) features_buf = { 0 }; 5486 u64 unsup; 5487 int ret; 5488 5489 features_in.snap_id = cpu_to_le64(snap_id); 5490 features_in.read_only = read_only; 5491 5492 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5493 &rbd_dev->header_oloc, "get_features", 5494 &features_in, sizeof(features_in), 5495 &features_buf, sizeof(features_buf)); 5496 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5497 if (ret < 0) 5498 return ret; 5499 if (ret < sizeof (features_buf)) 5500 return -ERANGE; 5501 5502 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED; 5503 if (unsup) { 5504 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx", 5505 unsup); 5506 return -ENXIO; 5507 } 5508 5509 *snap_features = le64_to_cpu(features_buf.features); 5510 5511 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n", 5512 (unsigned long long)snap_id, 5513 (unsigned long long)*snap_features, 5514 (unsigned long long)le64_to_cpu(features_buf.incompat)); 5515 5516 return 0; 5517 } 5518 5519 static int rbd_dev_v2_features(struct rbd_device 
*rbd_dev) 5520 { 5521 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, 5522 rbd_is_ro(rbd_dev), 5523 &rbd_dev->header.features); 5524 } 5525 5526 /* 5527 * These are generic image flags, but since they are used only for 5528 * object map, store them in rbd_dev->object_map_flags. 5529 * 5530 * For the same reason, this function is called only on object map 5531 * (re)load and not on header refresh. 5532 */ 5533 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev) 5534 { 5535 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id); 5536 __le64 flags; 5537 int ret; 5538 5539 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5540 &rbd_dev->header_oloc, "get_flags", 5541 &snapid, sizeof(snapid), 5542 &flags, sizeof(flags)); 5543 if (ret < 0) 5544 return ret; 5545 if (ret < sizeof(flags)) 5546 return -EBADMSG; 5547 5548 rbd_dev->object_map_flags = le64_to_cpu(flags); 5549 return 0; 5550 } 5551 5552 struct parent_image_info { 5553 u64 pool_id; 5554 const char *pool_ns; 5555 const char *image_id; 5556 u64 snap_id; 5557 5558 bool has_overlap; 5559 u64 overlap; 5560 }; 5561 5562 /* 5563 * The caller is responsible for @pii. 5564 */ 5565 static int decode_parent_image_spec(void **p, void *end, 5566 struct parent_image_info *pii) 5567 { 5568 u8 struct_v; 5569 u32 struct_len; 5570 int ret; 5571 5572 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec", 5573 &struct_v, &struct_len); 5574 if (ret) 5575 return ret; 5576 5577 ceph_decode_64_safe(p, end, pii->pool_id, e_inval); 5578 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); 5579 if (IS_ERR(pii->pool_ns)) { 5580 ret = PTR_ERR(pii->pool_ns); 5581 pii->pool_ns = NULL; 5582 return ret; 5583 } 5584 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); 5585 if (IS_ERR(pii->image_id)) { 5586 ret = PTR_ERR(pii->image_id); 5587 pii->image_id = NULL; 5588 return ret; 5589 } 5590 ceph_decode_64_safe(p, end, pii->snap_id, e_inval); 5591 return 0; 5592 5593 e_inval: 5594 return -EINVAL; 5595 } 5596 5597 static int __get_parent_info(struct rbd_device *rbd_dev, 5598 struct page *req_page, 5599 struct page *reply_page, 5600 struct parent_image_info *pii) 5601 { 5602 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 5603 size_t reply_len = PAGE_SIZE; 5604 void *p, *end; 5605 int ret; 5606 5607 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 5608 "rbd", "parent_get", CEPH_OSD_FLAG_READ, 5609 req_page, sizeof(u64), &reply_page, &reply_len); 5610 if (ret) 5611 return ret == -EOPNOTSUPP ? 1 : ret; 5612 5613 p = page_address(reply_page); 5614 end = p + reply_len; 5615 ret = decode_parent_image_spec(&p, end, pii); 5616 if (ret) 5617 return ret; 5618 5619 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 5620 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ, 5621 req_page, sizeof(u64), &reply_page, &reply_len); 5622 if (ret) 5623 return ret; 5624 5625 p = page_address(reply_page); 5626 end = p + reply_len; 5627 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval); 5628 if (pii->has_overlap) 5629 ceph_decode_64_safe(&p, end, pii->overlap, e_inval); 5630 5631 return 0; 5632 5633 e_inval: 5634 return -EINVAL; 5635 } 5636 5637 /* 5638 * The caller is responsible for @pii. 
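* In particular, the strings returned in pii (pii->image_id, and
* pii->pool_ns when set) are allocated here and must be freed by
* the caller, which rbd_dev_v2_parent_info() does with kfree().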
5639 */ 5640 static int __get_parent_info_legacy(struct rbd_device *rbd_dev, 5641 struct page *req_page, 5642 struct page *reply_page, 5643 struct parent_image_info *pii) 5644 { 5645 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 5646 size_t reply_len = PAGE_SIZE; 5647 void *p, *end; 5648 int ret; 5649 5650 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, 5651 "rbd", "get_parent", CEPH_OSD_FLAG_READ, 5652 req_page, sizeof(u64), &reply_page, &reply_len); 5653 if (ret) 5654 return ret; 5655 5656 p = page_address(reply_page); 5657 end = p + reply_len; 5658 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval); 5659 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 5660 if (IS_ERR(pii->image_id)) { 5661 ret = PTR_ERR(pii->image_id); 5662 pii->image_id = NULL; 5663 return ret; 5664 } 5665 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval); 5666 pii->has_overlap = true; 5667 ceph_decode_64_safe(&p, end, pii->overlap, e_inval); 5668 5669 return 0; 5670 5671 e_inval: 5672 return -EINVAL; 5673 } 5674 5675 static int get_parent_info(struct rbd_device *rbd_dev, 5676 struct parent_image_info *pii) 5677 { 5678 struct page *req_page, *reply_page; 5679 void *p; 5680 int ret; 5681 5682 req_page = alloc_page(GFP_KERNEL); 5683 if (!req_page) 5684 return -ENOMEM; 5685 5686 reply_page = alloc_page(GFP_KERNEL); 5687 if (!reply_page) { 5688 __free_page(req_page); 5689 return -ENOMEM; 5690 } 5691 5692 p = page_address(req_page); 5693 ceph_encode_64(&p, rbd_dev->spec->snap_id); 5694 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii); 5695 if (ret > 0) 5696 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page, 5697 pii); 5698 5699 __free_page(req_page); 5700 __free_page(reply_page); 5701 return ret; 5702 } 5703 5704 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) 5705 { 5706 struct rbd_spec *parent_spec; 5707 struct parent_image_info pii = { 0 }; 5708 int ret; 5709 5710 parent_spec = rbd_spec_alloc(); 5711 if (!parent_spec) 5712 return -ENOMEM; 5713 5714 ret = get_parent_info(rbd_dev, &pii); 5715 if (ret) 5716 goto out_err; 5717 5718 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", 5719 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id, 5720 pii.has_overlap, pii.overlap); 5721 5722 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) { 5723 /* 5724 * Either the parent never existed, or we have 5725 * record of it but the image got flattened so it no 5726 * longer has a parent. When the parent of a 5727 * layered image disappears we immediately set the 5728 * overlap to 0. The effect of this is that all new 5729 * requests will be treated as if the image had no 5730 * parent. 5731 * 5732 * If !pii.has_overlap, the parent image spec is not 5733 * applicable. It's there to avoid duplication in each 5734 * snapshot record. 5735 */ 5736 if (rbd_dev->parent_overlap) { 5737 rbd_dev->parent_overlap = 0; 5738 rbd_dev_parent_put(rbd_dev); 5739 pr_info("%s: clone image has been flattened\n", 5740 rbd_dev->disk->disk_name); 5741 } 5742 5743 goto out; /* No parent? No problem. */ 5744 } 5745 5746 /* The ceph file layout needs to fit pool id in 32 bits */ 5747 5748 ret = -EIO; 5749 if (pii.pool_id > (u64)U32_MAX) { 5750 rbd_warn(NULL, "parent pool id too large (%llu > %u)", 5751 (unsigned long long)pii.pool_id, U32_MAX); 5752 goto out_err; 5753 } 5754 5755 /* 5756 * The parent won't change (except when the clone is 5757 * flattened, already handled that). 
So we only need to 5758 * record the parent spec if we have not already done so. 5759 */ 5760 if (!rbd_dev->parent_spec) { 5761 parent_spec->pool_id = pii.pool_id; 5762 if (pii.pool_ns && *pii.pool_ns) { 5763 parent_spec->pool_ns = pii.pool_ns; 5764 pii.pool_ns = NULL; 5765 } 5766 parent_spec->image_id = pii.image_id; 5767 pii.image_id = NULL; 5768 parent_spec->snap_id = pii.snap_id; 5769 5770 rbd_dev->parent_spec = parent_spec; 5771 parent_spec = NULL; /* rbd_dev now owns this */ 5772 } 5773 5774 /* 5775 * We always update the parent overlap. If it's zero we issue 5776 * a warning, as we will proceed as if there was no parent. 5777 */ 5778 if (!pii.overlap) { 5779 if (parent_spec) { 5780 /* refresh, careful to warn just once */ 5781 if (rbd_dev->parent_overlap) 5782 rbd_warn(rbd_dev, 5783 "clone now standalone (overlap became 0)"); 5784 } else { 5785 /* initial probe */ 5786 rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); 5787 } 5788 } 5789 rbd_dev->parent_overlap = pii.overlap; 5790 5791 out: 5792 ret = 0; 5793 out_err: 5794 kfree(pii.pool_ns); 5795 kfree(pii.image_id); 5796 rbd_spec_put(parent_spec); 5797 return ret; 5798 } 5799 5800 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) 5801 { 5802 struct { 5803 __le64 stripe_unit; 5804 __le64 stripe_count; 5805 } __attribute__ ((packed)) striping_info_buf = { 0 }; 5806 size_t size = sizeof (striping_info_buf); 5807 void *p; 5808 int ret; 5809 5810 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5811 &rbd_dev->header_oloc, "get_stripe_unit_count", 5812 NULL, 0, &striping_info_buf, size); 5813 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 5814 if (ret < 0) 5815 return ret; 5816 if (ret < size) 5817 return -ERANGE; 5818 5819 p = &striping_info_buf; 5820 rbd_dev->header.stripe_unit = ceph_decode_64(&p); 5821 rbd_dev->header.stripe_count = ceph_decode_64(&p); 5822 return 0; 5823 } 5824 5825 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev) 5826 { 5827 __le64 data_pool_id; 5828 int ret; 5829 5830 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 5831 &rbd_dev->header_oloc, "get_data_pool", 5832 NULL, 0, &data_pool_id, sizeof(data_pool_id)); 5833 if (ret < 0) 5834 return ret; 5835 if (ret < sizeof(data_pool_id)) 5836 return -EBADMSG; 5837 5838 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id); 5839 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL); 5840 return 0; 5841 } 5842 5843 static char *rbd_dev_image_name(struct rbd_device *rbd_dev) 5844 { 5845 CEPH_DEFINE_OID_ONSTACK(oid); 5846 size_t image_id_size; 5847 char *image_id; 5848 void *p; 5849 void *end; 5850 size_t size; 5851 void *reply_buf = NULL; 5852 size_t len = 0; 5853 char *image_name = NULL; 5854 int ret; 5855 5856 rbd_assert(!rbd_dev->spec->image_name); 5857 5858 len = strlen(rbd_dev->spec->image_id); 5859 image_id_size = sizeof (__le32) + len; 5860 image_id = kmalloc(image_id_size, GFP_KERNEL); 5861 if (!image_id) 5862 return NULL; 5863 5864 p = image_id; 5865 end = image_id + image_id_size; 5866 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len); 5867 5868 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX; 5869 reply_buf = kmalloc(size, GFP_KERNEL); 5870 if (!reply_buf) 5871 goto out; 5872 5873 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY); 5874 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc, 5875 "dir_get_name", image_id, image_id_size, 5876 reply_buf, size); 5877 if (ret < 0) 5878 goto out; 5879 p = reply_buf; 5880 end = reply_buf + ret; 5881 5882 image_name =
ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL); 5883 if (IS_ERR(image_name)) 5884 image_name = NULL; 5885 else 5886 dout("%s: name is %s len is %zd\n", __func__, image_name, len); 5887 out: 5888 kfree(reply_buf); 5889 kfree(image_id); 5890 5891 return image_name; 5892 } 5893 5894 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5895 { 5896 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 5897 const char *snap_name; 5898 u32 which = 0; 5899 5900 /* Skip over names until we find the one we are looking for */ 5901 5902 snap_name = rbd_dev->header.snap_names; 5903 while (which < snapc->num_snaps) { 5904 if (!strcmp(name, snap_name)) 5905 return snapc->snaps[which]; 5906 snap_name += strlen(snap_name) + 1; 5907 which++; 5908 } 5909 return CEPH_NOSNAP; 5910 } 5911 5912 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5913 { 5914 struct ceph_snap_context *snapc = rbd_dev->header.snapc; 5915 u32 which; 5916 bool found = false; 5917 u64 snap_id; 5918 5919 for (which = 0; !found && which < snapc->num_snaps; which++) { 5920 const char *snap_name; 5921 5922 snap_id = snapc->snaps[which]; 5923 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); 5924 if (IS_ERR(snap_name)) { 5925 /* ignore no-longer existing snapshots */ 5926 if (PTR_ERR(snap_name) == -ENOENT) 5927 continue; 5928 else 5929 break; 5930 } 5931 found = !strcmp(name, snap_name); 5932 kfree(snap_name); 5933 } 5934 return found ? snap_id : CEPH_NOSNAP; 5935 } 5936 5937 /* 5938 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if 5939 * no snapshot by that name is found, or if an error occurs. 5940 */ 5941 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) 5942 { 5943 if (rbd_dev->image_format == 1) 5944 return rbd_v1_snap_id_by_name(rbd_dev, name); 5945 5946 return rbd_v2_snap_id_by_name(rbd_dev, name); 5947 } 5948 5949 /* 5950 * An image being mapped will have everything but the snap id. 5951 */ 5952 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev) 5953 { 5954 struct rbd_spec *spec = rbd_dev->spec; 5955 5956 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name); 5957 rbd_assert(spec->image_id && spec->image_name); 5958 rbd_assert(spec->snap_name); 5959 5960 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) { 5961 u64 snap_id; 5962 5963 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name); 5964 if (snap_id == CEPH_NOSNAP) 5965 return -ENOENT; 5966 5967 spec->snap_id = snap_id; 5968 } else { 5969 spec->snap_id = CEPH_NOSNAP; 5970 } 5971 5972 return 0; 5973 } 5974 5975 /* 5976 * A parent image will have all ids but none of the names. 5977 * 5978 * All names in an rbd spec are dynamically allocated. It's OK if we 5979 * can't figure out the name for an image id. 
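* A missing image name only costs readability: rbd_name_show()
* falls back to printing "(unknown)" for such a spec.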
5980 */ 5981 static int rbd_spec_fill_names(struct rbd_device *rbd_dev) 5982 { 5983 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 5984 struct rbd_spec *spec = rbd_dev->spec; 5985 const char *pool_name; 5986 const char *image_name; 5987 const char *snap_name; 5988 int ret; 5989 5990 rbd_assert(spec->pool_id != CEPH_NOPOOL); 5991 rbd_assert(spec->image_id); 5992 rbd_assert(spec->snap_id != CEPH_NOSNAP); 5993 5994 /* Get the pool name; we have to make our own copy of this */ 5995 5996 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id); 5997 if (!pool_name) { 5998 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id); 5999 return -EIO; 6000 } 6001 pool_name = kstrdup(pool_name, GFP_KERNEL); 6002 if (!pool_name) 6003 return -ENOMEM; 6004 6005 /* Fetch the image name; tolerate failure here */ 6006 6007 image_name = rbd_dev_image_name(rbd_dev); 6008 if (!image_name) 6009 rbd_warn(rbd_dev, "unable to get image name"); 6010 6011 /* Fetch the snapshot name */ 6012 6013 snap_name = rbd_snap_name(rbd_dev, spec->snap_id); 6014 if (IS_ERR(snap_name)) { 6015 ret = PTR_ERR(snap_name); 6016 goto out_err; 6017 } 6018 6019 spec->pool_name = pool_name; 6020 spec->image_name = image_name; 6021 spec->snap_name = snap_name; 6022 6023 return 0; 6024 6025 out_err: 6026 kfree(image_name); 6027 kfree(pool_name); 6028 return ret; 6029 } 6030 6031 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) 6032 { 6033 size_t size; 6034 int ret; 6035 void *reply_buf; 6036 void *p; 6037 void *end; 6038 u64 seq; 6039 u32 snap_count; 6040 struct ceph_snap_context *snapc; 6041 u32 i; 6042 6043 /* 6044 * We'll need room for the seq value (maximum snapshot id), 6045 * snapshot count, and array of that many snapshot ids. 6046 * For now we have a fixed upper limit on the number we're 6047 * prepared to receive. 6048 */ 6049 size = sizeof (__le64) + sizeof (__le32) + 6050 RBD_MAX_SNAP_COUNT * sizeof (__le64); 6051 reply_buf = kzalloc(size, GFP_KERNEL); 6052 if (!reply_buf) 6053 return -ENOMEM; 6054 6055 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 6056 &rbd_dev->header_oloc, "get_snapcontext", 6057 NULL, 0, reply_buf, size); 6058 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 6059 if (ret < 0) 6060 goto out; 6061 6062 p = reply_buf; 6063 end = reply_buf + ret; 6064 ret = -ERANGE; 6065 ceph_decode_64_safe(&p, end, seq, out); 6066 ceph_decode_32_safe(&p, end, snap_count, out); 6067 6068 /* 6069 * Make sure the reported number of snapshot ids wouldn't go 6070 * beyond the end of our buffer. But before checking that, 6071 * make sure the computed size of the snapshot context we 6072 * allocate is representable in a size_t. 
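* (For example, with a 32-bit size_t a bogus snap_count of 0x20000000
* would make snap_count * sizeof(u64) wrap around to zero, so this
* check must come before the ceph_has_room() check below.)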
6073 */ 6074 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context)) 6075 / sizeof (u64)) { 6076 ret = -EINVAL; 6077 goto out; 6078 } 6079 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64))) 6080 goto out; 6081 ret = 0; 6082 6083 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); 6084 if (!snapc) { 6085 ret = -ENOMEM; 6086 goto out; 6087 } 6088 snapc->seq = seq; 6089 for (i = 0; i < snap_count; i++) 6090 snapc->snaps[i] = ceph_decode_64(&p); 6091 6092 ceph_put_snap_context(rbd_dev->header.snapc); 6093 rbd_dev->header.snapc = snapc; 6094 6095 dout(" snap context seq = %llu, snap_count = %u\n", 6096 (unsigned long long)seq, (unsigned int)snap_count); 6097 out: 6098 kfree(reply_buf); 6099 6100 return ret; 6101 } 6102 6103 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, 6104 u64 snap_id) 6105 { 6106 size_t size; 6107 void *reply_buf; 6108 __le64 snapid; 6109 int ret; 6110 void *p; 6111 void *end; 6112 char *snap_name; 6113 6114 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN; 6115 reply_buf = kmalloc(size, GFP_KERNEL); 6116 if (!reply_buf) 6117 return ERR_PTR(-ENOMEM); 6118 6119 snapid = cpu_to_le64(snap_id); 6120 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 6121 &rbd_dev->header_oloc, "get_snapshot_name", 6122 &snapid, sizeof(snapid), reply_buf, size); 6123 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 6124 if (ret < 0) { 6125 snap_name = ERR_PTR(ret); 6126 goto out; 6127 } 6128 6129 p = reply_buf; 6130 end = reply_buf + ret; 6131 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 6132 if (IS_ERR(snap_name)) 6133 goto out; 6134 6135 dout(" snap_id 0x%016llx snap_name = %s\n", 6136 (unsigned long long)snap_id, snap_name); 6137 out: 6138 kfree(reply_buf); 6139 6140 return snap_name; 6141 } 6142 6143 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) 6144 { 6145 bool first_time = rbd_dev->header.object_prefix == NULL; 6146 int ret; 6147 6148 ret = rbd_dev_v2_image_size(rbd_dev); 6149 if (ret) 6150 return ret; 6151 6152 if (first_time) { 6153 ret = rbd_dev_v2_header_onetime(rbd_dev); 6154 if (ret) 6155 return ret; 6156 } 6157 6158 ret = rbd_dev_v2_snap_context(rbd_dev); 6159 if (ret && first_time) { 6160 kfree(rbd_dev->header.object_prefix); 6161 rbd_dev->header.object_prefix = NULL; 6162 } 6163 6164 return ret; 6165 } 6166 6167 static int rbd_dev_header_info(struct rbd_device *rbd_dev) 6168 { 6169 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 6170 6171 if (rbd_dev->image_format == 1) 6172 return rbd_dev_v1_header_info(rbd_dev); 6173 6174 return rbd_dev_v2_header_info(rbd_dev); 6175 } 6176 6177 /* 6178 * Skips over white space at *buf, and updates *buf to point to the 6179 * first found non-space character (if any). Returns the length of 6180 * the token (string of non-white space characters) found. Note 6181 * that *buf must be terminated with '\0'. 6182 */ 6183 static inline size_t next_token(const char **buf) 6184 { 6185 /* 6186 * These are the characters that produce nonzero for 6187 * isspace() in the "C" and "POSIX" locales. 6188 */ 6189 static const char spaces[] = " \f\n\r\t\v"; 6190 6191 *buf += strspn(*buf, spaces); /* Find start of token */ 6192 6193 return strcspn(*buf, spaces); /* Return token length */ 6194 } 6195 6196 /* 6197 * Finds the next token in *buf, dynamically allocates a buffer big 6198 * enough to hold a copy of it, and copies the token into the new 6199 * buffer. The copy is guaranteed to be terminated with '\0'. 
Note 6200 * that a duplicate buffer is created even for a zero-length token. 6201 * 6202 * Returns a pointer to the newly-allocated duplicate, or a null 6203 * pointer if memory for the duplicate was not available. If 6204 * the lenp argument is a non-null pointer, the length of the token 6205 * (not including the '\0') is returned in *lenp. 6206 * 6207 * If successful, the *buf pointer will be updated to point beyond 6208 * the end of the found token. 6209 * 6210 * Note: uses GFP_KERNEL for allocation. 6211 */ 6212 static inline char *dup_token(const char **buf, size_t *lenp) 6213 { 6214 char *dup; 6215 size_t len; 6216 6217 len = next_token(buf); 6218 dup = kmemdup(*buf, len + 1, GFP_KERNEL); 6219 if (!dup) 6220 return NULL; 6221 *(dup + len) = '\0'; 6222 *buf += len; 6223 6224 if (lenp) 6225 *lenp = len; 6226 6227 return dup; 6228 } 6229 6230 static int rbd_parse_param(struct fs_parameter *param, 6231 struct rbd_parse_opts_ctx *pctx) 6232 { 6233 struct rbd_options *opt = pctx->opts; 6234 struct fs_parse_result result; 6235 struct p_log log = {.prefix = "rbd"}; 6236 int token, ret; 6237 6238 ret = ceph_parse_param(param, pctx->copts, NULL); 6239 if (ret != -ENOPARAM) 6240 return ret; 6241 6242 token = __fs_parse(&log, rbd_parameters, param, &result); 6243 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token); 6244 if (token < 0) { 6245 if (token == -ENOPARAM) 6246 return inval_plog(&log, "Unknown parameter '%s'", 6247 param->key); 6248 return token; 6249 } 6250 6251 switch (token) { 6252 case Opt_queue_depth: 6253 if (result.uint_32 < 1) 6254 goto out_of_range; 6255 opt->queue_depth = result.uint_32; 6256 break; 6257 case Opt_alloc_size: 6258 if (result.uint_32 < SECTOR_SIZE) 6259 goto out_of_range; 6260 if (!is_power_of_2(result.uint_32)) 6261 return inval_plog(&log, "alloc_size must be a power of 2"); 6262 opt->alloc_size = result.uint_32; 6263 break; 6264 case Opt_lock_timeout: 6265 /* 0 is "wait forever" (i.e. 
infinite timeout) */ 6266 if (result.uint_32 > INT_MAX / 1000) 6267 goto out_of_range; 6268 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000); 6269 break; 6270 case Opt_pool_ns: 6271 kfree(pctx->spec->pool_ns); 6272 pctx->spec->pool_ns = param->string; 6273 param->string = NULL; 6274 break; 6275 case Opt_compression_hint: 6276 switch (result.uint_32) { 6277 case Opt_compression_hint_none: 6278 opt->alloc_hint_flags &= 6279 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE | 6280 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE); 6281 break; 6282 case Opt_compression_hint_compressible: 6283 opt->alloc_hint_flags |= 6284 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE; 6285 opt->alloc_hint_flags &= 6286 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE; 6287 break; 6288 case Opt_compression_hint_incompressible: 6289 opt->alloc_hint_flags |= 6290 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE; 6291 opt->alloc_hint_flags &= 6292 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE; 6293 break; 6294 default: 6295 BUG(); 6296 } 6297 break; 6298 case Opt_read_only: 6299 opt->read_only = true; 6300 break; 6301 case Opt_read_write: 6302 opt->read_only = false; 6303 break; 6304 case Opt_lock_on_read: 6305 opt->lock_on_read = true; 6306 break; 6307 case Opt_exclusive: 6308 opt->exclusive = true; 6309 break; 6310 case Opt_notrim: 6311 opt->trim = false; 6312 break; 6313 default: 6314 BUG(); 6315 } 6316 6317 return 0; 6318 6319 out_of_range: 6320 return inval_plog(&log, "%s out of range", param->key); 6321 } 6322 6323 /* 6324 * This duplicates most of generic_parse_monolithic(), untying it from 6325 * fs_context and skipping standard superblock and security options. 6326 */ 6327 static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx) 6328 { 6329 char *key; 6330 int ret = 0; 6331 6332 dout("%s '%s'\n", __func__, options); 6333 while ((key = strsep(&options, ",")) != NULL) { 6334 if (*key) { 6335 struct fs_parameter param = { 6336 .key = key, 6337 .type = fs_value_is_flag, 6338 }; 6339 char *value = strchr(key, '='); 6340 size_t v_len = 0; 6341 6342 if (value) { 6343 if (value == key) 6344 continue; 6345 *value++ = 0; 6346 v_len = strlen(value); 6347 param.string = kmemdup_nul(value, v_len, 6348 GFP_KERNEL); 6349 if (!param.string) 6350 return -ENOMEM; 6351 param.type = fs_value_is_string; 6352 } 6353 param.size = v_len; 6354 6355 ret = rbd_parse_param(&param, pctx); 6356 kfree(param.string); 6357 if (ret) 6358 break; 6359 } 6360 } 6361 6362 return ret; 6363 } 6364 6365 /* 6366 * Parse the options provided for an "rbd add" (i.e., rbd image 6367 * mapping) request. These arrive via a write to /sys/bus/rbd/add, 6368 * and the data written is passed here via a NUL-terminated buffer. 6369 * Returns 0 if successful or an error code otherwise. 6370 * 6371 * The information extracted from these options is recorded in 6372 * the other parameters which return dynamically-allocated 6373 * structures: 6374 * ceph_opts 6375 * The address of a pointer that will refer to a ceph options 6376 * structure. Caller must release the returned pointer using 6377 * ceph_destroy_options() when it is no longer needed. 6378 * rbd_opts 6379 * Address of an rbd options pointer. Fully initialized by 6380 * this function; caller must release with kfree(). 6381 * spec 6382 * Address of an rbd image specification pointer. Fully 6383 * initialized by this function based on parsed options. 6384 * Caller must release with rbd_spec_put(). 
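*
* On failure nothing is returned through these pointers, and any
* partially-constructed options or spec structures are released
* before returning.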
6385 * 6386 * The options passed take this form: 6387 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>] 6388 * where: 6389 * <mon_addrs> 6390 * A comma-separated list of one or more monitor addresses. 6391 * A monitor address is an ip address, optionally followed 6392 * by a port number (separated by a colon). 6393 * I.e.: ip1[:port1][,ip2[:port2]...] 6394 * <options> 6395 * A comma-separated list of ceph and/or rbd options. 6396 * <pool_name> 6397 * The name of the rados pool containing the rbd image. 6398 * <image_name> 6399 * The name of the image in that pool to map. 6400 * <snap_id> 6401 * An optional snapshot id. If provided, the mapping will 6402 * present data from the image at the time that snapshot was 6403 * created. The image head is used if no snapshot id is 6404 * provided. Snapshot mappings are always read-only. 6405 */ 6406 static int rbd_add_parse_args(const char *buf, 6407 struct ceph_options **ceph_opts, 6408 struct rbd_options **opts, 6409 struct rbd_spec **rbd_spec) 6410 { 6411 size_t len; 6412 char *options; 6413 const char *mon_addrs; 6414 char *snap_name; 6415 size_t mon_addrs_size; 6416 struct rbd_parse_opts_ctx pctx = { 0 }; 6417 int ret; 6418 6419 /* The first four tokens are required */ 6420 6421 len = next_token(&buf); 6422 if (!len) { 6423 rbd_warn(NULL, "no monitor address(es) provided"); 6424 return -EINVAL; 6425 } 6426 mon_addrs = buf; 6427 mon_addrs_size = len; 6428 buf += len; 6429 6430 ret = -EINVAL; 6431 options = dup_token(&buf, NULL); 6432 if (!options) 6433 return -ENOMEM; 6434 if (!*options) { 6435 rbd_warn(NULL, "no options provided"); 6436 goto out_err; 6437 } 6438 6439 pctx.spec = rbd_spec_alloc(); 6440 if (!pctx.spec) 6441 goto out_mem; 6442 6443 pctx.spec->pool_name = dup_token(&buf, NULL); 6444 if (!pctx.spec->pool_name) 6445 goto out_mem; 6446 if (!*pctx.spec->pool_name) { 6447 rbd_warn(NULL, "no pool name provided"); 6448 goto out_err; 6449 } 6450 6451 pctx.spec->image_name = dup_token(&buf, NULL); 6452 if (!pctx.spec->image_name) 6453 goto out_mem; 6454 if (!*pctx.spec->image_name) { 6455 rbd_warn(NULL, "no image name provided"); 6456 goto out_err; 6457 } 6458 6459 /* 6460 * Snapshot name is optional; default is to use "-" 6461 * (indicating the head/no snapshot). 
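* For example (illustrative values), an add string of
*   "1.2.3.4:6789 name=admin rbd foo mysnap"
* maps snapshot "mysnap" of image "foo" in pool "rbd", while
*   "1.2.3.4:6789 name=admin rbd foo"
* maps the image head.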
6462 */ 6463 len = next_token(&buf); 6464 if (!len) { 6465 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */ 6466 len = sizeof (RBD_SNAP_HEAD_NAME) - 1; 6467 } else if (len > RBD_MAX_SNAP_NAME_LEN) { 6468 ret = -ENAMETOOLONG; 6469 goto out_err; 6470 } 6471 snap_name = kmemdup(buf, len + 1, GFP_KERNEL); 6472 if (!snap_name) 6473 goto out_mem; 6474 *(snap_name + len) = '\0'; 6475 pctx.spec->snap_name = snap_name; 6476 6477 pctx.copts = ceph_alloc_options(); 6478 if (!pctx.copts) 6479 goto out_mem; 6480 6481 /* Initialize all rbd options to the defaults */ 6482 6483 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL); 6484 if (!pctx.opts) 6485 goto out_mem; 6486 6487 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT; 6488 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; 6489 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT; 6490 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT; 6491 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; 6492 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT; 6493 pctx.opts->trim = RBD_TRIM_DEFAULT; 6494 6495 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL, 6496 ','); 6497 if (ret) 6498 goto out_err; 6499 6500 ret = rbd_parse_options(options, &pctx); 6501 if (ret) 6502 goto out_err; 6503 6504 *ceph_opts = pctx.copts; 6505 *opts = pctx.opts; 6506 *rbd_spec = pctx.spec; 6507 kfree(options); 6508 return 0; 6509 6510 out_mem: 6511 ret = -ENOMEM; 6512 out_err: 6513 kfree(pctx.opts); 6514 ceph_destroy_options(pctx.copts); 6515 rbd_spec_put(pctx.spec); 6516 kfree(options); 6517 return ret; 6518 } 6519 6520 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev) 6521 { 6522 down_write(&rbd_dev->lock_rwsem); 6523 if (__rbd_is_lock_owner(rbd_dev)) 6524 __rbd_release_lock(rbd_dev); 6525 up_write(&rbd_dev->lock_rwsem); 6526 } 6527 6528 /* 6529 * If the wait is interrupted, an error is returned even if the lock 6530 * was successfully acquired. rbd_dev_image_unlock() will release it 6531 * if needed. 6532 */ 6533 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) 6534 { 6535 long ret; 6536 6537 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { 6538 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read) 6539 return 0; 6540 6541 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); 6542 return -EINVAL; 6543 } 6544 6545 if (rbd_is_ro(rbd_dev)) 6546 return 0; 6547 6548 rbd_assert(!rbd_is_lock_owner(rbd_dev)); 6549 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); 6550 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait, 6551 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout)); 6552 if (ret > 0) { 6553 ret = rbd_dev->acquire_err; 6554 } else { 6555 cancel_delayed_work_sync(&rbd_dev->lock_dwork); 6556 if (!ret) 6557 ret = -ETIMEDOUT; 6558 } 6559 6560 if (ret) { 6561 rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret); 6562 return ret; 6563 } 6564 6565 /* 6566 * The lock may have been released by now, unless automatic lock 6567 * transitions are disabled. 6568 */ 6569 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev)); 6570 return 0; 6571 } 6572 6573 /* 6574 * An rbd format 2 image has a unique identifier, distinct from the 6575 * name given to it by the user. Internally, that identifier is 6576 * what's used to specify the names of objects related to the image. 6577 * 6578 * A special "rbd id" object is used to map an rbd image name to its 6579 * id. If that object doesn't exist, then there is no v2 rbd image 6580 * with the supplied name. 
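* (The id object's name is formed from RBD_ID_PREFIX and the image
* name; see the ceph_oid_aprintf() call below.)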
6581 * 6582 * This function will record the given rbd_dev's image_id field if 6583 * it can be determined, and in that case will return 0. If any 6584 * errors occur a negative errno will be returned and the rbd_dev's 6585 * image_id field will be unchanged (and should be NULL). 6586 */ 6587 static int rbd_dev_image_id(struct rbd_device *rbd_dev) 6588 { 6589 int ret; 6590 size_t size; 6591 CEPH_DEFINE_OID_ONSTACK(oid); 6592 void *response; 6593 char *image_id; 6594 6595 /* 6596 * When probing a parent image, the image id is already 6597 * known (and the image name likely is not). There's no 6598 * need to fetch the image id again in this case. We 6599 * do still need to set the image format though. 6600 */ 6601 if (rbd_dev->spec->image_id) { 6602 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1; 6603 6604 return 0; 6605 } 6606 6607 /* 6608 * First, see if the format 2 image id file exists, and if 6609 * so, get the image's persistent id from it. 6610 */ 6611 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX, 6612 rbd_dev->spec->image_name); 6613 if (ret) 6614 return ret; 6615 6616 dout("rbd id object name is %s\n", oid.name); 6617 6618 /* Response will be an encoded string, which includes a length */ 6619 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX; 6620 response = kzalloc(size, GFP_NOIO); 6621 if (!response) { 6622 ret = -ENOMEM; 6623 goto out; 6624 } 6625 6626 /* If it doesn't exist we'll assume it's a format 1 image */ 6627 6628 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc, 6629 "get_id", NULL, 0, 6630 response, size); 6631 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); 6632 if (ret == -ENOENT) { 6633 image_id = kstrdup("", GFP_KERNEL); 6634 ret = image_id ? 0 : -ENOMEM; 6635 if (!ret) 6636 rbd_dev->image_format = 1; 6637 } else if (ret >= 0) { 6638 void *p = response; 6639 6640 image_id = ceph_extract_encoded_string(&p, p + ret, 6641 NULL, GFP_NOIO); 6642 ret = PTR_ERR_OR_ZERO(image_id); 6643 if (!ret) 6644 rbd_dev->image_format = 2; 6645 } 6646 6647 if (!ret) { 6648 rbd_dev->spec->image_id = image_id; 6649 dout("image_id is %s\n", image_id); 6650 } 6651 out: 6652 kfree(response); 6653 ceph_oid_destroy(&oid); 6654 return ret; 6655 } 6656 6657 /* 6658 * Undo whatever state changes are made by a v1 or v2 header info 6659 * call. 6660 */ 6661 static void rbd_dev_unprobe(struct rbd_device *rbd_dev) 6662 { 6663 struct rbd_image_header *header; 6664 6665 rbd_dev_parent_put(rbd_dev); 6666 rbd_object_map_free(rbd_dev); 6667 rbd_dev_mapping_clear(rbd_dev); 6668 6669 /* Free dynamic fields from the header, then zero it out */ 6670 6671 header = &rbd_dev->header; 6672 ceph_put_snap_context(header->snapc); 6673 kfree(header->snap_sizes); 6674 kfree(header->snap_names); 6675 kfree(header->object_prefix); 6676 memset(header, 0, sizeof (*header)); 6677 } 6678 6679 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) 6680 { 6681 int ret; 6682 6683 ret = rbd_dev_v2_object_prefix(rbd_dev); 6684 if (ret) 6685 goto out_err; 6686 6687 /* 6688 * Get and check the features for the image. Currently the 6689 * features are assumed to never change. 
6690 */ 6691 ret = rbd_dev_v2_features(rbd_dev); 6692 if (ret) 6693 goto out_err; 6694 6695 /* If the image supports fancy striping, get its parameters */ 6696 6697 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { 6698 ret = rbd_dev_v2_striping_info(rbd_dev); 6699 if (ret < 0) 6700 goto out_err; 6701 } 6702 6703 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) { 6704 ret = rbd_dev_v2_data_pool(rbd_dev); 6705 if (ret) 6706 goto out_err; 6707 } 6708 6709 rbd_init_layout(rbd_dev); 6710 return 0; 6711 6712 out_err: 6713 rbd_dev->header.features = 0; 6714 kfree(rbd_dev->header.object_prefix); 6715 rbd_dev->header.object_prefix = NULL; 6716 return ret; 6717 } 6718 6719 /* 6720 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() -> 6721 * rbd_dev_image_probe() recursion depth, which means it's also the 6722 * length of the already discovered part of the parent chain. 6723 */ 6724 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) 6725 { 6726 struct rbd_device *parent = NULL; 6727 int ret; 6728 6729 if (!rbd_dev->parent_spec) 6730 return 0; 6731 6732 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) { 6733 pr_info("parent chain is too long (%d)\n", depth); 6734 ret = -EINVAL; 6735 goto out_err; 6736 } 6737 6738 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec); 6739 if (!parent) { 6740 ret = -ENOMEM; 6741 goto out_err; 6742 } 6743 6744 /* 6745 * Images related by parent/child relationships always share 6746 * rbd_client and spec/parent_spec, so bump their refcounts. 6747 */ 6748 __rbd_get_client(rbd_dev->rbd_client); 6749 rbd_spec_get(rbd_dev->parent_spec); 6750 6751 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags); 6752 6753 ret = rbd_dev_image_probe(parent, depth); 6754 if (ret < 0) 6755 goto out_err; 6756 6757 rbd_dev->parent = parent; 6758 atomic_set(&rbd_dev->parent_ref, 1); 6759 return 0; 6760 6761 out_err: 6762 rbd_dev_unparent(rbd_dev); 6763 rbd_dev_destroy(parent); 6764 return ret; 6765 } 6766 6767 static void rbd_dev_device_release(struct rbd_device *rbd_dev) 6768 { 6769 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 6770 rbd_free_disk(rbd_dev); 6771 if (!single_major) 6772 unregister_blkdev(rbd_dev->major, rbd_dev->name); 6773 } 6774 6775 /* 6776 * rbd_dev->header_rwsem must be locked for write and will be unlocked 6777 * upon return. 6778 */ 6779 static int rbd_dev_device_setup(struct rbd_device *rbd_dev) 6780 { 6781 int ret; 6782 6783 /* Record our major and minor device numbers. */ 6784 6785 if (!single_major) { 6786 ret = register_blkdev(0, rbd_dev->name); 6787 if (ret < 0) 6788 goto err_out_unlock; 6789 6790 rbd_dev->major = ret; 6791 rbd_dev->minor = 0; 6792 } else { 6793 rbd_dev->major = rbd_major; 6794 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id); 6795 } 6796 6797 /* Set up the blkdev mapping. 
*/ 6798 6799 ret = rbd_init_disk(rbd_dev); 6800 if (ret) 6801 goto err_out_blkdev; 6802 6803 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); 6804 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev)); 6805 6806 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id); 6807 if (ret) 6808 goto err_out_disk; 6809 6810 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 6811 up_write(&rbd_dev->header_rwsem); 6812 return 0; 6813 6814 err_out_disk: 6815 rbd_free_disk(rbd_dev); 6816 err_out_blkdev: 6817 if (!single_major) 6818 unregister_blkdev(rbd_dev->major, rbd_dev->name); 6819 err_out_unlock: 6820 up_write(&rbd_dev->header_rwsem); 6821 return ret; 6822 } 6823 6824 static int rbd_dev_header_name(struct rbd_device *rbd_dev) 6825 { 6826 struct rbd_spec *spec = rbd_dev->spec; 6827 int ret; 6828 6829 /* Record the header object name for this rbd image. */ 6830 6831 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 6832 if (rbd_dev->image_format == 1) 6833 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s", 6834 spec->image_name, RBD_SUFFIX); 6835 else 6836 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s", 6837 RBD_HEADER_PREFIX, spec->image_id); 6838 6839 return ret; 6840 } 6841 6842 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap) 6843 { 6844 if (!is_snap) { 6845 pr_info("image %s/%s%s%s does not exist\n", 6846 rbd_dev->spec->pool_name, 6847 rbd_dev->spec->pool_ns ?: "", 6848 rbd_dev->spec->pool_ns ? "/" : "", 6849 rbd_dev->spec->image_name); 6850 } else { 6851 pr_info("snap %s/%s%s%s@%s does not exist\n", 6852 rbd_dev->spec->pool_name, 6853 rbd_dev->spec->pool_ns ?: "", 6854 rbd_dev->spec->pool_ns ? "/" : "", 6855 rbd_dev->spec->image_name, 6856 rbd_dev->spec->snap_name); 6857 } 6858 } 6859 6860 static void rbd_dev_image_release(struct rbd_device *rbd_dev) 6861 { 6862 if (!rbd_is_ro(rbd_dev)) 6863 rbd_unregister_watch(rbd_dev); 6864 6865 rbd_dev_unprobe(rbd_dev); 6866 rbd_dev->image_format = 0; 6867 kfree(rbd_dev->spec->image_id); 6868 rbd_dev->spec->image_id = NULL; 6869 } 6870 6871 /* 6872 * Probe for the existence of the header object for the given rbd 6873 * device. If this image is the one being mapped (i.e., not a 6874 * parent), initiate a watch on its header object before using that 6875 * object to get detailed information about the rbd image. 6876 * 6877 * On success, returns with header_rwsem held for write if called 6878 * with @depth == 0. 6879 */ 6880 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) 6881 { 6882 bool need_watch = !rbd_is_ro(rbd_dev); 6883 int ret; 6884 6885 /* 6886 * Get the id from the image id object. Unless there's an 6887 * error, rbd_dev->spec->image_id will be filled in with 6888 * a dynamically-allocated string, and rbd_dev->image_format 6889 * will be set to either 1 or 2. 
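* (An empty image_id string marks a format 1 image, which has no
* separate id object.)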
6890 */ 6891 ret = rbd_dev_image_id(rbd_dev); 6892 if (ret) 6893 return ret; 6894 6895 ret = rbd_dev_header_name(rbd_dev); 6896 if (ret) 6897 goto err_out_format; 6898 6899 if (need_watch) { 6900 ret = rbd_register_watch(rbd_dev); 6901 if (ret) { 6902 if (ret == -ENOENT) 6903 rbd_print_dne(rbd_dev, false); 6904 goto err_out_format; 6905 } 6906 } 6907 6908 if (!depth) 6909 down_write(&rbd_dev->header_rwsem); 6910 6911 ret = rbd_dev_header_info(rbd_dev); 6912 if (ret) { 6913 if (ret == -ENOENT && !need_watch) 6914 rbd_print_dne(rbd_dev, false); 6915 goto err_out_probe; 6916 } 6917 6918 /* 6919 * If this image is the one being mapped, we have pool name and 6920 * id, image name and id, and snap name - need to fill snap id. 6921 * Otherwise this is a parent image, identified by pool, image 6922 * and snap ids - need to fill in names for those ids. 6923 */ 6924 if (!depth) 6925 ret = rbd_spec_fill_snap_id(rbd_dev); 6926 else 6927 ret = rbd_spec_fill_names(rbd_dev); 6928 if (ret) { 6929 if (ret == -ENOENT) 6930 rbd_print_dne(rbd_dev, true); 6931 goto err_out_probe; 6932 } 6933 6934 ret = rbd_dev_mapping_set(rbd_dev); 6935 if (ret) 6936 goto err_out_probe; 6937 6938 if (rbd_is_snap(rbd_dev) && 6939 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) { 6940 ret = rbd_object_map_load(rbd_dev); 6941 if (ret) 6942 goto err_out_probe; 6943 } 6944 6945 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { 6946 ret = rbd_dev_v2_parent_info(rbd_dev); 6947 if (ret) 6948 goto err_out_probe; 6949 } 6950 6951 ret = rbd_dev_probe_parent(rbd_dev, depth); 6952 if (ret) 6953 goto err_out_probe; 6954 6955 dout("discovered format %u image, header name is %s\n", 6956 rbd_dev->image_format, rbd_dev->header_oid.name); 6957 return 0; 6958 6959 err_out_probe: 6960 if (!depth) 6961 up_write(&rbd_dev->header_rwsem); 6962 if (need_watch) 6963 rbd_unregister_watch(rbd_dev); 6964 rbd_dev_unprobe(rbd_dev); 6965 err_out_format: 6966 rbd_dev->image_format = 0; 6967 kfree(rbd_dev->spec->image_id); 6968 rbd_dev->spec->image_id = NULL; 6969 return ret; 6970 } 6971 6972 static ssize_t do_rbd_add(struct bus_type *bus, 6973 const char *buf, 6974 size_t count) 6975 { 6976 struct rbd_device *rbd_dev = NULL; 6977 struct ceph_options *ceph_opts = NULL; 6978 struct rbd_options *rbd_opts = NULL; 6979 struct rbd_spec *spec = NULL; 6980 struct rbd_client *rbdc; 6981 int rc; 6982 6983 if (!capable(CAP_SYS_ADMIN)) 6984 return -EPERM; 6985 6986 if (!try_module_get(THIS_MODULE)) 6987 return -ENODEV; 6988 6989 /* parse add command */ 6990 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); 6991 if (rc < 0) 6992 goto out; 6993 6994 rbdc = rbd_get_client(ceph_opts); 6995 if (IS_ERR(rbdc)) { 6996 rc = PTR_ERR(rbdc); 6997 goto err_out_args; 6998 } 6999 7000 /* pick the pool */ 7001 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name); 7002 if (rc < 0) { 7003 if (rc == -ENOENT) 7004 pr_info("pool %s does not exist\n", spec->pool_name); 7005 goto err_out_client; 7006 } 7007 spec->pool_id = (u64)rc; 7008 7009 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts); 7010 if (!rbd_dev) { 7011 rc = -ENOMEM; 7012 goto err_out_client; 7013 } 7014 rbdc = NULL; /* rbd_dev now owns this */ 7015 spec = NULL; /* rbd_dev now owns this */ 7016 rbd_opts = NULL; /* rbd_dev now owns this */ 7017 7018 /* if we are mapping a snapshot it will be a read-only mapping */ 7019 if (rbd_dev->opts->read_only || 7020 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME)) 7021 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags); 7022 7023 rbd_dev->config_info = 
kstrdup(buf, GFP_KERNEL); 7024 if (!rbd_dev->config_info) { 7025 rc = -ENOMEM; 7026 goto err_out_rbd_dev; 7027 } 7028 7029 rc = rbd_dev_image_probe(rbd_dev, 0); 7030 if (rc < 0) 7031 goto err_out_rbd_dev; 7032 7033 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) { 7034 rbd_warn(rbd_dev, "alloc_size adjusted to %u", 7035 rbd_dev->layout.object_size); 7036 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size; 7037 } 7038 7039 rc = rbd_dev_device_setup(rbd_dev); 7040 if (rc) 7041 goto err_out_image_probe; 7042 7043 rc = rbd_add_acquire_lock(rbd_dev); 7044 if (rc) 7045 goto err_out_image_lock; 7046 7047 /* Everything's ready. Announce the disk to the world. */ 7048 7049 rc = device_add(&rbd_dev->dev); 7050 if (rc) 7051 goto err_out_image_lock; 7052 7053 rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL); 7054 if (rc) 7055 goto err_out_cleanup_disk; 7056 7057 spin_lock(&rbd_dev_list_lock); 7058 list_add_tail(&rbd_dev->node, &rbd_dev_list); 7059 spin_unlock(&rbd_dev_list_lock); 7060 7061 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name, 7062 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT, 7063 rbd_dev->header.features); 7064 rc = count; 7065 out: 7066 module_put(THIS_MODULE); 7067 return rc; 7068 7069 err_out_cleanup_disk: 7070 rbd_free_disk(rbd_dev); 7071 err_out_image_lock: 7072 rbd_dev_image_unlock(rbd_dev); 7073 rbd_dev_device_release(rbd_dev); 7074 err_out_image_probe: 7075 rbd_dev_image_release(rbd_dev); 7076 err_out_rbd_dev: 7077 rbd_dev_destroy(rbd_dev); 7078 err_out_client: 7079 rbd_put_client(rbdc); 7080 err_out_args: 7081 rbd_spec_put(spec); 7082 kfree(rbd_opts); 7083 goto out; 7084 } 7085 7086 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count) 7087 { 7088 if (single_major) 7089 return -EINVAL; 7090 7091 return do_rbd_add(bus, buf, count); 7092 } 7093 7094 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf, 7095 size_t count) 7096 { 7097 return do_rbd_add(bus, buf, count); 7098 } 7099 7100 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) 7101 { 7102 while (rbd_dev->parent) { 7103 struct rbd_device *first = rbd_dev; 7104 struct rbd_device *second = first->parent; 7105 struct rbd_device *third; 7106 7107 /* 7108 * Follow to the parent with no grandparent and 7109 * remove it. 
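* Images are torn down from the far end of the chain toward the
* device being removed, one image per pass through the outer loop.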
7110 */ 7111 while (second && (third = second->parent)) { 7112 first = second; 7113 second = third; 7114 } 7115 rbd_assert(second); 7116 rbd_dev_image_release(second); 7117 rbd_dev_destroy(second); 7118 first->parent = NULL; 7119 first->parent_overlap = 0; 7120 7121 rbd_assert(first->parent_spec); 7122 rbd_spec_put(first->parent_spec); 7123 first->parent_spec = NULL; 7124 } 7125 } 7126 7127 static ssize_t do_rbd_remove(struct bus_type *bus, 7128 const char *buf, 7129 size_t count) 7130 { 7131 struct rbd_device *rbd_dev = NULL; 7132 struct list_head *tmp; 7133 int dev_id; 7134 char opt_buf[6]; 7135 bool force = false; 7136 int ret; 7137 7138 if (!capable(CAP_SYS_ADMIN)) 7139 return -EPERM; 7140 7141 dev_id = -1; 7142 opt_buf[0] = '\0'; 7143 sscanf(buf, "%d %5s", &dev_id, opt_buf); 7144 if (dev_id < 0) { 7145 pr_err("dev_id out of range\n"); 7146 return -EINVAL; 7147 } 7148 if (opt_buf[0] != '\0') { 7149 if (!strcmp(opt_buf, "force")) { 7150 force = true; 7151 } else { 7152 pr_err("bad remove option at '%s'\n", opt_buf); 7153 return -EINVAL; 7154 } 7155 } 7156 7157 ret = -ENOENT; 7158 spin_lock(&rbd_dev_list_lock); 7159 list_for_each(tmp, &rbd_dev_list) { 7160 rbd_dev = list_entry(tmp, struct rbd_device, node); 7161 if (rbd_dev->dev_id == dev_id) { 7162 ret = 0; 7163 break; 7164 } 7165 } 7166 if (!ret) { 7167 spin_lock_irq(&rbd_dev->lock); 7168 if (rbd_dev->open_count && !force) 7169 ret = -EBUSY; 7170 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING, 7171 &rbd_dev->flags)) 7172 ret = -EINPROGRESS; 7173 spin_unlock_irq(&rbd_dev->lock); 7174 } 7175 spin_unlock(&rbd_dev_list_lock); 7176 if (ret) 7177 return ret; 7178 7179 if (force) { 7180 /* 7181 * Prevent new IO from being queued and wait for existing 7182 * IO to complete/fail. 7183 */ 7184 blk_mq_freeze_queue(rbd_dev->disk->queue); 7185 blk_mark_disk_dead(rbd_dev->disk); 7186 } 7187 7188 del_gendisk(rbd_dev->disk); 7189 spin_lock(&rbd_dev_list_lock); 7190 list_del_init(&rbd_dev->node); 7191 spin_unlock(&rbd_dev_list_lock); 7192 device_del(&rbd_dev->dev); 7193 7194 rbd_dev_image_unlock(rbd_dev); 7195 rbd_dev_device_release(rbd_dev); 7196 rbd_dev_image_release(rbd_dev); 7197 rbd_dev_destroy(rbd_dev); 7198 return count; 7199 } 7200 7201 static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count) 7202 { 7203 if (single_major) 7204 return -EINVAL; 7205 7206 return do_rbd_remove(bus, buf, count); 7207 } 7208 7209 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf, 7210 size_t count) 7211 { 7212 return do_rbd_remove(bus, buf, count); 7213 } 7214 7215 /* 7216 * create control files in sysfs 7217 * /sys/bus/rbd/... 
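* (e.g. the add/remove and add_single_major/remove_single_major
* attributes whose store handlers are defined above)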
7218 */ 7219 static int __init rbd_sysfs_init(void) 7220 { 7221 int ret; 7222 7223 ret = device_register(&rbd_root_dev); 7224 if (ret < 0) { 7225 put_device(&rbd_root_dev); 7226 return ret; 7227 } 7228 7229 ret = bus_register(&rbd_bus_type); 7230 if (ret < 0) 7231 device_unregister(&rbd_root_dev); 7232 7233 return ret; 7234 } 7235 7236 static void __exit rbd_sysfs_cleanup(void) 7237 { 7238 bus_unregister(&rbd_bus_type); 7239 device_unregister(&rbd_root_dev); 7240 } 7241 7242 static int __init rbd_slab_init(void) 7243 { 7244 rbd_assert(!rbd_img_request_cache); 7245 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0); 7246 if (!rbd_img_request_cache) 7247 return -ENOMEM; 7248 7249 rbd_assert(!rbd_obj_request_cache); 7250 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0); 7251 if (!rbd_obj_request_cache) 7252 goto out_err; 7253 7254 return 0; 7255 7256 out_err: 7257 kmem_cache_destroy(rbd_img_request_cache); 7258 rbd_img_request_cache = NULL; 7259 return -ENOMEM; 7260 } 7261 7262 static void rbd_slab_exit(void) 7263 { 7264 rbd_assert(rbd_obj_request_cache); 7265 kmem_cache_destroy(rbd_obj_request_cache); 7266 rbd_obj_request_cache = NULL; 7267 7268 rbd_assert(rbd_img_request_cache); 7269 kmem_cache_destroy(rbd_img_request_cache); 7270 rbd_img_request_cache = NULL; 7271 } 7272 7273 static int __init rbd_init(void) 7274 { 7275 int rc; 7276 7277 if (!libceph_compatible(NULL)) { 7278 rbd_warn(NULL, "libceph incompatibility (quitting)"); 7279 return -EINVAL; 7280 } 7281 7282 rc = rbd_slab_init(); 7283 if (rc) 7284 return rc; 7285 7286 /* 7287 * The number of active work items is limited by the number of 7288 * rbd devices * queue depth, so leave @max_active at default. 7289 */ 7290 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0); 7291 if (!rbd_wq) { 7292 rc = -ENOMEM; 7293 goto err_out_slab; 7294 } 7295 7296 if (single_major) { 7297 rbd_major = register_blkdev(0, RBD_DRV_NAME); 7298 if (rbd_major < 0) { 7299 rc = rbd_major; 7300 goto err_out_wq; 7301 } 7302 } 7303 7304 rc = rbd_sysfs_init(); 7305 if (rc) 7306 goto err_out_blkdev; 7307 7308 if (single_major) 7309 pr_info("loaded (major %d)\n", rbd_major); 7310 else 7311 pr_info("loaded\n"); 7312 7313 return 0; 7314 7315 err_out_blkdev: 7316 if (single_major) 7317 unregister_blkdev(rbd_major, RBD_DRV_NAME); 7318 err_out_wq: 7319 destroy_workqueue(rbd_wq); 7320 err_out_slab: 7321 rbd_slab_exit(); 7322 return rc; 7323 } 7324 7325 static void __exit rbd_exit(void) 7326 { 7327 ida_destroy(&rbd_dev_id_ida); 7328 rbd_sysfs_cleanup(); 7329 if (single_major) 7330 unregister_blkdev(rbd_major, RBD_DRV_NAME); 7331 destroy_workqueue(rbd_wq); 7332 rbd_slab_exit(); 7333 } 7334 7335 module_init(rbd_init); 7336 module_exit(rbd_exit); 7337 7338 MODULE_AUTHOR("Alex Elder <elder@inktank.com>"); 7339 MODULE_AUTHOR("Sage Weil <sage@newdream.net>"); 7340 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>"); 7341 /* following authorship retained from original osdblk.c */ 7342 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>"); 7343 7344 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver"); 7345 MODULE_LICENSE("GPL"); 7346