1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/ceph/ceph_debug.h> 4 5 #include <linux/module.h> 6 #include <linux/err.h> 7 #include <linux/highmem.h> 8 #include <linux/mm.h> 9 #include <linux/pagemap.h> 10 #include <linux/slab.h> 11 #include <linux/uaccess.h> 12 #ifdef CONFIG_BLOCK 13 #include <linux/bio.h> 14 #endif 15 16 #include <linux/ceph/ceph_features.h> 17 #include <linux/ceph/libceph.h> 18 #include <linux/ceph/osd_client.h> 19 #include <linux/ceph/messenger.h> 20 #include <linux/ceph/decode.h> 21 #include <linux/ceph/auth.h> 22 #include <linux/ceph/pagelist.h> 23 #include <linux/ceph/striper.h> 24 25 #define OSD_OPREPLY_FRONT_LEN 512 26 27 static struct kmem_cache *ceph_osd_request_cache; 28 29 static const struct ceph_connection_operations osd_con_ops; 30 31 /* 32 * Implement client access to distributed object storage cluster. 33 * 34 * All data objects are stored within a cluster/cloud of OSDs, or 35 * "object storage devices." (Note that Ceph OSDs have _nothing_ to 36 * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply 37 * remote daemons serving up and coordinating consistent and safe 38 * access to storage. 39 * 40 * Cluster membership and the mapping of data objects onto storage devices 41 * are described by the osd map. 42 * 43 * We keep track of pending OSD requests (read, write), resubmit 44 * requests to different OSDs when the cluster topology/data layout 45 * change, or retry the affected requests when the communications 46 * channel with an OSD is reset. 47 */ 48 49 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req); 50 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req); 51 static void link_linger(struct ceph_osd *osd, 52 struct ceph_osd_linger_request *lreq); 53 static void unlink_linger(struct ceph_osd *osd, 54 struct ceph_osd_linger_request *lreq); 55 static void clear_backoffs(struct ceph_osd *osd); 56 57 #if 1 58 static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem) 59 { 60 bool wrlocked = true; 61 62 if (unlikely(down_read_trylock(sem))) { 63 wrlocked = false; 64 up_read(sem); 65 } 66 67 return wrlocked; 68 } 69 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) 70 { 71 WARN_ON(!rwsem_is_locked(&osdc->lock)); 72 } 73 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) 74 { 75 WARN_ON(!rwsem_is_wrlocked(&osdc->lock)); 76 } 77 static inline void verify_osd_locked(struct ceph_osd *osd) 78 { 79 struct ceph_osd_client *osdc = osd->o_osdc; 80 81 WARN_ON(!(mutex_is_locked(&osd->lock) && 82 rwsem_is_locked(&osdc->lock)) && 83 !rwsem_is_wrlocked(&osdc->lock)); 84 } 85 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) 86 { 87 WARN_ON(!mutex_is_locked(&lreq->lock)); 88 } 89 #else 90 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { } 91 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { } 92 static inline void verify_osd_locked(struct ceph_osd *osd) { } 93 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { } 94 #endif 95 96 /* 97 * calculate the mapping of a file extent onto an object, and fill out the 98 * request accordingly. shorten extent as necessary if it crosses an 99 * object boundary. 100 * 101 * fill osd op in request message. 102 */ 103 static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, 104 u64 *objnum, u64 *objoff, u64 *objlen) 105 { 106 u64 orig_len = *plen; 107 u32 xlen; 108 109 /* object extent? */ 110 ceph_calc_file_object_mapping(layout, off, orig_len, objnum, 111 objoff, &xlen); 112 *objlen = xlen; 113 if (*objlen < orig_len) { 114 *plen = *objlen; 115 dout(" skipping last %llu, final file extent %llu~%llu\n", 116 orig_len - *plen, off, *plen); 117 } 118 119 dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen); 120 return 0; 121 } 122 123 static void ceph_osd_data_init(struct ceph_osd_data *osd_data) 124 { 125 memset(osd_data, 0, sizeof (*osd_data)); 126 osd_data->type = CEPH_OSD_DATA_TYPE_NONE; 127 } 128 129 /* 130 * Consumes @pages if @own_pages is true. 131 */ 132 static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, 133 struct page **pages, u64 length, u32 alignment, 134 bool pages_from_pool, bool own_pages) 135 { 136 osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; 137 osd_data->pages = pages; 138 osd_data->length = length; 139 osd_data->alignment = alignment; 140 osd_data->pages_from_pool = pages_from_pool; 141 osd_data->own_pages = own_pages; 142 } 143 144 /* 145 * Consumes a ref on @pagelist. 146 */ 147 static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, 148 struct ceph_pagelist *pagelist) 149 { 150 osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST; 151 osd_data->pagelist = pagelist; 152 } 153 154 #ifdef CONFIG_BLOCK 155 static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, 156 struct ceph_bio_iter *bio_pos, 157 u32 bio_length) 158 { 159 osd_data->type = CEPH_OSD_DATA_TYPE_BIO; 160 osd_data->bio_pos = *bio_pos; 161 osd_data->bio_length = bio_length; 162 } 163 #endif /* CONFIG_BLOCK */ 164 165 static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data, 166 struct ceph_bvec_iter *bvec_pos, 167 u32 num_bvecs) 168 { 169 osd_data->type = CEPH_OSD_DATA_TYPE_BVECS; 170 osd_data->bvec_pos = *bvec_pos; 171 osd_data->num_bvecs = num_bvecs; 172 } 173 174 static struct ceph_osd_data * 175 osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which) 176 { 177 BUG_ON(which >= osd_req->r_num_ops); 178 179 return &osd_req->r_ops[which].raw_data_in; 180 } 181 182 struct ceph_osd_data * 183 osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, 184 unsigned int which) 185 { 186 return osd_req_op_data(osd_req, which, extent, osd_data); 187 } 188 EXPORT_SYMBOL(osd_req_op_extent_osd_data); 189 190 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req, 191 unsigned int which, struct page **pages, 192 u64 length, u32 alignment, 193 bool pages_from_pool, bool own_pages) 194 { 195 struct ceph_osd_data *osd_data; 196 197 osd_data = osd_req_op_raw_data_in(osd_req, which); 198 ceph_osd_data_pages_init(osd_data, pages, length, alignment, 199 pages_from_pool, own_pages); 200 } 201 EXPORT_SYMBOL(osd_req_op_raw_data_in_pages); 202 203 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, 204 unsigned int which, struct page **pages, 205 u64 length, u32 alignment, 206 bool pages_from_pool, bool own_pages) 207 { 208 struct ceph_osd_data *osd_data; 209 210 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 211 ceph_osd_data_pages_init(osd_data, pages, length, alignment, 212 pages_from_pool, own_pages); 213 } 214 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages); 215 216 void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req, 217 unsigned int which, struct ceph_pagelist *pagelist) 218 { 219 struct ceph_osd_data *osd_data; 220 221 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 222 ceph_osd_data_pagelist_init(osd_data, pagelist); 223 } 224 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist); 225 226 #ifdef CONFIG_BLOCK 227 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, 228 unsigned int which, 229 struct ceph_bio_iter *bio_pos, 230 u32 bio_length) 231 { 232 struct ceph_osd_data *osd_data; 233 234 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 235 ceph_osd_data_bio_init(osd_data, bio_pos, bio_length); 236 } 237 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); 238 #endif /* CONFIG_BLOCK */ 239 240 void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req, 241 unsigned int which, 242 struct bio_vec *bvecs, u32 num_bvecs, 243 u32 bytes) 244 { 245 struct ceph_osd_data *osd_data; 246 struct ceph_bvec_iter it = { 247 .bvecs = bvecs, 248 .iter = { .bi_size = bytes }, 249 }; 250 251 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 252 ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs); 253 } 254 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs); 255 256 void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, 257 unsigned int which, 258 struct ceph_bvec_iter *bvec_pos) 259 { 260 struct ceph_osd_data *osd_data; 261 262 osd_data = osd_req_op_data(osd_req, which, extent, osd_data); 263 ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0); 264 } 265 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos); 266 267 static void osd_req_op_cls_request_info_pagelist( 268 struct ceph_osd_request *osd_req, 269 unsigned int which, struct ceph_pagelist *pagelist) 270 { 271 struct ceph_osd_data *osd_data; 272 273 osd_data = osd_req_op_data(osd_req, which, cls, request_info); 274 ceph_osd_data_pagelist_init(osd_data, pagelist); 275 } 276 277 void osd_req_op_cls_request_data_pagelist( 278 struct ceph_osd_request *osd_req, 279 unsigned int which, struct ceph_pagelist *pagelist) 280 { 281 struct ceph_osd_data *osd_data; 282 283 osd_data = osd_req_op_data(osd_req, which, cls, request_data); 284 ceph_osd_data_pagelist_init(osd_data, pagelist); 285 osd_req->r_ops[which].cls.indata_len += pagelist->length; 286 osd_req->r_ops[which].indata_len += pagelist->length; 287 } 288 EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist); 289 290 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req, 291 unsigned int which, struct page **pages, u64 length, 292 u32 alignment, bool pages_from_pool, bool own_pages) 293 { 294 struct ceph_osd_data *osd_data; 295 296 osd_data = osd_req_op_data(osd_req, which, cls, request_data); 297 ceph_osd_data_pages_init(osd_data, pages, length, alignment, 298 pages_from_pool, own_pages); 299 osd_req->r_ops[which].cls.indata_len += length; 300 osd_req->r_ops[which].indata_len += length; 301 } 302 EXPORT_SYMBOL(osd_req_op_cls_request_data_pages); 303 304 void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, 305 unsigned int which, 306 struct bio_vec *bvecs, u32 num_bvecs, 307 u32 bytes) 308 { 309 struct ceph_osd_data *osd_data; 310 struct ceph_bvec_iter it = { 311 .bvecs = bvecs, 312 .iter = { .bi_size = bytes }, 313 }; 314 315 osd_data = osd_req_op_data(osd_req, which, cls, request_data); 316 ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs); 317 osd_req->r_ops[which].cls.indata_len += bytes; 318 osd_req->r_ops[which].indata_len += bytes; 319 } 320 EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs); 321 322 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, 323 unsigned int which, struct page **pages, u64 length, 324 u32 alignment, bool pages_from_pool, bool own_pages) 325 { 326 struct ceph_osd_data *osd_data; 327 328 osd_data = osd_req_op_data(osd_req, which, cls, response_data); 329 ceph_osd_data_pages_init(osd_data, pages, length, alignment, 330 pages_from_pool, own_pages); 331 } 332 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages); 333 334 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data) 335 { 336 switch (osd_data->type) { 337 case CEPH_OSD_DATA_TYPE_NONE: 338 return 0; 339 case CEPH_OSD_DATA_TYPE_PAGES: 340 return osd_data->length; 341 case CEPH_OSD_DATA_TYPE_PAGELIST: 342 return (u64)osd_data->pagelist->length; 343 #ifdef CONFIG_BLOCK 344 case CEPH_OSD_DATA_TYPE_BIO: 345 return (u64)osd_data->bio_length; 346 #endif /* CONFIG_BLOCK */ 347 case CEPH_OSD_DATA_TYPE_BVECS: 348 return osd_data->bvec_pos.iter.bi_size; 349 default: 350 WARN(true, "unrecognized data type %d\n", (int)osd_data->type); 351 return 0; 352 } 353 } 354 355 static void ceph_osd_data_release(struct ceph_osd_data *osd_data) 356 { 357 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) { 358 int num_pages; 359 360 num_pages = calc_pages_for((u64)osd_data->alignment, 361 (u64)osd_data->length); 362 ceph_release_page_vector(osd_data->pages, num_pages); 363 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { 364 ceph_pagelist_release(osd_data->pagelist); 365 } 366 ceph_osd_data_init(osd_data); 367 } 368 369 static void osd_req_op_data_release(struct ceph_osd_request *osd_req, 370 unsigned int which) 371 { 372 struct ceph_osd_req_op *op; 373 374 BUG_ON(which >= osd_req->r_num_ops); 375 op = &osd_req->r_ops[which]; 376 377 switch (op->op) { 378 case CEPH_OSD_OP_READ: 379 case CEPH_OSD_OP_SPARSE_READ: 380 case CEPH_OSD_OP_WRITE: 381 case CEPH_OSD_OP_WRITEFULL: 382 kfree(op->extent.sparse_ext); 383 ceph_osd_data_release(&op->extent.osd_data); 384 break; 385 case CEPH_OSD_OP_CALL: 386 ceph_osd_data_release(&op->cls.request_info); 387 ceph_osd_data_release(&op->cls.request_data); 388 ceph_osd_data_release(&op->cls.response_data); 389 break; 390 case CEPH_OSD_OP_SETXATTR: 391 case CEPH_OSD_OP_CMPXATTR: 392 ceph_osd_data_release(&op->xattr.osd_data); 393 break; 394 case CEPH_OSD_OP_STAT: 395 ceph_osd_data_release(&op->raw_data_in); 396 break; 397 case CEPH_OSD_OP_NOTIFY_ACK: 398 ceph_osd_data_release(&op->notify_ack.request_data); 399 break; 400 case CEPH_OSD_OP_NOTIFY: 401 ceph_osd_data_release(&op->notify.request_data); 402 ceph_osd_data_release(&op->notify.response_data); 403 break; 404 case CEPH_OSD_OP_LIST_WATCHERS: 405 ceph_osd_data_release(&op->list_watchers.response_data); 406 break; 407 case CEPH_OSD_OP_COPY_FROM2: 408 ceph_osd_data_release(&op->copy_from.osd_data); 409 break; 410 default: 411 break; 412 } 413 } 414 415 /* 416 * Assumes @t is zero-initialized. 417 */ 418 static void target_init(struct ceph_osd_request_target *t) 419 { 420 ceph_oid_init(&t->base_oid); 421 ceph_oloc_init(&t->base_oloc); 422 ceph_oid_init(&t->target_oid); 423 ceph_oloc_init(&t->target_oloc); 424 425 ceph_osds_init(&t->acting); 426 ceph_osds_init(&t->up); 427 t->size = -1; 428 t->min_size = -1; 429 430 t->osd = CEPH_HOMELESS_OSD; 431 } 432 433 static void target_copy(struct ceph_osd_request_target *dest, 434 const struct ceph_osd_request_target *src) 435 { 436 ceph_oid_copy(&dest->base_oid, &src->base_oid); 437 ceph_oloc_copy(&dest->base_oloc, &src->base_oloc); 438 ceph_oid_copy(&dest->target_oid, &src->target_oid); 439 ceph_oloc_copy(&dest->target_oloc, &src->target_oloc); 440 441 dest->pgid = src->pgid; /* struct */ 442 dest->spgid = src->spgid; /* struct */ 443 dest->pg_num = src->pg_num; 444 dest->pg_num_mask = src->pg_num_mask; 445 ceph_osds_copy(&dest->acting, &src->acting); 446 ceph_osds_copy(&dest->up, &src->up); 447 dest->size = src->size; 448 dest->min_size = src->min_size; 449 dest->sort_bitwise = src->sort_bitwise; 450 dest->recovery_deletes = src->recovery_deletes; 451 452 dest->flags = src->flags; 453 dest->used_replica = src->used_replica; 454 dest->paused = src->paused; 455 456 dest->epoch = src->epoch; 457 dest->last_force_resend = src->last_force_resend; 458 459 dest->osd = src->osd; 460 } 461 462 static void target_destroy(struct ceph_osd_request_target *t) 463 { 464 ceph_oid_destroy(&t->base_oid); 465 ceph_oloc_destroy(&t->base_oloc); 466 ceph_oid_destroy(&t->target_oid); 467 ceph_oloc_destroy(&t->target_oloc); 468 } 469 470 /* 471 * requests 472 */ 473 static void request_release_checks(struct ceph_osd_request *req) 474 { 475 WARN_ON(!RB_EMPTY_NODE(&req->r_node)); 476 WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node)); 477 WARN_ON(!list_empty(&req->r_private_item)); 478 WARN_ON(req->r_osd); 479 } 480 481 static void ceph_osdc_release_request(struct kref *kref) 482 { 483 struct ceph_osd_request *req = container_of(kref, 484 struct ceph_osd_request, r_kref); 485 unsigned int which; 486 487 dout("%s %p (r_request %p r_reply %p)\n", __func__, req, 488 req->r_request, req->r_reply); 489 request_release_checks(req); 490 491 if (req->r_request) 492 ceph_msg_put(req->r_request); 493 if (req->r_reply) 494 ceph_msg_put(req->r_reply); 495 496 for (which = 0; which < req->r_num_ops; which++) 497 osd_req_op_data_release(req, which); 498 499 target_destroy(&req->r_t); 500 ceph_put_snap_context(req->r_snapc); 501 502 if (req->r_mempool) 503 mempool_free(req, req->r_osdc->req_mempool); 504 else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS) 505 kmem_cache_free(ceph_osd_request_cache, req); 506 else 507 kfree(req); 508 } 509 510 void ceph_osdc_get_request(struct ceph_osd_request *req) 511 { 512 dout("%s %p (was %d)\n", __func__, req, 513 kref_read(&req->r_kref)); 514 kref_get(&req->r_kref); 515 } 516 EXPORT_SYMBOL(ceph_osdc_get_request); 517 518 void ceph_osdc_put_request(struct ceph_osd_request *req) 519 { 520 if (req) { 521 dout("%s %p (was %d)\n", __func__, req, 522 kref_read(&req->r_kref)); 523 kref_put(&req->r_kref, ceph_osdc_release_request); 524 } 525 } 526 EXPORT_SYMBOL(ceph_osdc_put_request); 527 528 static void request_init(struct ceph_osd_request *req) 529 { 530 /* req only, each op is zeroed in osd_req_op_init() */ 531 memset(req, 0, sizeof(*req)); 532 533 kref_init(&req->r_kref); 534 init_completion(&req->r_completion); 535 RB_CLEAR_NODE(&req->r_node); 536 RB_CLEAR_NODE(&req->r_mc_node); 537 INIT_LIST_HEAD(&req->r_private_item); 538 539 target_init(&req->r_t); 540 } 541 542 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, 543 struct ceph_snap_context *snapc, 544 unsigned int num_ops, 545 bool use_mempool, 546 gfp_t gfp_flags) 547 { 548 struct ceph_osd_request *req; 549 550 if (use_mempool) { 551 BUG_ON(num_ops > CEPH_OSD_SLAB_OPS); 552 req = mempool_alloc(osdc->req_mempool, gfp_flags); 553 } else if (num_ops <= CEPH_OSD_SLAB_OPS) { 554 req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags); 555 } else { 556 BUG_ON(num_ops > CEPH_OSD_MAX_OPS); 557 req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags); 558 } 559 if (unlikely(!req)) 560 return NULL; 561 562 request_init(req); 563 req->r_osdc = osdc; 564 req->r_mempool = use_mempool; 565 req->r_num_ops = num_ops; 566 req->r_snapid = CEPH_NOSNAP; 567 req->r_snapc = ceph_get_snap_context(snapc); 568 569 dout("%s req %p\n", __func__, req); 570 return req; 571 } 572 EXPORT_SYMBOL(ceph_osdc_alloc_request); 573 574 static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc) 575 { 576 return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0); 577 } 578 579 static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp, 580 int num_request_data_items, 581 int num_reply_data_items) 582 { 583 struct ceph_osd_client *osdc = req->r_osdc; 584 struct ceph_msg *msg; 585 int msg_size; 586 587 WARN_ON(req->r_request || req->r_reply); 588 WARN_ON(ceph_oid_empty(&req->r_base_oid)); 589 WARN_ON(ceph_oloc_empty(&req->r_base_oloc)); 590 591 /* create request message */ 592 msg_size = CEPH_ENCODING_START_BLK_LEN + 593 CEPH_PGID_ENCODING_LEN + 1; /* spgid */ 594 msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */ 595 msg_size += CEPH_ENCODING_START_BLK_LEN + 596 sizeof(struct ceph_osd_reqid); /* reqid */ 597 msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */ 598 msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */ 599 msg_size += CEPH_ENCODING_START_BLK_LEN + 600 ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */ 601 msg_size += 4 + req->r_base_oid.name_len; /* oid */ 602 msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op); 603 msg_size += 8; /* snapid */ 604 msg_size += 8; /* snap_seq */ 605 msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0); 606 msg_size += 4 + 8; /* retry_attempt, features */ 607 608 if (req->r_mempool) 609 msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size, 610 num_request_data_items); 611 else 612 msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size, 613 num_request_data_items, gfp, true); 614 if (!msg) 615 return -ENOMEM; 616 617 memset(msg->front.iov_base, 0, msg->front.iov_len); 618 req->r_request = msg; 619 620 /* create reply message */ 621 msg_size = OSD_OPREPLY_FRONT_LEN; 622 msg_size += req->r_base_oid.name_len; 623 msg_size += req->r_num_ops * sizeof(struct ceph_osd_op); 624 625 if (req->r_mempool) 626 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size, 627 num_reply_data_items); 628 else 629 msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size, 630 num_reply_data_items, gfp, true); 631 if (!msg) 632 return -ENOMEM; 633 634 req->r_reply = msg; 635 636 return 0; 637 } 638 639 static bool osd_req_opcode_valid(u16 opcode) 640 { 641 switch (opcode) { 642 #define GENERATE_CASE(op, opcode, str) case CEPH_OSD_OP_##op: return true; 643 __CEPH_FORALL_OSD_OPS(GENERATE_CASE) 644 #undef GENERATE_CASE 645 default: 646 return false; 647 } 648 } 649 650 static void get_num_data_items(struct ceph_osd_request *req, 651 int *num_request_data_items, 652 int *num_reply_data_items) 653 { 654 struct ceph_osd_req_op *op; 655 656 *num_request_data_items = 0; 657 *num_reply_data_items = 0; 658 659 for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { 660 switch (op->op) { 661 /* request */ 662 case CEPH_OSD_OP_WRITE: 663 case CEPH_OSD_OP_WRITEFULL: 664 case CEPH_OSD_OP_SETXATTR: 665 case CEPH_OSD_OP_CMPXATTR: 666 case CEPH_OSD_OP_NOTIFY_ACK: 667 case CEPH_OSD_OP_COPY_FROM2: 668 *num_request_data_items += 1; 669 break; 670 671 /* reply */ 672 case CEPH_OSD_OP_STAT: 673 case CEPH_OSD_OP_READ: 674 case CEPH_OSD_OP_SPARSE_READ: 675 case CEPH_OSD_OP_LIST_WATCHERS: 676 *num_reply_data_items += 1; 677 break; 678 679 /* both */ 680 case CEPH_OSD_OP_NOTIFY: 681 *num_request_data_items += 1; 682 *num_reply_data_items += 1; 683 break; 684 case CEPH_OSD_OP_CALL: 685 *num_request_data_items += 2; 686 *num_reply_data_items += 1; 687 break; 688 689 default: 690 WARN_ON(!osd_req_opcode_valid(op->op)); 691 break; 692 } 693 } 694 } 695 696 /* 697 * oid, oloc and OSD op opcode(s) must be filled in before this function 698 * is called. 699 */ 700 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp) 701 { 702 int num_request_data_items, num_reply_data_items; 703 704 get_num_data_items(req, &num_request_data_items, &num_reply_data_items); 705 return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items, 706 num_reply_data_items); 707 } 708 EXPORT_SYMBOL(ceph_osdc_alloc_messages); 709 710 /* 711 * This is an osd op init function for opcodes that have no data or 712 * other information associated with them. It also serves as a 713 * common init routine for all the other init functions, below. 714 */ 715 struct ceph_osd_req_op * 716 osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, 717 u16 opcode, u32 flags) 718 { 719 struct ceph_osd_req_op *op; 720 721 BUG_ON(which >= osd_req->r_num_ops); 722 BUG_ON(!osd_req_opcode_valid(opcode)); 723 724 op = &osd_req->r_ops[which]; 725 memset(op, 0, sizeof (*op)); 726 op->op = opcode; 727 op->flags = flags; 728 729 return op; 730 } 731 EXPORT_SYMBOL(osd_req_op_init); 732 733 void osd_req_op_extent_init(struct ceph_osd_request *osd_req, 734 unsigned int which, u16 opcode, 735 u64 offset, u64 length, 736 u64 truncate_size, u32 truncate_seq) 737 { 738 struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, 739 opcode, 0); 740 size_t payload_len = 0; 741 742 BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && 743 opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO && 744 opcode != CEPH_OSD_OP_TRUNCATE && opcode != CEPH_OSD_OP_SPARSE_READ); 745 746 op->extent.offset = offset; 747 op->extent.length = length; 748 op->extent.truncate_size = truncate_size; 749 op->extent.truncate_seq = truncate_seq; 750 if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL) 751 payload_len += length; 752 753 op->indata_len = payload_len; 754 } 755 EXPORT_SYMBOL(osd_req_op_extent_init); 756 757 void osd_req_op_extent_update(struct ceph_osd_request *osd_req, 758 unsigned int which, u64 length) 759 { 760 struct ceph_osd_req_op *op; 761 u64 previous; 762 763 BUG_ON(which >= osd_req->r_num_ops); 764 op = &osd_req->r_ops[which]; 765 previous = op->extent.length; 766 767 if (length == previous) 768 return; /* Nothing to do */ 769 BUG_ON(length > previous); 770 771 op->extent.length = length; 772 if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL) 773 op->indata_len -= previous - length; 774 } 775 EXPORT_SYMBOL(osd_req_op_extent_update); 776 777 void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, 778 unsigned int which, u64 offset_inc) 779 { 780 struct ceph_osd_req_op *op, *prev_op; 781 782 BUG_ON(which + 1 >= osd_req->r_num_ops); 783 784 prev_op = &osd_req->r_ops[which]; 785 op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags); 786 /* dup previous one */ 787 op->indata_len = prev_op->indata_len; 788 op->outdata_len = prev_op->outdata_len; 789 op->extent = prev_op->extent; 790 /* adjust offset */ 791 op->extent.offset += offset_inc; 792 op->extent.length -= offset_inc; 793 794 if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL) 795 op->indata_len -= offset_inc; 796 } 797 EXPORT_SYMBOL(osd_req_op_extent_dup_last); 798 799 int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, 800 const char *class, const char *method) 801 { 802 struct ceph_osd_req_op *op; 803 struct ceph_pagelist *pagelist; 804 size_t payload_len = 0; 805 size_t size; 806 int ret; 807 808 op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0); 809 810 pagelist = ceph_pagelist_alloc(GFP_NOFS); 811 if (!pagelist) 812 return -ENOMEM; 813 814 op->cls.class_name = class; 815 size = strlen(class); 816 BUG_ON(size > (size_t) U8_MAX); 817 op->cls.class_len = size; 818 ret = ceph_pagelist_append(pagelist, class, size); 819 if (ret) 820 goto err_pagelist_free; 821 payload_len += size; 822 823 op->cls.method_name = method; 824 size = strlen(method); 825 BUG_ON(size > (size_t) U8_MAX); 826 op->cls.method_len = size; 827 ret = ceph_pagelist_append(pagelist, method, size); 828 if (ret) 829 goto err_pagelist_free; 830 payload_len += size; 831 832 osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist); 833 op->indata_len = payload_len; 834 return 0; 835 836 err_pagelist_free: 837 ceph_pagelist_release(pagelist); 838 return ret; 839 } 840 EXPORT_SYMBOL(osd_req_op_cls_init); 841 842 int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, 843 u16 opcode, const char *name, const void *value, 844 size_t size, u8 cmp_op, u8 cmp_mode) 845 { 846 struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, 847 opcode, 0); 848 struct ceph_pagelist *pagelist; 849 size_t payload_len; 850 int ret; 851 852 BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR); 853 854 pagelist = ceph_pagelist_alloc(GFP_NOFS); 855 if (!pagelist) 856 return -ENOMEM; 857 858 payload_len = strlen(name); 859 op->xattr.name_len = payload_len; 860 ret = ceph_pagelist_append(pagelist, name, payload_len); 861 if (ret) 862 goto err_pagelist_free; 863 864 op->xattr.value_len = size; 865 ret = ceph_pagelist_append(pagelist, value, size); 866 if (ret) 867 goto err_pagelist_free; 868 payload_len += size; 869 870 op->xattr.cmp_op = cmp_op; 871 op->xattr.cmp_mode = cmp_mode; 872 873 ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist); 874 op->indata_len = payload_len; 875 return 0; 876 877 err_pagelist_free: 878 ceph_pagelist_release(pagelist); 879 return ret; 880 } 881 EXPORT_SYMBOL(osd_req_op_xattr_init); 882 883 /* 884 * @watch_opcode: CEPH_OSD_WATCH_OP_* 885 */ 886 static void osd_req_op_watch_init(struct ceph_osd_request *req, int which, 887 u8 watch_opcode, u64 cookie, u32 gen) 888 { 889 struct ceph_osd_req_op *op; 890 891 op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0); 892 op->watch.cookie = cookie; 893 op->watch.op = watch_opcode; 894 op->watch.gen = gen; 895 } 896 897 /* 898 * prot_ver, timeout and notify payload (may be empty) should already be 899 * encoded in @request_pl 900 */ 901 static void osd_req_op_notify_init(struct ceph_osd_request *req, int which, 902 u64 cookie, struct ceph_pagelist *request_pl) 903 { 904 struct ceph_osd_req_op *op; 905 906 op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); 907 op->notify.cookie = cookie; 908 909 ceph_osd_data_pagelist_init(&op->notify.request_data, request_pl); 910 op->indata_len = request_pl->length; 911 } 912 913 /* 914 * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_* 915 */ 916 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, 917 unsigned int which, 918 u64 expected_object_size, 919 u64 expected_write_size, 920 u32 flags) 921 { 922 struct ceph_osd_req_op *op; 923 924 op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_SETALLOCHINT, 0); 925 op->alloc_hint.expected_object_size = expected_object_size; 926 op->alloc_hint.expected_write_size = expected_write_size; 927 op->alloc_hint.flags = flags; 928 929 /* 930 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed 931 * not worth a feature bit. Set FAILOK per-op flag to make 932 * sure older osds don't trip over an unsupported opcode. 933 */ 934 op->flags |= CEPH_OSD_OP_FLAG_FAILOK; 935 } 936 EXPORT_SYMBOL(osd_req_op_alloc_hint_init); 937 938 static void ceph_osdc_msg_data_add(struct ceph_msg *msg, 939 struct ceph_osd_data *osd_data) 940 { 941 u64 length = ceph_osd_data_length(osd_data); 942 943 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { 944 BUG_ON(length > (u64) SIZE_MAX); 945 if (length) 946 ceph_msg_data_add_pages(msg, osd_data->pages, 947 length, osd_data->alignment, false); 948 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { 949 BUG_ON(!length); 950 ceph_msg_data_add_pagelist(msg, osd_data->pagelist); 951 #ifdef CONFIG_BLOCK 952 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { 953 ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length); 954 #endif 955 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) { 956 ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos); 957 } else { 958 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); 959 } 960 } 961 962 static u32 osd_req_encode_op(struct ceph_osd_op *dst, 963 const struct ceph_osd_req_op *src) 964 { 965 switch (src->op) { 966 case CEPH_OSD_OP_STAT: 967 break; 968 case CEPH_OSD_OP_READ: 969 case CEPH_OSD_OP_SPARSE_READ: 970 case CEPH_OSD_OP_WRITE: 971 case CEPH_OSD_OP_WRITEFULL: 972 case CEPH_OSD_OP_ZERO: 973 case CEPH_OSD_OP_TRUNCATE: 974 dst->extent.offset = cpu_to_le64(src->extent.offset); 975 dst->extent.length = cpu_to_le64(src->extent.length); 976 dst->extent.truncate_size = 977 cpu_to_le64(src->extent.truncate_size); 978 dst->extent.truncate_seq = 979 cpu_to_le32(src->extent.truncate_seq); 980 break; 981 case CEPH_OSD_OP_CALL: 982 dst->cls.class_len = src->cls.class_len; 983 dst->cls.method_len = src->cls.method_len; 984 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); 985 break; 986 case CEPH_OSD_OP_WATCH: 987 dst->watch.cookie = cpu_to_le64(src->watch.cookie); 988 dst->watch.ver = cpu_to_le64(0); 989 dst->watch.op = src->watch.op; 990 dst->watch.gen = cpu_to_le32(src->watch.gen); 991 break; 992 case CEPH_OSD_OP_NOTIFY_ACK: 993 break; 994 case CEPH_OSD_OP_NOTIFY: 995 dst->notify.cookie = cpu_to_le64(src->notify.cookie); 996 break; 997 case CEPH_OSD_OP_LIST_WATCHERS: 998 break; 999 case CEPH_OSD_OP_SETALLOCHINT: 1000 dst->alloc_hint.expected_object_size = 1001 cpu_to_le64(src->alloc_hint.expected_object_size); 1002 dst->alloc_hint.expected_write_size = 1003 cpu_to_le64(src->alloc_hint.expected_write_size); 1004 dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags); 1005 break; 1006 case CEPH_OSD_OP_SETXATTR: 1007 case CEPH_OSD_OP_CMPXATTR: 1008 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len); 1009 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len); 1010 dst->xattr.cmp_op = src->xattr.cmp_op; 1011 dst->xattr.cmp_mode = src->xattr.cmp_mode; 1012 break; 1013 case CEPH_OSD_OP_CREATE: 1014 case CEPH_OSD_OP_DELETE: 1015 break; 1016 case CEPH_OSD_OP_COPY_FROM2: 1017 dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid); 1018 dst->copy_from.src_version = 1019 cpu_to_le64(src->copy_from.src_version); 1020 dst->copy_from.flags = src->copy_from.flags; 1021 dst->copy_from.src_fadvise_flags = 1022 cpu_to_le32(src->copy_from.src_fadvise_flags); 1023 break; 1024 default: 1025 pr_err("unsupported osd opcode %s\n", 1026 ceph_osd_op_name(src->op)); 1027 WARN_ON(1); 1028 1029 return 0; 1030 } 1031 1032 dst->op = cpu_to_le16(src->op); 1033 dst->flags = cpu_to_le32(src->flags); 1034 dst->payload_len = cpu_to_le32(src->indata_len); 1035 1036 return src->indata_len; 1037 } 1038 1039 /* 1040 * build new request AND message, calculate layout, and adjust file 1041 * extent as needed. 1042 * 1043 * if the file was recently truncated, we include information about its 1044 * old and new size so that the object can be updated appropriately. (we 1045 * avoid synchronously deleting truncated objects because it's slow.) 1046 */ 1047 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, 1048 struct ceph_file_layout *layout, 1049 struct ceph_vino vino, 1050 u64 off, u64 *plen, 1051 unsigned int which, int num_ops, 1052 int opcode, int flags, 1053 struct ceph_snap_context *snapc, 1054 u32 truncate_seq, 1055 u64 truncate_size, 1056 bool use_mempool) 1057 { 1058 struct ceph_osd_request *req; 1059 u64 objnum = 0; 1060 u64 objoff = 0; 1061 u64 objlen = 0; 1062 int r; 1063 1064 BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && 1065 opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE && 1066 opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE && 1067 opcode != CEPH_OSD_OP_SPARSE_READ); 1068 1069 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool, 1070 GFP_NOFS); 1071 if (!req) { 1072 r = -ENOMEM; 1073 goto fail; 1074 } 1075 1076 /* calculate max write size */ 1077 r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen); 1078 if (r) 1079 goto fail; 1080 1081 if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) { 1082 osd_req_op_init(req, which, opcode, 0); 1083 } else { 1084 u32 object_size = layout->object_size; 1085 u32 object_base = off - objoff; 1086 if (!(truncate_seq == 1 && truncate_size == -1ULL)) { 1087 if (truncate_size <= object_base) { 1088 truncate_size = 0; 1089 } else { 1090 truncate_size -= object_base; 1091 if (truncate_size > object_size) 1092 truncate_size = object_size; 1093 } 1094 } 1095 osd_req_op_extent_init(req, which, opcode, objoff, objlen, 1096 truncate_size, truncate_seq); 1097 } 1098 1099 req->r_base_oloc.pool = layout->pool_id; 1100 req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns); 1101 ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum); 1102 req->r_flags = flags | osdc->client->options->read_from_replica; 1103 1104 req->r_snapid = vino.snap; 1105 if (flags & CEPH_OSD_FLAG_WRITE) 1106 req->r_data_offset = off; 1107 1108 if (num_ops > 1) 1109 /* 1110 * This is a special case for ceph_writepages_start(), but it 1111 * also covers ceph_uninline_data(). If more multi-op request 1112 * use cases emerge, we will need a separate helper. 1113 */ 1114 r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0); 1115 else 1116 r = ceph_osdc_alloc_messages(req, GFP_NOFS); 1117 if (r) 1118 goto fail; 1119 1120 return req; 1121 1122 fail: 1123 ceph_osdc_put_request(req); 1124 return ERR_PTR(r); 1125 } 1126 EXPORT_SYMBOL(ceph_osdc_new_request); 1127 1128 int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt) 1129 { 1130 op->extent.sparse_ext_cnt = cnt; 1131 op->extent.sparse_ext = kmalloc_array(cnt, 1132 sizeof(*op->extent.sparse_ext), 1133 GFP_NOFS); 1134 if (!op->extent.sparse_ext) 1135 return -ENOMEM; 1136 return 0; 1137 } 1138 EXPORT_SYMBOL(__ceph_alloc_sparse_ext_map); 1139 1140 /* 1141 * We keep osd requests in an rbtree, sorted by ->r_tid. 1142 */ 1143 DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node) 1144 DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node) 1145 1146 /* 1147 * Call @fn on each OSD request as long as @fn returns 0. 1148 */ 1149 static void for_each_request(struct ceph_osd_client *osdc, 1150 int (*fn)(struct ceph_osd_request *req, void *arg), 1151 void *arg) 1152 { 1153 struct rb_node *n, *p; 1154 1155 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 1156 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 1157 1158 for (p = rb_first(&osd->o_requests); p; ) { 1159 struct ceph_osd_request *req = 1160 rb_entry(p, struct ceph_osd_request, r_node); 1161 1162 p = rb_next(p); 1163 if (fn(req, arg)) 1164 return; 1165 } 1166 } 1167 1168 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { 1169 struct ceph_osd_request *req = 1170 rb_entry(p, struct ceph_osd_request, r_node); 1171 1172 p = rb_next(p); 1173 if (fn(req, arg)) 1174 return; 1175 } 1176 } 1177 1178 static bool osd_homeless(struct ceph_osd *osd) 1179 { 1180 return osd->o_osd == CEPH_HOMELESS_OSD; 1181 } 1182 1183 static bool osd_registered(struct ceph_osd *osd) 1184 { 1185 verify_osdc_locked(osd->o_osdc); 1186 1187 return !RB_EMPTY_NODE(&osd->o_node); 1188 } 1189 1190 /* 1191 * Assumes @osd is zero-initialized. 1192 */ 1193 static void osd_init(struct ceph_osd *osd) 1194 { 1195 refcount_set(&osd->o_ref, 1); 1196 RB_CLEAR_NODE(&osd->o_node); 1197 spin_lock_init(&osd->o_requests_lock); 1198 osd->o_requests = RB_ROOT; 1199 osd->o_linger_requests = RB_ROOT; 1200 osd->o_backoff_mappings = RB_ROOT; 1201 osd->o_backoffs_by_id = RB_ROOT; 1202 INIT_LIST_HEAD(&osd->o_osd_lru); 1203 INIT_LIST_HEAD(&osd->o_keepalive_item); 1204 osd->o_incarnation = 1; 1205 mutex_init(&osd->lock); 1206 } 1207 1208 static void ceph_init_sparse_read(struct ceph_sparse_read *sr) 1209 { 1210 kfree(sr->sr_extent); 1211 memset(sr, '\0', sizeof(*sr)); 1212 sr->sr_state = CEPH_SPARSE_READ_HDR; 1213 } 1214 1215 static void osd_cleanup(struct ceph_osd *osd) 1216 { 1217 WARN_ON(!RB_EMPTY_NODE(&osd->o_node)); 1218 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); 1219 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); 1220 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings)); 1221 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id)); 1222 WARN_ON(!list_empty(&osd->o_osd_lru)); 1223 WARN_ON(!list_empty(&osd->o_keepalive_item)); 1224 1225 ceph_init_sparse_read(&osd->o_sparse_read); 1226 1227 if (osd->o_auth.authorizer) { 1228 WARN_ON(osd_homeless(osd)); 1229 ceph_auth_destroy_authorizer(osd->o_auth.authorizer); 1230 } 1231 } 1232 1233 /* 1234 * Track open sessions with osds. 1235 */ 1236 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum) 1237 { 1238 struct ceph_osd *osd; 1239 1240 WARN_ON(onum == CEPH_HOMELESS_OSD); 1241 1242 osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL); 1243 osd_init(osd); 1244 osd->o_osdc = osdc; 1245 osd->o_osd = onum; 1246 osd->o_sparse_op_idx = -1; 1247 1248 ceph_init_sparse_read(&osd->o_sparse_read); 1249 1250 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr); 1251 1252 return osd; 1253 } 1254 1255 static struct ceph_osd *get_osd(struct ceph_osd *osd) 1256 { 1257 if (refcount_inc_not_zero(&osd->o_ref)) { 1258 dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1, 1259 refcount_read(&osd->o_ref)); 1260 return osd; 1261 } else { 1262 dout("get_osd %p FAIL\n", osd); 1263 return NULL; 1264 } 1265 } 1266 1267 static void put_osd(struct ceph_osd *osd) 1268 { 1269 dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref), 1270 refcount_read(&osd->o_ref) - 1); 1271 if (refcount_dec_and_test(&osd->o_ref)) { 1272 osd_cleanup(osd); 1273 kfree(osd); 1274 } 1275 } 1276 1277 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node) 1278 1279 static void __move_osd_to_lru(struct ceph_osd *osd) 1280 { 1281 struct ceph_osd_client *osdc = osd->o_osdc; 1282 1283 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 1284 BUG_ON(!list_empty(&osd->o_osd_lru)); 1285 1286 spin_lock(&osdc->osd_lru_lock); 1287 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); 1288 spin_unlock(&osdc->osd_lru_lock); 1289 1290 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl; 1291 } 1292 1293 static void maybe_move_osd_to_lru(struct ceph_osd *osd) 1294 { 1295 if (RB_EMPTY_ROOT(&osd->o_requests) && 1296 RB_EMPTY_ROOT(&osd->o_linger_requests)) 1297 __move_osd_to_lru(osd); 1298 } 1299 1300 static void __remove_osd_from_lru(struct ceph_osd *osd) 1301 { 1302 struct ceph_osd_client *osdc = osd->o_osdc; 1303 1304 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 1305 1306 spin_lock(&osdc->osd_lru_lock); 1307 if (!list_empty(&osd->o_osd_lru)) 1308 list_del_init(&osd->o_osd_lru); 1309 spin_unlock(&osdc->osd_lru_lock); 1310 } 1311 1312 /* 1313 * Close the connection and assign any leftover requests to the 1314 * homeless session. 1315 */ 1316 static void close_osd(struct ceph_osd *osd) 1317 { 1318 struct ceph_osd_client *osdc = osd->o_osdc; 1319 struct rb_node *n; 1320 1321 verify_osdc_wrlocked(osdc); 1322 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 1323 1324 ceph_con_close(&osd->o_con); 1325 1326 for (n = rb_first(&osd->o_requests); n; ) { 1327 struct ceph_osd_request *req = 1328 rb_entry(n, struct ceph_osd_request, r_node); 1329 1330 n = rb_next(n); /* unlink_request() */ 1331 1332 dout(" reassigning req %p tid %llu\n", req, req->r_tid); 1333 unlink_request(osd, req); 1334 link_request(&osdc->homeless_osd, req); 1335 } 1336 for (n = rb_first(&osd->o_linger_requests); n; ) { 1337 struct ceph_osd_linger_request *lreq = 1338 rb_entry(n, struct ceph_osd_linger_request, node); 1339 1340 n = rb_next(n); /* unlink_linger() */ 1341 1342 dout(" reassigning lreq %p linger_id %llu\n", lreq, 1343 lreq->linger_id); 1344 unlink_linger(osd, lreq); 1345 link_linger(&osdc->homeless_osd, lreq); 1346 } 1347 clear_backoffs(osd); 1348 1349 __remove_osd_from_lru(osd); 1350 erase_osd(&osdc->osds, osd); 1351 put_osd(osd); 1352 } 1353 1354 /* 1355 * reset osd connect 1356 */ 1357 static int reopen_osd(struct ceph_osd *osd) 1358 { 1359 struct ceph_entity_addr *peer_addr; 1360 1361 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 1362 1363 if (RB_EMPTY_ROOT(&osd->o_requests) && 1364 RB_EMPTY_ROOT(&osd->o_linger_requests)) { 1365 close_osd(osd); 1366 return -ENODEV; 1367 } 1368 1369 peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd]; 1370 if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) && 1371 !ceph_con_opened(&osd->o_con)) { 1372 struct rb_node *n; 1373 1374 dout("osd addr hasn't changed and connection never opened, " 1375 "letting msgr retry\n"); 1376 /* touch each r_stamp for handle_timeout()'s benfit */ 1377 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { 1378 struct ceph_osd_request *req = 1379 rb_entry(n, struct ceph_osd_request, r_node); 1380 req->r_stamp = jiffies; 1381 } 1382 1383 return -EAGAIN; 1384 } 1385 1386 ceph_con_close(&osd->o_con); 1387 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr); 1388 osd->o_incarnation++; 1389 1390 return 0; 1391 } 1392 1393 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o, 1394 bool wrlocked) 1395 { 1396 struct ceph_osd *osd; 1397 1398 if (wrlocked) 1399 verify_osdc_wrlocked(osdc); 1400 else 1401 verify_osdc_locked(osdc); 1402 1403 if (o != CEPH_HOMELESS_OSD) 1404 osd = lookup_osd(&osdc->osds, o); 1405 else 1406 osd = &osdc->homeless_osd; 1407 if (!osd) { 1408 if (!wrlocked) 1409 return ERR_PTR(-EAGAIN); 1410 1411 osd = create_osd(osdc, o); 1412 insert_osd(&osdc->osds, osd); 1413 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, 1414 &osdc->osdmap->osd_addr[osd->o_osd]); 1415 } 1416 1417 dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd); 1418 return osd; 1419 } 1420 1421 /* 1422 * Create request <-> OSD session relation. 1423 * 1424 * @req has to be assigned a tid, @osd may be homeless. 1425 */ 1426 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req) 1427 { 1428 verify_osd_locked(osd); 1429 WARN_ON(!req->r_tid || req->r_osd); 1430 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, 1431 req, req->r_tid); 1432 1433 if (!osd_homeless(osd)) 1434 __remove_osd_from_lru(osd); 1435 else 1436 atomic_inc(&osd->o_osdc->num_homeless); 1437 1438 get_osd(osd); 1439 spin_lock(&osd->o_requests_lock); 1440 insert_request(&osd->o_requests, req); 1441 spin_unlock(&osd->o_requests_lock); 1442 req->r_osd = osd; 1443 } 1444 1445 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req) 1446 { 1447 verify_osd_locked(osd); 1448 WARN_ON(req->r_osd != osd); 1449 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, 1450 req, req->r_tid); 1451 1452 req->r_osd = NULL; 1453 spin_lock(&osd->o_requests_lock); 1454 erase_request(&osd->o_requests, req); 1455 spin_unlock(&osd->o_requests_lock); 1456 put_osd(osd); 1457 1458 if (!osd_homeless(osd)) 1459 maybe_move_osd_to_lru(osd); 1460 else 1461 atomic_dec(&osd->o_osdc->num_homeless); 1462 } 1463 1464 static bool __pool_full(struct ceph_pg_pool_info *pi) 1465 { 1466 return pi->flags & CEPH_POOL_FLAG_FULL; 1467 } 1468 1469 static bool have_pool_full(struct ceph_osd_client *osdc) 1470 { 1471 struct rb_node *n; 1472 1473 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { 1474 struct ceph_pg_pool_info *pi = 1475 rb_entry(n, struct ceph_pg_pool_info, node); 1476 1477 if (__pool_full(pi)) 1478 return true; 1479 } 1480 1481 return false; 1482 } 1483 1484 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id) 1485 { 1486 struct ceph_pg_pool_info *pi; 1487 1488 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id); 1489 if (!pi) 1490 return false; 1491 1492 return __pool_full(pi); 1493 } 1494 1495 /* 1496 * Returns whether a request should be blocked from being sent 1497 * based on the current osdmap and osd_client settings. 1498 */ 1499 static bool target_should_be_paused(struct ceph_osd_client *osdc, 1500 const struct ceph_osd_request_target *t, 1501 struct ceph_pg_pool_info *pi) 1502 { 1503 bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); 1504 bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || 1505 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 1506 __pool_full(pi); 1507 1508 WARN_ON(pi->id != t->target_oloc.pool); 1509 return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) || 1510 ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) || 1511 (osdc->osdmap->epoch < osdc->epoch_barrier); 1512 } 1513 1514 static int pick_random_replica(const struct ceph_osds *acting) 1515 { 1516 int i = get_random_u32_below(acting->size); 1517 1518 dout("%s picked osd%d, primary osd%d\n", __func__, 1519 acting->osds[i], acting->primary); 1520 return i; 1521 } 1522 1523 /* 1524 * Picks the closest replica based on client's location given by 1525 * crush_location option. Prefers the primary if the locality is 1526 * the same. 1527 */ 1528 static int pick_closest_replica(struct ceph_osd_client *osdc, 1529 const struct ceph_osds *acting) 1530 { 1531 struct ceph_options *opt = osdc->client->options; 1532 int best_i, best_locality; 1533 int i = 0, locality; 1534 1535 do { 1536 locality = ceph_get_crush_locality(osdc->osdmap, 1537 acting->osds[i], 1538 &opt->crush_locs); 1539 if (i == 0 || 1540 (locality >= 0 && best_locality < 0) || 1541 (locality >= 0 && best_locality >= 0 && 1542 locality < best_locality)) { 1543 best_i = i; 1544 best_locality = locality; 1545 } 1546 } while (++i < acting->size); 1547 1548 dout("%s picked osd%d with locality %d, primary osd%d\n", __func__, 1549 acting->osds[best_i], best_locality, acting->primary); 1550 return best_i; 1551 } 1552 1553 enum calc_target_result { 1554 CALC_TARGET_NO_ACTION = 0, 1555 CALC_TARGET_NEED_RESEND, 1556 CALC_TARGET_POOL_DNE, 1557 }; 1558 1559 static enum calc_target_result calc_target(struct ceph_osd_client *osdc, 1560 struct ceph_osd_request_target *t, 1561 bool any_change) 1562 { 1563 struct ceph_pg_pool_info *pi; 1564 struct ceph_pg pgid, last_pgid; 1565 struct ceph_osds up, acting; 1566 bool is_read = t->flags & CEPH_OSD_FLAG_READ; 1567 bool is_write = t->flags & CEPH_OSD_FLAG_WRITE; 1568 bool force_resend = false; 1569 bool unpaused = false; 1570 bool legacy_change = false; 1571 bool split = false; 1572 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); 1573 bool recovery_deletes = ceph_osdmap_flag(osdc, 1574 CEPH_OSDMAP_RECOVERY_DELETES); 1575 enum calc_target_result ct_res; 1576 1577 t->epoch = osdc->osdmap->epoch; 1578 pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool); 1579 if (!pi) { 1580 t->osd = CEPH_HOMELESS_OSD; 1581 ct_res = CALC_TARGET_POOL_DNE; 1582 goto out; 1583 } 1584 1585 if (osdc->osdmap->epoch == pi->last_force_request_resend) { 1586 if (t->last_force_resend < pi->last_force_request_resend) { 1587 t->last_force_resend = pi->last_force_request_resend; 1588 force_resend = true; 1589 } else if (t->last_force_resend == 0) { 1590 force_resend = true; 1591 } 1592 } 1593 1594 /* apply tiering */ 1595 ceph_oid_copy(&t->target_oid, &t->base_oid); 1596 ceph_oloc_copy(&t->target_oloc, &t->base_oloc); 1597 if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) { 1598 if (is_read && pi->read_tier >= 0) 1599 t->target_oloc.pool = pi->read_tier; 1600 if (is_write && pi->write_tier >= 0) 1601 t->target_oloc.pool = pi->write_tier; 1602 1603 pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool); 1604 if (!pi) { 1605 t->osd = CEPH_HOMELESS_OSD; 1606 ct_res = CALC_TARGET_POOL_DNE; 1607 goto out; 1608 } 1609 } 1610 1611 __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid); 1612 last_pgid.pool = pgid.pool; 1613 last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask); 1614 1615 ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting); 1616 if (any_change && 1617 ceph_is_new_interval(&t->acting, 1618 &acting, 1619 &t->up, 1620 &up, 1621 t->size, 1622 pi->size, 1623 t->min_size, 1624 pi->min_size, 1625 t->pg_num, 1626 pi->pg_num, 1627 t->sort_bitwise, 1628 sort_bitwise, 1629 t->recovery_deletes, 1630 recovery_deletes, 1631 &last_pgid)) 1632 force_resend = true; 1633 1634 if (t->paused && !target_should_be_paused(osdc, t, pi)) { 1635 t->paused = false; 1636 unpaused = true; 1637 } 1638 legacy_change = ceph_pg_compare(&t->pgid, &pgid) || 1639 ceph_osds_changed(&t->acting, &acting, 1640 t->used_replica || any_change); 1641 if (t->pg_num) 1642 split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num); 1643 1644 if (legacy_change || force_resend || split) { 1645 t->pgid = pgid; /* struct */ 1646 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid); 1647 ceph_osds_copy(&t->acting, &acting); 1648 ceph_osds_copy(&t->up, &up); 1649 t->size = pi->size; 1650 t->min_size = pi->min_size; 1651 t->pg_num = pi->pg_num; 1652 t->pg_num_mask = pi->pg_num_mask; 1653 t->sort_bitwise = sort_bitwise; 1654 t->recovery_deletes = recovery_deletes; 1655 1656 if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS | 1657 CEPH_OSD_FLAG_LOCALIZE_READS)) && 1658 !is_write && pi->type == CEPH_POOL_TYPE_REP && 1659 acting.size > 1) { 1660 int pos; 1661 1662 WARN_ON(!is_read || acting.osds[0] != acting.primary); 1663 if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) { 1664 pos = pick_random_replica(&acting); 1665 } else { 1666 pos = pick_closest_replica(osdc, &acting); 1667 } 1668 t->osd = acting.osds[pos]; 1669 t->used_replica = pos > 0; 1670 } else { 1671 t->osd = acting.primary; 1672 t->used_replica = false; 1673 } 1674 } 1675 1676 if (unpaused || legacy_change || force_resend || split) 1677 ct_res = CALC_TARGET_NEED_RESEND; 1678 else 1679 ct_res = CALC_TARGET_NO_ACTION; 1680 1681 out: 1682 dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused, 1683 legacy_change, force_resend, split, ct_res, t->osd); 1684 return ct_res; 1685 } 1686 1687 static struct ceph_spg_mapping *alloc_spg_mapping(void) 1688 { 1689 struct ceph_spg_mapping *spg; 1690 1691 spg = kmalloc(sizeof(*spg), GFP_NOIO); 1692 if (!spg) 1693 return NULL; 1694 1695 RB_CLEAR_NODE(&spg->node); 1696 spg->backoffs = RB_ROOT; 1697 return spg; 1698 } 1699 1700 static void free_spg_mapping(struct ceph_spg_mapping *spg) 1701 { 1702 WARN_ON(!RB_EMPTY_NODE(&spg->node)); 1703 WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs)); 1704 1705 kfree(spg); 1706 } 1707 1708 /* 1709 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to 1710 * ceph_pg_mapping. Used to track OSD backoffs -- a backoff [range] is 1711 * defined only within a specific spgid; it does not pass anything to 1712 * children on split, or to another primary. 1713 */ 1714 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare, 1715 RB_BYPTR, const struct ceph_spg *, node) 1716 1717 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid) 1718 { 1719 return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits; 1720 } 1721 1722 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid, 1723 void **pkey, size_t *pkey_len) 1724 { 1725 if (hoid->key_len) { 1726 *pkey = hoid->key; 1727 *pkey_len = hoid->key_len; 1728 } else { 1729 *pkey = hoid->oid; 1730 *pkey_len = hoid->oid_len; 1731 } 1732 } 1733 1734 static int compare_names(const void *name1, size_t name1_len, 1735 const void *name2, size_t name2_len) 1736 { 1737 int ret; 1738 1739 ret = memcmp(name1, name2, min(name1_len, name2_len)); 1740 if (!ret) { 1741 if (name1_len < name2_len) 1742 ret = -1; 1743 else if (name1_len > name2_len) 1744 ret = 1; 1745 } 1746 return ret; 1747 } 1748 1749 static int hoid_compare(const struct ceph_hobject_id *lhs, 1750 const struct ceph_hobject_id *rhs) 1751 { 1752 void *effective_key1, *effective_key2; 1753 size_t effective_key1_len, effective_key2_len; 1754 int ret; 1755 1756 if (lhs->is_max < rhs->is_max) 1757 return -1; 1758 if (lhs->is_max > rhs->is_max) 1759 return 1; 1760 1761 if (lhs->pool < rhs->pool) 1762 return -1; 1763 if (lhs->pool > rhs->pool) 1764 return 1; 1765 1766 if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs)) 1767 return -1; 1768 if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs)) 1769 return 1; 1770 1771 ret = compare_names(lhs->nspace, lhs->nspace_len, 1772 rhs->nspace, rhs->nspace_len); 1773 if (ret) 1774 return ret; 1775 1776 hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len); 1777 hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len); 1778 ret = compare_names(effective_key1, effective_key1_len, 1779 effective_key2, effective_key2_len); 1780 if (ret) 1781 return ret; 1782 1783 ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len); 1784 if (ret) 1785 return ret; 1786 1787 if (lhs->snapid < rhs->snapid) 1788 return -1; 1789 if (lhs->snapid > rhs->snapid) 1790 return 1; 1791 1792 return 0; 1793 } 1794 1795 /* 1796 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX 1797 * compat stuff here. 1798 * 1799 * Assumes @hoid is zero-initialized. 1800 */ 1801 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid) 1802 { 1803 u8 struct_v; 1804 u32 struct_len; 1805 int ret; 1806 1807 ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v, 1808 &struct_len); 1809 if (ret) 1810 return ret; 1811 1812 if (struct_v < 4) { 1813 pr_err("got struct_v %d < 4 of hobject_t\n", struct_v); 1814 goto e_inval; 1815 } 1816 1817 hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len, 1818 GFP_NOIO); 1819 if (IS_ERR(hoid->key)) { 1820 ret = PTR_ERR(hoid->key); 1821 hoid->key = NULL; 1822 return ret; 1823 } 1824 1825 hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len, 1826 GFP_NOIO); 1827 if (IS_ERR(hoid->oid)) { 1828 ret = PTR_ERR(hoid->oid); 1829 hoid->oid = NULL; 1830 return ret; 1831 } 1832 1833 ceph_decode_64_safe(p, end, hoid->snapid, e_inval); 1834 ceph_decode_32_safe(p, end, hoid->hash, e_inval); 1835 ceph_decode_8_safe(p, end, hoid->is_max, e_inval); 1836 1837 hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len, 1838 GFP_NOIO); 1839 if (IS_ERR(hoid->nspace)) { 1840 ret = PTR_ERR(hoid->nspace); 1841 hoid->nspace = NULL; 1842 return ret; 1843 } 1844 1845 ceph_decode_64_safe(p, end, hoid->pool, e_inval); 1846 1847 ceph_hoid_build_hash_cache(hoid); 1848 return 0; 1849 1850 e_inval: 1851 return -EINVAL; 1852 } 1853 1854 static int hoid_encoding_size(const struct ceph_hobject_id *hoid) 1855 { 1856 return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */ 1857 4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len; 1858 } 1859 1860 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid) 1861 { 1862 ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid)); 1863 ceph_encode_string(p, end, hoid->key, hoid->key_len); 1864 ceph_encode_string(p, end, hoid->oid, hoid->oid_len); 1865 ceph_encode_64(p, hoid->snapid); 1866 ceph_encode_32(p, hoid->hash); 1867 ceph_encode_8(p, hoid->is_max); 1868 ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len); 1869 ceph_encode_64(p, hoid->pool); 1870 } 1871 1872 static void free_hoid(struct ceph_hobject_id *hoid) 1873 { 1874 if (hoid) { 1875 kfree(hoid->key); 1876 kfree(hoid->oid); 1877 kfree(hoid->nspace); 1878 kfree(hoid); 1879 } 1880 } 1881 1882 static struct ceph_osd_backoff *alloc_backoff(void) 1883 { 1884 struct ceph_osd_backoff *backoff; 1885 1886 backoff = kzalloc(sizeof(*backoff), GFP_NOIO); 1887 if (!backoff) 1888 return NULL; 1889 1890 RB_CLEAR_NODE(&backoff->spg_node); 1891 RB_CLEAR_NODE(&backoff->id_node); 1892 return backoff; 1893 } 1894 1895 static void free_backoff(struct ceph_osd_backoff *backoff) 1896 { 1897 WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node)); 1898 WARN_ON(!RB_EMPTY_NODE(&backoff->id_node)); 1899 1900 free_hoid(backoff->begin); 1901 free_hoid(backoff->end); 1902 kfree(backoff); 1903 } 1904 1905 /* 1906 * Within a specific spgid, backoffs are managed by ->begin hoid. 1907 */ 1908 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare, 1909 RB_BYVAL, spg_node); 1910 1911 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root, 1912 const struct ceph_hobject_id *hoid) 1913 { 1914 struct rb_node *n = root->rb_node; 1915 1916 while (n) { 1917 struct ceph_osd_backoff *cur = 1918 rb_entry(n, struct ceph_osd_backoff, spg_node); 1919 int cmp; 1920 1921 cmp = hoid_compare(hoid, cur->begin); 1922 if (cmp < 0) { 1923 n = n->rb_left; 1924 } else if (cmp > 0) { 1925 if (hoid_compare(hoid, cur->end) < 0) 1926 return cur; 1927 1928 n = n->rb_right; 1929 } else { 1930 return cur; 1931 } 1932 } 1933 1934 return NULL; 1935 } 1936 1937 /* 1938 * Each backoff has a unique id within its OSD session. 1939 */ 1940 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node) 1941 1942 static void clear_backoffs(struct ceph_osd *osd) 1943 { 1944 while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) { 1945 struct ceph_spg_mapping *spg = 1946 rb_entry(rb_first(&osd->o_backoff_mappings), 1947 struct ceph_spg_mapping, node); 1948 1949 while (!RB_EMPTY_ROOT(&spg->backoffs)) { 1950 struct ceph_osd_backoff *backoff = 1951 rb_entry(rb_first(&spg->backoffs), 1952 struct ceph_osd_backoff, spg_node); 1953 1954 erase_backoff(&spg->backoffs, backoff); 1955 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); 1956 free_backoff(backoff); 1957 } 1958 erase_spg_mapping(&osd->o_backoff_mappings, spg); 1959 free_spg_mapping(spg); 1960 } 1961 } 1962 1963 /* 1964 * Set up a temporary, non-owning view into @t. 1965 */ 1966 static void hoid_fill_from_target(struct ceph_hobject_id *hoid, 1967 const struct ceph_osd_request_target *t) 1968 { 1969 hoid->key = NULL; 1970 hoid->key_len = 0; 1971 hoid->oid = t->target_oid.name; 1972 hoid->oid_len = t->target_oid.name_len; 1973 hoid->snapid = CEPH_NOSNAP; 1974 hoid->hash = t->pgid.seed; 1975 hoid->is_max = false; 1976 if (t->target_oloc.pool_ns) { 1977 hoid->nspace = t->target_oloc.pool_ns->str; 1978 hoid->nspace_len = t->target_oloc.pool_ns->len; 1979 } else { 1980 hoid->nspace = NULL; 1981 hoid->nspace_len = 0; 1982 } 1983 hoid->pool = t->target_oloc.pool; 1984 ceph_hoid_build_hash_cache(hoid); 1985 } 1986 1987 static bool should_plug_request(struct ceph_osd_request *req) 1988 { 1989 struct ceph_osd *osd = req->r_osd; 1990 struct ceph_spg_mapping *spg; 1991 struct ceph_osd_backoff *backoff; 1992 struct ceph_hobject_id hoid; 1993 1994 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid); 1995 if (!spg) 1996 return false; 1997 1998 hoid_fill_from_target(&hoid, &req->r_t); 1999 backoff = lookup_containing_backoff(&spg->backoffs, &hoid); 2000 if (!backoff) 2001 return false; 2002 2003 dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n", 2004 __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool, 2005 backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id); 2006 return true; 2007 } 2008 2009 /* 2010 * Keep get_num_data_items() in sync with this function. 2011 */ 2012 static void setup_request_data(struct ceph_osd_request *req) 2013 { 2014 struct ceph_msg *request_msg = req->r_request; 2015 struct ceph_msg *reply_msg = req->r_reply; 2016 struct ceph_osd_req_op *op; 2017 2018 if (req->r_request->num_data_items || req->r_reply->num_data_items) 2019 return; 2020 2021 WARN_ON(request_msg->data_length || reply_msg->data_length); 2022 for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { 2023 switch (op->op) { 2024 /* request */ 2025 case CEPH_OSD_OP_WRITE: 2026 case CEPH_OSD_OP_WRITEFULL: 2027 WARN_ON(op->indata_len != op->extent.length); 2028 ceph_osdc_msg_data_add(request_msg, 2029 &op->extent.osd_data); 2030 break; 2031 case CEPH_OSD_OP_SETXATTR: 2032 case CEPH_OSD_OP_CMPXATTR: 2033 WARN_ON(op->indata_len != op->xattr.name_len + 2034 op->xattr.value_len); 2035 ceph_osdc_msg_data_add(request_msg, 2036 &op->xattr.osd_data); 2037 break; 2038 case CEPH_OSD_OP_NOTIFY_ACK: 2039 ceph_osdc_msg_data_add(request_msg, 2040 &op->notify_ack.request_data); 2041 break; 2042 case CEPH_OSD_OP_COPY_FROM2: 2043 ceph_osdc_msg_data_add(request_msg, 2044 &op->copy_from.osd_data); 2045 break; 2046 2047 /* reply */ 2048 case CEPH_OSD_OP_STAT: 2049 ceph_osdc_msg_data_add(reply_msg, 2050 &op->raw_data_in); 2051 break; 2052 case CEPH_OSD_OP_READ: 2053 case CEPH_OSD_OP_SPARSE_READ: 2054 ceph_osdc_msg_data_add(reply_msg, 2055 &op->extent.osd_data); 2056 break; 2057 case CEPH_OSD_OP_LIST_WATCHERS: 2058 ceph_osdc_msg_data_add(reply_msg, 2059 &op->list_watchers.response_data); 2060 break; 2061 2062 /* both */ 2063 case CEPH_OSD_OP_CALL: 2064 WARN_ON(op->indata_len != op->cls.class_len + 2065 op->cls.method_len + 2066 op->cls.indata_len); 2067 ceph_osdc_msg_data_add(request_msg, 2068 &op->cls.request_info); 2069 /* optional, can be NONE */ 2070 ceph_osdc_msg_data_add(request_msg, 2071 &op->cls.request_data); 2072 /* optional, can be NONE */ 2073 ceph_osdc_msg_data_add(reply_msg, 2074 &op->cls.response_data); 2075 break; 2076 case CEPH_OSD_OP_NOTIFY: 2077 ceph_osdc_msg_data_add(request_msg, 2078 &op->notify.request_data); 2079 ceph_osdc_msg_data_add(reply_msg, 2080 &op->notify.response_data); 2081 break; 2082 } 2083 } 2084 } 2085 2086 static void encode_pgid(void **p, const struct ceph_pg *pgid) 2087 { 2088 ceph_encode_8(p, 1); 2089 ceph_encode_64(p, pgid->pool); 2090 ceph_encode_32(p, pgid->seed); 2091 ceph_encode_32(p, -1); /* preferred */ 2092 } 2093 2094 static void encode_spgid(void **p, const struct ceph_spg *spgid) 2095 { 2096 ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1); 2097 encode_pgid(p, &spgid->pgid); 2098 ceph_encode_8(p, spgid->shard); 2099 } 2100 2101 static void encode_oloc(void **p, void *end, 2102 const struct ceph_object_locator *oloc) 2103 { 2104 ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc)); 2105 ceph_encode_64(p, oloc->pool); 2106 ceph_encode_32(p, -1); /* preferred */ 2107 ceph_encode_32(p, 0); /* key len */ 2108 if (oloc->pool_ns) 2109 ceph_encode_string(p, end, oloc->pool_ns->str, 2110 oloc->pool_ns->len); 2111 else 2112 ceph_encode_32(p, 0); 2113 } 2114 2115 static void encode_request_partial(struct ceph_osd_request *req, 2116 struct ceph_msg *msg) 2117 { 2118 void *p = msg->front.iov_base; 2119 void *const end = p + msg->front_alloc_len; 2120 u32 data_len = 0; 2121 int i; 2122 2123 if (req->r_flags & CEPH_OSD_FLAG_WRITE) { 2124 /* snapshots aren't writeable */ 2125 WARN_ON(req->r_snapid != CEPH_NOSNAP); 2126 } else { 2127 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec || 2128 req->r_data_offset || req->r_snapc); 2129 } 2130 2131 setup_request_data(req); 2132 2133 encode_spgid(&p, &req->r_t.spgid); /* actual spg */ 2134 ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */ 2135 ceph_encode_32(&p, req->r_osdc->osdmap->epoch); 2136 ceph_encode_32(&p, req->r_flags); 2137 2138 /* reqid */ 2139 ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid)); 2140 memset(p, 0, sizeof(struct ceph_osd_reqid)); 2141 p += sizeof(struct ceph_osd_reqid); 2142 2143 /* trace */ 2144 memset(p, 0, sizeof(struct ceph_blkin_trace_info)); 2145 p += sizeof(struct ceph_blkin_trace_info); 2146 2147 ceph_encode_32(&p, 0); /* client_inc, always 0 */ 2148 ceph_encode_timespec64(p, &req->r_mtime); 2149 p += sizeof(struct ceph_timespec); 2150 2151 encode_oloc(&p, end, &req->r_t.target_oloc); 2152 ceph_encode_string(&p, end, req->r_t.target_oid.name, 2153 req->r_t.target_oid.name_len); 2154 2155 /* ops, can imply data */ 2156 ceph_encode_16(&p, req->r_num_ops); 2157 for (i = 0; i < req->r_num_ops; i++) { 2158 data_len += osd_req_encode_op(p, &req->r_ops[i]); 2159 p += sizeof(struct ceph_osd_op); 2160 } 2161 2162 ceph_encode_64(&p, req->r_snapid); /* snapid */ 2163 if (req->r_snapc) { 2164 ceph_encode_64(&p, req->r_snapc->seq); 2165 ceph_encode_32(&p, req->r_snapc->num_snaps); 2166 for (i = 0; i < req->r_snapc->num_snaps; i++) 2167 ceph_encode_64(&p, req->r_snapc->snaps[i]); 2168 } else { 2169 ceph_encode_64(&p, 0); /* snap_seq */ 2170 ceph_encode_32(&p, 0); /* snaps len */ 2171 } 2172 2173 ceph_encode_32(&p, req->r_attempts); /* retry_attempt */ 2174 BUG_ON(p > end - 8); /* space for features */ 2175 2176 msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */ 2177 /* front_len is finalized in encode_request_finish() */ 2178 msg->front.iov_len = p - msg->front.iov_base; 2179 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2180 msg->hdr.data_len = cpu_to_le32(data_len); 2181 /* 2182 * The header "data_off" is a hint to the receiver allowing it 2183 * to align received data into its buffers such that there's no 2184 * need to re-copy it before writing it to disk (direct I/O). 2185 */ 2186 msg->hdr.data_off = cpu_to_le16(req->r_data_offset); 2187 2188 dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg, 2189 req->r_t.target_oid.name, req->r_t.target_oid.name_len); 2190 } 2191 2192 static void encode_request_finish(struct ceph_msg *msg) 2193 { 2194 void *p = msg->front.iov_base; 2195 void *const partial_end = p + msg->front.iov_len; 2196 void *const end = p + msg->front_alloc_len; 2197 2198 if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) { 2199 /* luminous OSD -- encode features and be done */ 2200 p = partial_end; 2201 ceph_encode_64(&p, msg->con->peer_features); 2202 } else { 2203 struct { 2204 char spgid[CEPH_ENCODING_START_BLK_LEN + 2205 CEPH_PGID_ENCODING_LEN + 1]; 2206 __le32 hash; 2207 __le32 epoch; 2208 __le32 flags; 2209 char reqid[CEPH_ENCODING_START_BLK_LEN + 2210 sizeof(struct ceph_osd_reqid)]; 2211 char trace[sizeof(struct ceph_blkin_trace_info)]; 2212 __le32 client_inc; 2213 struct ceph_timespec mtime; 2214 } __packed head; 2215 struct ceph_pg pgid; 2216 void *oloc, *oid, *tail; 2217 int oloc_len, oid_len, tail_len; 2218 int len; 2219 2220 /* 2221 * Pre-luminous OSD -- reencode v8 into v4 using @head 2222 * as a temporary buffer. Encode the raw PG; the rest 2223 * is just a matter of moving oloc, oid and tail blobs 2224 * around. 2225 */ 2226 memcpy(&head, p, sizeof(head)); 2227 p += sizeof(head); 2228 2229 oloc = p; 2230 p += CEPH_ENCODING_START_BLK_LEN; 2231 pgid.pool = ceph_decode_64(&p); 2232 p += 4 + 4; /* preferred, key len */ 2233 len = ceph_decode_32(&p); 2234 p += len; /* nspace */ 2235 oloc_len = p - oloc; 2236 2237 oid = p; 2238 len = ceph_decode_32(&p); 2239 p += len; 2240 oid_len = p - oid; 2241 2242 tail = p; 2243 tail_len = partial_end - p; 2244 2245 p = msg->front.iov_base; 2246 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc)); 2247 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch)); 2248 ceph_encode_copy(&p, &head.flags, sizeof(head.flags)); 2249 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime)); 2250 2251 /* reassert_version */ 2252 memset(p, 0, sizeof(struct ceph_eversion)); 2253 p += sizeof(struct ceph_eversion); 2254 2255 BUG_ON(p >= oloc); 2256 memmove(p, oloc, oloc_len); 2257 p += oloc_len; 2258 2259 pgid.seed = le32_to_cpu(head.hash); 2260 encode_pgid(&p, &pgid); /* raw pg */ 2261 2262 BUG_ON(p >= oid); 2263 memmove(p, oid, oid_len); 2264 p += oid_len; 2265 2266 /* tail -- ops, snapid, snapc, retry_attempt */ 2267 BUG_ON(p >= tail); 2268 memmove(p, tail, tail_len); 2269 p += tail_len; 2270 2271 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */ 2272 } 2273 2274 BUG_ON(p > end); 2275 msg->front.iov_len = p - msg->front.iov_base; 2276 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2277 2278 dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg, 2279 le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len), 2280 le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len), 2281 le16_to_cpu(msg->hdr.version)); 2282 } 2283 2284 /* 2285 * @req has to be assigned a tid and registered. 2286 */ 2287 static void send_request(struct ceph_osd_request *req) 2288 { 2289 struct ceph_osd *osd = req->r_osd; 2290 2291 verify_osd_locked(osd); 2292 WARN_ON(osd->o_osd != req->r_t.osd); 2293 2294 /* backoff? */ 2295 if (should_plug_request(req)) 2296 return; 2297 2298 /* 2299 * We may have a previously queued request message hanging 2300 * around. Cancel it to avoid corrupting the msgr. 2301 */ 2302 if (req->r_sent) 2303 ceph_msg_revoke(req->r_request); 2304 2305 req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR; 2306 if (req->r_attempts) 2307 req->r_flags |= CEPH_OSD_FLAG_RETRY; 2308 else 2309 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY); 2310 2311 encode_request_partial(req, req->r_request); 2312 2313 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n", 2314 __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed, 2315 req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed, 2316 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags, 2317 req->r_attempts); 2318 2319 req->r_t.paused = false; 2320 req->r_stamp = jiffies; 2321 req->r_attempts++; 2322 2323 req->r_sent = osd->o_incarnation; 2324 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 2325 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request)); 2326 } 2327 2328 static void maybe_request_map(struct ceph_osd_client *osdc) 2329 { 2330 bool continuous = false; 2331 2332 verify_osdc_locked(osdc); 2333 WARN_ON(!osdc->osdmap->epoch); 2334 2335 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2336 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) || 2337 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2338 dout("%s osdc %p continuous\n", __func__, osdc); 2339 continuous = true; 2340 } else { 2341 dout("%s osdc %p onetime\n", __func__, osdc); 2342 } 2343 2344 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP, 2345 osdc->osdmap->epoch + 1, continuous)) 2346 ceph_monc_renew_subs(&osdc->client->monc); 2347 } 2348 2349 static void complete_request(struct ceph_osd_request *req, int err); 2350 static void send_map_check(struct ceph_osd_request *req); 2351 2352 static void __submit_request(struct ceph_osd_request *req, bool wrlocked) 2353 { 2354 struct ceph_osd_client *osdc = req->r_osdc; 2355 struct ceph_osd *osd; 2356 enum calc_target_result ct_res; 2357 int err = 0; 2358 bool need_send = false; 2359 bool promoted = false; 2360 2361 WARN_ON(req->r_tid); 2362 dout("%s req %p wrlocked %d\n", __func__, req, wrlocked); 2363 2364 again: 2365 ct_res = calc_target(osdc, &req->r_t, false); 2366 if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked) 2367 goto promote; 2368 2369 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked); 2370 if (IS_ERR(osd)) { 2371 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked); 2372 goto promote; 2373 } 2374 2375 if (osdc->abort_err) { 2376 dout("req %p abort_err %d\n", req, osdc->abort_err); 2377 err = osdc->abort_err; 2378 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) { 2379 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch, 2380 osdc->epoch_barrier); 2381 req->r_t.paused = true; 2382 maybe_request_map(osdc); 2383 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2384 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2385 dout("req %p pausewr\n", req); 2386 req->r_t.paused = true; 2387 maybe_request_map(osdc); 2388 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && 2389 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 2390 dout("req %p pauserd\n", req); 2391 req->r_t.paused = true; 2392 maybe_request_map(osdc); 2393 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2394 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | 2395 CEPH_OSD_FLAG_FULL_FORCE)) && 2396 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2397 pool_full(osdc, req->r_t.base_oloc.pool))) { 2398 dout("req %p full/pool_full\n", req); 2399 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { 2400 err = -ENOSPC; 2401 } else { 2402 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) 2403 pr_warn_ratelimited("cluster is full (osdmap FULL)\n"); 2404 else 2405 pr_warn_ratelimited("pool %lld is full or reached quota\n", 2406 req->r_t.base_oloc.pool); 2407 req->r_t.paused = true; 2408 maybe_request_map(osdc); 2409 } 2410 } else if (!osd_homeless(osd)) { 2411 need_send = true; 2412 } else { 2413 maybe_request_map(osdc); 2414 } 2415 2416 mutex_lock(&osd->lock); 2417 /* 2418 * Assign the tid atomically with send_request() to protect 2419 * multiple writes to the same object from racing with each 2420 * other, resulting in out of order ops on the OSDs. 2421 */ 2422 req->r_tid = atomic64_inc_return(&osdc->last_tid); 2423 link_request(osd, req); 2424 if (need_send) 2425 send_request(req); 2426 else if (err) 2427 complete_request(req, err); 2428 mutex_unlock(&osd->lock); 2429 2430 if (!err && ct_res == CALC_TARGET_POOL_DNE) 2431 send_map_check(req); 2432 2433 if (promoted) 2434 downgrade_write(&osdc->lock); 2435 return; 2436 2437 promote: 2438 up_read(&osdc->lock); 2439 down_write(&osdc->lock); 2440 wrlocked = true; 2441 promoted = true; 2442 goto again; 2443 } 2444 2445 static void account_request(struct ceph_osd_request *req) 2446 { 2447 WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK)); 2448 WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE))); 2449 2450 req->r_flags |= CEPH_OSD_FLAG_ONDISK; 2451 atomic_inc(&req->r_osdc->num_requests); 2452 2453 req->r_start_stamp = jiffies; 2454 req->r_start_latency = ktime_get(); 2455 } 2456 2457 static void submit_request(struct ceph_osd_request *req, bool wrlocked) 2458 { 2459 ceph_osdc_get_request(req); 2460 account_request(req); 2461 __submit_request(req, wrlocked); 2462 } 2463 2464 static void finish_request(struct ceph_osd_request *req) 2465 { 2466 struct ceph_osd_client *osdc = req->r_osdc; 2467 2468 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid)); 2469 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2470 2471 req->r_end_latency = ktime_get(); 2472 2473 if (req->r_osd) { 2474 ceph_init_sparse_read(&req->r_osd->o_sparse_read); 2475 unlink_request(req->r_osd, req); 2476 } 2477 atomic_dec(&osdc->num_requests); 2478 2479 /* 2480 * If an OSD has failed or returned and a request has been sent 2481 * twice, it's possible to get a reply and end up here while the 2482 * request message is queued for delivery. We will ignore the 2483 * reply, so not a big deal, but better to try and catch it. 2484 */ 2485 ceph_msg_revoke(req->r_request); 2486 ceph_msg_revoke_incoming(req->r_reply); 2487 } 2488 2489 static void __complete_request(struct ceph_osd_request *req) 2490 { 2491 dout("%s req %p tid %llu cb %ps result %d\n", __func__, req, 2492 req->r_tid, req->r_callback, req->r_result); 2493 2494 if (req->r_callback) 2495 req->r_callback(req); 2496 complete_all(&req->r_completion); 2497 ceph_osdc_put_request(req); 2498 } 2499 2500 static void complete_request_workfn(struct work_struct *work) 2501 { 2502 struct ceph_osd_request *req = 2503 container_of(work, struct ceph_osd_request, r_complete_work); 2504 2505 __complete_request(req); 2506 } 2507 2508 /* 2509 * This is open-coded in handle_reply(). 2510 */ 2511 static void complete_request(struct ceph_osd_request *req, int err) 2512 { 2513 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2514 2515 req->r_result = err; 2516 finish_request(req); 2517 2518 INIT_WORK(&req->r_complete_work, complete_request_workfn); 2519 queue_work(req->r_osdc->completion_wq, &req->r_complete_work); 2520 } 2521 2522 static void cancel_map_check(struct ceph_osd_request *req) 2523 { 2524 struct ceph_osd_client *osdc = req->r_osdc; 2525 struct ceph_osd_request *lookup_req; 2526 2527 verify_osdc_wrlocked(osdc); 2528 2529 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2530 if (!lookup_req) 2531 return; 2532 2533 WARN_ON(lookup_req != req); 2534 erase_request_mc(&osdc->map_checks, req); 2535 ceph_osdc_put_request(req); 2536 } 2537 2538 static void cancel_request(struct ceph_osd_request *req) 2539 { 2540 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2541 2542 cancel_map_check(req); 2543 finish_request(req); 2544 complete_all(&req->r_completion); 2545 ceph_osdc_put_request(req); 2546 } 2547 2548 static void abort_request(struct ceph_osd_request *req, int err) 2549 { 2550 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2551 2552 cancel_map_check(req); 2553 complete_request(req, err); 2554 } 2555 2556 static int abort_fn(struct ceph_osd_request *req, void *arg) 2557 { 2558 int err = *(int *)arg; 2559 2560 abort_request(req, err); 2561 return 0; /* continue iteration */ 2562 } 2563 2564 /* 2565 * Abort all in-flight requests with @err and arrange for all future 2566 * requests to be failed immediately. 2567 */ 2568 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err) 2569 { 2570 dout("%s osdc %p err %d\n", __func__, osdc, err); 2571 down_write(&osdc->lock); 2572 for_each_request(osdc, abort_fn, &err); 2573 osdc->abort_err = err; 2574 up_write(&osdc->lock); 2575 } 2576 EXPORT_SYMBOL(ceph_osdc_abort_requests); 2577 2578 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc) 2579 { 2580 down_write(&osdc->lock); 2581 osdc->abort_err = 0; 2582 up_write(&osdc->lock); 2583 } 2584 EXPORT_SYMBOL(ceph_osdc_clear_abort_err); 2585 2586 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2587 { 2588 if (likely(eb > osdc->epoch_barrier)) { 2589 dout("updating epoch_barrier from %u to %u\n", 2590 osdc->epoch_barrier, eb); 2591 osdc->epoch_barrier = eb; 2592 /* Request map if we're not to the barrier yet */ 2593 if (eb > osdc->osdmap->epoch) 2594 maybe_request_map(osdc); 2595 } 2596 } 2597 2598 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2599 { 2600 down_read(&osdc->lock); 2601 if (unlikely(eb > osdc->epoch_barrier)) { 2602 up_read(&osdc->lock); 2603 down_write(&osdc->lock); 2604 update_epoch_barrier(osdc, eb); 2605 up_write(&osdc->lock); 2606 } else { 2607 up_read(&osdc->lock); 2608 } 2609 } 2610 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier); 2611 2612 /* 2613 * We can end up releasing caps as a result of abort_request(). 2614 * In that case, we probably want to ensure that the cap release message 2615 * has an updated epoch barrier in it, so set the epoch barrier prior to 2616 * aborting the first request. 2617 */ 2618 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg) 2619 { 2620 struct ceph_osd_client *osdc = req->r_osdc; 2621 bool *victims = arg; 2622 2623 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2624 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2625 pool_full(osdc, req->r_t.base_oloc.pool))) { 2626 if (!*victims) { 2627 update_epoch_barrier(osdc, osdc->osdmap->epoch); 2628 *victims = true; 2629 } 2630 abort_request(req, -ENOSPC); 2631 } 2632 2633 return 0; /* continue iteration */ 2634 } 2635 2636 /* 2637 * Drop all pending requests that are stalled waiting on a full condition to 2638 * clear, and complete them with ENOSPC as the return code. Set the 2639 * osdc->epoch_barrier to the latest map epoch that we've seen if any were 2640 * cancelled. 2641 */ 2642 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc) 2643 { 2644 bool victims = false; 2645 2646 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) && 2647 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) 2648 for_each_request(osdc, abort_on_full_fn, &victims); 2649 } 2650 2651 static void check_pool_dne(struct ceph_osd_request *req) 2652 { 2653 struct ceph_osd_client *osdc = req->r_osdc; 2654 struct ceph_osdmap *map = osdc->osdmap; 2655 2656 verify_osdc_wrlocked(osdc); 2657 WARN_ON(!map->epoch); 2658 2659 if (req->r_attempts) { 2660 /* 2661 * We sent a request earlier, which means that 2662 * previously the pool existed, and now it does not 2663 * (i.e., it was deleted). 2664 */ 2665 req->r_map_dne_bound = map->epoch; 2666 dout("%s req %p tid %llu pool disappeared\n", __func__, req, 2667 req->r_tid); 2668 } else { 2669 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__, 2670 req, req->r_tid, req->r_map_dne_bound, map->epoch); 2671 } 2672 2673 if (req->r_map_dne_bound) { 2674 if (map->epoch >= req->r_map_dne_bound) { 2675 /* we had a new enough map */ 2676 pr_info_ratelimited("tid %llu pool does not exist\n", 2677 req->r_tid); 2678 complete_request(req, -ENOENT); 2679 } 2680 } else { 2681 send_map_check(req); 2682 } 2683 } 2684 2685 static void map_check_cb(struct ceph_mon_generic_request *greq) 2686 { 2687 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 2688 struct ceph_osd_request *req; 2689 u64 tid = greq->private_data; 2690 2691 WARN_ON(greq->result || !greq->u.newest); 2692 2693 down_write(&osdc->lock); 2694 req = lookup_request_mc(&osdc->map_checks, tid); 2695 if (!req) { 2696 dout("%s tid %llu dne\n", __func__, tid); 2697 goto out_unlock; 2698 } 2699 2700 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__, 2701 req, req->r_tid, req->r_map_dne_bound, greq->u.newest); 2702 if (!req->r_map_dne_bound) 2703 req->r_map_dne_bound = greq->u.newest; 2704 erase_request_mc(&osdc->map_checks, req); 2705 check_pool_dne(req); 2706 2707 ceph_osdc_put_request(req); 2708 out_unlock: 2709 up_write(&osdc->lock); 2710 } 2711 2712 static void send_map_check(struct ceph_osd_request *req) 2713 { 2714 struct ceph_osd_client *osdc = req->r_osdc; 2715 struct ceph_osd_request *lookup_req; 2716 int ret; 2717 2718 verify_osdc_wrlocked(osdc); 2719 2720 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2721 if (lookup_req) { 2722 WARN_ON(lookup_req != req); 2723 return; 2724 } 2725 2726 ceph_osdc_get_request(req); 2727 insert_request_mc(&osdc->map_checks, req); 2728 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 2729 map_check_cb, req->r_tid); 2730 WARN_ON(ret); 2731 } 2732 2733 /* 2734 * lingering requests, watch/notify v2 infrastructure 2735 */ 2736 static void linger_release(struct kref *kref) 2737 { 2738 struct ceph_osd_linger_request *lreq = 2739 container_of(kref, struct ceph_osd_linger_request, kref); 2740 2741 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq, 2742 lreq->reg_req, lreq->ping_req); 2743 WARN_ON(!RB_EMPTY_NODE(&lreq->node)); 2744 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node)); 2745 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node)); 2746 WARN_ON(!list_empty(&lreq->scan_item)); 2747 WARN_ON(!list_empty(&lreq->pending_lworks)); 2748 WARN_ON(lreq->osd); 2749 2750 if (lreq->request_pl) 2751 ceph_pagelist_release(lreq->request_pl); 2752 if (lreq->notify_id_pages) 2753 ceph_release_page_vector(lreq->notify_id_pages, 1); 2754 2755 ceph_osdc_put_request(lreq->reg_req); 2756 ceph_osdc_put_request(lreq->ping_req); 2757 target_destroy(&lreq->t); 2758 kfree(lreq); 2759 } 2760 2761 static void linger_put(struct ceph_osd_linger_request *lreq) 2762 { 2763 if (lreq) 2764 kref_put(&lreq->kref, linger_release); 2765 } 2766 2767 static struct ceph_osd_linger_request * 2768 linger_get(struct ceph_osd_linger_request *lreq) 2769 { 2770 kref_get(&lreq->kref); 2771 return lreq; 2772 } 2773 2774 static struct ceph_osd_linger_request * 2775 linger_alloc(struct ceph_osd_client *osdc) 2776 { 2777 struct ceph_osd_linger_request *lreq; 2778 2779 lreq = kzalloc(sizeof(*lreq), GFP_NOIO); 2780 if (!lreq) 2781 return NULL; 2782 2783 kref_init(&lreq->kref); 2784 mutex_init(&lreq->lock); 2785 RB_CLEAR_NODE(&lreq->node); 2786 RB_CLEAR_NODE(&lreq->osdc_node); 2787 RB_CLEAR_NODE(&lreq->mc_node); 2788 INIT_LIST_HEAD(&lreq->scan_item); 2789 INIT_LIST_HEAD(&lreq->pending_lworks); 2790 init_completion(&lreq->reg_commit_wait); 2791 init_completion(&lreq->notify_finish_wait); 2792 2793 lreq->osdc = osdc; 2794 target_init(&lreq->t); 2795 2796 dout("%s lreq %p\n", __func__, lreq); 2797 return lreq; 2798 } 2799 2800 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node) 2801 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node) 2802 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node) 2803 2804 /* 2805 * Create linger request <-> OSD session relation. 2806 * 2807 * @lreq has to be registered, @osd may be homeless. 2808 */ 2809 static void link_linger(struct ceph_osd *osd, 2810 struct ceph_osd_linger_request *lreq) 2811 { 2812 verify_osd_locked(osd); 2813 WARN_ON(!lreq->linger_id || lreq->osd); 2814 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2815 osd->o_osd, lreq, lreq->linger_id); 2816 2817 if (!osd_homeless(osd)) 2818 __remove_osd_from_lru(osd); 2819 else 2820 atomic_inc(&osd->o_osdc->num_homeless); 2821 2822 get_osd(osd); 2823 insert_linger(&osd->o_linger_requests, lreq); 2824 lreq->osd = osd; 2825 } 2826 2827 static void unlink_linger(struct ceph_osd *osd, 2828 struct ceph_osd_linger_request *lreq) 2829 { 2830 verify_osd_locked(osd); 2831 WARN_ON(lreq->osd != osd); 2832 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2833 osd->o_osd, lreq, lreq->linger_id); 2834 2835 lreq->osd = NULL; 2836 erase_linger(&osd->o_linger_requests, lreq); 2837 put_osd(osd); 2838 2839 if (!osd_homeless(osd)) 2840 maybe_move_osd_to_lru(osd); 2841 else 2842 atomic_dec(&osd->o_osdc->num_homeless); 2843 } 2844 2845 static bool __linger_registered(struct ceph_osd_linger_request *lreq) 2846 { 2847 verify_osdc_locked(lreq->osdc); 2848 2849 return !RB_EMPTY_NODE(&lreq->osdc_node); 2850 } 2851 2852 static bool linger_registered(struct ceph_osd_linger_request *lreq) 2853 { 2854 struct ceph_osd_client *osdc = lreq->osdc; 2855 bool registered; 2856 2857 down_read(&osdc->lock); 2858 registered = __linger_registered(lreq); 2859 up_read(&osdc->lock); 2860 2861 return registered; 2862 } 2863 2864 static void linger_register(struct ceph_osd_linger_request *lreq) 2865 { 2866 struct ceph_osd_client *osdc = lreq->osdc; 2867 2868 verify_osdc_wrlocked(osdc); 2869 WARN_ON(lreq->linger_id); 2870 2871 linger_get(lreq); 2872 lreq->linger_id = ++osdc->last_linger_id; 2873 insert_linger_osdc(&osdc->linger_requests, lreq); 2874 } 2875 2876 static void linger_unregister(struct ceph_osd_linger_request *lreq) 2877 { 2878 struct ceph_osd_client *osdc = lreq->osdc; 2879 2880 verify_osdc_wrlocked(osdc); 2881 2882 erase_linger_osdc(&osdc->linger_requests, lreq); 2883 linger_put(lreq); 2884 } 2885 2886 static void cancel_linger_request(struct ceph_osd_request *req) 2887 { 2888 struct ceph_osd_linger_request *lreq = req->r_priv; 2889 2890 WARN_ON(!req->r_linger); 2891 cancel_request(req); 2892 linger_put(lreq); 2893 } 2894 2895 struct linger_work { 2896 struct work_struct work; 2897 struct ceph_osd_linger_request *lreq; 2898 struct list_head pending_item; 2899 unsigned long queued_stamp; 2900 2901 union { 2902 struct { 2903 u64 notify_id; 2904 u64 notifier_id; 2905 void *payload; /* points into @msg front */ 2906 size_t payload_len; 2907 2908 struct ceph_msg *msg; /* for ceph_msg_put() */ 2909 } notify; 2910 struct { 2911 int err; 2912 } error; 2913 }; 2914 }; 2915 2916 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq, 2917 work_func_t workfn) 2918 { 2919 struct linger_work *lwork; 2920 2921 lwork = kzalloc(sizeof(*lwork), GFP_NOIO); 2922 if (!lwork) 2923 return NULL; 2924 2925 INIT_WORK(&lwork->work, workfn); 2926 INIT_LIST_HEAD(&lwork->pending_item); 2927 lwork->lreq = linger_get(lreq); 2928 2929 return lwork; 2930 } 2931 2932 static void lwork_free(struct linger_work *lwork) 2933 { 2934 struct ceph_osd_linger_request *lreq = lwork->lreq; 2935 2936 mutex_lock(&lreq->lock); 2937 list_del(&lwork->pending_item); 2938 mutex_unlock(&lreq->lock); 2939 2940 linger_put(lreq); 2941 kfree(lwork); 2942 } 2943 2944 static void lwork_queue(struct linger_work *lwork) 2945 { 2946 struct ceph_osd_linger_request *lreq = lwork->lreq; 2947 struct ceph_osd_client *osdc = lreq->osdc; 2948 2949 verify_lreq_locked(lreq); 2950 WARN_ON(!list_empty(&lwork->pending_item)); 2951 2952 lwork->queued_stamp = jiffies; 2953 list_add_tail(&lwork->pending_item, &lreq->pending_lworks); 2954 queue_work(osdc->notify_wq, &lwork->work); 2955 } 2956 2957 static void do_watch_notify(struct work_struct *w) 2958 { 2959 struct linger_work *lwork = container_of(w, struct linger_work, work); 2960 struct ceph_osd_linger_request *lreq = lwork->lreq; 2961 2962 if (!linger_registered(lreq)) { 2963 dout("%s lreq %p not registered\n", __func__, lreq); 2964 goto out; 2965 } 2966 2967 WARN_ON(!lreq->is_watch); 2968 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n", 2969 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id, 2970 lwork->notify.payload_len); 2971 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id, 2972 lwork->notify.notifier_id, lwork->notify.payload, 2973 lwork->notify.payload_len); 2974 2975 out: 2976 ceph_msg_put(lwork->notify.msg); 2977 lwork_free(lwork); 2978 } 2979 2980 static void do_watch_error(struct work_struct *w) 2981 { 2982 struct linger_work *lwork = container_of(w, struct linger_work, work); 2983 struct ceph_osd_linger_request *lreq = lwork->lreq; 2984 2985 if (!linger_registered(lreq)) { 2986 dout("%s lreq %p not registered\n", __func__, lreq); 2987 goto out; 2988 } 2989 2990 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err); 2991 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err); 2992 2993 out: 2994 lwork_free(lwork); 2995 } 2996 2997 static void queue_watch_error(struct ceph_osd_linger_request *lreq) 2998 { 2999 struct linger_work *lwork; 3000 3001 lwork = lwork_alloc(lreq, do_watch_error); 3002 if (!lwork) { 3003 pr_err("failed to allocate error-lwork\n"); 3004 return; 3005 } 3006 3007 lwork->error.err = lreq->last_error; 3008 lwork_queue(lwork); 3009 } 3010 3011 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq, 3012 int result) 3013 { 3014 if (!completion_done(&lreq->reg_commit_wait)) { 3015 lreq->reg_commit_error = (result <= 0 ? result : 0); 3016 complete_all(&lreq->reg_commit_wait); 3017 } 3018 } 3019 3020 static void linger_commit_cb(struct ceph_osd_request *req) 3021 { 3022 struct ceph_osd_linger_request *lreq = req->r_priv; 3023 3024 mutex_lock(&lreq->lock); 3025 if (req != lreq->reg_req) { 3026 dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n", 3027 __func__, lreq, lreq->linger_id, req, lreq->reg_req); 3028 goto out; 3029 } 3030 3031 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq, 3032 lreq->linger_id, req->r_result); 3033 linger_reg_commit_complete(lreq, req->r_result); 3034 lreq->committed = true; 3035 3036 if (!lreq->is_watch) { 3037 struct ceph_osd_data *osd_data = 3038 osd_req_op_data(req, 0, notify, response_data); 3039 void *p = page_address(osd_data->pages[0]); 3040 3041 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY || 3042 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 3043 3044 /* make note of the notify_id */ 3045 if (req->r_ops[0].outdata_len >= sizeof(u64)) { 3046 lreq->notify_id = ceph_decode_64(&p); 3047 dout("lreq %p notify_id %llu\n", lreq, 3048 lreq->notify_id); 3049 } else { 3050 dout("lreq %p no notify_id\n", lreq); 3051 } 3052 } 3053 3054 out: 3055 mutex_unlock(&lreq->lock); 3056 linger_put(lreq); 3057 } 3058 3059 static int normalize_watch_error(int err) 3060 { 3061 /* 3062 * Translate ENOENT -> ENOTCONN so that a delete->disconnection 3063 * notification and a failure to reconnect because we raced with 3064 * the delete appear the same to the user. 3065 */ 3066 if (err == -ENOENT) 3067 err = -ENOTCONN; 3068 3069 return err; 3070 } 3071 3072 static void linger_reconnect_cb(struct ceph_osd_request *req) 3073 { 3074 struct ceph_osd_linger_request *lreq = req->r_priv; 3075 3076 mutex_lock(&lreq->lock); 3077 if (req != lreq->reg_req) { 3078 dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n", 3079 __func__, lreq, lreq->linger_id, req, lreq->reg_req); 3080 goto out; 3081 } 3082 3083 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__, 3084 lreq, lreq->linger_id, req->r_result, lreq->last_error); 3085 if (req->r_result < 0) { 3086 if (!lreq->last_error) { 3087 lreq->last_error = normalize_watch_error(req->r_result); 3088 queue_watch_error(lreq); 3089 } 3090 } 3091 3092 out: 3093 mutex_unlock(&lreq->lock); 3094 linger_put(lreq); 3095 } 3096 3097 static void send_linger(struct ceph_osd_linger_request *lreq) 3098 { 3099 struct ceph_osd_client *osdc = lreq->osdc; 3100 struct ceph_osd_request *req; 3101 int ret; 3102 3103 verify_osdc_wrlocked(osdc); 3104 mutex_lock(&lreq->lock); 3105 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3106 3107 if (lreq->reg_req) { 3108 if (lreq->reg_req->r_osd) 3109 cancel_linger_request(lreq->reg_req); 3110 ceph_osdc_put_request(lreq->reg_req); 3111 } 3112 3113 req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO); 3114 BUG_ON(!req); 3115 3116 target_copy(&req->r_t, &lreq->t); 3117 req->r_mtime = lreq->mtime; 3118 3119 if (lreq->is_watch && lreq->committed) { 3120 osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_RECONNECT, 3121 lreq->linger_id, ++lreq->register_gen); 3122 dout("lreq %p reconnect register_gen %u\n", lreq, 3123 req->r_ops[0].watch.gen); 3124 req->r_callback = linger_reconnect_cb; 3125 } else { 3126 if (lreq->is_watch) { 3127 osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_WATCH, 3128 lreq->linger_id, 0); 3129 } else { 3130 lreq->notify_id = 0; 3131 3132 refcount_inc(&lreq->request_pl->refcnt); 3133 osd_req_op_notify_init(req, 0, lreq->linger_id, 3134 lreq->request_pl); 3135 ceph_osd_data_pages_init( 3136 osd_req_op_data(req, 0, notify, response_data), 3137 lreq->notify_id_pages, PAGE_SIZE, 0, false, false); 3138 } 3139 dout("lreq %p register\n", lreq); 3140 req->r_callback = linger_commit_cb; 3141 } 3142 3143 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 3144 BUG_ON(ret); 3145 3146 req->r_priv = linger_get(lreq); 3147 req->r_linger = true; 3148 lreq->reg_req = req; 3149 mutex_unlock(&lreq->lock); 3150 3151 submit_request(req, true); 3152 } 3153 3154 static void linger_ping_cb(struct ceph_osd_request *req) 3155 { 3156 struct ceph_osd_linger_request *lreq = req->r_priv; 3157 3158 mutex_lock(&lreq->lock); 3159 if (req != lreq->ping_req) { 3160 dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n", 3161 __func__, lreq, lreq->linger_id, req, lreq->ping_req); 3162 goto out; 3163 } 3164 3165 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n", 3166 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent, 3167 lreq->last_error); 3168 if (lreq->register_gen == req->r_ops[0].watch.gen) { 3169 if (!req->r_result) { 3170 lreq->watch_valid_thru = lreq->ping_sent; 3171 } else if (!lreq->last_error) { 3172 lreq->last_error = normalize_watch_error(req->r_result); 3173 queue_watch_error(lreq); 3174 } 3175 } else { 3176 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq, 3177 lreq->register_gen, req->r_ops[0].watch.gen); 3178 } 3179 3180 out: 3181 mutex_unlock(&lreq->lock); 3182 linger_put(lreq); 3183 } 3184 3185 static void send_linger_ping(struct ceph_osd_linger_request *lreq) 3186 { 3187 struct ceph_osd_client *osdc = lreq->osdc; 3188 struct ceph_osd_request *req; 3189 int ret; 3190 3191 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 3192 dout("%s PAUSERD\n", __func__); 3193 return; 3194 } 3195 3196 lreq->ping_sent = jiffies; 3197 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n", 3198 __func__, lreq, lreq->linger_id, lreq->ping_sent, 3199 lreq->register_gen); 3200 3201 if (lreq->ping_req) { 3202 if (lreq->ping_req->r_osd) 3203 cancel_linger_request(lreq->ping_req); 3204 ceph_osdc_put_request(lreq->ping_req); 3205 } 3206 3207 req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO); 3208 BUG_ON(!req); 3209 3210 target_copy(&req->r_t, &lreq->t); 3211 osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_PING, lreq->linger_id, 3212 lreq->register_gen); 3213 req->r_callback = linger_ping_cb; 3214 3215 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 3216 BUG_ON(ret); 3217 3218 req->r_priv = linger_get(lreq); 3219 req->r_linger = true; 3220 lreq->ping_req = req; 3221 3222 ceph_osdc_get_request(req); 3223 account_request(req); 3224 req->r_tid = atomic64_inc_return(&osdc->last_tid); 3225 link_request(lreq->osd, req); 3226 send_request(req); 3227 } 3228 3229 static void linger_submit(struct ceph_osd_linger_request *lreq) 3230 { 3231 struct ceph_osd_client *osdc = lreq->osdc; 3232 struct ceph_osd *osd; 3233 3234 down_write(&osdc->lock); 3235 linger_register(lreq); 3236 3237 calc_target(osdc, &lreq->t, false); 3238 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3239 link_linger(osd, lreq); 3240 3241 send_linger(lreq); 3242 up_write(&osdc->lock); 3243 } 3244 3245 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq) 3246 { 3247 struct ceph_osd_client *osdc = lreq->osdc; 3248 struct ceph_osd_linger_request *lookup_lreq; 3249 3250 verify_osdc_wrlocked(osdc); 3251 3252 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3253 lreq->linger_id); 3254 if (!lookup_lreq) 3255 return; 3256 3257 WARN_ON(lookup_lreq != lreq); 3258 erase_linger_mc(&osdc->linger_map_checks, lreq); 3259 linger_put(lreq); 3260 } 3261 3262 /* 3263 * @lreq has to be both registered and linked. 3264 */ 3265 static void __linger_cancel(struct ceph_osd_linger_request *lreq) 3266 { 3267 if (lreq->ping_req && lreq->ping_req->r_osd) 3268 cancel_linger_request(lreq->ping_req); 3269 if (lreq->reg_req && lreq->reg_req->r_osd) 3270 cancel_linger_request(lreq->reg_req); 3271 cancel_linger_map_check(lreq); 3272 unlink_linger(lreq->osd, lreq); 3273 linger_unregister(lreq); 3274 } 3275 3276 static void linger_cancel(struct ceph_osd_linger_request *lreq) 3277 { 3278 struct ceph_osd_client *osdc = lreq->osdc; 3279 3280 down_write(&osdc->lock); 3281 if (__linger_registered(lreq)) 3282 __linger_cancel(lreq); 3283 up_write(&osdc->lock); 3284 } 3285 3286 static void send_linger_map_check(struct ceph_osd_linger_request *lreq); 3287 3288 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq) 3289 { 3290 struct ceph_osd_client *osdc = lreq->osdc; 3291 struct ceph_osdmap *map = osdc->osdmap; 3292 3293 verify_osdc_wrlocked(osdc); 3294 WARN_ON(!map->epoch); 3295 3296 if (lreq->register_gen) { 3297 lreq->map_dne_bound = map->epoch; 3298 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__, 3299 lreq, lreq->linger_id); 3300 } else { 3301 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n", 3302 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3303 map->epoch); 3304 } 3305 3306 if (lreq->map_dne_bound) { 3307 if (map->epoch >= lreq->map_dne_bound) { 3308 /* we had a new enough map */ 3309 pr_info("linger_id %llu pool does not exist\n", 3310 lreq->linger_id); 3311 linger_reg_commit_complete(lreq, -ENOENT); 3312 __linger_cancel(lreq); 3313 } 3314 } else { 3315 send_linger_map_check(lreq); 3316 } 3317 } 3318 3319 static void linger_map_check_cb(struct ceph_mon_generic_request *greq) 3320 { 3321 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 3322 struct ceph_osd_linger_request *lreq; 3323 u64 linger_id = greq->private_data; 3324 3325 WARN_ON(greq->result || !greq->u.newest); 3326 3327 down_write(&osdc->lock); 3328 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id); 3329 if (!lreq) { 3330 dout("%s linger_id %llu dne\n", __func__, linger_id); 3331 goto out_unlock; 3332 } 3333 3334 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n", 3335 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3336 greq->u.newest); 3337 if (!lreq->map_dne_bound) 3338 lreq->map_dne_bound = greq->u.newest; 3339 erase_linger_mc(&osdc->linger_map_checks, lreq); 3340 check_linger_pool_dne(lreq); 3341 3342 linger_put(lreq); 3343 out_unlock: 3344 up_write(&osdc->lock); 3345 } 3346 3347 static void send_linger_map_check(struct ceph_osd_linger_request *lreq) 3348 { 3349 struct ceph_osd_client *osdc = lreq->osdc; 3350 struct ceph_osd_linger_request *lookup_lreq; 3351 int ret; 3352 3353 verify_osdc_wrlocked(osdc); 3354 3355 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3356 lreq->linger_id); 3357 if (lookup_lreq) { 3358 WARN_ON(lookup_lreq != lreq); 3359 return; 3360 } 3361 3362 linger_get(lreq); 3363 insert_linger_mc(&osdc->linger_map_checks, lreq); 3364 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 3365 linger_map_check_cb, lreq->linger_id); 3366 WARN_ON(ret); 3367 } 3368 3369 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) 3370 { 3371 int ret; 3372 3373 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3374 ret = wait_for_completion_killable(&lreq->reg_commit_wait); 3375 return ret ?: lreq->reg_commit_error; 3376 } 3377 3378 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq, 3379 unsigned long timeout) 3380 { 3381 long left; 3382 3383 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3384 left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait, 3385 ceph_timeout_jiffies(timeout)); 3386 if (left <= 0) 3387 left = left ?: -ETIMEDOUT; 3388 else 3389 left = lreq->notify_finish_error; /* completed */ 3390 3391 return left; 3392 } 3393 3394 /* 3395 * Timeout callback, called every N seconds. When 1 or more OSD 3396 * requests has been active for more than N seconds, we send a keepalive 3397 * (tag + timestamp) to its OSD to ensure any communications channel 3398 * reset is detected. 3399 */ 3400 static void handle_timeout(struct work_struct *work) 3401 { 3402 struct ceph_osd_client *osdc = 3403 container_of(work, struct ceph_osd_client, timeout_work.work); 3404 struct ceph_options *opts = osdc->client->options; 3405 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; 3406 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout; 3407 LIST_HEAD(slow_osds); 3408 struct rb_node *n, *p; 3409 3410 dout("%s osdc %p\n", __func__, osdc); 3411 down_write(&osdc->lock); 3412 3413 /* 3414 * ping osds that are a bit slow. this ensures that if there 3415 * is a break in the TCP connection we will notice, and reopen 3416 * a connection with that osd (from the fault callback). 3417 */ 3418 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 3419 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 3420 bool found = false; 3421 3422 for (p = rb_first(&osd->o_requests); p; ) { 3423 struct ceph_osd_request *req = 3424 rb_entry(p, struct ceph_osd_request, r_node); 3425 3426 p = rb_next(p); /* abort_request() */ 3427 3428 if (time_before(req->r_stamp, cutoff)) { 3429 dout(" req %p tid %llu on osd%d is laggy\n", 3430 req, req->r_tid, osd->o_osd); 3431 found = true; 3432 } 3433 if (opts->osd_request_timeout && 3434 time_before(req->r_start_stamp, expiry_cutoff)) { 3435 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3436 req->r_tid, osd->o_osd); 3437 abort_request(req, -ETIMEDOUT); 3438 } 3439 } 3440 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { 3441 struct ceph_osd_linger_request *lreq = 3442 rb_entry(p, struct ceph_osd_linger_request, node); 3443 3444 dout(" lreq %p linger_id %llu is served by osd%d\n", 3445 lreq, lreq->linger_id, osd->o_osd); 3446 found = true; 3447 3448 mutex_lock(&lreq->lock); 3449 if (lreq->is_watch && lreq->committed && !lreq->last_error) 3450 send_linger_ping(lreq); 3451 mutex_unlock(&lreq->lock); 3452 } 3453 3454 if (found) 3455 list_move_tail(&osd->o_keepalive_item, &slow_osds); 3456 } 3457 3458 if (opts->osd_request_timeout) { 3459 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { 3460 struct ceph_osd_request *req = 3461 rb_entry(p, struct ceph_osd_request, r_node); 3462 3463 p = rb_next(p); /* abort_request() */ 3464 3465 if (time_before(req->r_start_stamp, expiry_cutoff)) { 3466 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3467 req->r_tid, osdc->homeless_osd.o_osd); 3468 abort_request(req, -ETIMEDOUT); 3469 } 3470 } 3471 } 3472 3473 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) 3474 maybe_request_map(osdc); 3475 3476 while (!list_empty(&slow_osds)) { 3477 struct ceph_osd *osd = list_first_entry(&slow_osds, 3478 struct ceph_osd, 3479 o_keepalive_item); 3480 list_del_init(&osd->o_keepalive_item); 3481 ceph_con_keepalive(&osd->o_con); 3482 } 3483 3484 up_write(&osdc->lock); 3485 schedule_delayed_work(&osdc->timeout_work, 3486 osdc->client->options->osd_keepalive_timeout); 3487 } 3488 3489 static void handle_osds_timeout(struct work_struct *work) 3490 { 3491 struct ceph_osd_client *osdc = 3492 container_of(work, struct ceph_osd_client, 3493 osds_timeout_work.work); 3494 unsigned long delay = osdc->client->options->osd_idle_ttl / 4; 3495 struct ceph_osd *osd, *nosd; 3496 3497 dout("%s osdc %p\n", __func__, osdc); 3498 down_write(&osdc->lock); 3499 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 3500 if (time_before(jiffies, osd->lru_ttl)) 3501 break; 3502 3503 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); 3504 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); 3505 close_osd(osd); 3506 } 3507 3508 up_write(&osdc->lock); 3509 schedule_delayed_work(&osdc->osds_timeout_work, 3510 round_jiffies_relative(delay)); 3511 } 3512 3513 static int ceph_oloc_decode(void **p, void *end, 3514 struct ceph_object_locator *oloc) 3515 { 3516 u8 struct_v, struct_cv; 3517 u32 len; 3518 void *struct_end; 3519 int ret = 0; 3520 3521 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3522 struct_v = ceph_decode_8(p); 3523 struct_cv = ceph_decode_8(p); 3524 if (struct_v < 3) { 3525 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n", 3526 struct_v, struct_cv); 3527 goto e_inval; 3528 } 3529 if (struct_cv > 6) { 3530 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n", 3531 struct_v, struct_cv); 3532 goto e_inval; 3533 } 3534 len = ceph_decode_32(p); 3535 ceph_decode_need(p, end, len, e_inval); 3536 struct_end = *p + len; 3537 3538 oloc->pool = ceph_decode_64(p); 3539 *p += 4; /* skip preferred */ 3540 3541 len = ceph_decode_32(p); 3542 if (len > 0) { 3543 pr_warn("ceph_object_locator::key is set\n"); 3544 goto e_inval; 3545 } 3546 3547 if (struct_v >= 5) { 3548 bool changed = false; 3549 3550 len = ceph_decode_32(p); 3551 if (len > 0) { 3552 ceph_decode_need(p, end, len, e_inval); 3553 if (!oloc->pool_ns || 3554 ceph_compare_string(oloc->pool_ns, *p, len)) 3555 changed = true; 3556 *p += len; 3557 } else { 3558 if (oloc->pool_ns) 3559 changed = true; 3560 } 3561 if (changed) { 3562 /* redirect changes namespace */ 3563 pr_warn("ceph_object_locator::nspace is changed\n"); 3564 goto e_inval; 3565 } 3566 } 3567 3568 if (struct_v >= 6) { 3569 s64 hash = ceph_decode_64(p); 3570 if (hash != -1) { 3571 pr_warn("ceph_object_locator::hash is set\n"); 3572 goto e_inval; 3573 } 3574 } 3575 3576 /* skip the rest */ 3577 *p = struct_end; 3578 out: 3579 return ret; 3580 3581 e_inval: 3582 ret = -EINVAL; 3583 goto out; 3584 } 3585 3586 static int ceph_redirect_decode(void **p, void *end, 3587 struct ceph_request_redirect *redir) 3588 { 3589 u8 struct_v, struct_cv; 3590 u32 len; 3591 void *struct_end; 3592 int ret; 3593 3594 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3595 struct_v = ceph_decode_8(p); 3596 struct_cv = ceph_decode_8(p); 3597 if (struct_cv > 1) { 3598 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n", 3599 struct_v, struct_cv); 3600 goto e_inval; 3601 } 3602 len = ceph_decode_32(p); 3603 ceph_decode_need(p, end, len, e_inval); 3604 struct_end = *p + len; 3605 3606 ret = ceph_oloc_decode(p, end, &redir->oloc); 3607 if (ret) 3608 goto out; 3609 3610 len = ceph_decode_32(p); 3611 if (len > 0) { 3612 pr_warn("ceph_request_redirect::object_name is set\n"); 3613 goto e_inval; 3614 } 3615 3616 /* skip the rest */ 3617 *p = struct_end; 3618 out: 3619 return ret; 3620 3621 e_inval: 3622 ret = -EINVAL; 3623 goto out; 3624 } 3625 3626 struct MOSDOpReply { 3627 struct ceph_pg pgid; 3628 u64 flags; 3629 int result; 3630 u32 epoch; 3631 int num_ops; 3632 u32 outdata_len[CEPH_OSD_MAX_OPS]; 3633 s32 rval[CEPH_OSD_MAX_OPS]; 3634 int retry_attempt; 3635 struct ceph_eversion replay_version; 3636 u64 user_version; 3637 struct ceph_request_redirect redirect; 3638 }; 3639 3640 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m) 3641 { 3642 void *p = msg->front.iov_base; 3643 void *const end = p + msg->front.iov_len; 3644 u16 version = le16_to_cpu(msg->hdr.version); 3645 struct ceph_eversion bad_replay_version; 3646 u8 decode_redir; 3647 u32 len; 3648 int ret; 3649 int i; 3650 3651 ceph_decode_32_safe(&p, end, len, e_inval); 3652 ceph_decode_need(&p, end, len, e_inval); 3653 p += len; /* skip oid */ 3654 3655 ret = ceph_decode_pgid(&p, end, &m->pgid); 3656 if (ret) 3657 return ret; 3658 3659 ceph_decode_64_safe(&p, end, m->flags, e_inval); 3660 ceph_decode_32_safe(&p, end, m->result, e_inval); 3661 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval); 3662 memcpy(&bad_replay_version, p, sizeof(bad_replay_version)); 3663 p += sizeof(bad_replay_version); 3664 ceph_decode_32_safe(&p, end, m->epoch, e_inval); 3665 3666 ceph_decode_32_safe(&p, end, m->num_ops, e_inval); 3667 if (m->num_ops > ARRAY_SIZE(m->outdata_len)) 3668 goto e_inval; 3669 3670 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op), 3671 e_inval); 3672 for (i = 0; i < m->num_ops; i++) { 3673 struct ceph_osd_op *op = p; 3674 3675 m->outdata_len[i] = le32_to_cpu(op->payload_len); 3676 p += sizeof(*op); 3677 } 3678 3679 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval); 3680 for (i = 0; i < m->num_ops; i++) 3681 ceph_decode_32_safe(&p, end, m->rval[i], e_inval); 3682 3683 if (version >= 5) { 3684 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval); 3685 memcpy(&m->replay_version, p, sizeof(m->replay_version)); 3686 p += sizeof(m->replay_version); 3687 ceph_decode_64_safe(&p, end, m->user_version, e_inval); 3688 } else { 3689 m->replay_version = bad_replay_version; /* struct */ 3690 m->user_version = le64_to_cpu(m->replay_version.version); 3691 } 3692 3693 if (version >= 6) { 3694 if (version >= 7) 3695 ceph_decode_8_safe(&p, end, decode_redir, e_inval); 3696 else 3697 decode_redir = 1; 3698 } else { 3699 decode_redir = 0; 3700 } 3701 3702 if (decode_redir) { 3703 ret = ceph_redirect_decode(&p, end, &m->redirect); 3704 if (ret) 3705 return ret; 3706 } else { 3707 ceph_oloc_init(&m->redirect.oloc); 3708 } 3709 3710 return 0; 3711 3712 e_inval: 3713 return -EINVAL; 3714 } 3715 3716 /* 3717 * Handle MOSDOpReply. Set ->r_result and call the callback if it is 3718 * specified. 3719 */ 3720 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) 3721 { 3722 struct ceph_osd_client *osdc = osd->o_osdc; 3723 struct ceph_osd_request *req; 3724 struct MOSDOpReply m; 3725 u64 tid = le64_to_cpu(msg->hdr.tid); 3726 u32 data_len = 0; 3727 int ret; 3728 int i; 3729 3730 dout("%s msg %p tid %llu\n", __func__, msg, tid); 3731 3732 down_read(&osdc->lock); 3733 if (!osd_registered(osd)) { 3734 dout("%s osd%d unknown\n", __func__, osd->o_osd); 3735 goto out_unlock_osdc; 3736 } 3737 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); 3738 3739 mutex_lock(&osd->lock); 3740 req = lookup_request(&osd->o_requests, tid); 3741 if (!req) { 3742 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid); 3743 goto out_unlock_session; 3744 } 3745 3746 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns; 3747 ret = decode_MOSDOpReply(msg, &m); 3748 m.redirect.oloc.pool_ns = NULL; 3749 if (ret) { 3750 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n", 3751 req->r_tid, ret); 3752 ceph_msg_dump(msg); 3753 goto fail_request; 3754 } 3755 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n", 3756 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed, 3757 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch), 3758 le64_to_cpu(m.replay_version.version), m.user_version); 3759 3760 if (m.retry_attempt >= 0) { 3761 if (m.retry_attempt != req->r_attempts - 1) { 3762 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n", 3763 req, req->r_tid, m.retry_attempt, 3764 req->r_attempts - 1); 3765 goto out_unlock_session; 3766 } 3767 } else { 3768 WARN_ON(1); /* MOSDOpReply v4 is assumed */ 3769 } 3770 3771 if (!ceph_oloc_empty(&m.redirect.oloc)) { 3772 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid, 3773 m.redirect.oloc.pool); 3774 unlink_request(osd, req); 3775 mutex_unlock(&osd->lock); 3776 3777 /* 3778 * Not ceph_oloc_copy() - changing pool_ns is not 3779 * supported. 3780 */ 3781 req->r_t.target_oloc.pool = m.redirect.oloc.pool; 3782 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | 3783 CEPH_OSD_FLAG_IGNORE_OVERLAY | 3784 CEPH_OSD_FLAG_IGNORE_CACHE; 3785 req->r_tid = 0; 3786 __submit_request(req, false); 3787 goto out_unlock_osdc; 3788 } 3789 3790 if (m.result == -EAGAIN) { 3791 dout("req %p tid %llu EAGAIN\n", req, req->r_tid); 3792 unlink_request(osd, req); 3793 mutex_unlock(&osd->lock); 3794 3795 /* 3796 * The object is missing on the replica or not (yet) 3797 * readable. Clear pgid to force a resend to the primary 3798 * via legacy_change. 3799 */ 3800 req->r_t.pgid.pool = 0; 3801 req->r_t.pgid.seed = 0; 3802 WARN_ON(!req->r_t.used_replica); 3803 req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS | 3804 CEPH_OSD_FLAG_LOCALIZE_READS); 3805 req->r_tid = 0; 3806 __submit_request(req, false); 3807 goto out_unlock_osdc; 3808 } 3809 3810 if (m.num_ops != req->r_num_ops) { 3811 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops, 3812 req->r_num_ops, req->r_tid); 3813 goto fail_request; 3814 } 3815 for (i = 0; i < req->r_num_ops; i++) { 3816 dout(" req %p tid %llu op %d rval %d len %u\n", req, 3817 req->r_tid, i, m.rval[i], m.outdata_len[i]); 3818 req->r_ops[i].rval = m.rval[i]; 3819 req->r_ops[i].outdata_len = m.outdata_len[i]; 3820 data_len += m.outdata_len[i]; 3821 } 3822 if (data_len != le32_to_cpu(msg->hdr.data_len)) { 3823 pr_err("sum of lens %u != %u for tid %llu\n", data_len, 3824 le32_to_cpu(msg->hdr.data_len), req->r_tid); 3825 goto fail_request; 3826 } 3827 dout("%s req %p tid %llu result %d data_len %u\n", __func__, 3828 req, req->r_tid, m.result, data_len); 3829 3830 /* 3831 * Since we only ever request ONDISK, we should only ever get 3832 * one (type of) reply back. 3833 */ 3834 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK)); 3835 req->r_result = m.result ?: data_len; 3836 finish_request(req); 3837 mutex_unlock(&osd->lock); 3838 up_read(&osdc->lock); 3839 3840 __complete_request(req); 3841 return; 3842 3843 fail_request: 3844 complete_request(req, -EIO); 3845 out_unlock_session: 3846 mutex_unlock(&osd->lock); 3847 out_unlock_osdc: 3848 up_read(&osdc->lock); 3849 } 3850 3851 static void set_pool_was_full(struct ceph_osd_client *osdc) 3852 { 3853 struct rb_node *n; 3854 3855 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { 3856 struct ceph_pg_pool_info *pi = 3857 rb_entry(n, struct ceph_pg_pool_info, node); 3858 3859 pi->was_full = __pool_full(pi); 3860 } 3861 } 3862 3863 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id) 3864 { 3865 struct ceph_pg_pool_info *pi; 3866 3867 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id); 3868 if (!pi) 3869 return false; 3870 3871 return pi->was_full && !__pool_full(pi); 3872 } 3873 3874 static enum calc_target_result 3875 recalc_linger_target(struct ceph_osd_linger_request *lreq) 3876 { 3877 struct ceph_osd_client *osdc = lreq->osdc; 3878 enum calc_target_result ct_res; 3879 3880 ct_res = calc_target(osdc, &lreq->t, true); 3881 if (ct_res == CALC_TARGET_NEED_RESEND) { 3882 struct ceph_osd *osd; 3883 3884 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3885 if (osd != lreq->osd) { 3886 unlink_linger(lreq->osd, lreq); 3887 link_linger(osd, lreq); 3888 } 3889 } 3890 3891 return ct_res; 3892 } 3893 3894 /* 3895 * Requeue requests whose mapping to an OSD has changed. 3896 */ 3897 static void scan_requests(struct ceph_osd *osd, 3898 bool force_resend, 3899 bool cleared_full, 3900 bool check_pool_cleared_full, 3901 struct rb_root *need_resend, 3902 struct list_head *need_resend_linger) 3903 { 3904 struct ceph_osd_client *osdc = osd->o_osdc; 3905 struct rb_node *n; 3906 bool force_resend_writes; 3907 3908 for (n = rb_first(&osd->o_linger_requests); n; ) { 3909 struct ceph_osd_linger_request *lreq = 3910 rb_entry(n, struct ceph_osd_linger_request, node); 3911 enum calc_target_result ct_res; 3912 3913 n = rb_next(n); /* recalc_linger_target() */ 3914 3915 dout("%s lreq %p linger_id %llu\n", __func__, lreq, 3916 lreq->linger_id); 3917 ct_res = recalc_linger_target(lreq); 3918 switch (ct_res) { 3919 case CALC_TARGET_NO_ACTION: 3920 force_resend_writes = cleared_full || 3921 (check_pool_cleared_full && 3922 pool_cleared_full(osdc, lreq->t.base_oloc.pool)); 3923 if (!force_resend && !force_resend_writes) 3924 break; 3925 3926 fallthrough; 3927 case CALC_TARGET_NEED_RESEND: 3928 cancel_linger_map_check(lreq); 3929 /* 3930 * scan_requests() for the previous epoch(s) 3931 * may have already added it to the list, since 3932 * it's not unlinked here. 3933 */ 3934 if (list_empty(&lreq->scan_item)) 3935 list_add_tail(&lreq->scan_item, need_resend_linger); 3936 break; 3937 case CALC_TARGET_POOL_DNE: 3938 list_del_init(&lreq->scan_item); 3939 check_linger_pool_dne(lreq); 3940 break; 3941 } 3942 } 3943 3944 for (n = rb_first(&osd->o_requests); n; ) { 3945 struct ceph_osd_request *req = 3946 rb_entry(n, struct ceph_osd_request, r_node); 3947 enum calc_target_result ct_res; 3948 3949 n = rb_next(n); /* unlink_request(), check_pool_dne() */ 3950 3951 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 3952 ct_res = calc_target(osdc, &req->r_t, false); 3953 switch (ct_res) { 3954 case CALC_TARGET_NO_ACTION: 3955 force_resend_writes = cleared_full || 3956 (check_pool_cleared_full && 3957 pool_cleared_full(osdc, req->r_t.base_oloc.pool)); 3958 if (!force_resend && 3959 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) || 3960 !force_resend_writes)) 3961 break; 3962 3963 fallthrough; 3964 case CALC_TARGET_NEED_RESEND: 3965 cancel_map_check(req); 3966 unlink_request(osd, req); 3967 insert_request(need_resend, req); 3968 break; 3969 case CALC_TARGET_POOL_DNE: 3970 check_pool_dne(req); 3971 break; 3972 } 3973 } 3974 } 3975 3976 static int handle_one_map(struct ceph_osd_client *osdc, 3977 void *p, void *end, bool incremental, 3978 struct rb_root *need_resend, 3979 struct list_head *need_resend_linger) 3980 { 3981 struct ceph_osdmap *newmap; 3982 struct rb_node *n; 3983 bool skipped_map = false; 3984 bool was_full; 3985 3986 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); 3987 set_pool_was_full(osdc); 3988 3989 if (incremental) 3990 newmap = osdmap_apply_incremental(&p, end, 3991 ceph_msgr2(osdc->client), 3992 osdc->osdmap); 3993 else 3994 newmap = ceph_osdmap_decode(&p, end, ceph_msgr2(osdc->client)); 3995 if (IS_ERR(newmap)) 3996 return PTR_ERR(newmap); 3997 3998 if (newmap != osdc->osdmap) { 3999 /* 4000 * Preserve ->was_full before destroying the old map. 4001 * For pools that weren't in the old map, ->was_full 4002 * should be false. 4003 */ 4004 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) { 4005 struct ceph_pg_pool_info *pi = 4006 rb_entry(n, struct ceph_pg_pool_info, node); 4007 struct ceph_pg_pool_info *old_pi; 4008 4009 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id); 4010 if (old_pi) 4011 pi->was_full = old_pi->was_full; 4012 else 4013 WARN_ON(pi->was_full); 4014 } 4015 4016 if (osdc->osdmap->epoch && 4017 osdc->osdmap->epoch + 1 < newmap->epoch) { 4018 WARN_ON(incremental); 4019 skipped_map = true; 4020 } 4021 4022 ceph_osdmap_destroy(osdc->osdmap); 4023 osdc->osdmap = newmap; 4024 } 4025 4026 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); 4027 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true, 4028 need_resend, need_resend_linger); 4029 4030 for (n = rb_first(&osdc->osds); n; ) { 4031 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 4032 4033 n = rb_next(n); /* close_osd() */ 4034 4035 scan_requests(osd, skipped_map, was_full, true, need_resend, 4036 need_resend_linger); 4037 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || 4038 memcmp(&osd->o_con.peer_addr, 4039 ceph_osd_addr(osdc->osdmap, osd->o_osd), 4040 sizeof(struct ceph_entity_addr))) 4041 close_osd(osd); 4042 } 4043 4044 return 0; 4045 } 4046 4047 static void kick_requests(struct ceph_osd_client *osdc, 4048 struct rb_root *need_resend, 4049 struct list_head *need_resend_linger) 4050 { 4051 struct ceph_osd_linger_request *lreq, *nlreq; 4052 enum calc_target_result ct_res; 4053 struct rb_node *n; 4054 4055 /* make sure need_resend targets reflect latest map */ 4056 for (n = rb_first(need_resend); n; ) { 4057 struct ceph_osd_request *req = 4058 rb_entry(n, struct ceph_osd_request, r_node); 4059 4060 n = rb_next(n); 4061 4062 if (req->r_t.epoch < osdc->osdmap->epoch) { 4063 ct_res = calc_target(osdc, &req->r_t, false); 4064 if (ct_res == CALC_TARGET_POOL_DNE) { 4065 erase_request(need_resend, req); 4066 check_pool_dne(req); 4067 } 4068 } 4069 } 4070 4071 for (n = rb_first(need_resend); n; ) { 4072 struct ceph_osd_request *req = 4073 rb_entry(n, struct ceph_osd_request, r_node); 4074 struct ceph_osd *osd; 4075 4076 n = rb_next(n); 4077 erase_request(need_resend, req); /* before link_request() */ 4078 4079 osd = lookup_create_osd(osdc, req->r_t.osd, true); 4080 link_request(osd, req); 4081 if (!req->r_linger) { 4082 if (!osd_homeless(osd) && !req->r_t.paused) 4083 send_request(req); 4084 } else { 4085 cancel_linger_request(req); 4086 } 4087 } 4088 4089 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) { 4090 if (!osd_homeless(lreq->osd)) 4091 send_linger(lreq); 4092 4093 list_del_init(&lreq->scan_item); 4094 } 4095 } 4096 4097 /* 4098 * Process updated osd map. 4099 * 4100 * The message contains any number of incremental and full maps, normally 4101 * indicating some sort of topology change in the cluster. Kick requests 4102 * off to different OSDs as needed. 4103 */ 4104 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) 4105 { 4106 void *p = msg->front.iov_base; 4107 void *const end = p + msg->front.iov_len; 4108 u32 nr_maps, maplen; 4109 u32 epoch; 4110 struct ceph_fsid fsid; 4111 struct rb_root need_resend = RB_ROOT; 4112 LIST_HEAD(need_resend_linger); 4113 bool handled_incremental = false; 4114 bool was_pauserd, was_pausewr; 4115 bool pauserd, pausewr; 4116 int err; 4117 4118 dout("%s have %u\n", __func__, osdc->osdmap->epoch); 4119 down_write(&osdc->lock); 4120 4121 /* verify fsid */ 4122 ceph_decode_need(&p, end, sizeof(fsid), bad); 4123 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 4124 if (ceph_check_fsid(osdc->client, &fsid) < 0) 4125 goto bad; 4126 4127 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); 4128 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || 4129 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 4130 have_pool_full(osdc); 4131 4132 /* incremental maps */ 4133 ceph_decode_32_safe(&p, end, nr_maps, bad); 4134 dout(" %d inc maps\n", nr_maps); 4135 while (nr_maps > 0) { 4136 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 4137 epoch = ceph_decode_32(&p); 4138 maplen = ceph_decode_32(&p); 4139 ceph_decode_need(&p, end, maplen, bad); 4140 if (osdc->osdmap->epoch && 4141 osdc->osdmap->epoch + 1 == epoch) { 4142 dout("applying incremental map %u len %d\n", 4143 epoch, maplen); 4144 err = handle_one_map(osdc, p, p + maplen, true, 4145 &need_resend, &need_resend_linger); 4146 if (err) 4147 goto bad; 4148 handled_incremental = true; 4149 } else { 4150 dout("ignoring incremental map %u len %d\n", 4151 epoch, maplen); 4152 } 4153 p += maplen; 4154 nr_maps--; 4155 } 4156 if (handled_incremental) 4157 goto done; 4158 4159 /* full maps */ 4160 ceph_decode_32_safe(&p, end, nr_maps, bad); 4161 dout(" %d full maps\n", nr_maps); 4162 while (nr_maps) { 4163 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 4164 epoch = ceph_decode_32(&p); 4165 maplen = ceph_decode_32(&p); 4166 ceph_decode_need(&p, end, maplen, bad); 4167 if (nr_maps > 1) { 4168 dout("skipping non-latest full map %u len %d\n", 4169 epoch, maplen); 4170 } else if (osdc->osdmap->epoch >= epoch) { 4171 dout("skipping full map %u len %d, " 4172 "older than our %u\n", epoch, maplen, 4173 osdc->osdmap->epoch); 4174 } else { 4175 dout("taking full map %u len %d\n", epoch, maplen); 4176 err = handle_one_map(osdc, p, p + maplen, false, 4177 &need_resend, &need_resend_linger); 4178 if (err) 4179 goto bad; 4180 } 4181 p += maplen; 4182 nr_maps--; 4183 } 4184 4185 done: 4186 /* 4187 * subscribe to subsequent osdmap updates if full to ensure 4188 * we find out when we are no longer full and stop returning 4189 * ENOSPC. 4190 */ 4191 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); 4192 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || 4193 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 4194 have_pool_full(osdc); 4195 if (was_pauserd || was_pausewr || pauserd || pausewr || 4196 osdc->osdmap->epoch < osdc->epoch_barrier) 4197 maybe_request_map(osdc); 4198 4199 kick_requests(osdc, &need_resend, &need_resend_linger); 4200 4201 ceph_osdc_abort_on_full(osdc); 4202 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP, 4203 osdc->osdmap->epoch); 4204 up_write(&osdc->lock); 4205 wake_up_all(&osdc->client->auth_wq); 4206 return; 4207 4208 bad: 4209 pr_err("osdc handle_map corrupt msg\n"); 4210 ceph_msg_dump(msg); 4211 up_write(&osdc->lock); 4212 } 4213 4214 /* 4215 * Resubmit requests pending on the given osd. 4216 */ 4217 static void kick_osd_requests(struct ceph_osd *osd) 4218 { 4219 struct rb_node *n; 4220 4221 clear_backoffs(osd); 4222 4223 for (n = rb_first(&osd->o_requests); n; ) { 4224 struct ceph_osd_request *req = 4225 rb_entry(n, struct ceph_osd_request, r_node); 4226 4227 n = rb_next(n); /* cancel_linger_request() */ 4228 4229 if (!req->r_linger) { 4230 if (!req->r_t.paused) 4231 send_request(req); 4232 } else { 4233 cancel_linger_request(req); 4234 } 4235 } 4236 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) { 4237 struct ceph_osd_linger_request *lreq = 4238 rb_entry(n, struct ceph_osd_linger_request, node); 4239 4240 send_linger(lreq); 4241 } 4242 } 4243 4244 /* 4245 * If the osd connection drops, we need to resubmit all requests. 4246 */ 4247 static void osd_fault(struct ceph_connection *con) 4248 { 4249 struct ceph_osd *osd = con->private; 4250 struct ceph_osd_client *osdc = osd->o_osdc; 4251 4252 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 4253 4254 down_write(&osdc->lock); 4255 if (!osd_registered(osd)) { 4256 dout("%s osd%d unknown\n", __func__, osd->o_osd); 4257 goto out_unlock; 4258 } 4259 4260 if (!reopen_osd(osd)) 4261 kick_osd_requests(osd); 4262 maybe_request_map(osdc); 4263 4264 out_unlock: 4265 up_write(&osdc->lock); 4266 } 4267 4268 struct MOSDBackoff { 4269 struct ceph_spg spgid; 4270 u32 map_epoch; 4271 u8 op; 4272 u64 id; 4273 struct ceph_hobject_id *begin; 4274 struct ceph_hobject_id *end; 4275 }; 4276 4277 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m) 4278 { 4279 void *p = msg->front.iov_base; 4280 void *const end = p + msg->front.iov_len; 4281 u8 struct_v; 4282 u32 struct_len; 4283 int ret; 4284 4285 ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len); 4286 if (ret) 4287 return ret; 4288 4289 ret = ceph_decode_pgid(&p, end, &m->spgid.pgid); 4290 if (ret) 4291 return ret; 4292 4293 ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval); 4294 ceph_decode_32_safe(&p, end, m->map_epoch, e_inval); 4295 ceph_decode_8_safe(&p, end, m->op, e_inval); 4296 ceph_decode_64_safe(&p, end, m->id, e_inval); 4297 4298 m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO); 4299 if (!m->begin) 4300 return -ENOMEM; 4301 4302 ret = decode_hoid(&p, end, m->begin); 4303 if (ret) { 4304 free_hoid(m->begin); 4305 return ret; 4306 } 4307 4308 m->end = kzalloc(sizeof(*m->end), GFP_NOIO); 4309 if (!m->end) { 4310 free_hoid(m->begin); 4311 return -ENOMEM; 4312 } 4313 4314 ret = decode_hoid(&p, end, m->end); 4315 if (ret) { 4316 free_hoid(m->begin); 4317 free_hoid(m->end); 4318 return ret; 4319 } 4320 4321 return 0; 4322 4323 e_inval: 4324 return -EINVAL; 4325 } 4326 4327 static struct ceph_msg *create_backoff_message( 4328 const struct ceph_osd_backoff *backoff, 4329 u32 map_epoch) 4330 { 4331 struct ceph_msg *msg; 4332 void *p, *end; 4333 int msg_size; 4334 4335 msg_size = CEPH_ENCODING_START_BLK_LEN + 4336 CEPH_PGID_ENCODING_LEN + 1; /* spgid */ 4337 msg_size += 4 + 1 + 8; /* map_epoch, op, id */ 4338 msg_size += CEPH_ENCODING_START_BLK_LEN + 4339 hoid_encoding_size(backoff->begin); 4340 msg_size += CEPH_ENCODING_START_BLK_LEN + 4341 hoid_encoding_size(backoff->end); 4342 4343 msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true); 4344 if (!msg) 4345 return NULL; 4346 4347 p = msg->front.iov_base; 4348 end = p + msg->front_alloc_len; 4349 4350 encode_spgid(&p, &backoff->spgid); 4351 ceph_encode_32(&p, map_epoch); 4352 ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK); 4353 ceph_encode_64(&p, backoff->id); 4354 encode_hoid(&p, end, backoff->begin); 4355 encode_hoid(&p, end, backoff->end); 4356 BUG_ON(p != end); 4357 4358 msg->front.iov_len = p - msg->front.iov_base; 4359 msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */ 4360 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 4361 4362 return msg; 4363 } 4364 4365 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m) 4366 { 4367 struct ceph_spg_mapping *spg; 4368 struct ceph_osd_backoff *backoff; 4369 struct ceph_msg *msg; 4370 4371 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, 4372 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id); 4373 4374 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid); 4375 if (!spg) { 4376 spg = alloc_spg_mapping(); 4377 if (!spg) { 4378 pr_err("%s failed to allocate spg\n", __func__); 4379 return; 4380 } 4381 spg->spgid = m->spgid; /* struct */ 4382 insert_spg_mapping(&osd->o_backoff_mappings, spg); 4383 } 4384 4385 backoff = alloc_backoff(); 4386 if (!backoff) { 4387 pr_err("%s failed to allocate backoff\n", __func__); 4388 return; 4389 } 4390 backoff->spgid = m->spgid; /* struct */ 4391 backoff->id = m->id; 4392 backoff->begin = m->begin; 4393 m->begin = NULL; /* backoff now owns this */ 4394 backoff->end = m->end; 4395 m->end = NULL; /* ditto */ 4396 4397 insert_backoff(&spg->backoffs, backoff); 4398 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff); 4399 4400 /* 4401 * Ack with original backoff's epoch so that the OSD can 4402 * discard this if there was a PG split. 4403 */ 4404 msg = create_backoff_message(backoff, m->map_epoch); 4405 if (!msg) { 4406 pr_err("%s failed to allocate msg\n", __func__); 4407 return; 4408 } 4409 ceph_con_send(&osd->o_con, msg); 4410 } 4411 4412 static bool target_contained_by(const struct ceph_osd_request_target *t, 4413 const struct ceph_hobject_id *begin, 4414 const struct ceph_hobject_id *end) 4415 { 4416 struct ceph_hobject_id hoid; 4417 int cmp; 4418 4419 hoid_fill_from_target(&hoid, t); 4420 cmp = hoid_compare(&hoid, begin); 4421 return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0); 4422 } 4423 4424 static void handle_backoff_unblock(struct ceph_osd *osd, 4425 const struct MOSDBackoff *m) 4426 { 4427 struct ceph_spg_mapping *spg; 4428 struct ceph_osd_backoff *backoff; 4429 struct rb_node *n; 4430 4431 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, 4432 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id); 4433 4434 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id); 4435 if (!backoff) { 4436 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n", 4437 __func__, osd->o_osd, m->spgid.pgid.pool, 4438 m->spgid.pgid.seed, m->spgid.shard, m->id); 4439 return; 4440 } 4441 4442 if (hoid_compare(backoff->begin, m->begin) && 4443 hoid_compare(backoff->end, m->end)) { 4444 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n", 4445 __func__, osd->o_osd, m->spgid.pgid.pool, 4446 m->spgid.pgid.seed, m->spgid.shard, m->id); 4447 /* unblock it anyway... */ 4448 } 4449 4450 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid); 4451 BUG_ON(!spg); 4452 4453 erase_backoff(&spg->backoffs, backoff); 4454 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); 4455 free_backoff(backoff); 4456 4457 if (RB_EMPTY_ROOT(&spg->backoffs)) { 4458 erase_spg_mapping(&osd->o_backoff_mappings, spg); 4459 free_spg_mapping(spg); 4460 } 4461 4462 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { 4463 struct ceph_osd_request *req = 4464 rb_entry(n, struct ceph_osd_request, r_node); 4465 4466 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) { 4467 /* 4468 * Match against @m, not @backoff -- the PG may 4469 * have split on the OSD. 4470 */ 4471 if (target_contained_by(&req->r_t, m->begin, m->end)) { 4472 /* 4473 * If no other installed backoff applies, 4474 * resend. 4475 */ 4476 send_request(req); 4477 } 4478 } 4479 } 4480 } 4481 4482 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg) 4483 { 4484 struct ceph_osd_client *osdc = osd->o_osdc; 4485 struct MOSDBackoff m; 4486 int ret; 4487 4488 down_read(&osdc->lock); 4489 if (!osd_registered(osd)) { 4490 dout("%s osd%d unknown\n", __func__, osd->o_osd); 4491 up_read(&osdc->lock); 4492 return; 4493 } 4494 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); 4495 4496 mutex_lock(&osd->lock); 4497 ret = decode_MOSDBackoff(msg, &m); 4498 if (ret) { 4499 pr_err("failed to decode MOSDBackoff: %d\n", ret); 4500 ceph_msg_dump(msg); 4501 goto out_unlock; 4502 } 4503 4504 switch (m.op) { 4505 case CEPH_OSD_BACKOFF_OP_BLOCK: 4506 handle_backoff_block(osd, &m); 4507 break; 4508 case CEPH_OSD_BACKOFF_OP_UNBLOCK: 4509 handle_backoff_unblock(osd, &m); 4510 break; 4511 default: 4512 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op); 4513 } 4514 4515 free_hoid(m.begin); 4516 free_hoid(m.end); 4517 4518 out_unlock: 4519 mutex_unlock(&osd->lock); 4520 up_read(&osdc->lock); 4521 } 4522 4523 /* 4524 * Process osd watch notifications 4525 */ 4526 static void handle_watch_notify(struct ceph_osd_client *osdc, 4527 struct ceph_msg *msg) 4528 { 4529 void *p = msg->front.iov_base; 4530 void *const end = p + msg->front.iov_len; 4531 struct ceph_osd_linger_request *lreq; 4532 struct linger_work *lwork; 4533 u8 proto_ver, opcode; 4534 u64 cookie, notify_id; 4535 u64 notifier_id = 0; 4536 s32 return_code = 0; 4537 void *payload = NULL; 4538 u32 payload_len = 0; 4539 4540 ceph_decode_8_safe(&p, end, proto_ver, bad); 4541 ceph_decode_8_safe(&p, end, opcode, bad); 4542 ceph_decode_64_safe(&p, end, cookie, bad); 4543 p += 8; /* skip ver */ 4544 ceph_decode_64_safe(&p, end, notify_id, bad); 4545 4546 if (proto_ver >= 1) { 4547 ceph_decode_32_safe(&p, end, payload_len, bad); 4548 ceph_decode_need(&p, end, payload_len, bad); 4549 payload = p; 4550 p += payload_len; 4551 } 4552 4553 if (le16_to_cpu(msg->hdr.version) >= 2) 4554 ceph_decode_32_safe(&p, end, return_code, bad); 4555 4556 if (le16_to_cpu(msg->hdr.version) >= 3) 4557 ceph_decode_64_safe(&p, end, notifier_id, bad); 4558 4559 down_read(&osdc->lock); 4560 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie); 4561 if (!lreq) { 4562 dout("%s opcode %d cookie %llu dne\n", __func__, opcode, 4563 cookie); 4564 goto out_unlock_osdc; 4565 } 4566 4567 mutex_lock(&lreq->lock); 4568 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__, 4569 opcode, cookie, lreq, lreq->is_watch); 4570 if (opcode == CEPH_WATCH_EVENT_DISCONNECT) { 4571 if (!lreq->last_error) { 4572 lreq->last_error = -ENOTCONN; 4573 queue_watch_error(lreq); 4574 } 4575 } else if (!lreq->is_watch) { 4576 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */ 4577 if (lreq->notify_id && lreq->notify_id != notify_id) { 4578 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq, 4579 lreq->notify_id, notify_id); 4580 } else if (!completion_done(&lreq->notify_finish_wait)) { 4581 struct ceph_msg_data *data = 4582 msg->num_data_items ? &msg->data[0] : NULL; 4583 4584 if (data) { 4585 if (lreq->preply_pages) { 4586 WARN_ON(data->type != 4587 CEPH_MSG_DATA_PAGES); 4588 *lreq->preply_pages = data->pages; 4589 *lreq->preply_len = data->length; 4590 data->own_pages = false; 4591 } 4592 } 4593 lreq->notify_finish_error = return_code; 4594 complete_all(&lreq->notify_finish_wait); 4595 } 4596 } else { 4597 /* CEPH_WATCH_EVENT_NOTIFY */ 4598 lwork = lwork_alloc(lreq, do_watch_notify); 4599 if (!lwork) { 4600 pr_err("failed to allocate notify-lwork\n"); 4601 goto out_unlock_lreq; 4602 } 4603 4604 lwork->notify.notify_id = notify_id; 4605 lwork->notify.notifier_id = notifier_id; 4606 lwork->notify.payload = payload; 4607 lwork->notify.payload_len = payload_len; 4608 lwork->notify.msg = ceph_msg_get(msg); 4609 lwork_queue(lwork); 4610 } 4611 4612 out_unlock_lreq: 4613 mutex_unlock(&lreq->lock); 4614 out_unlock_osdc: 4615 up_read(&osdc->lock); 4616 return; 4617 4618 bad: 4619 pr_err("osdc handle_watch_notify corrupt msg\n"); 4620 } 4621 4622 /* 4623 * Register request, send initial attempt. 4624 */ 4625 void ceph_osdc_start_request(struct ceph_osd_client *osdc, 4626 struct ceph_osd_request *req) 4627 { 4628 down_read(&osdc->lock); 4629 submit_request(req, false); 4630 up_read(&osdc->lock); 4631 } 4632 EXPORT_SYMBOL(ceph_osdc_start_request); 4633 4634 /* 4635 * Unregister request. If @req was registered, it isn't completed: 4636 * r_result isn't set and __complete_request() isn't invoked. 4637 * 4638 * If @req wasn't registered, this call may have raced with 4639 * handle_reply(), in which case r_result would already be set and 4640 * __complete_request() would be getting invoked, possibly even 4641 * concurrently with this call. 4642 */ 4643 void ceph_osdc_cancel_request(struct ceph_osd_request *req) 4644 { 4645 struct ceph_osd_client *osdc = req->r_osdc; 4646 4647 down_write(&osdc->lock); 4648 if (req->r_osd) 4649 cancel_request(req); 4650 up_write(&osdc->lock); 4651 } 4652 EXPORT_SYMBOL(ceph_osdc_cancel_request); 4653 4654 /* 4655 * @timeout: in jiffies, 0 means "wait forever" 4656 */ 4657 static int wait_request_timeout(struct ceph_osd_request *req, 4658 unsigned long timeout) 4659 { 4660 long left; 4661 4662 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 4663 left = wait_for_completion_killable_timeout(&req->r_completion, 4664 ceph_timeout_jiffies(timeout)); 4665 if (left <= 0) { 4666 left = left ?: -ETIMEDOUT; 4667 ceph_osdc_cancel_request(req); 4668 } else { 4669 left = req->r_result; /* completed */ 4670 } 4671 4672 return left; 4673 } 4674 4675 /* 4676 * wait for a request to complete 4677 */ 4678 int ceph_osdc_wait_request(struct ceph_osd_client *osdc, 4679 struct ceph_osd_request *req) 4680 { 4681 return wait_request_timeout(req, 0); 4682 } 4683 EXPORT_SYMBOL(ceph_osdc_wait_request); 4684 4685 /* 4686 * sync - wait for all in-flight requests to flush. avoid starvation. 4687 */ 4688 void ceph_osdc_sync(struct ceph_osd_client *osdc) 4689 { 4690 struct rb_node *n, *p; 4691 u64 last_tid = atomic64_read(&osdc->last_tid); 4692 4693 again: 4694 down_read(&osdc->lock); 4695 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 4696 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 4697 4698 mutex_lock(&osd->lock); 4699 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) { 4700 struct ceph_osd_request *req = 4701 rb_entry(p, struct ceph_osd_request, r_node); 4702 4703 if (req->r_tid > last_tid) 4704 break; 4705 4706 if (!(req->r_flags & CEPH_OSD_FLAG_WRITE)) 4707 continue; 4708 4709 ceph_osdc_get_request(req); 4710 mutex_unlock(&osd->lock); 4711 up_read(&osdc->lock); 4712 dout("%s waiting on req %p tid %llu last_tid %llu\n", 4713 __func__, req, req->r_tid, last_tid); 4714 wait_for_completion(&req->r_completion); 4715 ceph_osdc_put_request(req); 4716 goto again; 4717 } 4718 4719 mutex_unlock(&osd->lock); 4720 } 4721 4722 up_read(&osdc->lock); 4723 dout("%s done last_tid %llu\n", __func__, last_tid); 4724 } 4725 EXPORT_SYMBOL(ceph_osdc_sync); 4726 4727 /* 4728 * Returns a handle, caller owns a ref. 4729 */ 4730 struct ceph_osd_linger_request * 4731 ceph_osdc_watch(struct ceph_osd_client *osdc, 4732 struct ceph_object_id *oid, 4733 struct ceph_object_locator *oloc, 4734 rados_watchcb2_t wcb, 4735 rados_watcherrcb_t errcb, 4736 void *data) 4737 { 4738 struct ceph_osd_linger_request *lreq; 4739 int ret; 4740 4741 lreq = linger_alloc(osdc); 4742 if (!lreq) 4743 return ERR_PTR(-ENOMEM); 4744 4745 lreq->is_watch = true; 4746 lreq->wcb = wcb; 4747 lreq->errcb = errcb; 4748 lreq->data = data; 4749 lreq->watch_valid_thru = jiffies; 4750 4751 ceph_oid_copy(&lreq->t.base_oid, oid); 4752 ceph_oloc_copy(&lreq->t.base_oloc, oloc); 4753 lreq->t.flags = CEPH_OSD_FLAG_WRITE; 4754 ktime_get_real_ts64(&lreq->mtime); 4755 4756 linger_submit(lreq); 4757 ret = linger_reg_commit_wait(lreq); 4758 if (ret) { 4759 linger_cancel(lreq); 4760 goto err_put_lreq; 4761 } 4762 4763 return lreq; 4764 4765 err_put_lreq: 4766 linger_put(lreq); 4767 return ERR_PTR(ret); 4768 } 4769 EXPORT_SYMBOL(ceph_osdc_watch); 4770 4771 /* 4772 * Releases a ref. 4773 * 4774 * Times out after mount_timeout to preserve rbd unmap behaviour 4775 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap 4776 * with mount_timeout"). 4777 */ 4778 int ceph_osdc_unwatch(struct ceph_osd_client *osdc, 4779 struct ceph_osd_linger_request *lreq) 4780 { 4781 struct ceph_options *opts = osdc->client->options; 4782 struct ceph_osd_request *req; 4783 int ret; 4784 4785 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 4786 if (!req) 4787 return -ENOMEM; 4788 4789 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); 4790 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); 4791 req->r_flags = CEPH_OSD_FLAG_WRITE; 4792 ktime_get_real_ts64(&req->r_mtime); 4793 osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_UNWATCH, 4794 lreq->linger_id, 0); 4795 4796 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 4797 if (ret) 4798 goto out_put_req; 4799 4800 ceph_osdc_start_request(osdc, req); 4801 linger_cancel(lreq); 4802 linger_put(lreq); 4803 ret = wait_request_timeout(req, opts->mount_timeout); 4804 4805 out_put_req: 4806 ceph_osdc_put_request(req); 4807 return ret; 4808 } 4809 EXPORT_SYMBOL(ceph_osdc_unwatch); 4810 4811 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which, 4812 u64 notify_id, u64 cookie, void *payload, 4813 u32 payload_len) 4814 { 4815 struct ceph_osd_req_op *op; 4816 struct ceph_pagelist *pl; 4817 int ret; 4818 4819 op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0); 4820 4821 pl = ceph_pagelist_alloc(GFP_NOIO); 4822 if (!pl) 4823 return -ENOMEM; 4824 4825 ret = ceph_pagelist_encode_64(pl, notify_id); 4826 ret |= ceph_pagelist_encode_64(pl, cookie); 4827 if (payload) { 4828 ret |= ceph_pagelist_encode_32(pl, payload_len); 4829 ret |= ceph_pagelist_append(pl, payload, payload_len); 4830 } else { 4831 ret |= ceph_pagelist_encode_32(pl, 0); 4832 } 4833 if (ret) { 4834 ceph_pagelist_release(pl); 4835 return -ENOMEM; 4836 } 4837 4838 ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl); 4839 op->indata_len = pl->length; 4840 return 0; 4841 } 4842 4843 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc, 4844 struct ceph_object_id *oid, 4845 struct ceph_object_locator *oloc, 4846 u64 notify_id, 4847 u64 cookie, 4848 void *payload, 4849 u32 payload_len) 4850 { 4851 struct ceph_osd_request *req; 4852 int ret; 4853 4854 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 4855 if (!req) 4856 return -ENOMEM; 4857 4858 ceph_oid_copy(&req->r_base_oid, oid); 4859 ceph_oloc_copy(&req->r_base_oloc, oloc); 4860 req->r_flags = CEPH_OSD_FLAG_READ; 4861 4862 ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload, 4863 payload_len); 4864 if (ret) 4865 goto out_put_req; 4866 4867 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 4868 if (ret) 4869 goto out_put_req; 4870 4871 ceph_osdc_start_request(osdc, req); 4872 ret = ceph_osdc_wait_request(osdc, req); 4873 4874 out_put_req: 4875 ceph_osdc_put_request(req); 4876 return ret; 4877 } 4878 EXPORT_SYMBOL(ceph_osdc_notify_ack); 4879 4880 /* 4881 * @timeout: in seconds 4882 * 4883 * @preply_{pages,len} are initialized both on success and error. 4884 * The caller is responsible for: 4885 * 4886 * ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len)) 4887 */ 4888 int ceph_osdc_notify(struct ceph_osd_client *osdc, 4889 struct ceph_object_id *oid, 4890 struct ceph_object_locator *oloc, 4891 void *payload, 4892 u32 payload_len, 4893 u32 timeout, 4894 struct page ***preply_pages, 4895 size_t *preply_len) 4896 { 4897 struct ceph_osd_linger_request *lreq; 4898 int ret; 4899 4900 WARN_ON(!timeout); 4901 if (preply_pages) { 4902 *preply_pages = NULL; 4903 *preply_len = 0; 4904 } 4905 4906 lreq = linger_alloc(osdc); 4907 if (!lreq) 4908 return -ENOMEM; 4909 4910 lreq->request_pl = ceph_pagelist_alloc(GFP_NOIO); 4911 if (!lreq->request_pl) { 4912 ret = -ENOMEM; 4913 goto out_put_lreq; 4914 } 4915 4916 ret = ceph_pagelist_encode_32(lreq->request_pl, 1); /* prot_ver */ 4917 ret |= ceph_pagelist_encode_32(lreq->request_pl, timeout); 4918 ret |= ceph_pagelist_encode_32(lreq->request_pl, payload_len); 4919 ret |= ceph_pagelist_append(lreq->request_pl, payload, payload_len); 4920 if (ret) { 4921 ret = -ENOMEM; 4922 goto out_put_lreq; 4923 } 4924 4925 /* for notify_id */ 4926 lreq->notify_id_pages = ceph_alloc_page_vector(1, GFP_NOIO); 4927 if (IS_ERR(lreq->notify_id_pages)) { 4928 ret = PTR_ERR(lreq->notify_id_pages); 4929 lreq->notify_id_pages = NULL; 4930 goto out_put_lreq; 4931 } 4932 4933 lreq->preply_pages = preply_pages; 4934 lreq->preply_len = preply_len; 4935 4936 ceph_oid_copy(&lreq->t.base_oid, oid); 4937 ceph_oloc_copy(&lreq->t.base_oloc, oloc); 4938 lreq->t.flags = CEPH_OSD_FLAG_READ; 4939 4940 linger_submit(lreq); 4941 ret = linger_reg_commit_wait(lreq); 4942 if (!ret) 4943 ret = linger_notify_finish_wait(lreq, 4944 msecs_to_jiffies(2 * timeout * MSEC_PER_SEC)); 4945 else 4946 dout("lreq %p failed to initiate notify %d\n", lreq, ret); 4947 4948 linger_cancel(lreq); 4949 out_put_lreq: 4950 linger_put(lreq); 4951 return ret; 4952 } 4953 EXPORT_SYMBOL(ceph_osdc_notify); 4954 4955 /* 4956 * Return the number of milliseconds since the watch was last 4957 * confirmed, or an error. If there is an error, the watch is no 4958 * longer valid, and should be destroyed with ceph_osdc_unwatch(). 4959 */ 4960 int ceph_osdc_watch_check(struct ceph_osd_client *osdc, 4961 struct ceph_osd_linger_request *lreq) 4962 { 4963 unsigned long stamp, age; 4964 int ret; 4965 4966 down_read(&osdc->lock); 4967 mutex_lock(&lreq->lock); 4968 stamp = lreq->watch_valid_thru; 4969 if (!list_empty(&lreq->pending_lworks)) { 4970 struct linger_work *lwork = 4971 list_first_entry(&lreq->pending_lworks, 4972 struct linger_work, 4973 pending_item); 4974 4975 if (time_before(lwork->queued_stamp, stamp)) 4976 stamp = lwork->queued_stamp; 4977 } 4978 age = jiffies - stamp; 4979 dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__, 4980 lreq, lreq->linger_id, age, lreq->last_error); 4981 /* we are truncating to msecs, so return a safe upper bound */ 4982 ret = lreq->last_error ?: 1 + jiffies_to_msecs(age); 4983 4984 mutex_unlock(&lreq->lock); 4985 up_read(&osdc->lock); 4986 return ret; 4987 } 4988 4989 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item) 4990 { 4991 u8 struct_v; 4992 u32 struct_len; 4993 int ret; 4994 4995 ret = ceph_start_decoding(p, end, 2, "watch_item_t", 4996 &struct_v, &struct_len); 4997 if (ret) 4998 goto bad; 4999 5000 ret = -EINVAL; 5001 ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad); 5002 ceph_decode_64_safe(p, end, item->cookie, bad); 5003 ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */ 5004 5005 if (struct_v >= 2) { 5006 ret = ceph_decode_entity_addr(p, end, &item->addr); 5007 if (ret) 5008 goto bad; 5009 } else { 5010 ret = 0; 5011 } 5012 5013 dout("%s %s%llu cookie %llu addr %s\n", __func__, 5014 ENTITY_NAME(item->name), item->cookie, 5015 ceph_pr_addr(&item->addr)); 5016 bad: 5017 return ret; 5018 } 5019 5020 static int decode_watchers(void **p, void *end, 5021 struct ceph_watch_item **watchers, 5022 u32 *num_watchers) 5023 { 5024 u8 struct_v; 5025 u32 struct_len; 5026 int i; 5027 int ret; 5028 5029 ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t", 5030 &struct_v, &struct_len); 5031 if (ret) 5032 return ret; 5033 5034 *num_watchers = ceph_decode_32(p); 5035 *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO); 5036 if (!*watchers) 5037 return -ENOMEM; 5038 5039 for (i = 0; i < *num_watchers; i++) { 5040 ret = decode_watcher(p, end, *watchers + i); 5041 if (ret) { 5042 kfree(*watchers); 5043 return ret; 5044 } 5045 } 5046 5047 return 0; 5048 } 5049 5050 /* 5051 * On success, the caller is responsible for: 5052 * 5053 * kfree(watchers); 5054 */ 5055 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, 5056 struct ceph_object_id *oid, 5057 struct ceph_object_locator *oloc, 5058 struct ceph_watch_item **watchers, 5059 u32 *num_watchers) 5060 { 5061 struct ceph_osd_request *req; 5062 struct page **pages; 5063 int ret; 5064 5065 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 5066 if (!req) 5067 return -ENOMEM; 5068 5069 ceph_oid_copy(&req->r_base_oid, oid); 5070 ceph_oloc_copy(&req->r_base_oloc, oloc); 5071 req->r_flags = CEPH_OSD_FLAG_READ; 5072 5073 pages = ceph_alloc_page_vector(1, GFP_NOIO); 5074 if (IS_ERR(pages)) { 5075 ret = PTR_ERR(pages); 5076 goto out_put_req; 5077 } 5078 5079 osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0); 5080 ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers, 5081 response_data), 5082 pages, PAGE_SIZE, 0, false, true); 5083 5084 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 5085 if (ret) 5086 goto out_put_req; 5087 5088 ceph_osdc_start_request(osdc, req); 5089 ret = ceph_osdc_wait_request(osdc, req); 5090 if (ret >= 0) { 5091 void *p = page_address(pages[0]); 5092 void *const end = p + req->r_ops[0].outdata_len; 5093 5094 ret = decode_watchers(&p, end, watchers, num_watchers); 5095 } 5096 5097 out_put_req: 5098 ceph_osdc_put_request(req); 5099 return ret; 5100 } 5101 EXPORT_SYMBOL(ceph_osdc_list_watchers); 5102 5103 /* 5104 * Call all pending notify callbacks - for use after a watch is 5105 * unregistered, to make sure no more callbacks for it will be invoked 5106 */ 5107 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc) 5108 { 5109 dout("%s osdc %p\n", __func__, osdc); 5110 flush_workqueue(osdc->notify_wq); 5111 } 5112 EXPORT_SYMBOL(ceph_osdc_flush_notifies); 5113 5114 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc) 5115 { 5116 down_read(&osdc->lock); 5117 maybe_request_map(osdc); 5118 up_read(&osdc->lock); 5119 } 5120 EXPORT_SYMBOL(ceph_osdc_maybe_request_map); 5121 5122 /* 5123 * Execute an OSD class method on an object. 5124 * 5125 * @flags: CEPH_OSD_FLAG_* 5126 * @resp_len: in/out param for reply length 5127 */ 5128 int ceph_osdc_call(struct ceph_osd_client *osdc, 5129 struct ceph_object_id *oid, 5130 struct ceph_object_locator *oloc, 5131 const char *class, const char *method, 5132 unsigned int flags, 5133 struct page *req_page, size_t req_len, 5134 struct page **resp_pages, size_t *resp_len) 5135 { 5136 struct ceph_osd_request *req; 5137 int ret; 5138 5139 if (req_len > PAGE_SIZE) 5140 return -E2BIG; 5141 5142 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 5143 if (!req) 5144 return -ENOMEM; 5145 5146 ceph_oid_copy(&req->r_base_oid, oid); 5147 ceph_oloc_copy(&req->r_base_oloc, oloc); 5148 req->r_flags = flags; 5149 5150 ret = osd_req_op_cls_init(req, 0, class, method); 5151 if (ret) 5152 goto out_put_req; 5153 5154 if (req_page) 5155 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len, 5156 0, false, false); 5157 if (resp_pages) 5158 osd_req_op_cls_response_data_pages(req, 0, resp_pages, 5159 *resp_len, 0, false, false); 5160 5161 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 5162 if (ret) 5163 goto out_put_req; 5164 5165 ceph_osdc_start_request(osdc, req); 5166 ret = ceph_osdc_wait_request(osdc, req); 5167 if (ret >= 0) { 5168 ret = req->r_ops[0].rval; 5169 if (resp_pages) 5170 *resp_len = req->r_ops[0].outdata_len; 5171 } 5172 5173 out_put_req: 5174 ceph_osdc_put_request(req); 5175 return ret; 5176 } 5177 EXPORT_SYMBOL(ceph_osdc_call); 5178 5179 /* 5180 * reset all osd connections 5181 */ 5182 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc) 5183 { 5184 struct rb_node *n; 5185 5186 down_write(&osdc->lock); 5187 for (n = rb_first(&osdc->osds); n; ) { 5188 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 5189 5190 n = rb_next(n); 5191 if (!reopen_osd(osd)) 5192 kick_osd_requests(osd); 5193 } 5194 up_write(&osdc->lock); 5195 } 5196 5197 /* 5198 * init, shutdown 5199 */ 5200 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) 5201 { 5202 int err; 5203 5204 dout("init\n"); 5205 osdc->client = client; 5206 init_rwsem(&osdc->lock); 5207 osdc->osds = RB_ROOT; 5208 INIT_LIST_HEAD(&osdc->osd_lru); 5209 spin_lock_init(&osdc->osd_lru_lock); 5210 osd_init(&osdc->homeless_osd); 5211 osdc->homeless_osd.o_osdc = osdc; 5212 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD; 5213 osdc->last_linger_id = CEPH_LINGER_ID_START; 5214 osdc->linger_requests = RB_ROOT; 5215 osdc->map_checks = RB_ROOT; 5216 osdc->linger_map_checks = RB_ROOT; 5217 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); 5218 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); 5219 5220 err = -ENOMEM; 5221 osdc->osdmap = ceph_osdmap_alloc(); 5222 if (!osdc->osdmap) 5223 goto out; 5224 5225 osdc->req_mempool = mempool_create_slab_pool(10, 5226 ceph_osd_request_cache); 5227 if (!osdc->req_mempool) 5228 goto out_map; 5229 5230 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP, 5231 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op"); 5232 if (err < 0) 5233 goto out_mempool; 5234 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY, 5235 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, 5236 "osd_op_reply"); 5237 if (err < 0) 5238 goto out_msgpool; 5239 5240 err = -ENOMEM; 5241 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); 5242 if (!osdc->notify_wq) 5243 goto out_msgpool_reply; 5244 5245 osdc->completion_wq = create_singlethread_workqueue("ceph-completion"); 5246 if (!osdc->completion_wq) 5247 goto out_notify_wq; 5248 5249 schedule_delayed_work(&osdc->timeout_work, 5250 osdc->client->options->osd_keepalive_timeout); 5251 schedule_delayed_work(&osdc->osds_timeout_work, 5252 round_jiffies_relative(osdc->client->options->osd_idle_ttl)); 5253 5254 return 0; 5255 5256 out_notify_wq: 5257 destroy_workqueue(osdc->notify_wq); 5258 out_msgpool_reply: 5259 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 5260 out_msgpool: 5261 ceph_msgpool_destroy(&osdc->msgpool_op); 5262 out_mempool: 5263 mempool_destroy(osdc->req_mempool); 5264 out_map: 5265 ceph_osdmap_destroy(osdc->osdmap); 5266 out: 5267 return err; 5268 } 5269 5270 void ceph_osdc_stop(struct ceph_osd_client *osdc) 5271 { 5272 destroy_workqueue(osdc->completion_wq); 5273 destroy_workqueue(osdc->notify_wq); 5274 cancel_delayed_work_sync(&osdc->timeout_work); 5275 cancel_delayed_work_sync(&osdc->osds_timeout_work); 5276 5277 down_write(&osdc->lock); 5278 while (!RB_EMPTY_ROOT(&osdc->osds)) { 5279 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), 5280 struct ceph_osd, o_node); 5281 close_osd(osd); 5282 } 5283 up_write(&osdc->lock); 5284 WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1); 5285 osd_cleanup(&osdc->homeless_osd); 5286 5287 WARN_ON(!list_empty(&osdc->osd_lru)); 5288 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests)); 5289 WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks)); 5290 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks)); 5291 WARN_ON(atomic_read(&osdc->num_requests)); 5292 WARN_ON(atomic_read(&osdc->num_homeless)); 5293 5294 ceph_osdmap_destroy(osdc->osdmap); 5295 mempool_destroy(osdc->req_mempool); 5296 ceph_msgpool_destroy(&osdc->msgpool_op); 5297 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 5298 } 5299 5300 int osd_req_op_copy_from_init(struct ceph_osd_request *req, 5301 u64 src_snapid, u64 src_version, 5302 struct ceph_object_id *src_oid, 5303 struct ceph_object_locator *src_oloc, 5304 u32 src_fadvise_flags, 5305 u32 dst_fadvise_flags, 5306 u32 truncate_seq, u64 truncate_size, 5307 u8 copy_from_flags) 5308 { 5309 struct ceph_osd_req_op *op; 5310 struct page **pages; 5311 void *p, *end; 5312 5313 pages = ceph_alloc_page_vector(1, GFP_KERNEL); 5314 if (IS_ERR(pages)) 5315 return PTR_ERR(pages); 5316 5317 op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2, 5318 dst_fadvise_flags); 5319 op->copy_from.snapid = src_snapid; 5320 op->copy_from.src_version = src_version; 5321 op->copy_from.flags = copy_from_flags; 5322 op->copy_from.src_fadvise_flags = src_fadvise_flags; 5323 5324 p = page_address(pages[0]); 5325 end = p + PAGE_SIZE; 5326 ceph_encode_string(&p, end, src_oid->name, src_oid->name_len); 5327 encode_oloc(&p, end, src_oloc); 5328 ceph_encode_32(&p, truncate_seq); 5329 ceph_encode_64(&p, truncate_size); 5330 op->indata_len = PAGE_SIZE - (end - p); 5331 5332 ceph_osd_data_pages_init(&op->copy_from.osd_data, pages, 5333 op->indata_len, 0, false, true); 5334 return 0; 5335 } 5336 EXPORT_SYMBOL(osd_req_op_copy_from_init); 5337 5338 int __init ceph_osdc_setup(void) 5339 { 5340 size_t size = sizeof(struct ceph_osd_request) + 5341 CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op); 5342 5343 BUG_ON(ceph_osd_request_cache); 5344 ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size, 5345 0, 0, NULL); 5346 5347 return ceph_osd_request_cache ? 0 : -ENOMEM; 5348 } 5349 5350 void ceph_osdc_cleanup(void) 5351 { 5352 BUG_ON(!ceph_osd_request_cache); 5353 kmem_cache_destroy(ceph_osd_request_cache); 5354 ceph_osd_request_cache = NULL; 5355 } 5356 5357 /* 5358 * handle incoming message 5359 */ 5360 static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg) 5361 { 5362 struct ceph_osd *osd = con->private; 5363 struct ceph_osd_client *osdc = osd->o_osdc; 5364 int type = le16_to_cpu(msg->hdr.type); 5365 5366 switch (type) { 5367 case CEPH_MSG_OSD_MAP: 5368 ceph_osdc_handle_map(osdc, msg); 5369 break; 5370 case CEPH_MSG_OSD_OPREPLY: 5371 handle_reply(osd, msg); 5372 break; 5373 case CEPH_MSG_OSD_BACKOFF: 5374 handle_backoff(osd, msg); 5375 break; 5376 case CEPH_MSG_WATCH_NOTIFY: 5377 handle_watch_notify(osdc, msg); 5378 break; 5379 5380 default: 5381 pr_err("received unknown message type %d %s\n", type, 5382 ceph_msg_type_name(type)); 5383 } 5384 5385 ceph_msg_put(msg); 5386 } 5387 5388 /* How much sparse data was requested? */ 5389 static u64 sparse_data_requested(struct ceph_osd_request *req) 5390 { 5391 u64 len = 0; 5392 5393 if (req->r_flags & CEPH_OSD_FLAG_READ) { 5394 int i; 5395 5396 for (i = 0; i < req->r_num_ops; ++i) { 5397 struct ceph_osd_req_op *op = &req->r_ops[i]; 5398 5399 if (op->op == CEPH_OSD_OP_SPARSE_READ) 5400 len += op->extent.length; 5401 } 5402 } 5403 return len; 5404 } 5405 5406 /* 5407 * Lookup and return message for incoming reply. Don't try to do 5408 * anything about a larger than preallocated data portion of the 5409 * message at the moment - for now, just skip the message. 5410 */ 5411 static struct ceph_msg *get_reply(struct ceph_connection *con, 5412 struct ceph_msg_header *hdr, 5413 int *skip) 5414 { 5415 struct ceph_osd *osd = con->private; 5416 struct ceph_osd_client *osdc = osd->o_osdc; 5417 struct ceph_msg *m = NULL; 5418 struct ceph_osd_request *req; 5419 int front_len = le32_to_cpu(hdr->front_len); 5420 int data_len = le32_to_cpu(hdr->data_len); 5421 u64 tid = le64_to_cpu(hdr->tid); 5422 u64 srlen; 5423 5424 down_read(&osdc->lock); 5425 if (!osd_registered(osd)) { 5426 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd); 5427 *skip = 1; 5428 goto out_unlock_osdc; 5429 } 5430 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num)); 5431 5432 mutex_lock(&osd->lock); 5433 req = lookup_request(&osd->o_requests, tid); 5434 if (!req) { 5435 dout("%s osd%d tid %llu unknown, skipping\n", __func__, 5436 osd->o_osd, tid); 5437 *skip = 1; 5438 goto out_unlock_session; 5439 } 5440 5441 ceph_msg_revoke_incoming(req->r_reply); 5442 5443 if (front_len > req->r_reply->front_alloc_len) { 5444 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n", 5445 __func__, osd->o_osd, req->r_tid, front_len, 5446 req->r_reply->front_alloc_len); 5447 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS, 5448 false); 5449 if (!m) 5450 goto out_unlock_session; 5451 ceph_msg_put(req->r_reply); 5452 req->r_reply = m; 5453 } 5454 5455 srlen = sparse_data_requested(req); 5456 if (!srlen && data_len > req->r_reply->data_length) { 5457 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n", 5458 __func__, osd->o_osd, req->r_tid, data_len, 5459 req->r_reply->data_length); 5460 m = NULL; 5461 *skip = 1; 5462 goto out_unlock_session; 5463 } 5464 5465 m = ceph_msg_get(req->r_reply); 5466 m->sparse_read = (bool)srlen; 5467 5468 dout("get_reply tid %lld %p\n", tid, m); 5469 5470 out_unlock_session: 5471 mutex_unlock(&osd->lock); 5472 out_unlock_osdc: 5473 up_read(&osdc->lock); 5474 return m; 5475 } 5476 5477 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr) 5478 { 5479 struct ceph_msg *m; 5480 int type = le16_to_cpu(hdr->type); 5481 u32 front_len = le32_to_cpu(hdr->front_len); 5482 u32 data_len = le32_to_cpu(hdr->data_len); 5483 5484 m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false); 5485 if (!m) 5486 return NULL; 5487 5488 if (data_len) { 5489 struct page **pages; 5490 5491 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), 5492 GFP_NOIO); 5493 if (IS_ERR(pages)) { 5494 ceph_msg_put(m); 5495 return NULL; 5496 } 5497 5498 ceph_msg_data_add_pages(m, pages, data_len, 0, true); 5499 } 5500 5501 return m; 5502 } 5503 5504 static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con, 5505 struct ceph_msg_header *hdr, 5506 int *skip) 5507 { 5508 struct ceph_osd *osd = con->private; 5509 int type = le16_to_cpu(hdr->type); 5510 5511 *skip = 0; 5512 switch (type) { 5513 case CEPH_MSG_OSD_MAP: 5514 case CEPH_MSG_OSD_BACKOFF: 5515 case CEPH_MSG_WATCH_NOTIFY: 5516 return alloc_msg_with_page_vector(hdr); 5517 case CEPH_MSG_OSD_OPREPLY: 5518 return get_reply(con, hdr, skip); 5519 default: 5520 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__, 5521 osd->o_osd, type); 5522 *skip = 1; 5523 return NULL; 5524 } 5525 } 5526 5527 /* 5528 * Wrappers to refcount containing ceph_osd struct 5529 */ 5530 static struct ceph_connection *osd_get_con(struct ceph_connection *con) 5531 { 5532 struct ceph_osd *osd = con->private; 5533 if (get_osd(osd)) 5534 return con; 5535 return NULL; 5536 } 5537 5538 static void osd_put_con(struct ceph_connection *con) 5539 { 5540 struct ceph_osd *osd = con->private; 5541 put_osd(osd); 5542 } 5543 5544 /* 5545 * authentication 5546 */ 5547 5548 /* 5549 * Note: returned pointer is the address of a structure that's 5550 * managed separately. Caller must *not* attempt to free it. 5551 */ 5552 static struct ceph_auth_handshake * 5553 osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new) 5554 { 5555 struct ceph_osd *o = con->private; 5556 struct ceph_osd_client *osdc = o->o_osdc; 5557 struct ceph_auth_client *ac = osdc->client->monc.auth; 5558 struct ceph_auth_handshake *auth = &o->o_auth; 5559 int ret; 5560 5561 ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD, 5562 force_new, proto, NULL, NULL); 5563 if (ret) 5564 return ERR_PTR(ret); 5565 5566 return auth; 5567 } 5568 5569 static int osd_add_authorizer_challenge(struct ceph_connection *con, 5570 void *challenge_buf, int challenge_buf_len) 5571 { 5572 struct ceph_osd *o = con->private; 5573 struct ceph_osd_client *osdc = o->o_osdc; 5574 struct ceph_auth_client *ac = osdc->client->monc.auth; 5575 5576 return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer, 5577 challenge_buf, challenge_buf_len); 5578 } 5579 5580 static int osd_verify_authorizer_reply(struct ceph_connection *con) 5581 { 5582 struct ceph_osd *o = con->private; 5583 struct ceph_osd_client *osdc = o->o_osdc; 5584 struct ceph_auth_client *ac = osdc->client->monc.auth; 5585 struct ceph_auth_handshake *auth = &o->o_auth; 5586 5587 return ceph_auth_verify_authorizer_reply(ac, auth->authorizer, 5588 auth->authorizer_reply_buf, auth->authorizer_reply_buf_len, 5589 NULL, NULL, NULL, NULL); 5590 } 5591 5592 static int osd_invalidate_authorizer(struct ceph_connection *con) 5593 { 5594 struct ceph_osd *o = con->private; 5595 struct ceph_osd_client *osdc = o->o_osdc; 5596 struct ceph_auth_client *ac = osdc->client->monc.auth; 5597 5598 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); 5599 return ceph_monc_validate_auth(&osdc->client->monc); 5600 } 5601 5602 static int osd_get_auth_request(struct ceph_connection *con, 5603 void *buf, int *buf_len, 5604 void **authorizer, int *authorizer_len) 5605 { 5606 struct ceph_osd *o = con->private; 5607 struct ceph_auth_client *ac = o->o_osdc->client->monc.auth; 5608 struct ceph_auth_handshake *auth = &o->o_auth; 5609 int ret; 5610 5611 ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD, 5612 buf, buf_len); 5613 if (ret) 5614 return ret; 5615 5616 *authorizer = auth->authorizer_buf; 5617 *authorizer_len = auth->authorizer_buf_len; 5618 return 0; 5619 } 5620 5621 static int osd_handle_auth_reply_more(struct ceph_connection *con, 5622 void *reply, int reply_len, 5623 void *buf, int *buf_len, 5624 void **authorizer, int *authorizer_len) 5625 { 5626 struct ceph_osd *o = con->private; 5627 struct ceph_auth_client *ac = o->o_osdc->client->monc.auth; 5628 struct ceph_auth_handshake *auth = &o->o_auth; 5629 int ret; 5630 5631 ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len, 5632 buf, buf_len); 5633 if (ret) 5634 return ret; 5635 5636 *authorizer = auth->authorizer_buf; 5637 *authorizer_len = auth->authorizer_buf_len; 5638 return 0; 5639 } 5640 5641 static int osd_handle_auth_done(struct ceph_connection *con, 5642 u64 global_id, void *reply, int reply_len, 5643 u8 *session_key, int *session_key_len, 5644 u8 *con_secret, int *con_secret_len) 5645 { 5646 struct ceph_osd *o = con->private; 5647 struct ceph_auth_client *ac = o->o_osdc->client->monc.auth; 5648 struct ceph_auth_handshake *auth = &o->o_auth; 5649 5650 return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len, 5651 session_key, session_key_len, 5652 con_secret, con_secret_len); 5653 } 5654 5655 static int osd_handle_auth_bad_method(struct ceph_connection *con, 5656 int used_proto, int result, 5657 const int *allowed_protos, int proto_cnt, 5658 const int *allowed_modes, int mode_cnt) 5659 { 5660 struct ceph_osd *o = con->private; 5661 struct ceph_mon_client *monc = &o->o_osdc->client->monc; 5662 int ret; 5663 5664 if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_OSD, 5665 used_proto, result, 5666 allowed_protos, proto_cnt, 5667 allowed_modes, mode_cnt)) { 5668 ret = ceph_monc_validate_auth(monc); 5669 if (ret) 5670 return ret; 5671 } 5672 5673 return -EACCES; 5674 } 5675 5676 static void osd_reencode_message(struct ceph_msg *msg) 5677 { 5678 int type = le16_to_cpu(msg->hdr.type); 5679 5680 if (type == CEPH_MSG_OSD_OP) 5681 encode_request_finish(msg); 5682 } 5683 5684 static int osd_sign_message(struct ceph_msg *msg) 5685 { 5686 struct ceph_osd *o = msg->con->private; 5687 struct ceph_auth_handshake *auth = &o->o_auth; 5688 5689 return ceph_auth_sign_message(auth, msg); 5690 } 5691 5692 static int osd_check_message_signature(struct ceph_msg *msg) 5693 { 5694 struct ceph_osd *o = msg->con->private; 5695 struct ceph_auth_handshake *auth = &o->o_auth; 5696 5697 return ceph_auth_check_message_signature(auth, msg); 5698 } 5699 5700 static void advance_cursor(struct ceph_msg_data_cursor *cursor, size_t len, 5701 bool zero) 5702 { 5703 while (len) { 5704 struct page *page; 5705 size_t poff, plen; 5706 5707 page = ceph_msg_data_next(cursor, &poff, &plen); 5708 if (plen > len) 5709 plen = len; 5710 if (zero) 5711 zero_user_segment(page, poff, poff + plen); 5712 len -= plen; 5713 ceph_msg_data_advance(cursor, plen); 5714 } 5715 } 5716 5717 static int prep_next_sparse_read(struct ceph_connection *con, 5718 struct ceph_msg_data_cursor *cursor) 5719 { 5720 struct ceph_osd *o = con->private; 5721 struct ceph_sparse_read *sr = &o->o_sparse_read; 5722 struct ceph_osd_request *req; 5723 struct ceph_osd_req_op *op; 5724 5725 spin_lock(&o->o_requests_lock); 5726 req = lookup_request(&o->o_requests, le64_to_cpu(con->in_msg->hdr.tid)); 5727 if (!req) { 5728 spin_unlock(&o->o_requests_lock); 5729 return -EBADR; 5730 } 5731 5732 if (o->o_sparse_op_idx < 0) { 5733 u64 srlen = sparse_data_requested(req); 5734 5735 dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n", 5736 __func__, o->o_osd, srlen); 5737 ceph_msg_data_cursor_init(cursor, con->in_msg, srlen); 5738 } else { 5739 u64 end; 5740 5741 op = &req->r_ops[o->o_sparse_op_idx]; 5742 5743 WARN_ON_ONCE(op->extent.sparse_ext); 5744 5745 /* hand back buffer we took earlier */ 5746 op->extent.sparse_ext = sr->sr_extent; 5747 sr->sr_extent = NULL; 5748 op->extent.sparse_ext_cnt = sr->sr_count; 5749 sr->sr_ext_len = 0; 5750 dout("%s: [%d] completed extent array len %d cursor->resid %zd\n", 5751 __func__, o->o_osd, op->extent.sparse_ext_cnt, cursor->resid); 5752 /* Advance to end of data for this operation */ 5753 end = ceph_sparse_ext_map_end(op); 5754 if (end < sr->sr_req_len) 5755 advance_cursor(cursor, sr->sr_req_len - end, false); 5756 } 5757 5758 ceph_init_sparse_read(sr); 5759 5760 /* find next op in this request (if any) */ 5761 while (++o->o_sparse_op_idx < req->r_num_ops) { 5762 op = &req->r_ops[o->o_sparse_op_idx]; 5763 if (op->op == CEPH_OSD_OP_SPARSE_READ) 5764 goto found; 5765 } 5766 5767 /* reset for next sparse read request */ 5768 spin_unlock(&o->o_requests_lock); 5769 o->o_sparse_op_idx = -1; 5770 return 0; 5771 found: 5772 sr->sr_req_off = op->extent.offset; 5773 sr->sr_req_len = op->extent.length; 5774 sr->sr_pos = sr->sr_req_off; 5775 dout("%s: [%d] new sparse read op at idx %d 0x%llx~0x%llx\n", __func__, 5776 o->o_osd, o->o_sparse_op_idx, sr->sr_req_off, sr->sr_req_len); 5777 5778 /* hand off request's sparse extent map buffer */ 5779 sr->sr_ext_len = op->extent.sparse_ext_cnt; 5780 op->extent.sparse_ext_cnt = 0; 5781 sr->sr_extent = op->extent.sparse_ext; 5782 op->extent.sparse_ext = NULL; 5783 5784 spin_unlock(&o->o_requests_lock); 5785 return 1; 5786 } 5787 5788 #ifdef __BIG_ENDIAN 5789 static inline void convert_extent_map(struct ceph_sparse_read *sr) 5790 { 5791 int i; 5792 5793 for (i = 0; i < sr->sr_count; i++) { 5794 struct ceph_sparse_extent *ext = &sr->sr_extent[i]; 5795 5796 ext->off = le64_to_cpu((__force __le64)ext->off); 5797 ext->len = le64_to_cpu((__force __le64)ext->len); 5798 } 5799 } 5800 #else 5801 static inline void convert_extent_map(struct ceph_sparse_read *sr) 5802 { 5803 } 5804 #endif 5805 5806 #define MAX_EXTENTS 4096 5807 5808 static int osd_sparse_read(struct ceph_connection *con, 5809 struct ceph_msg_data_cursor *cursor, 5810 char **pbuf) 5811 { 5812 struct ceph_osd *o = con->private; 5813 struct ceph_sparse_read *sr = &o->o_sparse_read; 5814 u32 count = sr->sr_count; 5815 u64 eoff, elen; 5816 int ret; 5817 5818 switch (sr->sr_state) { 5819 case CEPH_SPARSE_READ_HDR: 5820 next_op: 5821 ret = prep_next_sparse_read(con, cursor); 5822 if (ret <= 0) 5823 return ret; 5824 5825 /* number of extents */ 5826 ret = sizeof(sr->sr_count); 5827 *pbuf = (char *)&sr->sr_count; 5828 sr->sr_state = CEPH_SPARSE_READ_EXTENTS; 5829 break; 5830 case CEPH_SPARSE_READ_EXTENTS: 5831 /* Convert sr_count to host-endian */ 5832 count = le32_to_cpu((__force __le32)sr->sr_count); 5833 sr->sr_count = count; 5834 dout("[%d] got %u extents\n", o->o_osd, count); 5835 5836 if (count > 0) { 5837 if (!sr->sr_extent || count > sr->sr_ext_len) { 5838 /* 5839 * Apply a hard cap to the number of extents. 5840 * If we have more, assume something is wrong. 5841 */ 5842 if (count > MAX_EXTENTS) { 5843 dout("%s: OSD returned 0x%x extents in a single reply!\n", 5844 __func__, count); 5845 return -EREMOTEIO; 5846 } 5847 5848 /* no extent array provided, or too short */ 5849 kfree(sr->sr_extent); 5850 sr->sr_extent = kmalloc_array(count, 5851 sizeof(*sr->sr_extent), 5852 GFP_NOIO); 5853 if (!sr->sr_extent) 5854 return -ENOMEM; 5855 sr->sr_ext_len = count; 5856 } 5857 ret = count * sizeof(*sr->sr_extent); 5858 *pbuf = (char *)sr->sr_extent; 5859 sr->sr_state = CEPH_SPARSE_READ_DATA_LEN; 5860 break; 5861 } 5862 /* No extents? Read data len */ 5863 fallthrough; 5864 case CEPH_SPARSE_READ_DATA_LEN: 5865 convert_extent_map(sr); 5866 ret = sizeof(sr->sr_datalen); 5867 *pbuf = (char *)&sr->sr_datalen; 5868 sr->sr_state = CEPH_SPARSE_READ_DATA; 5869 break; 5870 case CEPH_SPARSE_READ_DATA: 5871 if (sr->sr_index >= count) { 5872 sr->sr_state = CEPH_SPARSE_READ_HDR; 5873 goto next_op; 5874 } 5875 5876 eoff = sr->sr_extent[sr->sr_index].off; 5877 elen = sr->sr_extent[sr->sr_index].len; 5878 5879 dout("[%d] ext %d off 0x%llx len 0x%llx\n", 5880 o->o_osd, sr->sr_index, eoff, elen); 5881 5882 if (elen > INT_MAX) { 5883 dout("Sparse read extent length too long (0x%llx)\n", 5884 elen); 5885 return -EREMOTEIO; 5886 } 5887 5888 /* zero out anything from sr_pos to start of extent */ 5889 if (sr->sr_pos < eoff) 5890 advance_cursor(cursor, eoff - sr->sr_pos, true); 5891 5892 /* Set position to end of extent */ 5893 sr->sr_pos = eoff + elen; 5894 5895 /* send back the new length and nullify the ptr */ 5896 cursor->sr_resid = elen; 5897 ret = elen; 5898 *pbuf = NULL; 5899 5900 /* Bump the array index */ 5901 ++sr->sr_index; 5902 break; 5903 } 5904 return ret; 5905 } 5906 5907 static const struct ceph_connection_operations osd_con_ops = { 5908 .get = osd_get_con, 5909 .put = osd_put_con, 5910 .sparse_read = osd_sparse_read, 5911 .alloc_msg = osd_alloc_msg, 5912 .dispatch = osd_dispatch, 5913 .fault = osd_fault, 5914 .reencode_message = osd_reencode_message, 5915 .get_authorizer = osd_get_authorizer, 5916 .add_authorizer_challenge = osd_add_authorizer_challenge, 5917 .verify_authorizer_reply = osd_verify_authorizer_reply, 5918 .invalidate_authorizer = osd_invalidate_authorizer, 5919 .sign_message = osd_sign_message, 5920 .check_message_signature = osd_check_message_signature, 5921 .get_auth_request = osd_get_auth_request, 5922 .handle_auth_reply_more = osd_handle_auth_reply_more, 5923 .handle_auth_done = osd_handle_auth_done, 5924 .handle_auth_bad_method = osd_handle_auth_bad_method, 5925 }; 5926