// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * changes, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	u32 xlen;

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
				      objoff, &xlen);
	*objlen = xlen;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
	return 0;
}
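
/*
 * Illustrative example (not from the original file): with a default
 * layout of 4M objects (object_size = stripe_unit = 4M, stripe_count
 * = 1), a file extent off=6M len=4M maps to objnum=1, objoff=2M and
 * xlen=2M -- the extent crosses an object boundary, so calc_layout()
 * trims *plen from 4M down to 2M and the caller is expected to issue
 * the remainder as a separate request.
 */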

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
				   struct ceph_bio_iter *bio_pos,
				   u32 bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio_pos = *bio_pos;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
				     struct ceph_bvec_iter *bvec_pos,
				     u32 num_bvecs)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
	osd_data->bvec_pos = *bvec_pos;
	osd_data->num_bvecs = num_bvecs;
}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
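
/*
 * Illustrative sketch (not part of the original file): a caller that
 * has an extent op at index 0 and wants the payload to land in a page
 * vector typically does something like
 *
 *	pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *					 false, true);
 *
 * with own_pages = true the page vector is freed by
 * ceph_osd_data_release() when the request is dropped.
 */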

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
				    unsigned int which,
				    struct ceph_bio_iter *bio_pos,
				    u32 bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
				      unsigned int which,
				      struct bio_vec *bvecs, u32 num_bvecs,
				      u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
					 unsigned int which,
					 struct ceph_bvec_iter *bvec_pos)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
				       unsigned int which,
				       struct bio_vec *bvecs, u32 num_bvecs,
				       u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
	osd_req->r_ops[which].cls.indata_len += bytes;
	osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
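
/*
 * Illustrative note (not from the original file): a CEPH_OSD_OP_CALL
 * op carries up to three data items -- request_info (class and method
 * names, set up by osd_req_op_cls_init()), an optional request_data
 * blob for the method input, and response_data for the method output.
 * A sketch of invoking a hypothetical class method:
 *
 *	ret = osd_req_op_cls_init(req, 0, "lock", "get_info");
 *	...
 *	osd_req_op_cls_response_data_pages(req, 0, pages, PAGE_SIZE,
 *					   0, false, false);
 *
 * get_num_data_items() counts two request items and one reply item
 * for every CALL op accordingly.
 */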

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	case CEPH_OSD_DATA_TYPE_BVECS:
		return osd_data->bvec_pos.iter.bi_size;
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		ceph_pagelist_release(osd_data->pagelist);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		ceph_osd_data_release(&op->copy_from.osd_data);
		break;
	default:
		break;
	}
}
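
/*
 * Illustrative note (not from the original file): calc_pages_for()
 * counts the pages spanned by (alignment, length).  With 4K pages,
 * alignment = 512 and length = 8192 span bytes 512..8703 and thus
 * three pages, so that is how many pages ceph_osd_data_release()
 * hands back to ceph_release_page_vector().
 */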

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;
	dest->recovery_deletes = src->recovery_deletes;

	dest->flags = src->flags;
	dest->used_replica = src->used_replica;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_private_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_private_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
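
/*
 * Illustrative sketch (not part of the original file): the minimal
 * life cycle of a hand-rolled request, assuming the usual
 * ceph_osdc_start_request()/ceph_osdc_wait_request() helpers defined
 * later in this file (exact start_request signature varies by kernel)
 * and a caller-provided @oloc:
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	if (!req)
 *		return -ENOMEM;
 *	ceph_oid_printf(&req->r_base_oid, "%s", "my_object");
 *	ceph_oloc_copy(&req->r_base_oloc, &oloc);
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, len, 0, 0);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *					 false, false);
 *	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 *	if (ret)
 *		goto out_put;
 *	req->r_flags = CEPH_OSD_FLAG_READ;
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 * out_put:
 *	ceph_osdc_put_request(req);
 */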

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
				      int num_request_data_items,
				      int num_reply_data_items)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(req->r_request || req->r_reply);
	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
				       num_request_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
				    num_request_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
				       num_reply_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
				    num_reply_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}

static void get_num_data_items(struct ceph_osd_request *req,
			       int *num_request_data_items,
			       int *num_reply_data_items)
{
	struct ceph_osd_req_op *op;

	*num_request_data_items = 0;
	*num_reply_data_items = 0;

	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
		case CEPH_OSD_OP_NOTIFY_ACK:
		case CEPH_OSD_OP_COPY_FROM2:
			*num_request_data_items += 1;
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
		case CEPH_OSD_OP_READ:
		case CEPH_OSD_OP_LIST_WATCHERS:
			*num_reply_data_items += 1;
			break;

		/* both */
		case CEPH_OSD_OP_NOTIFY:
			*num_request_data_items += 1;
			*num_reply_data_items += 1;
			break;
		case CEPH_OSD_OP_CALL:
			*num_request_data_items += 2;
			*num_reply_data_items += 1;
			break;

		default:
			WARN_ON(!osd_req_opcode_valid(op->op));
			break;
		}
	}
}

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	int num_request_data_items, num_reply_data_items;

	get_num_data_items(req, &num_request_data_items,
			   &num_reply_data_items);
	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
					  num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
struct ceph_osd_req_op *
osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
						     opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
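
/*
 * Illustrative example (not from the original file): if op 0 is a
 * WRITE of offset=0 length=12288 and offset_inc is 8192,
 * osd_req_op_extent_dup_last() turns op 1 into a WRITE of offset=8192
 * length=4096 with indata_len reduced by the same 8192 -- i.e. it
 * continues the previous extent where it left off.
 */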

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			const char *class, const char *method)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;
	int ret;

	op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ret = ceph_pagelist_append(pagelist, class, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ret = ceph_pagelist_append(pagelist, method, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
						     opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;
	int ret;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ret = ceph_pagelist_append(pagelist, name, payload_len);
	if (ret)
		goto err_pagelist_free;

	op->xattr.value_len = size;
	ret = ceph_pagelist_append(pagelist, value, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
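
/*
 * Illustrative sketch (not part of the original file): setting a
 * 5-byte xattr, and guarding an update with a string-equality compare
 * (the CEPH_OSD_CMPXATTR_* constants come from rados.h):
 *
 *	ret = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_SETXATTR,
 *				    "user.x", "value", 5, 0, 0);
 *
 *	ret = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
 *				    "user.x", "value", 5,
 *				    CEPH_OSD_CMPXATTR_OP_EQ,
 *				    CEPH_OSD_CMPXATTR_MODE_STRING);
 *
 * The name and value are copied into a pagelist, so the op's
 * indata_len ends up as name_len + value_len.
 */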

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

/*
 * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_*
 */
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size,
				u32 flags)
{
	struct ceph_osd_req_op *op;

	op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_SETALLOCHINT, 0);
	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;
	op->alloc_hint.flags = flags;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment, false);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
		ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
			cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
			cpu_to_le64(src->alloc_hint.expected_write_size);
		dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
		dst->copy_from.src_version =
			cpu_to_le64(src->copy_from.src_version);
		dst->copy_from.flags = src->copy_from.flags;
		dst->copy_from.src_fadvise_flags =
			cpu_to_le32(src->copy_from.src_fadvise_flags);
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
	req->r_flags = flags | osdc->client->options->read_from_replica;

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	if (num_ops > 1)
		/*
		 * This is a special case for ceph_writepages_start(), but it
		 * also covers ceph_uninline_data().  If more multi-op request
		 * use cases emerge, we will need a separate helper.
		 */
		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
	else
		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
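
/*
 * Illustrative example (not from the original file): with 4M objects,
 * a write at file offset 10M lands in object 2, so object_base = 8M.
 * If the file was truncated down to 9M (truncate_size = 9M), the
 * per-object truncate_size sent to the OSD becomes 9M - 8M = 1M; for
 * an object entirely below the truncation point the value is clamped
 * to object_size, and for one entirely above it to 0.
 */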

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
			int (*fn)(struct ceph_osd_request *req, void *arg),
			void *arg)
{
	struct rb_node *n, *p;

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p);
			if (fn(req, arg))
				return;
		}
	}

	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
		struct ceph_osd_request *req =
		    rb_entry(p, struct ceph_osd_request, r_node);

		p = rb_next(p);
		if (fn(req, arg))
			return;
	}
}
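
/*
 * Illustrative sketch (not part of the original file): a callback for
 * for_each_request() returns 0 to keep iterating and non-zero to stop
 * early, e.g. a hypothetical scan for a given tid:
 *
 *	static int find_tid_fn(struct ceph_osd_request *req, void *arg)
 *	{
 *		u64 *tid = arg;
 *
 *		return req->r_tid == *tid;	// non-zero stops the walk
 *	}
 *
 * Note that @p is advanced before @fn runs, so the callback may
 * unlink the request it is handed.
 */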

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}

static int pick_random_replica(const struct ceph_osds *acting)
{
	int i = prandom_u32() % acting->size;

	dout("%s picked osd%d, primary osd%d\n", __func__,
	     acting->osds[i], acting->primary);
	return i;
}

/*
 * Picks the closest replica based on client's location given by
 * crush_location option.  Prefers the primary if the locality is
 * the same.
 */
static int pick_closest_replica(struct ceph_osd_client *osdc,
				const struct ceph_osds *acting)
{
	struct ceph_options *opt = osdc->client->options;
	int best_i, best_locality;
	int i = 0, locality;

	do {
		locality = ceph_get_crush_locality(osdc->osdmap,
						   acting->osds[i],
						   &opt->crush_locs);
		if (i == 0 ||
		    (locality >= 0 && best_locality < 0) ||
		    (locality >= 0 && best_locality >= 0 &&
		     locality < best_locality)) {
			best_i = i;
			best_locality = locality;
		}
	} while (++i < acting->size);

	dout("%s picked osd%d with locality %d, primary osd%d\n", __func__,
	     acting->osds[best_i], best_locality, acting->primary);
	return best_i;
}
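
/*
 * Illustrative note (not from the original file): locality here is
 * the CRUSH hierarchy level at which the client's crush_location
 * matches the OSD's position, so lower values mean "closer" and -1
 * means no match at all.  E.g. with crush_location
 * "datacenter=dc1 host=h1", an OSD on host h1 beats an OSD that only
 * shares dc1, which in turn beats one with locality -1.  Ties go to
 * the earliest index, hence the primary (index 0) wins on equal
 * locality.
 */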

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool is_read = t->flags & CEPH_OSD_FLAG_READ;
	bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change = false;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (is_read && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (is_write && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting,
					  t->used_replica || any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;

		if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
				 CEPH_OSD_FLAG_LOCALIZE_READS)) &&
		    !is_write && pi->type == CEPH_POOL_TYPE_REP &&
		    acting.size > 1) {
			int pos;

			WARN_ON(!is_read || acting.osds[0] != acting.primary);
			if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
				pos = pick_random_replica(&acting);
			} else {
				pos = pick_closest_replica(osdc, &acting);
			}
			t->osd = acting.osds[pos];
			t->used_replica = pos > 0;
		} else {
			t->osd = acting.primary;
			t->used_replica = false;
		}
	}

	if (unpaused || legacy_change || force_resend || split)
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
	     legacy_change, force_resend, split, ct_res, t->osd);
	return ct_res;
}

static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}

static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}
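
/*
 * Illustrative note (not from the original file): hoid_compare()
 * orders hobjects by is_max, pool, bitwise key, namespace, effective
 * key, oid and finally snapid.  The bitwise key is the 32-bit PG hash
 * with its bits reversed, and an is_max sentinel sorts after every
 * real object because its key, 0x100000000, doesn't fit in 32 bits.
 * E.g. hashes 0x00000002 (reversed: 0x40000000) and 0x00000001
 * (reversed: 0x80000000) sort in that order, regardless of oid.
 */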

/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}

static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}

/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);

static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}
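
/*
 * Illustrative example (not from the original file): backoffs are
 * half-open intervals [begin, end) keyed by begin.  For a backoff
 * with begin = "foo" and end = "qux" (same pool/namespace/hash
 * ordering assumed), looking up hoid "mid" compares > "foo" and
 * < "qux" and returns that backoff; "qux" itself compares >= end and
 * descends right instead.  An exact match on begin is returned
 * immediately.
 */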
1998 */ 1999 static void setup_request_data(struct ceph_osd_request *req) 2000 { 2001 struct ceph_msg *request_msg = req->r_request; 2002 struct ceph_msg *reply_msg = req->r_reply; 2003 struct ceph_osd_req_op *op; 2004 2005 if (req->r_request->num_data_items || req->r_reply->num_data_items) 2006 return; 2007 2008 WARN_ON(request_msg->data_length || reply_msg->data_length); 2009 for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { 2010 switch (op->op) { 2011 /* request */ 2012 case CEPH_OSD_OP_WRITE: 2013 case CEPH_OSD_OP_WRITEFULL: 2014 WARN_ON(op->indata_len != op->extent.length); 2015 ceph_osdc_msg_data_add(request_msg, 2016 &op->extent.osd_data); 2017 break; 2018 case CEPH_OSD_OP_SETXATTR: 2019 case CEPH_OSD_OP_CMPXATTR: 2020 WARN_ON(op->indata_len != op->xattr.name_len + 2021 op->xattr.value_len); 2022 ceph_osdc_msg_data_add(request_msg, 2023 &op->xattr.osd_data); 2024 break; 2025 case CEPH_OSD_OP_NOTIFY_ACK: 2026 ceph_osdc_msg_data_add(request_msg, 2027 &op->notify_ack.request_data); 2028 break; 2029 case CEPH_OSD_OP_COPY_FROM2: 2030 ceph_osdc_msg_data_add(request_msg, 2031 &op->copy_from.osd_data); 2032 break; 2033 2034 /* reply */ 2035 case CEPH_OSD_OP_STAT: 2036 ceph_osdc_msg_data_add(reply_msg, 2037 &op->raw_data_in); 2038 break; 2039 case CEPH_OSD_OP_READ: 2040 ceph_osdc_msg_data_add(reply_msg, 2041 &op->extent.osd_data); 2042 break; 2043 case CEPH_OSD_OP_LIST_WATCHERS: 2044 ceph_osdc_msg_data_add(reply_msg, 2045 &op->list_watchers.response_data); 2046 break; 2047 2048 /* both */ 2049 case CEPH_OSD_OP_CALL: 2050 WARN_ON(op->indata_len != op->cls.class_len + 2051 op->cls.method_len + 2052 op->cls.indata_len); 2053 ceph_osdc_msg_data_add(request_msg, 2054 &op->cls.request_info); 2055 /* optional, can be NONE */ 2056 ceph_osdc_msg_data_add(request_msg, 2057 &op->cls.request_data); 2058 /* optional, can be NONE */ 2059 ceph_osdc_msg_data_add(reply_msg, 2060 &op->cls.response_data); 2061 break; 2062 case CEPH_OSD_OP_NOTIFY: 2063 ceph_osdc_msg_data_add(request_msg, 2064 &op->notify.request_data); 2065 ceph_osdc_msg_data_add(reply_msg, 2066 &op->notify.response_data); 2067 break; 2068 } 2069 } 2070 } 2071 2072 static void encode_pgid(void **p, const struct ceph_pg *pgid) 2073 { 2074 ceph_encode_8(p, 1); 2075 ceph_encode_64(p, pgid->pool); 2076 ceph_encode_32(p, pgid->seed); 2077 ceph_encode_32(p, -1); /* preferred */ 2078 } 2079 2080 static void encode_spgid(void **p, const struct ceph_spg *spgid) 2081 { 2082 ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1); 2083 encode_pgid(p, &spgid->pgid); 2084 ceph_encode_8(p, spgid->shard); 2085 } 2086 2087 static void encode_oloc(void **p, void *end, 2088 const struct ceph_object_locator *oloc) 2089 { 2090 ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc)); 2091 ceph_encode_64(p, oloc->pool); 2092 ceph_encode_32(p, -1); /* preferred */ 2093 ceph_encode_32(p, 0); /* key len */ 2094 if (oloc->pool_ns) 2095 ceph_encode_string(p, end, oloc->pool_ns->str, 2096 oloc->pool_ns->len); 2097 else 2098 ceph_encode_32(p, 0); 2099 } 2100 2101 static void encode_request_partial(struct ceph_osd_request *req, 2102 struct ceph_msg *msg) 2103 { 2104 void *p = msg->front.iov_base; 2105 void *const end = p + msg->front_alloc_len; 2106 u32 data_len = 0; 2107 int i; 2108 2109 if (req->r_flags & CEPH_OSD_FLAG_WRITE) { 2110 /* snapshots aren't writeable */ 2111 WARN_ON(req->r_snapid != CEPH_NOSNAP); 2112 } else { 2113 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec || 2114 req->r_data_offset || req->r_snapc); 2115 } 2116 2117 
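	/* attach data items to the request/reply messages; no-op on resend */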
setup_request_data(req); 2118 2119 encode_spgid(&p, &req->r_t.spgid); /* actual spg */ 2120 ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */ 2121 ceph_encode_32(&p, req->r_osdc->osdmap->epoch); 2122 ceph_encode_32(&p, req->r_flags); 2123 2124 /* reqid */ 2125 ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid)); 2126 memset(p, 0, sizeof(struct ceph_osd_reqid)); 2127 p += sizeof(struct ceph_osd_reqid); 2128 2129 /* trace */ 2130 memset(p, 0, sizeof(struct ceph_blkin_trace_info)); 2131 p += sizeof(struct ceph_blkin_trace_info); 2132 2133 ceph_encode_32(&p, 0); /* client_inc, always 0 */ 2134 ceph_encode_timespec64(p, &req->r_mtime); 2135 p += sizeof(struct ceph_timespec); 2136 2137 encode_oloc(&p, end, &req->r_t.target_oloc); 2138 ceph_encode_string(&p, end, req->r_t.target_oid.name, 2139 req->r_t.target_oid.name_len); 2140 2141 /* ops, can imply data */ 2142 ceph_encode_16(&p, req->r_num_ops); 2143 for (i = 0; i < req->r_num_ops; i++) { 2144 data_len += osd_req_encode_op(p, &req->r_ops[i]); 2145 p += sizeof(struct ceph_osd_op); 2146 } 2147 2148 ceph_encode_64(&p, req->r_snapid); /* snapid */ 2149 if (req->r_snapc) { 2150 ceph_encode_64(&p, req->r_snapc->seq); 2151 ceph_encode_32(&p, req->r_snapc->num_snaps); 2152 for (i = 0; i < req->r_snapc->num_snaps; i++) 2153 ceph_encode_64(&p, req->r_snapc->snaps[i]); 2154 } else { 2155 ceph_encode_64(&p, 0); /* snap_seq */ 2156 ceph_encode_32(&p, 0); /* snaps len */ 2157 } 2158 2159 ceph_encode_32(&p, req->r_attempts); /* retry_attempt */ 2160 BUG_ON(p > end - 8); /* space for features */ 2161 2162 msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */ 2163 /* front_len is finalized in encode_request_finish() */ 2164 msg->front.iov_len = p - msg->front.iov_base; 2165 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2166 msg->hdr.data_len = cpu_to_le32(data_len); 2167 /* 2168 * The header "data_off" is a hint to the receiver allowing it 2169 * to align received data into its buffers such that there's no 2170 * need to re-copy it before writing it to disk (direct I/O). 2171 */ 2172 msg->hdr.data_off = cpu_to_le16(req->r_data_offset); 2173 2174 dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg, 2175 req->r_t.target_oid.name, req->r_t.target_oid.name_len); 2176 } 2177 2178 static void encode_request_finish(struct ceph_msg *msg) 2179 { 2180 void *p = msg->front.iov_base; 2181 void *const partial_end = p + msg->front.iov_len; 2182 void *const end = p + msg->front_alloc_len; 2183 2184 if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) { 2185 /* luminous OSD -- encode features and be done */ 2186 p = partial_end; 2187 ceph_encode_64(&p, msg->con->peer_features); 2188 } else { 2189 struct { 2190 char spgid[CEPH_ENCODING_START_BLK_LEN + 2191 CEPH_PGID_ENCODING_LEN + 1]; 2192 __le32 hash; 2193 __le32 epoch; 2194 __le32 flags; 2195 char reqid[CEPH_ENCODING_START_BLK_LEN + 2196 sizeof(struct ceph_osd_reqid)]; 2197 char trace[sizeof(struct ceph_blkin_trace_info)]; 2198 __le32 client_inc; 2199 struct ceph_timespec mtime; 2200 } __packed head; 2201 struct ceph_pg pgid; 2202 void *oloc, *oid, *tail; 2203 int oloc_len, oid_len, tail_len; 2204 int len; 2205 2206 /* 2207 * Pre-luminous OSD -- reencode v8 into v4 using @head 2208 * as a temporary buffer. Encode the raw PG; the rest 2209 * is just a matter of moving oloc, oid and tail blobs 2210 * around. 
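		 *
		 * The resulting v4 front is: client_inc, osdmap epoch,
		 * flags, mtime, reassert_version, oloc, raw pgid, oid,
		 * then the unchanged tail (ops, snapid, snapc,
		 * retry_attempt).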
2211 */ 2212 memcpy(&head, p, sizeof(head)); 2213 p += sizeof(head); 2214 2215 oloc = p; 2216 p += CEPH_ENCODING_START_BLK_LEN; 2217 pgid.pool = ceph_decode_64(&p); 2218 p += 4 + 4; /* preferred, key len */ 2219 len = ceph_decode_32(&p); 2220 p += len; /* nspace */ 2221 oloc_len = p - oloc; 2222 2223 oid = p; 2224 len = ceph_decode_32(&p); 2225 p += len; 2226 oid_len = p - oid; 2227 2228 tail = p; 2229 tail_len = partial_end - p; 2230 2231 p = msg->front.iov_base; 2232 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc)); 2233 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch)); 2234 ceph_encode_copy(&p, &head.flags, sizeof(head.flags)); 2235 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime)); 2236 2237 /* reassert_version */ 2238 memset(p, 0, sizeof(struct ceph_eversion)); 2239 p += sizeof(struct ceph_eversion); 2240 2241 BUG_ON(p >= oloc); 2242 memmove(p, oloc, oloc_len); 2243 p += oloc_len; 2244 2245 pgid.seed = le32_to_cpu(head.hash); 2246 encode_pgid(&p, &pgid); /* raw pg */ 2247 2248 BUG_ON(p >= oid); 2249 memmove(p, oid, oid_len); 2250 p += oid_len; 2251 2252 /* tail -- ops, snapid, snapc, retry_attempt */ 2253 BUG_ON(p >= tail); 2254 memmove(p, tail, tail_len); 2255 p += tail_len; 2256 2257 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */ 2258 } 2259 2260 BUG_ON(p > end); 2261 msg->front.iov_len = p - msg->front.iov_base; 2262 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2263 2264 dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg, 2265 le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len), 2266 le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len), 2267 le16_to_cpu(msg->hdr.version)); 2268 } 2269 2270 /* 2271 * @req has to be assigned a tid and registered. 2272 */ 2273 static void send_request(struct ceph_osd_request *req) 2274 { 2275 struct ceph_osd *osd = req->r_osd; 2276 2277 verify_osd_locked(osd); 2278 WARN_ON(osd->o_osd != req->r_t.osd); 2279 2280 /* backoff? */ 2281 if (should_plug_request(req)) 2282 return; 2283 2284 /* 2285 * We may have a previously queued request message hanging 2286 * around. Cancel it to avoid corrupting the msgr. 
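	 * (e.g. a resend after an osdmap change may find r_request still
	 * queued on the connection from the previous attempt.)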
2287 */ 2288 if (req->r_sent) 2289 ceph_msg_revoke(req->r_request); 2290 2291 req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR; 2292 if (req->r_attempts) 2293 req->r_flags |= CEPH_OSD_FLAG_RETRY; 2294 else 2295 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY); 2296 2297 encode_request_partial(req, req->r_request); 2298 2299 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n", 2300 __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed, 2301 req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed, 2302 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags, 2303 req->r_attempts); 2304 2305 req->r_t.paused = false; 2306 req->r_stamp = jiffies; 2307 req->r_attempts++; 2308 2309 req->r_sent = osd->o_incarnation; 2310 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 2311 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request)); 2312 } 2313 2314 static void maybe_request_map(struct ceph_osd_client *osdc) 2315 { 2316 bool continuous = false; 2317 2318 verify_osdc_locked(osdc); 2319 WARN_ON(!osdc->osdmap->epoch); 2320 2321 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2322 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) || 2323 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2324 dout("%s osdc %p continuous\n", __func__, osdc); 2325 continuous = true; 2326 } else { 2327 dout("%s osdc %p onetime\n", __func__, osdc); 2328 } 2329 2330 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP, 2331 osdc->osdmap->epoch + 1, continuous)) 2332 ceph_monc_renew_subs(&osdc->client->monc); 2333 } 2334 2335 static void complete_request(struct ceph_osd_request *req, int err); 2336 static void send_map_check(struct ceph_osd_request *req); 2337 2338 static void __submit_request(struct ceph_osd_request *req, bool wrlocked) 2339 { 2340 struct ceph_osd_client *osdc = req->r_osdc; 2341 struct ceph_osd *osd; 2342 enum calc_target_result ct_res; 2343 int err = 0; 2344 bool need_send = false; 2345 bool promoted = false; 2346 2347 WARN_ON(req->r_tid); 2348 dout("%s req %p wrlocked %d\n", __func__, req, wrlocked); 2349 2350 again: 2351 ct_res = calc_target(osdc, &req->r_t, false); 2352 if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked) 2353 goto promote; 2354 2355 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked); 2356 if (IS_ERR(osd)) { 2357 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked); 2358 goto promote; 2359 } 2360 2361 if (osdc->abort_err) { 2362 dout("req %p abort_err %d\n", req, osdc->abort_err); 2363 err = osdc->abort_err; 2364 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) { 2365 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch, 2366 osdc->epoch_barrier); 2367 req->r_t.paused = true; 2368 maybe_request_map(osdc); 2369 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2370 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2371 dout("req %p pausewr\n", req); 2372 req->r_t.paused = true; 2373 maybe_request_map(osdc); 2374 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && 2375 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 2376 dout("req %p pauserd\n", req); 2377 req->r_t.paused = true; 2378 maybe_request_map(osdc); 2379 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2380 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | 2381 CEPH_OSD_FLAG_FULL_FORCE)) && 2382 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2383 pool_full(osdc, req->r_t.base_oloc.pool))) { 2384 dout("req %p full/pool_full\n", req); 2385 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { 2386 err = -ENOSPC; 2387 } else { 2388 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) 2389 
pr_warn_ratelimited("cluster is full (osdmap FULL)\n"); 2390 else 2391 pr_warn_ratelimited("pool %lld is full or reached quota\n", 2392 req->r_t.base_oloc.pool); 2393 req->r_t.paused = true; 2394 maybe_request_map(osdc); 2395 } 2396 } else if (!osd_homeless(osd)) { 2397 need_send = true; 2398 } else { 2399 maybe_request_map(osdc); 2400 } 2401 2402 mutex_lock(&osd->lock); 2403 /* 2404 * Assign the tid atomically with send_request() to protect 2405 * multiple writes to the same object from racing with each 2406 * other, resulting in out of order ops on the OSDs. 2407 */ 2408 req->r_tid = atomic64_inc_return(&osdc->last_tid); 2409 link_request(osd, req); 2410 if (need_send) 2411 send_request(req); 2412 else if (err) 2413 complete_request(req, err); 2414 mutex_unlock(&osd->lock); 2415 2416 if (!err && ct_res == CALC_TARGET_POOL_DNE) 2417 send_map_check(req); 2418 2419 if (promoted) 2420 downgrade_write(&osdc->lock); 2421 return; 2422 2423 promote: 2424 up_read(&osdc->lock); 2425 down_write(&osdc->lock); 2426 wrlocked = true; 2427 promoted = true; 2428 goto again; 2429 } 2430 2431 static void account_request(struct ceph_osd_request *req) 2432 { 2433 WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK)); 2434 WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE))); 2435 2436 req->r_flags |= CEPH_OSD_FLAG_ONDISK; 2437 atomic_inc(&req->r_osdc->num_requests); 2438 2439 req->r_start_stamp = jiffies; 2440 req->r_start_latency = ktime_get(); 2441 } 2442 2443 static void submit_request(struct ceph_osd_request *req, bool wrlocked) 2444 { 2445 ceph_osdc_get_request(req); 2446 account_request(req); 2447 __submit_request(req, wrlocked); 2448 } 2449 2450 static void finish_request(struct ceph_osd_request *req) 2451 { 2452 struct ceph_osd_client *osdc = req->r_osdc; 2453 2454 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid)); 2455 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2456 2457 req->r_end_latency = ktime_get(); 2458 2459 if (req->r_osd) 2460 unlink_request(req->r_osd, req); 2461 atomic_dec(&osdc->num_requests); 2462 2463 /* 2464 * If an OSD has failed or returned and a request has been sent 2465 * twice, it's possible to get a reply and end up here while the 2466 * request message is queued for delivery. We will ignore the 2467 * reply, so not a big deal, but better to try and catch it. 2468 */ 2469 ceph_msg_revoke(req->r_request); 2470 ceph_msg_revoke_incoming(req->r_reply); 2471 } 2472 2473 static void __complete_request(struct ceph_osd_request *req) 2474 { 2475 dout("%s req %p tid %llu cb %ps result %d\n", __func__, req, 2476 req->r_tid, req->r_callback, req->r_result); 2477 2478 if (req->r_callback) 2479 req->r_callback(req); 2480 complete_all(&req->r_completion); 2481 ceph_osdc_put_request(req); 2482 } 2483 2484 static void complete_request_workfn(struct work_struct *work) 2485 { 2486 struct ceph_osd_request *req = 2487 container_of(work, struct ceph_osd_request, r_complete_work); 2488 2489 __complete_request(req); 2490 } 2491 2492 /* 2493 * This is open-coded in handle_reply(). 
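 * (handle_reply() calls finish_request() and __complete_request()
 * directly instead of bouncing through the completion workqueue.)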
2494 */ 2495 static void complete_request(struct ceph_osd_request *req, int err) 2496 { 2497 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2498 2499 req->r_result = err; 2500 finish_request(req); 2501 2502 INIT_WORK(&req->r_complete_work, complete_request_workfn); 2503 queue_work(req->r_osdc->completion_wq, &req->r_complete_work); 2504 } 2505 2506 static void cancel_map_check(struct ceph_osd_request *req) 2507 { 2508 struct ceph_osd_client *osdc = req->r_osdc; 2509 struct ceph_osd_request *lookup_req; 2510 2511 verify_osdc_wrlocked(osdc); 2512 2513 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2514 if (!lookup_req) 2515 return; 2516 2517 WARN_ON(lookup_req != req); 2518 erase_request_mc(&osdc->map_checks, req); 2519 ceph_osdc_put_request(req); 2520 } 2521 2522 static void cancel_request(struct ceph_osd_request *req) 2523 { 2524 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2525 2526 cancel_map_check(req); 2527 finish_request(req); 2528 complete_all(&req->r_completion); 2529 ceph_osdc_put_request(req); 2530 } 2531 2532 static void abort_request(struct ceph_osd_request *req, int err) 2533 { 2534 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2535 2536 cancel_map_check(req); 2537 complete_request(req, err); 2538 } 2539 2540 static int abort_fn(struct ceph_osd_request *req, void *arg) 2541 { 2542 int err = *(int *)arg; 2543 2544 abort_request(req, err); 2545 return 0; /* continue iteration */ 2546 } 2547 2548 /* 2549 * Abort all in-flight requests with @err and arrange for all future 2550 * requests to be failed immediately. 2551 */ 2552 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err) 2553 { 2554 dout("%s osdc %p err %d\n", __func__, osdc, err); 2555 down_write(&osdc->lock); 2556 for_each_request(osdc, abort_fn, &err); 2557 osdc->abort_err = err; 2558 up_write(&osdc->lock); 2559 } 2560 EXPORT_SYMBOL(ceph_osdc_abort_requests); 2561 2562 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc) 2563 { 2564 down_write(&osdc->lock); 2565 osdc->abort_err = 0; 2566 up_write(&osdc->lock); 2567 } 2568 EXPORT_SYMBOL(ceph_osdc_clear_abort_err); 2569 2570 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2571 { 2572 if (likely(eb > osdc->epoch_barrier)) { 2573 dout("updating epoch_barrier from %u to %u\n", 2574 osdc->epoch_barrier, eb); 2575 osdc->epoch_barrier = eb; 2576 /* Request map if we're not to the barrier yet */ 2577 if (eb > osdc->osdmap->epoch) 2578 maybe_request_map(osdc); 2579 } 2580 } 2581 2582 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2583 { 2584 down_read(&osdc->lock); 2585 if (unlikely(eb > osdc->epoch_barrier)) { 2586 up_read(&osdc->lock); 2587 down_write(&osdc->lock); 2588 update_epoch_barrier(osdc, eb); 2589 up_write(&osdc->lock); 2590 } else { 2591 up_read(&osdc->lock); 2592 } 2593 } 2594 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier); 2595 2596 /* 2597 * We can end up releasing caps as a result of abort_request(). 2598 * In that case, we probably want to ensure that the cap release message 2599 * has an updated epoch barrier in it, so set the epoch barrier prior to 2600 * aborting the first request. 
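 * abort_on_full_fn() therefore bumps the barrier once, before the
 * first abort_request() is issued.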
2601 */ 2602 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg) 2603 { 2604 struct ceph_osd_client *osdc = req->r_osdc; 2605 bool *victims = arg; 2606 2607 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2608 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2609 pool_full(osdc, req->r_t.base_oloc.pool))) { 2610 if (!*victims) { 2611 update_epoch_barrier(osdc, osdc->osdmap->epoch); 2612 *victims = true; 2613 } 2614 abort_request(req, -ENOSPC); 2615 } 2616 2617 return 0; /* continue iteration */ 2618 } 2619 2620 /* 2621 * Drop all pending requests that are stalled waiting on a full condition to 2622 * clear, and complete them with ENOSPC as the return code. Set the 2623 * osdc->epoch_barrier to the latest map epoch that we've seen if any were 2624 * cancelled. 2625 */ 2626 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc) 2627 { 2628 bool victims = false; 2629 2630 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) && 2631 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) 2632 for_each_request(osdc, abort_on_full_fn, &victims); 2633 } 2634 2635 static void check_pool_dne(struct ceph_osd_request *req) 2636 { 2637 struct ceph_osd_client *osdc = req->r_osdc; 2638 struct ceph_osdmap *map = osdc->osdmap; 2639 2640 verify_osdc_wrlocked(osdc); 2641 WARN_ON(!map->epoch); 2642 2643 if (req->r_attempts) { 2644 /* 2645 * We sent a request earlier, which means that 2646 * previously the pool existed, and now it does not 2647 * (i.e., it was deleted). 2648 */ 2649 req->r_map_dne_bound = map->epoch; 2650 dout("%s req %p tid %llu pool disappeared\n", __func__, req, 2651 req->r_tid); 2652 } else { 2653 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__, 2654 req, req->r_tid, req->r_map_dne_bound, map->epoch); 2655 } 2656 2657 if (req->r_map_dne_bound) { 2658 if (map->epoch >= req->r_map_dne_bound) { 2659 /* we had a new enough map */ 2660 pr_info_ratelimited("tid %llu pool does not exist\n", 2661 req->r_tid); 2662 complete_request(req, -ENOENT); 2663 } 2664 } else { 2665 send_map_check(req); 2666 } 2667 } 2668 2669 static void map_check_cb(struct ceph_mon_generic_request *greq) 2670 { 2671 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 2672 struct ceph_osd_request *req; 2673 u64 tid = greq->private_data; 2674 2675 WARN_ON(greq->result || !greq->u.newest); 2676 2677 down_write(&osdc->lock); 2678 req = lookup_request_mc(&osdc->map_checks, tid); 2679 if (!req) { 2680 dout("%s tid %llu dne\n", __func__, tid); 2681 goto out_unlock; 2682 } 2683 2684 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__, 2685 req, req->r_tid, req->r_map_dne_bound, greq->u.newest); 2686 if (!req->r_map_dne_bound) 2687 req->r_map_dne_bound = greq->u.newest; 2688 erase_request_mc(&osdc->map_checks, req); 2689 check_pool_dne(req); 2690 2691 ceph_osdc_put_request(req); 2692 out_unlock: 2693 up_write(&osdc->lock); 2694 } 2695 2696 static void send_map_check(struct ceph_osd_request *req) 2697 { 2698 struct ceph_osd_client *osdc = req->r_osdc; 2699 struct ceph_osd_request *lookup_req; 2700 int ret; 2701 2702 verify_osdc_wrlocked(osdc); 2703 2704 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2705 if (lookup_req) { 2706 WARN_ON(lookup_req != req); 2707 return; 2708 } 2709 2710 ceph_osdc_get_request(req); 2711 insert_request_mc(&osdc->map_checks, req); 2712 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 2713 map_check_cb, req->r_tid); 2714 WARN_ON(ret); 2715 } 2716 2717 /* 2718 * lingering requests, watch/notify v2 infrastructure 
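 *
 * A linger request is kept registered and is re-targeted and resent
 * as new osdmaps come in, so watches and pending notifies survive OSD
 * restarts and PG interval changes until explicitly cancelled.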
2719 */ 2720 static void linger_release(struct kref *kref) 2721 { 2722 struct ceph_osd_linger_request *lreq = 2723 container_of(kref, struct ceph_osd_linger_request, kref); 2724 2725 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq, 2726 lreq->reg_req, lreq->ping_req); 2727 WARN_ON(!RB_EMPTY_NODE(&lreq->node)); 2728 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node)); 2729 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node)); 2730 WARN_ON(!list_empty(&lreq->scan_item)); 2731 WARN_ON(!list_empty(&lreq->pending_lworks)); 2732 WARN_ON(lreq->osd); 2733 2734 if (lreq->reg_req) 2735 ceph_osdc_put_request(lreq->reg_req); 2736 if (lreq->ping_req) 2737 ceph_osdc_put_request(lreq->ping_req); 2738 target_destroy(&lreq->t); 2739 kfree(lreq); 2740 } 2741 2742 static void linger_put(struct ceph_osd_linger_request *lreq) 2743 { 2744 if (lreq) 2745 kref_put(&lreq->kref, linger_release); 2746 } 2747 2748 static struct ceph_osd_linger_request * 2749 linger_get(struct ceph_osd_linger_request *lreq) 2750 { 2751 kref_get(&lreq->kref); 2752 return lreq; 2753 } 2754 2755 static struct ceph_osd_linger_request * 2756 linger_alloc(struct ceph_osd_client *osdc) 2757 { 2758 struct ceph_osd_linger_request *lreq; 2759 2760 lreq = kzalloc(sizeof(*lreq), GFP_NOIO); 2761 if (!lreq) 2762 return NULL; 2763 2764 kref_init(&lreq->kref); 2765 mutex_init(&lreq->lock); 2766 RB_CLEAR_NODE(&lreq->node); 2767 RB_CLEAR_NODE(&lreq->osdc_node); 2768 RB_CLEAR_NODE(&lreq->mc_node); 2769 INIT_LIST_HEAD(&lreq->scan_item); 2770 INIT_LIST_HEAD(&lreq->pending_lworks); 2771 init_completion(&lreq->reg_commit_wait); 2772 init_completion(&lreq->notify_finish_wait); 2773 2774 lreq->osdc = osdc; 2775 target_init(&lreq->t); 2776 2777 dout("%s lreq %p\n", __func__, lreq); 2778 return lreq; 2779 } 2780 2781 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node) 2782 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node) 2783 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node) 2784 2785 /* 2786 * Create linger request <-> OSD session relation. 2787 * 2788 * @lreq has to be registered, @osd may be homeless. 
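 * (The homeless OSD session holds requests and lingers that currently
 * map to no usable OSD; they are relinked once a suitable osdmap
 * arrives.)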
2789 */ 2790 static void link_linger(struct ceph_osd *osd, 2791 struct ceph_osd_linger_request *lreq) 2792 { 2793 verify_osd_locked(osd); 2794 WARN_ON(!lreq->linger_id || lreq->osd); 2795 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2796 osd->o_osd, lreq, lreq->linger_id); 2797 2798 if (!osd_homeless(osd)) 2799 __remove_osd_from_lru(osd); 2800 else 2801 atomic_inc(&osd->o_osdc->num_homeless); 2802 2803 get_osd(osd); 2804 insert_linger(&osd->o_linger_requests, lreq); 2805 lreq->osd = osd; 2806 } 2807 2808 static void unlink_linger(struct ceph_osd *osd, 2809 struct ceph_osd_linger_request *lreq) 2810 { 2811 verify_osd_locked(osd); 2812 WARN_ON(lreq->osd != osd); 2813 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2814 osd->o_osd, lreq, lreq->linger_id); 2815 2816 lreq->osd = NULL; 2817 erase_linger(&osd->o_linger_requests, lreq); 2818 put_osd(osd); 2819 2820 if (!osd_homeless(osd)) 2821 maybe_move_osd_to_lru(osd); 2822 else 2823 atomic_dec(&osd->o_osdc->num_homeless); 2824 } 2825 2826 static bool __linger_registered(struct ceph_osd_linger_request *lreq) 2827 { 2828 verify_osdc_locked(lreq->osdc); 2829 2830 return !RB_EMPTY_NODE(&lreq->osdc_node); 2831 } 2832 2833 static bool linger_registered(struct ceph_osd_linger_request *lreq) 2834 { 2835 struct ceph_osd_client *osdc = lreq->osdc; 2836 bool registered; 2837 2838 down_read(&osdc->lock); 2839 registered = __linger_registered(lreq); 2840 up_read(&osdc->lock); 2841 2842 return registered; 2843 } 2844 2845 static void linger_register(struct ceph_osd_linger_request *lreq) 2846 { 2847 struct ceph_osd_client *osdc = lreq->osdc; 2848 2849 verify_osdc_wrlocked(osdc); 2850 WARN_ON(lreq->linger_id); 2851 2852 linger_get(lreq); 2853 lreq->linger_id = ++osdc->last_linger_id; 2854 insert_linger_osdc(&osdc->linger_requests, lreq); 2855 } 2856 2857 static void linger_unregister(struct ceph_osd_linger_request *lreq) 2858 { 2859 struct ceph_osd_client *osdc = lreq->osdc; 2860 2861 verify_osdc_wrlocked(osdc); 2862 2863 erase_linger_osdc(&osdc->linger_requests, lreq); 2864 linger_put(lreq); 2865 } 2866 2867 static void cancel_linger_request(struct ceph_osd_request *req) 2868 { 2869 struct ceph_osd_linger_request *lreq = req->r_priv; 2870 2871 WARN_ON(!req->r_linger); 2872 cancel_request(req); 2873 linger_put(lreq); 2874 } 2875 2876 struct linger_work { 2877 struct work_struct work; 2878 struct ceph_osd_linger_request *lreq; 2879 struct list_head pending_item; 2880 unsigned long queued_stamp; 2881 2882 union { 2883 struct { 2884 u64 notify_id; 2885 u64 notifier_id; 2886 void *payload; /* points into @msg front */ 2887 size_t payload_len; 2888 2889 struct ceph_msg *msg; /* for ceph_msg_put() */ 2890 } notify; 2891 struct { 2892 int err; 2893 } error; 2894 }; 2895 }; 2896 2897 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq, 2898 work_func_t workfn) 2899 { 2900 struct linger_work *lwork; 2901 2902 lwork = kzalloc(sizeof(*lwork), GFP_NOIO); 2903 if (!lwork) 2904 return NULL; 2905 2906 INIT_WORK(&lwork->work, workfn); 2907 INIT_LIST_HEAD(&lwork->pending_item); 2908 lwork->lreq = linger_get(lreq); 2909 2910 return lwork; 2911 } 2912 2913 static void lwork_free(struct linger_work *lwork) 2914 { 2915 struct ceph_osd_linger_request *lreq = lwork->lreq; 2916 2917 mutex_lock(&lreq->lock); 2918 list_del(&lwork->pending_item); 2919 mutex_unlock(&lreq->lock); 2920 2921 linger_put(lreq); 2922 kfree(lwork); 2923 } 2924 2925 static void lwork_queue(struct linger_work *lwork) 2926 { 2927 struct 
ceph_osd_linger_request *lreq = lwork->lreq; 2928 struct ceph_osd_client *osdc = lreq->osdc; 2929 2930 verify_lreq_locked(lreq); 2931 WARN_ON(!list_empty(&lwork->pending_item)); 2932 2933 lwork->queued_stamp = jiffies; 2934 list_add_tail(&lwork->pending_item, &lreq->pending_lworks); 2935 queue_work(osdc->notify_wq, &lwork->work); 2936 } 2937 2938 static void do_watch_notify(struct work_struct *w) 2939 { 2940 struct linger_work *lwork = container_of(w, struct linger_work, work); 2941 struct ceph_osd_linger_request *lreq = lwork->lreq; 2942 2943 if (!linger_registered(lreq)) { 2944 dout("%s lreq %p not registered\n", __func__, lreq); 2945 goto out; 2946 } 2947 2948 WARN_ON(!lreq->is_watch); 2949 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n", 2950 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id, 2951 lwork->notify.payload_len); 2952 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id, 2953 lwork->notify.notifier_id, lwork->notify.payload, 2954 lwork->notify.payload_len); 2955 2956 out: 2957 ceph_msg_put(lwork->notify.msg); 2958 lwork_free(lwork); 2959 } 2960 2961 static void do_watch_error(struct work_struct *w) 2962 { 2963 struct linger_work *lwork = container_of(w, struct linger_work, work); 2964 struct ceph_osd_linger_request *lreq = lwork->lreq; 2965 2966 if (!linger_registered(lreq)) { 2967 dout("%s lreq %p not registered\n", __func__, lreq); 2968 goto out; 2969 } 2970 2971 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err); 2972 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err); 2973 2974 out: 2975 lwork_free(lwork); 2976 } 2977 2978 static void queue_watch_error(struct ceph_osd_linger_request *lreq) 2979 { 2980 struct linger_work *lwork; 2981 2982 lwork = lwork_alloc(lreq, do_watch_error); 2983 if (!lwork) { 2984 pr_err("failed to allocate error-lwork\n"); 2985 return; 2986 } 2987 2988 lwork->error.err = lreq->last_error; 2989 lwork_queue(lwork); 2990 } 2991 2992 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq, 2993 int result) 2994 { 2995 if (!completion_done(&lreq->reg_commit_wait)) { 2996 lreq->reg_commit_error = (result <= 0 ? result : 0); 2997 complete_all(&lreq->reg_commit_wait); 2998 } 2999 } 3000 3001 static void linger_commit_cb(struct ceph_osd_request *req) 3002 { 3003 struct ceph_osd_linger_request *lreq = req->r_priv; 3004 3005 mutex_lock(&lreq->lock); 3006 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq, 3007 lreq->linger_id, req->r_result); 3008 linger_reg_commit_complete(lreq, req->r_result); 3009 lreq->committed = true; 3010 3011 if (!lreq->is_watch) { 3012 struct ceph_osd_data *osd_data = 3013 osd_req_op_data(req, 0, notify, response_data); 3014 void *p = page_address(osd_data->pages[0]); 3015 3016 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY || 3017 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 3018 3019 /* make note of the notify_id */ 3020 if (req->r_ops[0].outdata_len >= sizeof(u64)) { 3021 lreq->notify_id = ceph_decode_64(&p); 3022 dout("lreq %p notify_id %llu\n", lreq, 3023 lreq->notify_id); 3024 } else { 3025 dout("lreq %p no notify_id\n", lreq); 3026 } 3027 } 3028 3029 mutex_unlock(&lreq->lock); 3030 linger_put(lreq); 3031 } 3032 3033 static int normalize_watch_error(int err) 3034 { 3035 /* 3036 * Translate ENOENT -> ENOTCONN so that a delete->disconnection 3037 * notification and a failure to reconnect because we raced with 3038 * the delete appear the same to the user. 
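 * Both the reconnect and ping paths funnel errors through
 * lreq->last_error and queue_watch_error(), so the translation is
 * done once here.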
3039 */ 3040 if (err == -ENOENT) 3041 err = -ENOTCONN; 3042 3043 return err; 3044 } 3045 3046 static void linger_reconnect_cb(struct ceph_osd_request *req) 3047 { 3048 struct ceph_osd_linger_request *lreq = req->r_priv; 3049 3050 mutex_lock(&lreq->lock); 3051 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__, 3052 lreq, lreq->linger_id, req->r_result, lreq->last_error); 3053 if (req->r_result < 0) { 3054 if (!lreq->last_error) { 3055 lreq->last_error = normalize_watch_error(req->r_result); 3056 queue_watch_error(lreq); 3057 } 3058 } 3059 3060 mutex_unlock(&lreq->lock); 3061 linger_put(lreq); 3062 } 3063 3064 static void send_linger(struct ceph_osd_linger_request *lreq) 3065 { 3066 struct ceph_osd_request *req = lreq->reg_req; 3067 struct ceph_osd_req_op *op = &req->r_ops[0]; 3068 3069 verify_osdc_wrlocked(req->r_osdc); 3070 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3071 3072 if (req->r_osd) 3073 cancel_linger_request(req); 3074 3075 request_reinit(req); 3076 target_copy(&req->r_t, &lreq->t); 3077 req->r_mtime = lreq->mtime; 3078 3079 mutex_lock(&lreq->lock); 3080 if (lreq->is_watch && lreq->committed) { 3081 WARN_ON(op->op != CEPH_OSD_OP_WATCH || 3082 op->watch.cookie != lreq->linger_id); 3083 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT; 3084 op->watch.gen = ++lreq->register_gen; 3085 dout("lreq %p reconnect register_gen %u\n", lreq, 3086 op->watch.gen); 3087 req->r_callback = linger_reconnect_cb; 3088 } else { 3089 if (!lreq->is_watch) 3090 lreq->notify_id = 0; 3091 else 3092 WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH); 3093 dout("lreq %p register\n", lreq); 3094 req->r_callback = linger_commit_cb; 3095 } 3096 mutex_unlock(&lreq->lock); 3097 3098 req->r_priv = linger_get(lreq); 3099 req->r_linger = true; 3100 3101 submit_request(req, true); 3102 } 3103 3104 static void linger_ping_cb(struct ceph_osd_request *req) 3105 { 3106 struct ceph_osd_linger_request *lreq = req->r_priv; 3107 3108 mutex_lock(&lreq->lock); 3109 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n", 3110 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent, 3111 lreq->last_error); 3112 if (lreq->register_gen == req->r_ops[0].watch.gen) { 3113 if (!req->r_result) { 3114 lreq->watch_valid_thru = lreq->ping_sent; 3115 } else if (!lreq->last_error) { 3116 lreq->last_error = normalize_watch_error(req->r_result); 3117 queue_watch_error(lreq); 3118 } 3119 } else { 3120 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq, 3121 lreq->register_gen, req->r_ops[0].watch.gen); 3122 } 3123 3124 mutex_unlock(&lreq->lock); 3125 linger_put(lreq); 3126 } 3127 3128 static void send_linger_ping(struct ceph_osd_linger_request *lreq) 3129 { 3130 struct ceph_osd_client *osdc = lreq->osdc; 3131 struct ceph_osd_request *req = lreq->ping_req; 3132 struct ceph_osd_req_op *op = &req->r_ops[0]; 3133 3134 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 3135 dout("%s PAUSERD\n", __func__); 3136 return; 3137 } 3138 3139 lreq->ping_sent = jiffies; 3140 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n", 3141 __func__, lreq, lreq->linger_id, lreq->ping_sent, 3142 lreq->register_gen); 3143 3144 if (req->r_osd) 3145 cancel_linger_request(req); 3146 3147 request_reinit(req); 3148 target_copy(&req->r_t, &lreq->t); 3149 3150 WARN_ON(op->op != CEPH_OSD_OP_WATCH || 3151 op->watch.cookie != lreq->linger_id || 3152 op->watch.op != CEPH_OSD_WATCH_OP_PING); 3153 op->watch.gen = lreq->register_gen; 3154 req->r_callback = linger_ping_cb; 3155 req->r_priv = 
linger_get(lreq); 3156 req->r_linger = true; 3157 3158 ceph_osdc_get_request(req); 3159 account_request(req); 3160 req->r_tid = atomic64_inc_return(&osdc->last_tid); 3161 link_request(lreq->osd, req); 3162 send_request(req); 3163 } 3164 3165 static void linger_submit(struct ceph_osd_linger_request *lreq) 3166 { 3167 struct ceph_osd_client *osdc = lreq->osdc; 3168 struct ceph_osd *osd; 3169 3170 down_write(&osdc->lock); 3171 linger_register(lreq); 3172 if (lreq->is_watch) { 3173 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id; 3174 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id; 3175 } else { 3176 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id; 3177 } 3178 3179 calc_target(osdc, &lreq->t, false); 3180 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3181 link_linger(osd, lreq); 3182 3183 send_linger(lreq); 3184 up_write(&osdc->lock); 3185 } 3186 3187 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq) 3188 { 3189 struct ceph_osd_client *osdc = lreq->osdc; 3190 struct ceph_osd_linger_request *lookup_lreq; 3191 3192 verify_osdc_wrlocked(osdc); 3193 3194 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3195 lreq->linger_id); 3196 if (!lookup_lreq) 3197 return; 3198 3199 WARN_ON(lookup_lreq != lreq); 3200 erase_linger_mc(&osdc->linger_map_checks, lreq); 3201 linger_put(lreq); 3202 } 3203 3204 /* 3205 * @lreq has to be both registered and linked. 3206 */ 3207 static void __linger_cancel(struct ceph_osd_linger_request *lreq) 3208 { 3209 if (lreq->is_watch && lreq->ping_req->r_osd) 3210 cancel_linger_request(lreq->ping_req); 3211 if (lreq->reg_req->r_osd) 3212 cancel_linger_request(lreq->reg_req); 3213 cancel_linger_map_check(lreq); 3214 unlink_linger(lreq->osd, lreq); 3215 linger_unregister(lreq); 3216 } 3217 3218 static void linger_cancel(struct ceph_osd_linger_request *lreq) 3219 { 3220 struct ceph_osd_client *osdc = lreq->osdc; 3221 3222 down_write(&osdc->lock); 3223 if (__linger_registered(lreq)) 3224 __linger_cancel(lreq); 3225 up_write(&osdc->lock); 3226 } 3227 3228 static void send_linger_map_check(struct ceph_osd_linger_request *lreq); 3229 3230 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq) 3231 { 3232 struct ceph_osd_client *osdc = lreq->osdc; 3233 struct ceph_osdmap *map = osdc->osdmap; 3234 3235 verify_osdc_wrlocked(osdc); 3236 WARN_ON(!map->epoch); 3237 3238 if (lreq->register_gen) { 3239 lreq->map_dne_bound = map->epoch; 3240 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__, 3241 lreq, lreq->linger_id); 3242 } else { 3243 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n", 3244 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3245 map->epoch); 3246 } 3247 3248 if (lreq->map_dne_bound) { 3249 if (map->epoch >= lreq->map_dne_bound) { 3250 /* we had a new enough map */ 3251 pr_info("linger_id %llu pool does not exist\n", 3252 lreq->linger_id); 3253 linger_reg_commit_complete(lreq, -ENOENT); 3254 __linger_cancel(lreq); 3255 } 3256 } else { 3257 send_linger_map_check(lreq); 3258 } 3259 } 3260 3261 static void linger_map_check_cb(struct ceph_mon_generic_request *greq) 3262 { 3263 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 3264 struct ceph_osd_linger_request *lreq; 3265 u64 linger_id = greq->private_data; 3266 3267 WARN_ON(greq->result || !greq->u.newest); 3268 3269 down_write(&osdc->lock); 3270 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id); 3271 if (!lreq) { 3272 dout("%s linger_id %llu dne\n", __func__, linger_id); 3273 goto out_unlock; 3274 } 3275 
3276 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n", 3277 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3278 greq->u.newest); 3279 if (!lreq->map_dne_bound) 3280 lreq->map_dne_bound = greq->u.newest; 3281 erase_linger_mc(&osdc->linger_map_checks, lreq); 3282 check_linger_pool_dne(lreq); 3283 3284 linger_put(lreq); 3285 out_unlock: 3286 up_write(&osdc->lock); 3287 } 3288 3289 static void send_linger_map_check(struct ceph_osd_linger_request *lreq) 3290 { 3291 struct ceph_osd_client *osdc = lreq->osdc; 3292 struct ceph_osd_linger_request *lookup_lreq; 3293 int ret; 3294 3295 verify_osdc_wrlocked(osdc); 3296 3297 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3298 lreq->linger_id); 3299 if (lookup_lreq) { 3300 WARN_ON(lookup_lreq != lreq); 3301 return; 3302 } 3303 3304 linger_get(lreq); 3305 insert_linger_mc(&osdc->linger_map_checks, lreq); 3306 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 3307 linger_map_check_cb, lreq->linger_id); 3308 WARN_ON(ret); 3309 } 3310 3311 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) 3312 { 3313 int ret; 3314 3315 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3316 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait); 3317 return ret ?: lreq->reg_commit_error; 3318 } 3319 3320 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq) 3321 { 3322 int ret; 3323 3324 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3325 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait); 3326 return ret ?: lreq->notify_finish_error; 3327 } 3328 3329 /* 3330 * Timeout callback, called every N seconds. When one or more OSD 3331 * requests have been active for more than N seconds, we send a keepalive 3332 * (tag + timestamp) to the OSD to ensure any communications channel 3333 * reset is detected. 3334 */ 3335 static void handle_timeout(struct work_struct *work) 3336 { 3337 struct ceph_osd_client *osdc = 3338 container_of(work, struct ceph_osd_client, timeout_work.work); 3339 struct ceph_options *opts = osdc->client->options; 3340 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; 3341 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout; 3342 LIST_HEAD(slow_osds); 3343 struct rb_node *n, *p; 3344 3345 dout("%s osdc %p\n", __func__, osdc); 3346 down_write(&osdc->lock); 3347 3348 /* 3349 * ping osds that are a bit slow. this ensures that if there 3350 * is a break in the TCP connection we will notice, and reopen 3351 * a connection with that osd (from the fault callback).
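	 * committed lingering watches are pinged instead, which doubles
	 * as a check that the watch is still valid on the OSD.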
3352 */ 3353 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 3354 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 3355 bool found = false; 3356 3357 for (p = rb_first(&osd->o_requests); p; ) { 3358 struct ceph_osd_request *req = 3359 rb_entry(p, struct ceph_osd_request, r_node); 3360 3361 p = rb_next(p); /* abort_request() */ 3362 3363 if (time_before(req->r_stamp, cutoff)) { 3364 dout(" req %p tid %llu on osd%d is laggy\n", 3365 req, req->r_tid, osd->o_osd); 3366 found = true; 3367 } 3368 if (opts->osd_request_timeout && 3369 time_before(req->r_start_stamp, expiry_cutoff)) { 3370 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3371 req->r_tid, osd->o_osd); 3372 abort_request(req, -ETIMEDOUT); 3373 } 3374 } 3375 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { 3376 struct ceph_osd_linger_request *lreq = 3377 rb_entry(p, struct ceph_osd_linger_request, node); 3378 3379 dout(" lreq %p linger_id %llu is served by osd%d\n", 3380 lreq, lreq->linger_id, osd->o_osd); 3381 found = true; 3382 3383 mutex_lock(&lreq->lock); 3384 if (lreq->is_watch && lreq->committed && !lreq->last_error) 3385 send_linger_ping(lreq); 3386 mutex_unlock(&lreq->lock); 3387 } 3388 3389 if (found) 3390 list_move_tail(&osd->o_keepalive_item, &slow_osds); 3391 } 3392 3393 if (opts->osd_request_timeout) { 3394 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { 3395 struct ceph_osd_request *req = 3396 rb_entry(p, struct ceph_osd_request, r_node); 3397 3398 p = rb_next(p); /* abort_request() */ 3399 3400 if (time_before(req->r_start_stamp, expiry_cutoff)) { 3401 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3402 req->r_tid, osdc->homeless_osd.o_osd); 3403 abort_request(req, -ETIMEDOUT); 3404 } 3405 } 3406 } 3407 3408 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) 3409 maybe_request_map(osdc); 3410 3411 while (!list_empty(&slow_osds)) { 3412 struct ceph_osd *osd = list_first_entry(&slow_osds, 3413 struct ceph_osd, 3414 o_keepalive_item); 3415 list_del_init(&osd->o_keepalive_item); 3416 ceph_con_keepalive(&osd->o_con); 3417 } 3418 3419 up_write(&osdc->lock); 3420 schedule_delayed_work(&osdc->timeout_work, 3421 osdc->client->options->osd_keepalive_timeout); 3422 } 3423 3424 static void handle_osds_timeout(struct work_struct *work) 3425 { 3426 struct ceph_osd_client *osdc = 3427 container_of(work, struct ceph_osd_client, 3428 osds_timeout_work.work); 3429 unsigned long delay = osdc->client->options->osd_idle_ttl / 4; 3430 struct ceph_osd *osd, *nosd; 3431 3432 dout("%s osdc %p\n", __func__, osdc); 3433 down_write(&osdc->lock); 3434 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 3435 if (time_before(jiffies, osd->lru_ttl)) 3436 break; 3437 3438 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); 3439 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); 3440 close_osd(osd); 3441 } 3442 3443 up_write(&osdc->lock); 3444 schedule_delayed_work(&osdc->osds_timeout_work, 3445 round_jiffies_relative(delay)); 3446 } 3447 3448 static int ceph_oloc_decode(void **p, void *end, 3449 struct ceph_object_locator *oloc) 3450 { 3451 u8 struct_v, struct_cv; 3452 u32 len; 3453 void *struct_end; 3454 int ret = 0; 3455 3456 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3457 struct_v = ceph_decode_8(p); 3458 struct_cv = ceph_decode_8(p); 3459 if (struct_v < 3) { 3460 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n", 3461 struct_v, struct_cv); 3462 goto e_inval; 3463 } 3464 if (struct_cv > 6) { 3465 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n", 3466 struct_v, struct_cv); 
3467 goto e_inval; 3468 } 3469 len = ceph_decode_32(p); 3470 ceph_decode_need(p, end, len, e_inval); 3471 struct_end = *p + len; 3472 3473 oloc->pool = ceph_decode_64(p); 3474 *p += 4; /* skip preferred */ 3475 3476 len = ceph_decode_32(p); 3477 if (len > 0) { 3478 pr_warn("ceph_object_locator::key is set\n"); 3479 goto e_inval; 3480 } 3481 3482 if (struct_v >= 5) { 3483 bool changed = false; 3484 3485 len = ceph_decode_32(p); 3486 if (len > 0) { 3487 ceph_decode_need(p, end, len, e_inval); 3488 if (!oloc->pool_ns || 3489 ceph_compare_string(oloc->pool_ns, *p, len)) 3490 changed = true; 3491 *p += len; 3492 } else { 3493 if (oloc->pool_ns) 3494 changed = true; 3495 } 3496 if (changed) { 3497 /* redirect changes namespace */ 3498 pr_warn("ceph_object_locator::nspace is changed\n"); 3499 goto e_inval; 3500 } 3501 } 3502 3503 if (struct_v >= 6) { 3504 s64 hash = ceph_decode_64(p); 3505 if (hash != -1) { 3506 pr_warn("ceph_object_locator::hash is set\n"); 3507 goto e_inval; 3508 } 3509 } 3510 3511 /* skip the rest */ 3512 *p = struct_end; 3513 out: 3514 return ret; 3515 3516 e_inval: 3517 ret = -EINVAL; 3518 goto out; 3519 } 3520 3521 static int ceph_redirect_decode(void **p, void *end, 3522 struct ceph_request_redirect *redir) 3523 { 3524 u8 struct_v, struct_cv; 3525 u32 len; 3526 void *struct_end; 3527 int ret; 3528 3529 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3530 struct_v = ceph_decode_8(p); 3531 struct_cv = ceph_decode_8(p); 3532 if (struct_cv > 1) { 3533 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n", 3534 struct_v, struct_cv); 3535 goto e_inval; 3536 } 3537 len = ceph_decode_32(p); 3538 ceph_decode_need(p, end, len, e_inval); 3539 struct_end = *p + len; 3540 3541 ret = ceph_oloc_decode(p, end, &redir->oloc); 3542 if (ret) 3543 goto out; 3544 3545 len = ceph_decode_32(p); 3546 if (len > 0) { 3547 pr_warn("ceph_request_redirect::object_name is set\n"); 3548 goto e_inval; 3549 } 3550 3551 /* skip the rest */ 3552 *p = struct_end; 3553 out: 3554 return ret; 3555 3556 e_inval: 3557 ret = -EINVAL; 3558 goto out; 3559 } 3560 3561 struct MOSDOpReply { 3562 struct ceph_pg pgid; 3563 u64 flags; 3564 int result; 3565 u32 epoch; 3566 int num_ops; 3567 u32 outdata_len[CEPH_OSD_MAX_OPS]; 3568 s32 rval[CEPH_OSD_MAX_OPS]; 3569 int retry_attempt; 3570 struct ceph_eversion replay_version; 3571 u64 user_version; 3572 struct ceph_request_redirect redirect; 3573 }; 3574 3575 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m) 3576 { 3577 void *p = msg->front.iov_base; 3578 void *const end = p + msg->front.iov_len; 3579 u16 version = le16_to_cpu(msg->hdr.version); 3580 struct ceph_eversion bad_replay_version; 3581 u8 decode_redir; 3582 u32 len; 3583 int ret; 3584 int i; 3585 3586 ceph_decode_32_safe(&p, end, len, e_inval); 3587 ceph_decode_need(&p, end, len, e_inval); 3588 p += len; /* skip oid */ 3589 3590 ret = ceph_decode_pgid(&p, end, &m->pgid); 3591 if (ret) 3592 return ret; 3593 3594 ceph_decode_64_safe(&p, end, m->flags, e_inval); 3595 ceph_decode_32_safe(&p, end, m->result, e_inval); 3596 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval); 3597 memcpy(&bad_replay_version, p, sizeof(bad_replay_version)); 3598 p += sizeof(bad_replay_version); 3599 ceph_decode_32_safe(&p, end, m->epoch, e_inval); 3600 3601 ceph_decode_32_safe(&p, end, m->num_ops, e_inval); 3602 if (m->num_ops > ARRAY_SIZE(m->outdata_len)) 3603 goto e_inval; 3604 3605 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op), 3606 e_inval); 3607 for (i = 0; i < m->num_ops; 
i++) { 3608 struct ceph_osd_op *op = p; 3609 3610 m->outdata_len[i] = le32_to_cpu(op->payload_len); 3611 p += sizeof(*op); 3612 } 3613 3614 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval); 3615 for (i = 0; i < m->num_ops; i++) 3616 ceph_decode_32_safe(&p, end, m->rval[i], e_inval); 3617 3618 if (version >= 5) { 3619 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval); 3620 memcpy(&m->replay_version, p, sizeof(m->replay_version)); 3621 p += sizeof(m->replay_version); 3622 ceph_decode_64_safe(&p, end, m->user_version, e_inval); 3623 } else { 3624 m->replay_version = bad_replay_version; /* struct */ 3625 m->user_version = le64_to_cpu(m->replay_version.version); 3626 } 3627 3628 if (version >= 6) { 3629 if (version >= 7) 3630 ceph_decode_8_safe(&p, end, decode_redir, e_inval); 3631 else 3632 decode_redir = 1; 3633 } else { 3634 decode_redir = 0; 3635 } 3636 3637 if (decode_redir) { 3638 ret = ceph_redirect_decode(&p, end, &m->redirect); 3639 if (ret) 3640 return ret; 3641 } else { 3642 ceph_oloc_init(&m->redirect.oloc); 3643 } 3644 3645 return 0; 3646 3647 e_inval: 3648 return -EINVAL; 3649 } 3650 3651 /* 3652 * Handle MOSDOpReply. Set ->r_result and call the callback if it is 3653 * specified. 3654 */ 3655 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) 3656 { 3657 struct ceph_osd_client *osdc = osd->o_osdc; 3658 struct ceph_osd_request *req; 3659 struct MOSDOpReply m; 3660 u64 tid = le64_to_cpu(msg->hdr.tid); 3661 u32 data_len = 0; 3662 int ret; 3663 int i; 3664 3665 dout("%s msg %p tid %llu\n", __func__, msg, tid); 3666 3667 down_read(&osdc->lock); 3668 if (!osd_registered(osd)) { 3669 dout("%s osd%d unknown\n", __func__, osd->o_osd); 3670 goto out_unlock_osdc; 3671 } 3672 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); 3673 3674 mutex_lock(&osd->lock); 3675 req = lookup_request(&osd->o_requests, tid); 3676 if (!req) { 3677 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid); 3678 goto out_unlock_session; 3679 } 3680 3681 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns; 3682 ret = decode_MOSDOpReply(msg, &m); 3683 m.redirect.oloc.pool_ns = NULL; 3684 if (ret) { 3685 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n", 3686 req->r_tid, ret); 3687 ceph_msg_dump(msg); 3688 goto fail_request; 3689 } 3690 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n", 3691 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed, 3692 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch), 3693 le64_to_cpu(m.replay_version.version), m.user_version); 3694 3695 if (m.retry_attempt >= 0) { 3696 if (m.retry_attempt != req->r_attempts - 1) { 3697 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n", 3698 req, req->r_tid, m.retry_attempt, 3699 req->r_attempts - 1); 3700 goto out_unlock_session; 3701 } 3702 } else { 3703 WARN_ON(1); /* MOSDOpReply v4 is assumed */ 3704 } 3705 3706 if (!ceph_oloc_empty(&m.redirect.oloc)) { 3707 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid, 3708 m.redirect.oloc.pool); 3709 unlink_request(osd, req); 3710 mutex_unlock(&osd->lock); 3711 3712 /* 3713 * Not ceph_oloc_copy() - changing pool_ns is not 3714 * supported. 
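		 * (The request's pool_ns was only lent to @m for
		 * decoding and was detached again right after
		 * decode_MOSDOpReply() above.)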
3715 */ 3716 req->r_t.target_oloc.pool = m.redirect.oloc.pool; 3717 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | 3718 CEPH_OSD_FLAG_IGNORE_OVERLAY | 3719 CEPH_OSD_FLAG_IGNORE_CACHE; 3720 req->r_tid = 0; 3721 __submit_request(req, false); 3722 goto out_unlock_osdc; 3723 } 3724 3725 if (m.result == -EAGAIN) { 3726 dout("req %p tid %llu EAGAIN\n", req, req->r_tid); 3727 unlink_request(osd, req); 3728 mutex_unlock(&osd->lock); 3729 3730 /* 3731 * The object is missing on the replica or not (yet) 3732 * readable. Clear pgid to force a resend to the primary 3733 * via legacy_change. 3734 */ 3735 req->r_t.pgid.pool = 0; 3736 req->r_t.pgid.seed = 0; 3737 WARN_ON(!req->r_t.used_replica); 3738 req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS | 3739 CEPH_OSD_FLAG_LOCALIZE_READS); 3740 req->r_tid = 0; 3741 __submit_request(req, false); 3742 goto out_unlock_osdc; 3743 } 3744 3745 if (m.num_ops != req->r_num_ops) { 3746 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops, 3747 req->r_num_ops, req->r_tid); 3748 goto fail_request; 3749 } 3750 for (i = 0; i < req->r_num_ops; i++) { 3751 dout(" req %p tid %llu op %d rval %d len %u\n", req, 3752 req->r_tid, i, m.rval[i], m.outdata_len[i]); 3753 req->r_ops[i].rval = m.rval[i]; 3754 req->r_ops[i].outdata_len = m.outdata_len[i]; 3755 data_len += m.outdata_len[i]; 3756 } 3757 if (data_len != le32_to_cpu(msg->hdr.data_len)) { 3758 pr_err("sum of lens %u != %u for tid %llu\n", data_len, 3759 le32_to_cpu(msg->hdr.data_len), req->r_tid); 3760 goto fail_request; 3761 } 3762 dout("%s req %p tid %llu result %d data_len %u\n", __func__, 3763 req, req->r_tid, m.result, data_len); 3764 3765 /* 3766 * Since we only ever request ONDISK, we should only ever get 3767 * one (type of) reply back. 3768 */ 3769 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK)); 3770 req->r_result = m.result ?: data_len; 3771 finish_request(req); 3772 mutex_unlock(&osd->lock); 3773 up_read(&osdc->lock); 3774 3775 __complete_request(req); 3776 return; 3777 3778 fail_request: 3779 complete_request(req, -EIO); 3780 out_unlock_session: 3781 mutex_unlock(&osd->lock); 3782 out_unlock_osdc: 3783 up_read(&osdc->lock); 3784 } 3785 3786 static void set_pool_was_full(struct ceph_osd_client *osdc) 3787 { 3788 struct rb_node *n; 3789 3790 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { 3791 struct ceph_pg_pool_info *pi = 3792 rb_entry(n, struct ceph_pg_pool_info, node); 3793 3794 pi->was_full = __pool_full(pi); 3795 } 3796 } 3797 3798 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id) 3799 { 3800 struct ceph_pg_pool_info *pi; 3801 3802 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id); 3803 if (!pi) 3804 return false; 3805 3806 return pi->was_full && !__pool_full(pi); 3807 } 3808 3809 static enum calc_target_result 3810 recalc_linger_target(struct ceph_osd_linger_request *lreq) 3811 { 3812 struct ceph_osd_client *osdc = lreq->osdc; 3813 enum calc_target_result ct_res; 3814 3815 ct_res = calc_target(osdc, &lreq->t, true); 3816 if (ct_res == CALC_TARGET_NEED_RESEND) { 3817 struct ceph_osd *osd; 3818 3819 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3820 if (osd != lreq->osd) { 3821 unlink_linger(lreq->osd, lreq); 3822 link_linger(osd, lreq); 3823 } 3824 } 3825 3826 return ct_res; 3827 } 3828 3829 /* 3830 * Requeue requests whose mapping to an OSD has changed. 
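 *
 * @force_resend: resend even if the target mapping did not change
 * @cleared_full: the osdmap FULL flag was cleared, resend paused writes
 * @check_pool_cleared_full: also consult per-pool full flags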
3831 */ 3832 static void scan_requests(struct ceph_osd *osd, 3833 bool force_resend, 3834 bool cleared_full, 3835 bool check_pool_cleared_full, 3836 struct rb_root *need_resend, 3837 struct list_head *need_resend_linger) 3838 { 3839 struct ceph_osd_client *osdc = osd->o_osdc; 3840 struct rb_node *n; 3841 bool force_resend_writes; 3842 3843 for (n = rb_first(&osd->o_linger_requests); n; ) { 3844 struct ceph_osd_linger_request *lreq = 3845 rb_entry(n, struct ceph_osd_linger_request, node); 3846 enum calc_target_result ct_res; 3847 3848 n = rb_next(n); /* recalc_linger_target() */ 3849 3850 dout("%s lreq %p linger_id %llu\n", __func__, lreq, 3851 lreq->linger_id); 3852 ct_res = recalc_linger_target(lreq); 3853 switch (ct_res) { 3854 case CALC_TARGET_NO_ACTION: 3855 force_resend_writes = cleared_full || 3856 (check_pool_cleared_full && 3857 pool_cleared_full(osdc, lreq->t.base_oloc.pool)); 3858 if (!force_resend && !force_resend_writes) 3859 break; 3860 3861 fallthrough; 3862 case CALC_TARGET_NEED_RESEND: 3863 cancel_linger_map_check(lreq); 3864 /* 3865 * scan_requests() for the previous epoch(s) 3866 * may have already added it to the list, since 3867 * it's not unlinked here. 3868 */ 3869 if (list_empty(&lreq->scan_item)) 3870 list_add_tail(&lreq->scan_item, need_resend_linger); 3871 break; 3872 case CALC_TARGET_POOL_DNE: 3873 list_del_init(&lreq->scan_item); 3874 check_linger_pool_dne(lreq); 3875 break; 3876 } 3877 } 3878 3879 for (n = rb_first(&osd->o_requests); n; ) { 3880 struct ceph_osd_request *req = 3881 rb_entry(n, struct ceph_osd_request, r_node); 3882 enum calc_target_result ct_res; 3883 3884 n = rb_next(n); /* unlink_request(), check_pool_dne() */ 3885 3886 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 3887 ct_res = calc_target(osdc, &req->r_t, false); 3888 switch (ct_res) { 3889 case CALC_TARGET_NO_ACTION: 3890 force_resend_writes = cleared_full || 3891 (check_pool_cleared_full && 3892 pool_cleared_full(osdc, req->r_t.base_oloc.pool)); 3893 if (!force_resend && 3894 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) || 3895 !force_resend_writes)) 3896 break; 3897 3898 fallthrough; 3899 case CALC_TARGET_NEED_RESEND: 3900 cancel_map_check(req); 3901 unlink_request(osd, req); 3902 insert_request(need_resend, req); 3903 break; 3904 case CALC_TARGET_POOL_DNE: 3905 check_pool_dne(req); 3906 break; 3907 } 3908 } 3909 } 3910 3911 static int handle_one_map(struct ceph_osd_client *osdc, 3912 void *p, void *end, bool incremental, 3913 struct rb_root *need_resend, 3914 struct list_head *need_resend_linger) 3915 { 3916 struct ceph_osdmap *newmap; 3917 struct rb_node *n; 3918 bool skipped_map = false; 3919 bool was_full; 3920 3921 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); 3922 set_pool_was_full(osdc); 3923 3924 if (incremental) 3925 newmap = osdmap_apply_incremental(&p, end, 3926 ceph_msgr2(osdc->client), 3927 osdc->osdmap); 3928 else 3929 newmap = ceph_osdmap_decode(&p, end, ceph_msgr2(osdc->client)); 3930 if (IS_ERR(newmap)) 3931 return PTR_ERR(newmap); 3932 3933 if (newmap != osdc->osdmap) { 3934 /* 3935 * Preserve ->was_full before destroying the old map. 3936 * For pools that weren't in the old map, ->was_full 3937 * should be false. 
static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osdmap *newmap;
	struct rb_node *n;
	bool skipped_map = false;
	bool was_full;

	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	set_pool_was_full(osdc);

	if (incremental)
		newmap = osdmap_apply_incremental(&p, end,
						  ceph_msgr2(osdc->client),
						  osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end, ceph_msgr2(osdc->client));
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);

	if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
			struct ceph_pg_pool_info *pi =
			    rb_entry(n, struct ceph_pg_pool_info, node);
			struct ceph_pg_pool_info *old_pi;

			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}

		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}

	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
		      need_resend, need_resend_linger);

	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n); /* close_osd() */

		scan_requests(osd, skipped_map, was_full, true, need_resend,
			      need_resend_linger);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}

static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_linger_request *lreq, *nlreq;
	enum calc_target_result ct_res;
	struct rb_node *n;

	/* make sure need_resend targets reflect latest map */
	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n);

		if (req->r_t.epoch < osdc->osdmap->epoch) {
			ct_res = calc_target(osdc, &req->r_t, false);
			if (ct_res == CALC_TARGET_POOL_DNE) {
				erase_request(need_resend, req);
				check_pool_dne(req);
			}
		}
	}

	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		struct ceph_osd *osd;

		n = rb_next(n);
		erase_request(need_resend, req); /* before link_request() */

		osd = lookup_create_osd(osdc, req->r_t.osd, true);
		link_request(osd, req);
		if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}

	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}
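/*
 * For reference, the front of the MOSDMap message decoded below is
 * laid out as follows (a sketch inferred from the decoding sequence,
 * not an authoritative wire specification):
 *
 *	struct ceph_fsid fsid;
 *	u32 nr_inc_maps;
 *	{ u32 epoch; u32 len; u8 data[len]; } [nr_inc_maps]
 *	u32 nr_full_maps;
 *	{ u32 epoch; u32 len; u8 data[len]; } [nr_full_maps]
 */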
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_fsid fsid;
	struct rb_root need_resend = RB_ROOT;
	LIST_HEAD(need_resend_linger);
	bool handled_incremental = false;
	bool was_pauserd, was_pausewr;
	bool pauserd, pausewr;
	int err;

	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
	down_write(&osdc->lock);

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;

	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		      have_pool_full(osdc);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		  have_pool_full(osdc);
	if (was_pauserd || was_pausewr || pauserd || pausewr ||
	    osdc->osdmap->epoch < osdc->epoch_barrier)
		maybe_request_map(osdc);

	kick_requests(osdc, &need_resend, &need_resend_linger);

	ceph_osdc_abort_on_full(osdc);
	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			  osdc->osdmap->epoch);
	up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}
/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
	struct rb_node *n;

	clear_backoffs(osd);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* cancel_linger_request() */

		if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	down_write(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}

	if (!reopen_osd(osd))
		kick_osd_requests(osd);
	maybe_request_map(osdc);

out_unlock:
	up_write(&osdc->lock);
}

struct MOSDBackoff {
	struct ceph_spg spgid;
	u32 map_epoch;
	u8 op;
	u64 id;
	struct ceph_hobject_id *begin;
	struct ceph_hobject_id *end;
};

static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
	if (ret)
		return ret;

	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
	if (ret)
		return ret;

	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
	ceph_decode_8_safe(&p, end, m->op, e_inval);
	ceph_decode_64_safe(&p, end, m->id, e_inval);

	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
	if (!m->begin)
		return -ENOMEM;

	ret = decode_hoid(&p, end, m->begin);
	if (ret) {
		free_hoid(m->begin);
		return ret;
	}

	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
	if (!m->end) {
		free_hoid(m->begin);
		return -ENOMEM;
	}

	ret = decode_hoid(&p, end, m->end);
	if (ret) {
		free_hoid(m->begin);
		free_hoid(m->end);
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
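/*
 * The MOSDBackoff front consumed above and assembled below is (v1, a
 * sketch inferred from the decode/encode pairing):
 *
 *	spgid (encoding header, pgid, shard)
 *	u32 map_epoch; u8 op; u64 id;
 *	begin, end - hobject ranges, each with its own encoding header
 */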
static struct ceph_msg *create_backoff_message(
				const struct ceph_osd_backoff *backoff,
				u32 map_epoch)
{
	struct ceph_msg *msg;
	void *p, *end;
	int msg_size;

	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->begin);
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->end);

	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
	if (!msg)
		return NULL;

	p = msg->front.iov_base;
	end = p + msg->front_alloc_len;

	encode_spgid(&p, &backoff->spgid);
	ceph_encode_32(&p, map_epoch);
	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
	ceph_encode_64(&p, backoff->id);
	encode_hoid(&p, end, backoff->begin);
	encode_hoid(&p, end, backoff->end);
	BUG_ON(p != end);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}

static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_msg *msg;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
	if (!spg) {
		spg = alloc_spg_mapping();
		if (!spg) {
			pr_err("%s failed to allocate spg\n", __func__);
			return;
		}
		spg->spgid = m->spgid; /* struct */
		insert_spg_mapping(&osd->o_backoff_mappings, spg);
	}

	backoff = alloc_backoff();
	if (!backoff) {
		pr_err("%s failed to allocate backoff\n", __func__);
		return;
	}
	backoff->spgid = m->spgid; /* struct */
	backoff->id = m->id;
	backoff->begin = m->begin;
	m->begin = NULL; /* backoff now owns this */
	backoff->end = m->end;
	m->end = NULL;   /* ditto */

	insert_backoff(&spg->backoffs, backoff);
	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);

	/*
	 * Ack with original backoff's epoch so that the OSD can
	 * discard this if there was a PG split.
	 */
	msg = create_backoff_message(backoff, m->map_epoch);
	if (!msg) {
		pr_err("%s failed to allocate msg\n", __func__);
		return;
	}
	ceph_con_send(&osd->o_con, msg);
}

static bool target_contained_by(const struct ceph_osd_request_target *t,
				const struct ceph_hobject_id *begin,
				const struct ceph_hobject_id *end)
{
	struct ceph_hobject_id hoid;
	int cmp;

	hoid_fill_from_target(&hoid, t);
	cmp = hoid_compare(&hoid, begin);
	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
}

static void handle_backoff_unblock(struct ceph_osd *osd,
				   const struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct rb_node *n;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
	if (!backoff) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		return;
	}

	if (hoid_compare(backoff->begin, m->begin) &&
	    hoid_compare(backoff->end, m->end)) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		/* unblock it anyway... */
	}

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
	BUG_ON(!spg);

	erase_backoff(&spg->backoffs, backoff);
	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
	free_backoff(backoff);

	if (RB_EMPTY_ROOT(&spg->backoffs)) {
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}

	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
			/*
			 * Match against @m, not @backoff -- the PG may
			 * have split on the OSD.
			 */
			if (target_contained_by(&req->r_t, m->begin, m->end)) {
				/*
				 * If no other installed backoff applies,
				 * resend.
				 */
				send_request(req);
			}
		}
	}
}

static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct MOSDBackoff m;
	int ret;

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		up_read(&osdc->lock);
		return;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	ret = decode_MOSDBackoff(msg, &m);
	if (ret) {
		pr_err("failed to decode MOSDBackoff: %d\n", ret);
		ceph_msg_dump(msg);
		goto out_unlock;
	}

	switch (m.op) {
	case CEPH_OSD_BACKOFF_OP_BLOCK:
		handle_backoff_block(osd, &m);
		break;
	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
		handle_backoff_unblock(osd, &m);
		break;
	default:
		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
	}

	free_hoid(m.begin);
	free_hoid(m.end);

out_unlock:
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);
}
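/*
 * To recap the backoff machinery above: on BLOCK we record the
 * [begin, end) range (target_contained_by() treats it as half-open)
 * and ack it back to the OSD; on UNBLOCK we drop the range and resend
 * any request whose target falls inside it.  Backoffs are tracked per
 * spg and, for quick lookup on UNBLOCK, by id.
 */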
/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8; /* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    msg->num_data_items ? &msg->data[0] : NULL;

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
					data->own_pages = false;
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}
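/*
 * The CEPH_MSG_WATCH_NOTIFY front decoded above is (a sketch inferred
 * from the decoding sequence):
 *
 *	u8 proto_ver; u8 opcode; u64 cookie; u64 ver (skipped);
 *	u64 notify_id;
 *	proto_ver >= 1:    u32 payload_len; u8 payload[payload_len];
 *	hdr.version >= 2:  s32 return_code;
 *	hdr.version >= 3:  u64 notifier_id;
 */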
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister a registered request.  The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);

/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
					ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
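/*
 * A typical synchronous caller pairs the two (a sketch; request
 * allocation and op setup elided):
 *
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	if (ret >= 0)
 *		ret = req->r_ops[0].rval;
 *	ceph_osdc_put_request(req);
 *
 * ceph_osdc_call() and ceph_osdc_list_watchers() below follow this
 * pattern.
 */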
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);

static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	return req;
}

static struct ceph_osd_request *
alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
{
	struct ceph_osd_request *req;

	req = alloc_linger_request(lreq);
	if (!req)
		return NULL;

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	osd_req_op_watch_init(req, 0, 0, watch_opcode);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}

/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
	struct ceph_osd_linger_request *lreq;
	int ret;

	lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);

	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&lreq->mtime);

	lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);
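/*
 * Watch lifecycle from a client's perspective (a sketch with
 * hypothetical callbacks my_watch_cb/my_watch_err):
 *
 *	handle = ceph_osdc_watch(osdc, &oid, &oloc, my_watch_cb,
 *				 my_watch_err, priv);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...notify events are delivered via my_watch_cb()...
 *	ceph_osdc_unwatch(osdc, handle);
 *
 * If my_watch_err() fires (see ceph_osdc_watch_check() below), the
 * watch is no longer valid and should likewise be torn down with
 * ceph_osdc_unwatch().
 */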
/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);

static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 u32 payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
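/*
 * The NOTIFY_ACK request data assembled above is simply:
 *
 *	u64 notify_id; u64 cookie; u32 payload_len; u8 payload[payload_len];
 *
 * with payload_len 0 and no payload bytes when @payload is NULL.  The
 * ceph_pagelist_encode_*() helpers return 0 or -ENOMEM, which is why
 * their results can be OR-ed together and checked once.
 */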
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     u32 payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
				     payload, payload_len);
	if (ret)
		goto out_put_lreq;

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
	if (ret)
		goto out_put_lreq;

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
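/*
 * Example notify round-trip (a sketch; @payload is whatever opaque
 * blob the watchers expect, 30 is an arbitrary timeout in seconds):
 *
 *	struct page **reply_pages = NULL;
 *	size_t reply_len = 0;
 *
 *	ret = ceph_osdc_notify(osdc, &oid, &oloc, payload, payload_len,
 *			       30, &reply_pages, &reply_len);
 *	if (!ret)
 *		...parse watcher acks out of reply_pages...
 *	if (reply_pages)
 *		ceph_release_page_vector(reply_pages,
 *					 calc_pages_for(0, reply_len));
 */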
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}
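/*
 * A periodic keepalive check might use it like this (a sketch;
 * STALE_MS is a hypothetical caller-chosen threshold):
 *
 *	ret = ceph_osdc_watch_check(osdc, handle);
 *	if (ret < 0)
 *		...watch is dead, unwatch and re-watch...
 *	else if (ret > STALE_MS)
 *		...no recent confirmation, treat as suspect...
 */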
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		goto bad;

	ret = -EINVAL;
	ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
	ceph_decode_64_safe(p, end, item->cookie, bad);
	ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */

	if (struct_v >= 2) {
		ret = ceph_decode_entity_addr(p, end, &item->addr);
		if (ret)
			goto bad;
	} else {
		ret = 0;
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr));
bad:
	return ret;
}

static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}

/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);
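/*
 * Example (a sketch): log the current watchers of an object.
 *
 *	struct ceph_watch_item *watchers;
 *	u32 num_watchers, i;
 *
 *	ret = ceph_osdc_list_watchers(osdc, &oid, &oloc, &watchers,
 *				      &num_watchers);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < num_watchers; i++)
 *		pr_info("watcher %s cookie %llu\n",
 *			ceph_pr_addr(&watchers[i].addr),
 *			watchers[i].cookie);
 *	kfree(watchers);
 */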
/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);

/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page **resp_pages, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE)
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = osd_req_op_cls_init(req, 0, class, method);
	if (ret)
		goto out_put_req;

	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_pages)
		osd_req_op_cls_response_data_pages(req, 0, resp_pages,
						   *resp_len, 0, false, false);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_pages)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
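/*
 * Example cls invocation (a sketch; "lock"/"get_info" stand in for
 * whatever class/method the caller actually needs, with the request
 * payload pre-encoded into @req_page):
 *
 *	struct page *resp_page = alloc_page(GFP_NOIO);
 *	size_t resp_len = PAGE_SIZE;
 *
 *	ret = ceph_osdc_call(osdc, &oid, &oloc, "lock", "get_info",
 *			     CEPH_OSD_FLAG_READ, req_page, req_len,
 *			     &resp_page, &resp_len);
 *	if (ret >= 0)
 *		...decode resp_len bytes from resp_page...
 */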
/*
 * reset all osd connections
 */
void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	down_write(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n);
		if (!reopen_osd(osd))
			kick_osd_requests(osd);
	}
	up_write(&osdc->lock);
}

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
	if (!osdc->completion_wq)
		goto out_notify_wq;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_notify_wq:
	destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	destroy_workqueue(osdc->completion_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

int osd_req_op_copy_from_init(struct ceph_osd_request *req,
			      u64 src_snapid, u64 src_version,
			      struct ceph_object_id *src_oid,
			      struct ceph_object_locator *src_oloc,
			      u32 src_fadvise_flags,
			      u32 dst_fadvise_flags,
			      u32 truncate_seq, u64 truncate_size,
			      u8 copy_from_flags)
{
	struct ceph_osd_req_op *op;
	struct page **pages;
	void *p, *end;

	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
			     dst_fadvise_flags);
	op->copy_from.snapid = src_snapid;
	op->copy_from.src_version = src_version;
	op->copy_from.flags = copy_from_flags;
	op->copy_from.src_fadvise_flags = src_fadvise_flags;

	p = page_address(pages[0]);
	end = p + PAGE_SIZE;
	ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
	encode_oloc(&p, end, src_oloc);
	ceph_encode_32(&p, truncate_seq);
	ceph_encode_64(&p, truncate_size);
	op->indata_len = PAGE_SIZE - (end - p);

	ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
				 op->indata_len, 0, false, true);
	return 0;
}
EXPORT_SYMBOL(osd_req_op_copy_from_init);
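/*
 * Note on osd_req_op_copy_from_init()'s length arithmetic: @p starts
 * at the beginning of the page and @end at page start + PAGE_SIZE, so
 * once encoding is done PAGE_SIZE - (end - p) is exactly the number
 * of bytes written, i.e. the op's indata_len.
 */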
int __init ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}

/*
 * handle incoming message
 */
static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}

/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}

static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_msg_data_add_pages(m, pages, data_len, 0, true);
	}

	return m;
}

static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *osd_get_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	if (get_osd(osd))
		return con;
	return NULL;
}

static void osd_put_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	put_osd(osd);
}
/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *
osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;
	int ret;

	ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD,
					 force_new, proto, NULL, NULL);
	if (ret)
		return ERR_PTR(ret);

	return auth;
}

static int osd_add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
					    challenge_buf, challenge_buf_len);
}

static int osd_verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
		auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
		NULL, NULL, NULL, NULL);
}

static int osd_invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static int osd_get_auth_request(struct ceph_connection *con,
				void *buf, int *buf_len,
				void **authorizer, int *authorizer_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_auth_client *ac = o->o_osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;
	int ret;

	ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_OSD,
				       buf, buf_len);
	if (ret)
		return ret;

	*authorizer = auth->authorizer_buf;
	*authorizer_len = auth->authorizer_buf_len;
	return 0;
}

static int osd_handle_auth_reply_more(struct ceph_connection *con,
				      void *reply, int reply_len,
				      void *buf, int *buf_len,
				      void **authorizer, int *authorizer_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_auth_client *ac = o->o_osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;
	int ret;

	ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
					      buf, buf_len);
	if (ret)
		return ret;

	*authorizer = auth->authorizer_buf;
	*authorizer_len = auth->authorizer_buf_len;
	return 0;
}
static int osd_handle_auth_done(struct ceph_connection *con,
				u64 global_id, void *reply, int reply_len,
				u8 *session_key, int *session_key_len,
				u8 *con_secret, int *con_secret_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_auth_client *ac = o->o_osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
					       session_key, session_key_len,
					       con_secret, con_secret_len);
}

static int osd_handle_auth_bad_method(struct ceph_connection *con,
				      int used_proto, int result,
				      const int *allowed_protos, int proto_cnt,
				      const int *allowed_modes, int mode_cnt)
{
	struct ceph_osd *o = con->private;
	struct ceph_mon_client *monc = &o->o_osdc->client->monc;
	int ret;

	if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_OSD,
					    used_proto, result,
					    allowed_protos, proto_cnt,
					    allowed_modes, mode_cnt)) {
		ret = ceph_monc_validate_auth(monc);
		if (ret)
			return ret;
	}

	return -EACCES;
}

static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = osd_get_con,
	.put = osd_put_con,
	.alloc_msg = osd_alloc_msg,
	.dispatch = osd_dispatch,
	.fault = osd_fault,
	.reencode_message = osd_reencode_message,
	.get_authorizer = osd_get_authorizer,
	.add_authorizer_challenge = osd_add_authorizer_challenge,
	.verify_authorizer_reply = osd_verify_authorizer_reply,
	.invalidate_authorizer = osd_invalidate_authorizer,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
	.get_auth_request = osd_get_auth_request,
	.handle_auth_reply_more = osd_handle_auth_reply_more,
	.handle_auth_done = osd_handle_auth_done,
	.handle_auth_bad_method = osd_handle_auth_bad_method,
};