// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
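
/*
 * Note on rwsem_is_wrlocked() above: it infers "held for write" from a
 * failed down_read_trylock().  This is only a heuristic -- the trylock
 * can also fail while a queued writer is merely waiting -- which is
 * acceptable here because the result feeds nothing but WARN_ONs.
 */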

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	u32 xlen;

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
				      objoff, &xlen);
	*objlen = xlen;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
	return 0;
}
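
/*
 * Illustration (made-up numbers): with 4M objects and no striping,
 * off=6M len=4M maps to objnum=1, objoff=2M, xlen=2M -- the extent is
 * clipped at the object boundary and *plen is shortened from 4M to 2M;
 * the caller is expected to issue another request for the remainder.
 */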

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
				   struct ceph_bio_iter *bio_pos,
				   u32 bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio_pos = *bio_pos;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
				     struct ceph_bvec_iter *bvec_pos,
				     u32 num_bvecs)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
	osd_data->bvec_pos = *bvec_pos;
	osd_data->num_bvecs = num_bvecs;
}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
				    unsigned int which,
				    struct ceph_bio_iter *bio_pos,
				    u32 bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
				      unsigned int which,
				      struct bio_vec *bvecs, u32 num_bvecs,
				      u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
					 unsigned int which,
					 struct ceph_bvec_iter *bvec_pos)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
				       unsigned int which,
				       struct bio_vec *bvecs, u32 num_bvecs,
				       u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
	osd_req->r_ops[which].cls.indata_len += bytes;
	osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);
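
/*
 * For CEPH_OSD_OP_CALL, three data channels are in play: request_info
 * carries the class and method names, request_data carries the input
 * blob handed to the method, and response_data (below) receives its
 * output.  Only request_info and request_data count towards indata_len.
 */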

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	case CEPH_OSD_DATA_TYPE_BVECS:
		return osd_data->bvec_pos.iter.bi_size;
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		ceph_pagelist_release(osd_data->pagelist);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		ceph_osd_data_release(&op->copy_from.osd_data);
		break;
	default:
		break;
	}
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;
	dest->recovery_deletes = src->recovery_deletes;

	dest->flags = src->flags;
	dest->used_replica = src->used_replica;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_private_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_private_item);

	target_init(&req->r_t);
}
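
/*
 * Sketch of the usual request lifecycle (illustrative, not lifted from
 * any particular caller):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *	set up r_base_oid/r_base_oloc, then ceph_osdc_alloc_messages();
 *	submit, wait, and finally ceph_osdc_put_request() to drop the
 *	initial reference taken by kref_init() in request_init().
 */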

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					struct ceph_snap_context *snapc,
					unsigned int num_ops,
					bool use_mempool,
					gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
				      int num_request_data_items,
				      int num_reply_data_items)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(req->r_request || req->r_reply);
	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
				       num_request_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
				    num_request_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
				       num_reply_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
				    num_reply_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}

static void get_num_data_items(struct ceph_osd_request *req,
			       int *num_request_data_items,
			       int *num_reply_data_items)
{
	struct ceph_osd_req_op *op;

	*num_request_data_items = 0;
	*num_reply_data_items = 0;

	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
		case CEPH_OSD_OP_NOTIFY_ACK:
		case CEPH_OSD_OP_COPY_FROM2:
			*num_request_data_items += 1;
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
		case CEPH_OSD_OP_READ:
		case CEPH_OSD_OP_LIST_WATCHERS:
			*num_reply_data_items += 1;
			break;

		/* both */
		case CEPH_OSD_OP_NOTIFY:
			*num_request_data_items += 1;
			*num_reply_data_items += 1;
			break;
		case CEPH_OSD_OP_CALL:
			*num_request_data_items += 2;
			*num_reply_data_items += 1;
			break;

		default:
			WARN_ON(!osd_req_opcode_valid(op->op));
			break;
		}
	}
}
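
/*
 * The counts computed above must be known before the messages exist:
 * ceph_msg_new2() sizes a message's data-item array at allocation time,
 * so data items cannot be added beyond the reserved count later.  Keep
 * this function in sync with setup_request_data().
 */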

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	int num_request_data_items, num_reply_data_items;

	get_num_data_items(req, &num_request_data_items,
			   &num_reply_data_items);
	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
					  num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return; /* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
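
/*
 * Illustration of osd_req_op_extent_dup_last() (made-up values): with
 * r_ops[which] being a write of 0~8192, offset_inc=4096 yields
 * r_ops[which + 1] as a write of 4096~4096 -- the tail of the same
 * extent, with indata_len shrunk accordingly.
 */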

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			const char *class, const char *method)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;
	int ret;

	op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ret = ceph_pagelist_append(pagelist, class, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ret = ceph_pagelist_append(pagelist, method, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;
	int ret;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ret = ceph_pagelist_append(pagelist, name, payload_len);
	if (ret)
		goto err_pagelist_free;

	op->xattr.value_len = size;
	ret = ceph_pagelist_append(pagelist, value, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

/*
 * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_*
 */
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size,
				u32 flags)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;
	op->alloc_hint.flags = flags;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment, false);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
		ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
			cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
			cpu_to_le64(src->alloc_hint.expected_write_size);
		dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
		dst->copy_from.src_version =
			cpu_to_le64(src->copy_from.src_version);
		dst->copy_from.flags = src->copy_from.flags;
		dst->copy_from.src_fadvise_flags =
			cpu_to_le32(src->copy_from.src_fadvise_flags);
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
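
/*
 * Note: struct ceph_osd_op above is the little-endian wire form, hence
 * the cpu_to_le*() conversions; payload_len mirrors the host-order
 * indata_len that tracks how much op input data rides in the message.
 */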

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
	req->r_flags = flags | osdc->client->options->read_from_replica;

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	if (num_ops > 1)
		/*
		 * This is a special case for ceph_writepages_start(), but it
		 * also covers ceph_uninline_data().  If more multi-op request
		 * use cases emerge, we will need a separate helper.
		 */
		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
	else
		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
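
/*
 * Worked example of the truncate_size clamping above (illustrative
 * numbers): with 4M objects, a file truncated at 10M and an I/O at
 * off=12M (objoff=0, so object_base=12M), truncate_size(10M) <=
 * object_base, and the per-object truncate_size becomes 0; for an I/O
 * at off=9M (object 2, object_base=8M) it becomes 10M - 8M = 2M.
 */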

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
			int (*fn)(struct ceph_osd_request *req, void *arg),
			void *arg)
{
	struct rb_node *n, *p;

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p);
			if (fn(req, arg))
				return;
		}
	}

	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
		struct ceph_osd_request *req =
		    rb_entry(p, struct ceph_osd_request, r_node);

		p = rb_next(p);
		if (fn(req, arg))
			return;
	}
}

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}
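
/*
 * "Homeless" requests -- ones whose target currently maps to no usable
 * OSD -- are parked on osdc->homeless_osd (o_osd == CEPH_HOMELESS_OSD)
 * so they stay tracked and can be resent once a later osdmap provides
 * a real target.
 */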

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}
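
/*
 * Idle sessions (no regular or linger requests) sit on osdc->osd_lru
 * with a lru_ttl deadline; a session that stays idle past osd_idle_ttl
 * is eventually closed instead of being kept open indefinitely.
 */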

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset the osd connection
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}
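
/*
 * Note: with only the read side of osdc->lock held, lookup_create_osd()
 * must not modify the osds rbtree, so a missing session is reported as
 * -EAGAIN and the caller is expected to retry with the write lock held.
 */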

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}

static int pick_random_replica(const struct ceph_osds *acting)
{
	int i = prandom_u32() % acting->size;

	dout("%s picked osd%d, primary osd%d\n", __func__,
	     acting->osds[i], acting->primary);
	return i;
}

/*
 * Picks the closest replica based on client's location given by
 * crush_location option.  Prefers the primary if the locality is
 * the same.
 */
static int pick_closest_replica(struct ceph_osd_client *osdc,
				const struct ceph_osds *acting)
{
	struct ceph_options *opt = osdc->client->options;
	int best_i, best_locality;
	int i = 0, locality;

	do {
		locality = ceph_get_crush_locality(osdc->osdmap,
						   acting->osds[i],
						   &opt->crush_locs);
		if (i == 0 ||
		    (locality >= 0 && best_locality < 0) ||
		    (locality >= 0 && best_locality >= 0 &&
		     locality < best_locality)) {
			best_i = i;
			best_locality = locality;
		}
	} while (++i < acting->size);

	dout("%s picked osd%d with locality %d, primary osd%d\n", __func__,
	     acting->osds[best_i], best_locality, acting->primary);
	return best_i;
}

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};
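
/*
 * calc_target() below resolves a request target: it applies cache
 * tiering to the base oid/oloc, hashes the result to a PG, maps the PG
 * to up/acting sets and picks the OSD to send to.  The return value
 * says whether nothing changed, the request must be resent, or the
 * pool no longer exists.
 */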

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool is_read = t->flags & CEPH_OSD_FLAG_READ;
	bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change = false;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (is_read && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (is_write && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting,
					  t->used_replica || any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;

		if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
				 CEPH_OSD_FLAG_LOCALIZE_READS)) &&
		    !is_write && pi->type == CEPH_POOL_TYPE_REP &&
		    acting.size > 1) {
			int pos;

			WARN_ON(!is_read || acting.osds[0] != acting.primary);
			if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
				pos = pick_random_replica(&acting);
			} else {
				pos = pick_closest_replica(osdc, &acting);
			}
			t->osd = acting.osds[pos];
			t->used_replica = pos > 0;
		} else {
			t->osd = acting.primary;
			t->used_replica = false;
		}
	}

	if (unpaused || legacy_change || force_resend || split)
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
	     legacy_change, force_resend, split, ct_res, t->osd);
	return ct_res;
}
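
/*
 * Replica reads, as applied above: CEPH_OSD_FLAG_BALANCE_READS sends a
 * read to a random member of the acting set, CEPH_OSD_FLAG_LOCALIZE_READS
 * to the one with the best CRUSH locality.  Both apply only to reads on
 * replicated pools with more than one acting OSD.
 */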

static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}

static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}
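
/*
 * hoid_compare() mirrors the OSD's bitwise hobject_t sort: is_max
 * first, then pool, reversed hash bits, namespace, effective key
 * (locator key if set, else oid), oid and finally snapid -- so client
 * and OSD agree on which objects fall inside a backoff range.
 */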

/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}

static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}
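
/*
 * Background (summary; see the MOSDBackoff handling elsewhere in this
 * file): an OSD may tell the client to back off -- stop sending ops --
 * for a [begin, end) range of hobjects within an spgid, and later
 * unblock it.  The structures below index those ranges.
 */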
1901 */ 1902 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare, 1903 RB_BYVAL, spg_node); 1904 1905 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root, 1906 const struct ceph_hobject_id *hoid) 1907 { 1908 struct rb_node *n = root->rb_node; 1909 1910 while (n) { 1911 struct ceph_osd_backoff *cur = 1912 rb_entry(n, struct ceph_osd_backoff, spg_node); 1913 int cmp; 1914 1915 cmp = hoid_compare(hoid, cur->begin); 1916 if (cmp < 0) { 1917 n = n->rb_left; 1918 } else if (cmp > 0) { 1919 if (hoid_compare(hoid, cur->end) < 0) 1920 return cur; 1921 1922 n = n->rb_right; 1923 } else { 1924 return cur; 1925 } 1926 } 1927 1928 return NULL; 1929 } 1930 1931 /* 1932 * Each backoff has a unique id within its OSD session. 1933 */ 1934 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node) 1935 1936 static void clear_backoffs(struct ceph_osd *osd) 1937 { 1938 while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) { 1939 struct ceph_spg_mapping *spg = 1940 rb_entry(rb_first(&osd->o_backoff_mappings), 1941 struct ceph_spg_mapping, node); 1942 1943 while (!RB_EMPTY_ROOT(&spg->backoffs)) { 1944 struct ceph_osd_backoff *backoff = 1945 rb_entry(rb_first(&spg->backoffs), 1946 struct ceph_osd_backoff, spg_node); 1947 1948 erase_backoff(&spg->backoffs, backoff); 1949 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); 1950 free_backoff(backoff); 1951 } 1952 erase_spg_mapping(&osd->o_backoff_mappings, spg); 1953 free_spg_mapping(spg); 1954 } 1955 } 1956 1957 /* 1958 * Set up a temporary, non-owning view into @t. 1959 */ 1960 static void hoid_fill_from_target(struct ceph_hobject_id *hoid, 1961 const struct ceph_osd_request_target *t) 1962 { 1963 hoid->key = NULL; 1964 hoid->key_len = 0; 1965 hoid->oid = t->target_oid.name; 1966 hoid->oid_len = t->target_oid.name_len; 1967 hoid->snapid = CEPH_NOSNAP; 1968 hoid->hash = t->pgid.seed; 1969 hoid->is_max = false; 1970 if (t->target_oloc.pool_ns) { 1971 hoid->nspace = t->target_oloc.pool_ns->str; 1972 hoid->nspace_len = t->target_oloc.pool_ns->len; 1973 } else { 1974 hoid->nspace = NULL; 1975 hoid->nspace_len = 0; 1976 } 1977 hoid->pool = t->target_oloc.pool; 1978 ceph_hoid_build_hash_cache(hoid); 1979 } 1980 1981 static bool should_plug_request(struct ceph_osd_request *req) 1982 { 1983 struct ceph_osd *osd = req->r_osd; 1984 struct ceph_spg_mapping *spg; 1985 struct ceph_osd_backoff *backoff; 1986 struct ceph_hobject_id hoid; 1987 1988 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid); 1989 if (!spg) 1990 return false; 1991 1992 hoid_fill_from_target(&hoid, &req->r_t); 1993 backoff = lookup_containing_backoff(&spg->backoffs, &hoid); 1994 if (!backoff) 1995 return false; 1996 1997 dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n", 1998 __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool, 1999 backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id); 2000 return true; 2001 } 2002 2003 /* 2004 * Keep get_num_data_items() in sync with this function. 
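 *
 * Which ops carry data items, and in which direction:
 *
 *   request:  WRITE, WRITEFULL, SETXATTR, CMPXATTR, NOTIFY_ACK,
 *             COPY_FROM2
 *   reply:    STAT, READ, LIST_WATCHERS
 *   both:     CALL, NOTIFY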
2005 */ 2006 static void setup_request_data(struct ceph_osd_request *req) 2007 { 2008 struct ceph_msg *request_msg = req->r_request; 2009 struct ceph_msg *reply_msg = req->r_reply; 2010 struct ceph_osd_req_op *op; 2011 2012 if (req->r_request->num_data_items || req->r_reply->num_data_items) 2013 return; 2014 2015 WARN_ON(request_msg->data_length || reply_msg->data_length); 2016 for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { 2017 switch (op->op) { 2018 /* request */ 2019 case CEPH_OSD_OP_WRITE: 2020 case CEPH_OSD_OP_WRITEFULL: 2021 WARN_ON(op->indata_len != op->extent.length); 2022 ceph_osdc_msg_data_add(request_msg, 2023 &op->extent.osd_data); 2024 break; 2025 case CEPH_OSD_OP_SETXATTR: 2026 case CEPH_OSD_OP_CMPXATTR: 2027 WARN_ON(op->indata_len != op->xattr.name_len + 2028 op->xattr.value_len); 2029 ceph_osdc_msg_data_add(request_msg, 2030 &op->xattr.osd_data); 2031 break; 2032 case CEPH_OSD_OP_NOTIFY_ACK: 2033 ceph_osdc_msg_data_add(request_msg, 2034 &op->notify_ack.request_data); 2035 break; 2036 case CEPH_OSD_OP_COPY_FROM2: 2037 ceph_osdc_msg_data_add(request_msg, 2038 &op->copy_from.osd_data); 2039 break; 2040 2041 /* reply */ 2042 case CEPH_OSD_OP_STAT: 2043 ceph_osdc_msg_data_add(reply_msg, 2044 &op->raw_data_in); 2045 break; 2046 case CEPH_OSD_OP_READ: 2047 ceph_osdc_msg_data_add(reply_msg, 2048 &op->extent.osd_data); 2049 break; 2050 case CEPH_OSD_OP_LIST_WATCHERS: 2051 ceph_osdc_msg_data_add(reply_msg, 2052 &op->list_watchers.response_data); 2053 break; 2054 2055 /* both */ 2056 case CEPH_OSD_OP_CALL: 2057 WARN_ON(op->indata_len != op->cls.class_len + 2058 op->cls.method_len + 2059 op->cls.indata_len); 2060 ceph_osdc_msg_data_add(request_msg, 2061 &op->cls.request_info); 2062 /* optional, can be NONE */ 2063 ceph_osdc_msg_data_add(request_msg, 2064 &op->cls.request_data); 2065 /* optional, can be NONE */ 2066 ceph_osdc_msg_data_add(reply_msg, 2067 &op->cls.response_data); 2068 break; 2069 case CEPH_OSD_OP_NOTIFY: 2070 ceph_osdc_msg_data_add(request_msg, 2071 &op->notify.request_data); 2072 ceph_osdc_msg_data_add(reply_msg, 2073 &op->notify.response_data); 2074 break; 2075 } 2076 } 2077 } 2078 2079 static void encode_pgid(void **p, const struct ceph_pg *pgid) 2080 { 2081 ceph_encode_8(p, 1); 2082 ceph_encode_64(p, pgid->pool); 2083 ceph_encode_32(p, pgid->seed); 2084 ceph_encode_32(p, -1); /* preferred */ 2085 } 2086 2087 static void encode_spgid(void **p, const struct ceph_spg *spgid) 2088 { 2089 ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1); 2090 encode_pgid(p, &spgid->pgid); 2091 ceph_encode_8(p, spgid->shard); 2092 } 2093 2094 static void encode_oloc(void **p, void *end, 2095 const struct ceph_object_locator *oloc) 2096 { 2097 ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc)); 2098 ceph_encode_64(p, oloc->pool); 2099 ceph_encode_32(p, -1); /* preferred */ 2100 ceph_encode_32(p, 0); /* key len */ 2101 if (oloc->pool_ns) 2102 ceph_encode_string(p, end, oloc->pool_ns->str, 2103 oloc->pool_ns->len); 2104 else 2105 ceph_encode_32(p, 0); 2106 } 2107 2108 static void encode_request_partial(struct ceph_osd_request *req, 2109 struct ceph_msg *msg) 2110 { 2111 void *p = msg->front.iov_base; 2112 void *const end = p + msg->front_alloc_len; 2113 u32 data_len = 0; 2114 int i; 2115 2116 if (req->r_flags & CEPH_OSD_FLAG_WRITE) { 2117 /* snapshots aren't writeable */ 2118 WARN_ON(req->r_snapid != CEPH_NOSNAP); 2119 } else { 2120 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec || 2121 req->r_data_offset || req->r_snapc); 2122 } 2123 2124 
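	/*
	 * MOSDOp v8 front, in encoding order: spgid, raw hash, osdmap
	 * epoch, flags, reqid, trace, client_inc, mtime, oloc, oid,
	 * ops array, snapid, snap context, retry_attempt.  The features
	 * trailer is appended by encode_request_finish().
	 */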
setup_request_data(req); 2125 2126 encode_spgid(&p, &req->r_t.spgid); /* actual spg */ 2127 ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */ 2128 ceph_encode_32(&p, req->r_osdc->osdmap->epoch); 2129 ceph_encode_32(&p, req->r_flags); 2130 2131 /* reqid */ 2132 ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid)); 2133 memset(p, 0, sizeof(struct ceph_osd_reqid)); 2134 p += sizeof(struct ceph_osd_reqid); 2135 2136 /* trace */ 2137 memset(p, 0, sizeof(struct ceph_blkin_trace_info)); 2138 p += sizeof(struct ceph_blkin_trace_info); 2139 2140 ceph_encode_32(&p, 0); /* client_inc, always 0 */ 2141 ceph_encode_timespec64(p, &req->r_mtime); 2142 p += sizeof(struct ceph_timespec); 2143 2144 encode_oloc(&p, end, &req->r_t.target_oloc); 2145 ceph_encode_string(&p, end, req->r_t.target_oid.name, 2146 req->r_t.target_oid.name_len); 2147 2148 /* ops, can imply data */ 2149 ceph_encode_16(&p, req->r_num_ops); 2150 for (i = 0; i < req->r_num_ops; i++) { 2151 data_len += osd_req_encode_op(p, &req->r_ops[i]); 2152 p += sizeof(struct ceph_osd_op); 2153 } 2154 2155 ceph_encode_64(&p, req->r_snapid); /* snapid */ 2156 if (req->r_snapc) { 2157 ceph_encode_64(&p, req->r_snapc->seq); 2158 ceph_encode_32(&p, req->r_snapc->num_snaps); 2159 for (i = 0; i < req->r_snapc->num_snaps; i++) 2160 ceph_encode_64(&p, req->r_snapc->snaps[i]); 2161 } else { 2162 ceph_encode_64(&p, 0); /* snap_seq */ 2163 ceph_encode_32(&p, 0); /* snaps len */ 2164 } 2165 2166 ceph_encode_32(&p, req->r_attempts); /* retry_attempt */ 2167 BUG_ON(p > end - 8); /* space for features */ 2168 2169 msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */ 2170 /* front_len is finalized in encode_request_finish() */ 2171 msg->front.iov_len = p - msg->front.iov_base; 2172 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2173 msg->hdr.data_len = cpu_to_le32(data_len); 2174 /* 2175 * The header "data_off" is a hint to the receiver allowing it 2176 * to align received data into its buffers such that there's no 2177 * need to re-copy it before writing it to disk (direct I/O). 2178 */ 2179 msg->hdr.data_off = cpu_to_le16(req->r_data_offset); 2180 2181 dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg, 2182 req->r_t.target_oid.name, req->r_t.target_oid.name_len); 2183 } 2184 2185 static void encode_request_finish(struct ceph_msg *msg) 2186 { 2187 void *p = msg->front.iov_base; 2188 void *const partial_end = p + msg->front.iov_len; 2189 void *const end = p + msg->front_alloc_len; 2190 2191 if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) { 2192 /* luminous OSD -- encode features and be done */ 2193 p = partial_end; 2194 ceph_encode_64(&p, msg->con->peer_features); 2195 } else { 2196 struct { 2197 char spgid[CEPH_ENCODING_START_BLK_LEN + 2198 CEPH_PGID_ENCODING_LEN + 1]; 2199 __le32 hash; 2200 __le32 epoch; 2201 __le32 flags; 2202 char reqid[CEPH_ENCODING_START_BLK_LEN + 2203 sizeof(struct ceph_osd_reqid)]; 2204 char trace[sizeof(struct ceph_blkin_trace_info)]; 2205 __le32 client_inc; 2206 struct ceph_timespec mtime; 2207 } __packed head; 2208 struct ceph_pg pgid; 2209 void *oloc, *oid, *tail; 2210 int oloc_len, oid_len, tail_len; 2211 int len; 2212 2213 /* 2214 * Pre-luminous OSD -- reencode v8 into v4 using @head 2215 * as a temporary buffer. Encode the raw PG; the rest 2216 * is just a matter of moving oloc, oid and tail blobs 2217 * around. 
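 *
 * Roughly:
 *
 *   v8: spgid hash epoch flags reqid trace client_inc mtime oloc oid <tail>
 *   v4: client_inc epoch flags mtime reassert_version oloc pgid oid <tail>
 *
 * where <tail> (ops, snapid, snapc, retry_attempt) moves as one
 * opaque blob.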
2218 */ 2219 memcpy(&head, p, sizeof(head)); 2220 p += sizeof(head); 2221 2222 oloc = p; 2223 p += CEPH_ENCODING_START_BLK_LEN; 2224 pgid.pool = ceph_decode_64(&p); 2225 p += 4 + 4; /* preferred, key len */ 2226 len = ceph_decode_32(&p); 2227 p += len; /* nspace */ 2228 oloc_len = p - oloc; 2229 2230 oid = p; 2231 len = ceph_decode_32(&p); 2232 p += len; 2233 oid_len = p - oid; 2234 2235 tail = p; 2236 tail_len = partial_end - p; 2237 2238 p = msg->front.iov_base; 2239 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc)); 2240 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch)); 2241 ceph_encode_copy(&p, &head.flags, sizeof(head.flags)); 2242 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime)); 2243 2244 /* reassert_version */ 2245 memset(p, 0, sizeof(struct ceph_eversion)); 2246 p += sizeof(struct ceph_eversion); 2247 2248 BUG_ON(p >= oloc); 2249 memmove(p, oloc, oloc_len); 2250 p += oloc_len; 2251 2252 pgid.seed = le32_to_cpu(head.hash); 2253 encode_pgid(&p, &pgid); /* raw pg */ 2254 2255 BUG_ON(p >= oid); 2256 memmove(p, oid, oid_len); 2257 p += oid_len; 2258 2259 /* tail -- ops, snapid, snapc, retry_attempt */ 2260 BUG_ON(p >= tail); 2261 memmove(p, tail, tail_len); 2262 p += tail_len; 2263 2264 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */ 2265 } 2266 2267 BUG_ON(p > end); 2268 msg->front.iov_len = p - msg->front.iov_base; 2269 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2270 2271 dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg, 2272 le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len), 2273 le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len), 2274 le16_to_cpu(msg->hdr.version)); 2275 } 2276 2277 /* 2278 * @req has to be assigned a tid and registered. 2279 */ 2280 static void send_request(struct ceph_osd_request *req) 2281 { 2282 struct ceph_osd *osd = req->r_osd; 2283 2284 verify_osd_locked(osd); 2285 WARN_ON(osd->o_osd != req->r_t.osd); 2286 2287 /* backoff? */ 2288 if (should_plug_request(req)) 2289 return; 2290 2291 /* 2292 * We may have a previously queued request message hanging 2293 * around. Cancel it to avoid corrupting the msgr. 
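 *
 * (This happens on resend: the previous attempt may still be sitting
 * on the connection's out queue.  ceph_msg_revoke() is safe to call
 * even if the message was never queued or already went out -- see
 * finish_request(), which revokes unconditionally.)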
2294 */ 2295 if (req->r_sent) 2296 ceph_msg_revoke(req->r_request); 2297 2298 req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR; 2299 if (req->r_attempts) 2300 req->r_flags |= CEPH_OSD_FLAG_RETRY; 2301 else 2302 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY); 2303 2304 encode_request_partial(req, req->r_request); 2305 2306 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n", 2307 __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed, 2308 req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed, 2309 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags, 2310 req->r_attempts); 2311 2312 req->r_t.paused = false; 2313 req->r_stamp = jiffies; 2314 req->r_attempts++; 2315 2316 req->r_sent = osd->o_incarnation; 2317 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 2318 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request)); 2319 } 2320 2321 static void maybe_request_map(struct ceph_osd_client *osdc) 2322 { 2323 bool continuous = false; 2324 2325 verify_osdc_locked(osdc); 2326 WARN_ON(!osdc->osdmap->epoch); 2327 2328 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2329 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) || 2330 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2331 dout("%s osdc %p continuous\n", __func__, osdc); 2332 continuous = true; 2333 } else { 2334 dout("%s osdc %p onetime\n", __func__, osdc); 2335 } 2336 2337 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP, 2338 osdc->osdmap->epoch + 1, continuous)) 2339 ceph_monc_renew_subs(&osdc->client->monc); 2340 } 2341 2342 static void complete_request(struct ceph_osd_request *req, int err); 2343 static void send_map_check(struct ceph_osd_request *req); 2344 2345 static void __submit_request(struct ceph_osd_request *req, bool wrlocked) 2346 { 2347 struct ceph_osd_client *osdc = req->r_osdc; 2348 struct ceph_osd *osd; 2349 enum calc_target_result ct_res; 2350 int err = 0; 2351 bool need_send = false; 2352 bool promoted = false; 2353 2354 WARN_ON(req->r_tid); 2355 dout("%s req %p wrlocked %d\n", __func__, req, wrlocked); 2356 2357 again: 2358 ct_res = calc_target(osdc, &req->r_t, false); 2359 if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked) 2360 goto promote; 2361 2362 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked); 2363 if (IS_ERR(osd)) { 2364 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked); 2365 goto promote; 2366 } 2367 2368 if (osdc->abort_err) { 2369 dout("req %p abort_err %d\n", req, osdc->abort_err); 2370 err = osdc->abort_err; 2371 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) { 2372 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch, 2373 osdc->epoch_barrier); 2374 req->r_t.paused = true; 2375 maybe_request_map(osdc); 2376 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2377 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2378 dout("req %p pausewr\n", req); 2379 req->r_t.paused = true; 2380 maybe_request_map(osdc); 2381 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && 2382 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 2383 dout("req %p pauserd\n", req); 2384 req->r_t.paused = true; 2385 maybe_request_map(osdc); 2386 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2387 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | 2388 CEPH_OSD_FLAG_FULL_FORCE)) && 2389 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2390 pool_full(osdc, req->r_t.base_oloc.pool))) { 2391 dout("req %p full/pool_full\n", req); 2392 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { 2393 err = -ENOSPC; 2394 } else { 2395 pr_warn_ratelimited("FULL or reached pool quota\n"); 2396 
req->r_t.paused = true; 2397 maybe_request_map(osdc); 2398 } 2399 } else if (!osd_homeless(osd)) { 2400 need_send = true; 2401 } else { 2402 maybe_request_map(osdc); 2403 } 2404 2405 mutex_lock(&osd->lock); 2406 /* 2407 * Assign the tid atomically with send_request() to protect 2408 * multiple writes to the same object from racing with each 2409 * other, resulting in out of order ops on the OSDs. 2410 */ 2411 req->r_tid = atomic64_inc_return(&osdc->last_tid); 2412 link_request(osd, req); 2413 if (need_send) 2414 send_request(req); 2415 else if (err) 2416 complete_request(req, err); 2417 mutex_unlock(&osd->lock); 2418 2419 if (!err && ct_res == CALC_TARGET_POOL_DNE) 2420 send_map_check(req); 2421 2422 if (promoted) 2423 downgrade_write(&osdc->lock); 2424 return; 2425 2426 promote: 2427 up_read(&osdc->lock); 2428 down_write(&osdc->lock); 2429 wrlocked = true; 2430 promoted = true; 2431 goto again; 2432 } 2433 2434 static void account_request(struct ceph_osd_request *req) 2435 { 2436 WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK)); 2437 WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE))); 2438 2439 req->r_flags |= CEPH_OSD_FLAG_ONDISK; 2440 atomic_inc(&req->r_osdc->num_requests); 2441 2442 req->r_start_stamp = jiffies; 2443 req->r_start_latency = ktime_get(); 2444 } 2445 2446 static void submit_request(struct ceph_osd_request *req, bool wrlocked) 2447 { 2448 ceph_osdc_get_request(req); 2449 account_request(req); 2450 __submit_request(req, wrlocked); 2451 } 2452 2453 static void finish_request(struct ceph_osd_request *req) 2454 { 2455 struct ceph_osd_client *osdc = req->r_osdc; 2456 2457 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid)); 2458 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2459 2460 req->r_end_latency = ktime_get(); 2461 2462 if (req->r_osd) 2463 unlink_request(req->r_osd, req); 2464 atomic_dec(&osdc->num_requests); 2465 2466 /* 2467 * If an OSD has failed or returned and a request has been sent 2468 * twice, it's possible to get a reply and end up here while the 2469 * request message is queued for delivery. We will ignore the 2470 * reply, so not a big deal, but better to try and catch it. 2471 */ 2472 ceph_msg_revoke(req->r_request); 2473 ceph_msg_revoke_incoming(req->r_reply); 2474 } 2475 2476 static void __complete_request(struct ceph_osd_request *req) 2477 { 2478 dout("%s req %p tid %llu cb %ps result %d\n", __func__, req, 2479 req->r_tid, req->r_callback, req->r_result); 2480 2481 if (req->r_callback) 2482 req->r_callback(req); 2483 complete_all(&req->r_completion); 2484 ceph_osdc_put_request(req); 2485 } 2486 2487 static void complete_request_workfn(struct work_struct *work) 2488 { 2489 struct ceph_osd_request *req = 2490 container_of(work, struct ceph_osd_request, r_complete_work); 2491 2492 __complete_request(req); 2493 } 2494 2495 /* 2496 * This is open-coded in handle_reply(). 
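 *
 * handle_reply() invokes the callback directly (__complete_request()),
 * whereas this path defers to osdc->completion_wq -- callers such as
 * __submit_request() hold osd->lock here, so the callback must not
 * run in their context.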
2497 */ 2498 static void complete_request(struct ceph_osd_request *req, int err) 2499 { 2500 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2501 2502 req->r_result = err; 2503 finish_request(req); 2504 2505 INIT_WORK(&req->r_complete_work, complete_request_workfn); 2506 queue_work(req->r_osdc->completion_wq, &req->r_complete_work); 2507 } 2508 2509 static void cancel_map_check(struct ceph_osd_request *req) 2510 { 2511 struct ceph_osd_client *osdc = req->r_osdc; 2512 struct ceph_osd_request *lookup_req; 2513 2514 verify_osdc_wrlocked(osdc); 2515 2516 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2517 if (!lookup_req) 2518 return; 2519 2520 WARN_ON(lookup_req != req); 2521 erase_request_mc(&osdc->map_checks, req); 2522 ceph_osdc_put_request(req); 2523 } 2524 2525 static void cancel_request(struct ceph_osd_request *req) 2526 { 2527 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2528 2529 cancel_map_check(req); 2530 finish_request(req); 2531 complete_all(&req->r_completion); 2532 ceph_osdc_put_request(req); 2533 } 2534 2535 static void abort_request(struct ceph_osd_request *req, int err) 2536 { 2537 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2538 2539 cancel_map_check(req); 2540 complete_request(req, err); 2541 } 2542 2543 static int abort_fn(struct ceph_osd_request *req, void *arg) 2544 { 2545 int err = *(int *)arg; 2546 2547 abort_request(req, err); 2548 return 0; /* continue iteration */ 2549 } 2550 2551 /* 2552 * Abort all in-flight requests with @err and arrange for all future 2553 * requests to be failed immediately. 2554 */ 2555 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err) 2556 { 2557 dout("%s osdc %p err %d\n", __func__, osdc, err); 2558 down_write(&osdc->lock); 2559 for_each_request(osdc, abort_fn, &err); 2560 osdc->abort_err = err; 2561 up_write(&osdc->lock); 2562 } 2563 EXPORT_SYMBOL(ceph_osdc_abort_requests); 2564 2565 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc) 2566 { 2567 down_write(&osdc->lock); 2568 osdc->abort_err = 0; 2569 up_write(&osdc->lock); 2570 } 2571 EXPORT_SYMBOL(ceph_osdc_clear_abort_err); 2572 2573 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2574 { 2575 if (likely(eb > osdc->epoch_barrier)) { 2576 dout("updating epoch_barrier from %u to %u\n", 2577 osdc->epoch_barrier, eb); 2578 osdc->epoch_barrier = eb; 2579 /* Request map if we're not to the barrier yet */ 2580 if (eb > osdc->osdmap->epoch) 2581 maybe_request_map(osdc); 2582 } 2583 } 2584 2585 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2586 { 2587 down_read(&osdc->lock); 2588 if (unlikely(eb > osdc->epoch_barrier)) { 2589 up_read(&osdc->lock); 2590 down_write(&osdc->lock); 2591 update_epoch_barrier(osdc, eb); 2592 up_write(&osdc->lock); 2593 } else { 2594 up_read(&osdc->lock); 2595 } 2596 } 2597 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier); 2598 2599 /* 2600 * We can end up releasing caps as a result of abort_request(). 2601 * In that case, we probably want to ensure that the cap release message 2602 * has an updated epoch barrier in it, so set the epoch barrier prior to 2603 * aborting the first request. 
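 *
 * The scan runs with osdc->lock held for write, which is what
 * update_epoch_barrier() expects, and the *victims flag makes sure
 * the barrier is bumped at most once per scan no matter how many
 * requests get aborted.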
2604 */ 2605 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg) 2606 { 2607 struct ceph_osd_client *osdc = req->r_osdc; 2608 bool *victims = arg; 2609 2610 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2611 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2612 pool_full(osdc, req->r_t.base_oloc.pool))) { 2613 if (!*victims) { 2614 update_epoch_barrier(osdc, osdc->osdmap->epoch); 2615 *victims = true; 2616 } 2617 abort_request(req, -ENOSPC); 2618 } 2619 2620 return 0; /* continue iteration */ 2621 } 2622 2623 /* 2624 * Drop all pending requests that are stalled waiting on a full condition to 2625 * clear, and complete them with ENOSPC as the return code. Set the 2626 * osdc->epoch_barrier to the latest map epoch that we've seen if any were 2627 * cancelled. 2628 */ 2629 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc) 2630 { 2631 bool victims = false; 2632 2633 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) && 2634 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) 2635 for_each_request(osdc, abort_on_full_fn, &victims); 2636 } 2637 2638 static void check_pool_dne(struct ceph_osd_request *req) 2639 { 2640 struct ceph_osd_client *osdc = req->r_osdc; 2641 struct ceph_osdmap *map = osdc->osdmap; 2642 2643 verify_osdc_wrlocked(osdc); 2644 WARN_ON(!map->epoch); 2645 2646 if (req->r_attempts) { 2647 /* 2648 * We sent a request earlier, which means that 2649 * previously the pool existed, and now it does not 2650 * (i.e., it was deleted). 2651 */ 2652 req->r_map_dne_bound = map->epoch; 2653 dout("%s req %p tid %llu pool disappeared\n", __func__, req, 2654 req->r_tid); 2655 } else { 2656 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__, 2657 req, req->r_tid, req->r_map_dne_bound, map->epoch); 2658 } 2659 2660 if (req->r_map_dne_bound) { 2661 if (map->epoch >= req->r_map_dne_bound) { 2662 /* we had a new enough map */ 2663 pr_info_ratelimited("tid %llu pool does not exist\n", 2664 req->r_tid); 2665 complete_request(req, -ENOENT); 2666 } 2667 } else { 2668 send_map_check(req); 2669 } 2670 } 2671 2672 static void map_check_cb(struct ceph_mon_generic_request *greq) 2673 { 2674 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 2675 struct ceph_osd_request *req; 2676 u64 tid = greq->private_data; 2677 2678 WARN_ON(greq->result || !greq->u.newest); 2679 2680 down_write(&osdc->lock); 2681 req = lookup_request_mc(&osdc->map_checks, tid); 2682 if (!req) { 2683 dout("%s tid %llu dne\n", __func__, tid); 2684 goto out_unlock; 2685 } 2686 2687 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__, 2688 req, req->r_tid, req->r_map_dne_bound, greq->u.newest); 2689 if (!req->r_map_dne_bound) 2690 req->r_map_dne_bound = greq->u.newest; 2691 erase_request_mc(&osdc->map_checks, req); 2692 check_pool_dne(req); 2693 2694 ceph_osdc_put_request(req); 2695 out_unlock: 2696 up_write(&osdc->lock); 2697 } 2698 2699 static void send_map_check(struct ceph_osd_request *req) 2700 { 2701 struct ceph_osd_client *osdc = req->r_osdc; 2702 struct ceph_osd_request *lookup_req; 2703 int ret; 2704 2705 verify_osdc_wrlocked(osdc); 2706 2707 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2708 if (lookup_req) { 2709 WARN_ON(lookup_req != req); 2710 return; 2711 } 2712 2713 ceph_osdc_get_request(req); 2714 insert_request_mc(&osdc->map_checks, req); 2715 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 2716 map_check_cb, req->r_tid); 2717 WARN_ON(ret); 2718 } 2719 2720 /* 2721 * lingering requests, watch/notify v2 infrastructure 
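 *
 * A linger request survives resends and map changes until it is torn
 * down: allocate with linger_alloc(), obtain a linger_id with
 * linger_register(), attach to an OSD session with link_linger() and
 * send with send_linger().  Lifetime is kref-based -- see
 * linger_release() below for the teardown invariants.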
2722 */ 2723 static void linger_release(struct kref *kref) 2724 { 2725 struct ceph_osd_linger_request *lreq = 2726 container_of(kref, struct ceph_osd_linger_request, kref); 2727 2728 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq, 2729 lreq->reg_req, lreq->ping_req); 2730 WARN_ON(!RB_EMPTY_NODE(&lreq->node)); 2731 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node)); 2732 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node)); 2733 WARN_ON(!list_empty(&lreq->scan_item)); 2734 WARN_ON(!list_empty(&lreq->pending_lworks)); 2735 WARN_ON(lreq->osd); 2736 2737 if (lreq->reg_req) 2738 ceph_osdc_put_request(lreq->reg_req); 2739 if (lreq->ping_req) 2740 ceph_osdc_put_request(lreq->ping_req); 2741 target_destroy(&lreq->t); 2742 kfree(lreq); 2743 } 2744 2745 static void linger_put(struct ceph_osd_linger_request *lreq) 2746 { 2747 if (lreq) 2748 kref_put(&lreq->kref, linger_release); 2749 } 2750 2751 static struct ceph_osd_linger_request * 2752 linger_get(struct ceph_osd_linger_request *lreq) 2753 { 2754 kref_get(&lreq->kref); 2755 return lreq; 2756 } 2757 2758 static struct ceph_osd_linger_request * 2759 linger_alloc(struct ceph_osd_client *osdc) 2760 { 2761 struct ceph_osd_linger_request *lreq; 2762 2763 lreq = kzalloc(sizeof(*lreq), GFP_NOIO); 2764 if (!lreq) 2765 return NULL; 2766 2767 kref_init(&lreq->kref); 2768 mutex_init(&lreq->lock); 2769 RB_CLEAR_NODE(&lreq->node); 2770 RB_CLEAR_NODE(&lreq->osdc_node); 2771 RB_CLEAR_NODE(&lreq->mc_node); 2772 INIT_LIST_HEAD(&lreq->scan_item); 2773 INIT_LIST_HEAD(&lreq->pending_lworks); 2774 init_completion(&lreq->reg_commit_wait); 2775 init_completion(&lreq->notify_finish_wait); 2776 2777 lreq->osdc = osdc; 2778 target_init(&lreq->t); 2779 2780 dout("%s lreq %p\n", __func__, lreq); 2781 return lreq; 2782 } 2783 2784 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node) 2785 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node) 2786 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node) 2787 2788 /* 2789 * Create linger request <-> OSD session relation. 2790 * 2791 * @lreq has to be registered, @osd may be homeless. 
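 *
 * Takes a ref on @osd and pins it off the LRU (or bumps num_homeless
 * for the homeless session); unlink_linger() undoes both, mirroring
 * link_request()/unlink_request() for regular requests.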
2792 */ 2793 static void link_linger(struct ceph_osd *osd, 2794 struct ceph_osd_linger_request *lreq) 2795 { 2796 verify_osd_locked(osd); 2797 WARN_ON(!lreq->linger_id || lreq->osd); 2798 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2799 osd->o_osd, lreq, lreq->linger_id); 2800 2801 if (!osd_homeless(osd)) 2802 __remove_osd_from_lru(osd); 2803 else 2804 atomic_inc(&osd->o_osdc->num_homeless); 2805 2806 get_osd(osd); 2807 insert_linger(&osd->o_linger_requests, lreq); 2808 lreq->osd = osd; 2809 } 2810 2811 static void unlink_linger(struct ceph_osd *osd, 2812 struct ceph_osd_linger_request *lreq) 2813 { 2814 verify_osd_locked(osd); 2815 WARN_ON(lreq->osd != osd); 2816 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2817 osd->o_osd, lreq, lreq->linger_id); 2818 2819 lreq->osd = NULL; 2820 erase_linger(&osd->o_linger_requests, lreq); 2821 put_osd(osd); 2822 2823 if (!osd_homeless(osd)) 2824 maybe_move_osd_to_lru(osd); 2825 else 2826 atomic_dec(&osd->o_osdc->num_homeless); 2827 } 2828 2829 static bool __linger_registered(struct ceph_osd_linger_request *lreq) 2830 { 2831 verify_osdc_locked(lreq->osdc); 2832 2833 return !RB_EMPTY_NODE(&lreq->osdc_node); 2834 } 2835 2836 static bool linger_registered(struct ceph_osd_linger_request *lreq) 2837 { 2838 struct ceph_osd_client *osdc = lreq->osdc; 2839 bool registered; 2840 2841 down_read(&osdc->lock); 2842 registered = __linger_registered(lreq); 2843 up_read(&osdc->lock); 2844 2845 return registered; 2846 } 2847 2848 static void linger_register(struct ceph_osd_linger_request *lreq) 2849 { 2850 struct ceph_osd_client *osdc = lreq->osdc; 2851 2852 verify_osdc_wrlocked(osdc); 2853 WARN_ON(lreq->linger_id); 2854 2855 linger_get(lreq); 2856 lreq->linger_id = ++osdc->last_linger_id; 2857 insert_linger_osdc(&osdc->linger_requests, lreq); 2858 } 2859 2860 static void linger_unregister(struct ceph_osd_linger_request *lreq) 2861 { 2862 struct ceph_osd_client *osdc = lreq->osdc; 2863 2864 verify_osdc_wrlocked(osdc); 2865 2866 erase_linger_osdc(&osdc->linger_requests, lreq); 2867 linger_put(lreq); 2868 } 2869 2870 static void cancel_linger_request(struct ceph_osd_request *req) 2871 { 2872 struct ceph_osd_linger_request *lreq = req->r_priv; 2873 2874 WARN_ON(!req->r_linger); 2875 cancel_request(req); 2876 linger_put(lreq); 2877 } 2878 2879 struct linger_work { 2880 struct work_struct work; 2881 struct ceph_osd_linger_request *lreq; 2882 struct list_head pending_item; 2883 unsigned long queued_stamp; 2884 2885 union { 2886 struct { 2887 u64 notify_id; 2888 u64 notifier_id; 2889 void *payload; /* points into @msg front */ 2890 size_t payload_len; 2891 2892 struct ceph_msg *msg; /* for ceph_msg_put() */ 2893 } notify; 2894 struct { 2895 int err; 2896 } error; 2897 }; 2898 }; 2899 2900 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq, 2901 work_func_t workfn) 2902 { 2903 struct linger_work *lwork; 2904 2905 lwork = kzalloc(sizeof(*lwork), GFP_NOIO); 2906 if (!lwork) 2907 return NULL; 2908 2909 INIT_WORK(&lwork->work, workfn); 2910 INIT_LIST_HEAD(&lwork->pending_item); 2911 lwork->lreq = linger_get(lreq); 2912 2913 return lwork; 2914 } 2915 2916 static void lwork_free(struct linger_work *lwork) 2917 { 2918 struct ceph_osd_linger_request *lreq = lwork->lreq; 2919 2920 mutex_lock(&lreq->lock); 2921 list_del(&lwork->pending_item); 2922 mutex_unlock(&lreq->lock); 2923 2924 linger_put(lreq); 2925 kfree(lwork); 2926 } 2927 2928 static void lwork_queue(struct linger_work *lwork) 2929 { 2930 struct 
ceph_osd_linger_request *lreq = lwork->lreq; 2931 struct ceph_osd_client *osdc = lreq->osdc; 2932 2933 verify_lreq_locked(lreq); 2934 WARN_ON(!list_empty(&lwork->pending_item)); 2935 2936 lwork->queued_stamp = jiffies; 2937 list_add_tail(&lwork->pending_item, &lreq->pending_lworks); 2938 queue_work(osdc->notify_wq, &lwork->work); 2939 } 2940 2941 static void do_watch_notify(struct work_struct *w) 2942 { 2943 struct linger_work *lwork = container_of(w, struct linger_work, work); 2944 struct ceph_osd_linger_request *lreq = lwork->lreq; 2945 2946 if (!linger_registered(lreq)) { 2947 dout("%s lreq %p not registered\n", __func__, lreq); 2948 goto out; 2949 } 2950 2951 WARN_ON(!lreq->is_watch); 2952 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n", 2953 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id, 2954 lwork->notify.payload_len); 2955 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id, 2956 lwork->notify.notifier_id, lwork->notify.payload, 2957 lwork->notify.payload_len); 2958 2959 out: 2960 ceph_msg_put(lwork->notify.msg); 2961 lwork_free(lwork); 2962 } 2963 2964 static void do_watch_error(struct work_struct *w) 2965 { 2966 struct linger_work *lwork = container_of(w, struct linger_work, work); 2967 struct ceph_osd_linger_request *lreq = lwork->lreq; 2968 2969 if (!linger_registered(lreq)) { 2970 dout("%s lreq %p not registered\n", __func__, lreq); 2971 goto out; 2972 } 2973 2974 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err); 2975 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err); 2976 2977 out: 2978 lwork_free(lwork); 2979 } 2980 2981 static void queue_watch_error(struct ceph_osd_linger_request *lreq) 2982 { 2983 struct linger_work *lwork; 2984 2985 lwork = lwork_alloc(lreq, do_watch_error); 2986 if (!lwork) { 2987 pr_err("failed to allocate error-lwork\n"); 2988 return; 2989 } 2990 2991 lwork->error.err = lreq->last_error; 2992 lwork_queue(lwork); 2993 } 2994 2995 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq, 2996 int result) 2997 { 2998 if (!completion_done(&lreq->reg_commit_wait)) { 2999 lreq->reg_commit_error = (result <= 0 ? result : 0); 3000 complete_all(&lreq->reg_commit_wait); 3001 } 3002 } 3003 3004 static void linger_commit_cb(struct ceph_osd_request *req) 3005 { 3006 struct ceph_osd_linger_request *lreq = req->r_priv; 3007 3008 mutex_lock(&lreq->lock); 3009 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq, 3010 lreq->linger_id, req->r_result); 3011 linger_reg_commit_complete(lreq, req->r_result); 3012 lreq->committed = true; 3013 3014 if (!lreq->is_watch) { 3015 struct ceph_osd_data *osd_data = 3016 osd_req_op_data(req, 0, notify, response_data); 3017 void *p = page_address(osd_data->pages[0]); 3018 3019 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY || 3020 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 3021 3022 /* make note of the notify_id */ 3023 if (req->r_ops[0].outdata_len >= sizeof(u64)) { 3024 lreq->notify_id = ceph_decode_64(&p); 3025 dout("lreq %p notify_id %llu\n", lreq, 3026 lreq->notify_id); 3027 } else { 3028 dout("lreq %p no notify_id\n", lreq); 3029 } 3030 } 3031 3032 mutex_unlock(&lreq->lock); 3033 linger_put(lreq); 3034 } 3035 3036 static int normalize_watch_error(int err) 3037 { 3038 /* 3039 * Translate ENOENT -> ENOTCONN so that a delete->disconnection 3040 * notification and a failure to reconnect because we raced with 3041 * the delete appear the same to the user. 
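 *
 * E.g. a watcher of a deleted object observes -ENOTCONN either way:
 * whether the OSD noticed the deletion first (watch disconnect) or we
 * did (reconnect failing with -ENOENT).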
3042 */ 3043 if (err == -ENOENT) 3044 err = -ENOTCONN; 3045 3046 return err; 3047 } 3048 3049 static void linger_reconnect_cb(struct ceph_osd_request *req) 3050 { 3051 struct ceph_osd_linger_request *lreq = req->r_priv; 3052 3053 mutex_lock(&lreq->lock); 3054 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__, 3055 lreq, lreq->linger_id, req->r_result, lreq->last_error); 3056 if (req->r_result < 0) { 3057 if (!lreq->last_error) { 3058 lreq->last_error = normalize_watch_error(req->r_result); 3059 queue_watch_error(lreq); 3060 } 3061 } 3062 3063 mutex_unlock(&lreq->lock); 3064 linger_put(lreq); 3065 } 3066 3067 static void send_linger(struct ceph_osd_linger_request *lreq) 3068 { 3069 struct ceph_osd_request *req = lreq->reg_req; 3070 struct ceph_osd_req_op *op = &req->r_ops[0]; 3071 3072 verify_osdc_wrlocked(req->r_osdc); 3073 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3074 3075 if (req->r_osd) 3076 cancel_linger_request(req); 3077 3078 request_reinit(req); 3079 target_copy(&req->r_t, &lreq->t); 3080 req->r_mtime = lreq->mtime; 3081 3082 mutex_lock(&lreq->lock); 3083 if (lreq->is_watch && lreq->committed) { 3084 WARN_ON(op->op != CEPH_OSD_OP_WATCH || 3085 op->watch.cookie != lreq->linger_id); 3086 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT; 3087 op->watch.gen = ++lreq->register_gen; 3088 dout("lreq %p reconnect register_gen %u\n", lreq, 3089 op->watch.gen); 3090 req->r_callback = linger_reconnect_cb; 3091 } else { 3092 if (!lreq->is_watch) 3093 lreq->notify_id = 0; 3094 else 3095 WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH); 3096 dout("lreq %p register\n", lreq); 3097 req->r_callback = linger_commit_cb; 3098 } 3099 mutex_unlock(&lreq->lock); 3100 3101 req->r_priv = linger_get(lreq); 3102 req->r_linger = true; 3103 3104 submit_request(req, true); 3105 } 3106 3107 static void linger_ping_cb(struct ceph_osd_request *req) 3108 { 3109 struct ceph_osd_linger_request *lreq = req->r_priv; 3110 3111 mutex_lock(&lreq->lock); 3112 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n", 3113 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent, 3114 lreq->last_error); 3115 if (lreq->register_gen == req->r_ops[0].watch.gen) { 3116 if (!req->r_result) { 3117 lreq->watch_valid_thru = lreq->ping_sent; 3118 } else if (!lreq->last_error) { 3119 lreq->last_error = normalize_watch_error(req->r_result); 3120 queue_watch_error(lreq); 3121 } 3122 } else { 3123 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq, 3124 lreq->register_gen, req->r_ops[0].watch.gen); 3125 } 3126 3127 mutex_unlock(&lreq->lock); 3128 linger_put(lreq); 3129 } 3130 3131 static void send_linger_ping(struct ceph_osd_linger_request *lreq) 3132 { 3133 struct ceph_osd_client *osdc = lreq->osdc; 3134 struct ceph_osd_request *req = lreq->ping_req; 3135 struct ceph_osd_req_op *op = &req->r_ops[0]; 3136 3137 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 3138 dout("%s PAUSERD\n", __func__); 3139 return; 3140 } 3141 3142 lreq->ping_sent = jiffies; 3143 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n", 3144 __func__, lreq, lreq->linger_id, lreq->ping_sent, 3145 lreq->register_gen); 3146 3147 if (req->r_osd) 3148 cancel_linger_request(req); 3149 3150 request_reinit(req); 3151 target_copy(&req->r_t, &lreq->t); 3152 3153 WARN_ON(op->op != CEPH_OSD_OP_WATCH || 3154 op->watch.cookie != lreq->linger_id || 3155 op->watch.op != CEPH_OSD_WATCH_OP_PING); 3156 op->watch.gen = lreq->register_gen; 3157 req->r_callback = linger_ping_cb; 3158 req->r_priv = 
linger_get(lreq); 3159 req->r_linger = true; 3160 3161 ceph_osdc_get_request(req); 3162 account_request(req); 3163 req->r_tid = atomic64_inc_return(&osdc->last_tid); 3164 link_request(lreq->osd, req); 3165 send_request(req); 3166 } 3167 3168 static void linger_submit(struct ceph_osd_linger_request *lreq) 3169 { 3170 struct ceph_osd_client *osdc = lreq->osdc; 3171 struct ceph_osd *osd; 3172 3173 down_write(&osdc->lock); 3174 linger_register(lreq); 3175 if (lreq->is_watch) { 3176 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id; 3177 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id; 3178 } else { 3179 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id; 3180 } 3181 3182 calc_target(osdc, &lreq->t, false); 3183 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3184 link_linger(osd, lreq); 3185 3186 send_linger(lreq); 3187 up_write(&osdc->lock); 3188 } 3189 3190 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq) 3191 { 3192 struct ceph_osd_client *osdc = lreq->osdc; 3193 struct ceph_osd_linger_request *lookup_lreq; 3194 3195 verify_osdc_wrlocked(osdc); 3196 3197 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3198 lreq->linger_id); 3199 if (!lookup_lreq) 3200 return; 3201 3202 WARN_ON(lookup_lreq != lreq); 3203 erase_linger_mc(&osdc->linger_map_checks, lreq); 3204 linger_put(lreq); 3205 } 3206 3207 /* 3208 * @lreq has to be both registered and linked. 3209 */ 3210 static void __linger_cancel(struct ceph_osd_linger_request *lreq) 3211 { 3212 if (lreq->is_watch && lreq->ping_req->r_osd) 3213 cancel_linger_request(lreq->ping_req); 3214 if (lreq->reg_req->r_osd) 3215 cancel_linger_request(lreq->reg_req); 3216 cancel_linger_map_check(lreq); 3217 unlink_linger(lreq->osd, lreq); 3218 linger_unregister(lreq); 3219 } 3220 3221 static void linger_cancel(struct ceph_osd_linger_request *lreq) 3222 { 3223 struct ceph_osd_client *osdc = lreq->osdc; 3224 3225 down_write(&osdc->lock); 3226 if (__linger_registered(lreq)) 3227 __linger_cancel(lreq); 3228 up_write(&osdc->lock); 3229 } 3230 3231 static void send_linger_map_check(struct ceph_osd_linger_request *lreq); 3232 3233 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq) 3234 { 3235 struct ceph_osd_client *osdc = lreq->osdc; 3236 struct ceph_osdmap *map = osdc->osdmap; 3237 3238 verify_osdc_wrlocked(osdc); 3239 WARN_ON(!map->epoch); 3240 3241 if (lreq->register_gen) { 3242 lreq->map_dne_bound = map->epoch; 3243 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__, 3244 lreq, lreq->linger_id); 3245 } else { 3246 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n", 3247 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3248 map->epoch); 3249 } 3250 3251 if (lreq->map_dne_bound) { 3252 if (map->epoch >= lreq->map_dne_bound) { 3253 /* we had a new enough map */ 3254 pr_info("linger_id %llu pool does not exist\n", 3255 lreq->linger_id); 3256 linger_reg_commit_complete(lreq, -ENOENT); 3257 __linger_cancel(lreq); 3258 } 3259 } else { 3260 send_linger_map_check(lreq); 3261 } 3262 } 3263 3264 static void linger_map_check_cb(struct ceph_mon_generic_request *greq) 3265 { 3266 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 3267 struct ceph_osd_linger_request *lreq; 3268 u64 linger_id = greq->private_data; 3269 3270 WARN_ON(greq->result || !greq->u.newest); 3271 3272 down_write(&osdc->lock); 3273 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id); 3274 if (!lreq) { 3275 dout("%s linger_id %llu dne\n", __func__, linger_id); 3276 goto out_unlock; 3277 } 3278 
3279 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n", 3280 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3281 greq->u.newest); 3282 if (!lreq->map_dne_bound) 3283 lreq->map_dne_bound = greq->u.newest; 3284 erase_linger_mc(&osdc->linger_map_checks, lreq); 3285 check_linger_pool_dne(lreq); 3286 3287 linger_put(lreq); 3288 out_unlock: 3289 up_write(&osdc->lock); 3290 } 3291 3292 static void send_linger_map_check(struct ceph_osd_linger_request *lreq) 3293 { 3294 struct ceph_osd_client *osdc = lreq->osdc; 3295 struct ceph_osd_linger_request *lookup_lreq; 3296 int ret; 3297 3298 verify_osdc_wrlocked(osdc); 3299 3300 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3301 lreq->linger_id); 3302 if (lookup_lreq) { 3303 WARN_ON(lookup_lreq != lreq); 3304 return; 3305 } 3306 3307 linger_get(lreq); 3308 insert_linger_mc(&osdc->linger_map_checks, lreq); 3309 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 3310 linger_map_check_cb, lreq->linger_id); 3311 WARN_ON(ret); 3312 } 3313 3314 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) 3315 { 3316 int ret; 3317 3318 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3319 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait); 3320 return ret ?: lreq->reg_commit_error; 3321 } 3322 3323 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq) 3324 { 3325 int ret; 3326 3327 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3328 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait); 3329 return ret ?: lreq->notify_finish_error; 3330 } 3331 3332 /* 3333 * Timeout callback, called every N seconds. When 1 or more OSD 3334 * requests has been active for more than N seconds, we send a keepalive 3335 * (tag + timestamp) to its OSD to ensure any communications channel 3336 * reset is detected. 3337 */ 3338 static void handle_timeout(struct work_struct *work) 3339 { 3340 struct ceph_osd_client *osdc = 3341 container_of(work, struct ceph_osd_client, timeout_work.work); 3342 struct ceph_options *opts = osdc->client->options; 3343 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; 3344 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout; 3345 LIST_HEAD(slow_osds); 3346 struct rb_node *n, *p; 3347 3348 dout("%s osdc %p\n", __func__, osdc); 3349 down_write(&osdc->lock); 3350 3351 /* 3352 * ping osds that are a bit slow. this ensures that if there 3353 * is a break in the TCP connection we will notice, and reopen 3354 * a connection with that osd (from the fault callback). 
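 *
 * "slow" means the request has been outstanding for longer than
 * osd_keepalive_timeout (the cutoff computed above); such OSDs are
 * collected on slow_osds and sent a keepalive once the scan is done.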
3355 */ 3356 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 3357 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 3358 bool found = false; 3359 3360 for (p = rb_first(&osd->o_requests); p; ) { 3361 struct ceph_osd_request *req = 3362 rb_entry(p, struct ceph_osd_request, r_node); 3363 3364 p = rb_next(p); /* abort_request() */ 3365 3366 if (time_before(req->r_stamp, cutoff)) { 3367 dout(" req %p tid %llu on osd%d is laggy\n", 3368 req, req->r_tid, osd->o_osd); 3369 found = true; 3370 } 3371 if (opts->osd_request_timeout && 3372 time_before(req->r_start_stamp, expiry_cutoff)) { 3373 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3374 req->r_tid, osd->o_osd); 3375 abort_request(req, -ETIMEDOUT); 3376 } 3377 } 3378 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { 3379 struct ceph_osd_linger_request *lreq = 3380 rb_entry(p, struct ceph_osd_linger_request, node); 3381 3382 dout(" lreq %p linger_id %llu is served by osd%d\n", 3383 lreq, lreq->linger_id, osd->o_osd); 3384 found = true; 3385 3386 mutex_lock(&lreq->lock); 3387 if (lreq->is_watch && lreq->committed && !lreq->last_error) 3388 send_linger_ping(lreq); 3389 mutex_unlock(&lreq->lock); 3390 } 3391 3392 if (found) 3393 list_move_tail(&osd->o_keepalive_item, &slow_osds); 3394 } 3395 3396 if (opts->osd_request_timeout) { 3397 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { 3398 struct ceph_osd_request *req = 3399 rb_entry(p, struct ceph_osd_request, r_node); 3400 3401 p = rb_next(p); /* abort_request() */ 3402 3403 if (time_before(req->r_start_stamp, expiry_cutoff)) { 3404 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3405 req->r_tid, osdc->homeless_osd.o_osd); 3406 abort_request(req, -ETIMEDOUT); 3407 } 3408 } 3409 } 3410 3411 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) 3412 maybe_request_map(osdc); 3413 3414 while (!list_empty(&slow_osds)) { 3415 struct ceph_osd *osd = list_first_entry(&slow_osds, 3416 struct ceph_osd, 3417 o_keepalive_item); 3418 list_del_init(&osd->o_keepalive_item); 3419 ceph_con_keepalive(&osd->o_con); 3420 } 3421 3422 up_write(&osdc->lock); 3423 schedule_delayed_work(&osdc->timeout_work, 3424 osdc->client->options->osd_keepalive_timeout); 3425 } 3426 3427 static void handle_osds_timeout(struct work_struct *work) 3428 { 3429 struct ceph_osd_client *osdc = 3430 container_of(work, struct ceph_osd_client, 3431 osds_timeout_work.work); 3432 unsigned long delay = osdc->client->options->osd_idle_ttl / 4; 3433 struct ceph_osd *osd, *nosd; 3434 3435 dout("%s osdc %p\n", __func__, osdc); 3436 down_write(&osdc->lock); 3437 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 3438 if (time_before(jiffies, osd->lru_ttl)) 3439 break; 3440 3441 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); 3442 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); 3443 close_osd(osd); 3444 } 3445 3446 up_write(&osdc->lock); 3447 schedule_delayed_work(&osdc->osds_timeout_work, 3448 round_jiffies_relative(delay)); 3449 } 3450 3451 static int ceph_oloc_decode(void **p, void *end, 3452 struct ceph_object_locator *oloc) 3453 { 3454 u8 struct_v, struct_cv; 3455 u32 len; 3456 void *struct_end; 3457 int ret = 0; 3458 3459 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3460 struct_v = ceph_decode_8(p); 3461 struct_cv = ceph_decode_8(p); 3462 if (struct_v < 3) { 3463 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n", 3464 struct_v, struct_cv); 3465 goto e_inval; 3466 } 3467 if (struct_cv > 6) { 3468 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n", 3469 struct_v, struct_cv); 
3470 goto e_inval; 3471 } 3472 len = ceph_decode_32(p); 3473 ceph_decode_need(p, end, len, e_inval); 3474 struct_end = *p + len; 3475 3476 oloc->pool = ceph_decode_64(p); 3477 *p += 4; /* skip preferred */ 3478 3479 len = ceph_decode_32(p); 3480 if (len > 0) { 3481 pr_warn("ceph_object_locator::key is set\n"); 3482 goto e_inval; 3483 } 3484 3485 if (struct_v >= 5) { 3486 bool changed = false; 3487 3488 len = ceph_decode_32(p); 3489 if (len > 0) { 3490 ceph_decode_need(p, end, len, e_inval); 3491 if (!oloc->pool_ns || 3492 ceph_compare_string(oloc->pool_ns, *p, len)) 3493 changed = true; 3494 *p += len; 3495 } else { 3496 if (oloc->pool_ns) 3497 changed = true; 3498 } 3499 if (changed) { 3500 /* redirect changes namespace */ 3501 pr_warn("ceph_object_locator::nspace is changed\n"); 3502 goto e_inval; 3503 } 3504 } 3505 3506 if (struct_v >= 6) { 3507 s64 hash = ceph_decode_64(p); 3508 if (hash != -1) { 3509 pr_warn("ceph_object_locator::hash is set\n"); 3510 goto e_inval; 3511 } 3512 } 3513 3514 /* skip the rest */ 3515 *p = struct_end; 3516 out: 3517 return ret; 3518 3519 e_inval: 3520 ret = -EINVAL; 3521 goto out; 3522 } 3523 3524 static int ceph_redirect_decode(void **p, void *end, 3525 struct ceph_request_redirect *redir) 3526 { 3527 u8 struct_v, struct_cv; 3528 u32 len; 3529 void *struct_end; 3530 int ret; 3531 3532 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3533 struct_v = ceph_decode_8(p); 3534 struct_cv = ceph_decode_8(p); 3535 if (struct_cv > 1) { 3536 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n", 3537 struct_v, struct_cv); 3538 goto e_inval; 3539 } 3540 len = ceph_decode_32(p); 3541 ceph_decode_need(p, end, len, e_inval); 3542 struct_end = *p + len; 3543 3544 ret = ceph_oloc_decode(p, end, &redir->oloc); 3545 if (ret) 3546 goto out; 3547 3548 len = ceph_decode_32(p); 3549 if (len > 0) { 3550 pr_warn("ceph_request_redirect::object_name is set\n"); 3551 goto e_inval; 3552 } 3553 3554 /* skip the rest */ 3555 *p = struct_end; 3556 out: 3557 return ret; 3558 3559 e_inval: 3560 ret = -EINVAL; 3561 goto out; 3562 } 3563 3564 struct MOSDOpReply { 3565 struct ceph_pg pgid; 3566 u64 flags; 3567 int result; 3568 u32 epoch; 3569 int num_ops; 3570 u32 outdata_len[CEPH_OSD_MAX_OPS]; 3571 s32 rval[CEPH_OSD_MAX_OPS]; 3572 int retry_attempt; 3573 struct ceph_eversion replay_version; 3574 u64 user_version; 3575 struct ceph_request_redirect redirect; 3576 }; 3577 3578 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m) 3579 { 3580 void *p = msg->front.iov_base; 3581 void *const end = p + msg->front.iov_len; 3582 u16 version = le16_to_cpu(msg->hdr.version); 3583 struct ceph_eversion bad_replay_version; 3584 u8 decode_redir; 3585 u32 len; 3586 int ret; 3587 int i; 3588 3589 ceph_decode_32_safe(&p, end, len, e_inval); 3590 ceph_decode_need(&p, end, len, e_inval); 3591 p += len; /* skip oid */ 3592 3593 ret = ceph_decode_pgid(&p, end, &m->pgid); 3594 if (ret) 3595 return ret; 3596 3597 ceph_decode_64_safe(&p, end, m->flags, e_inval); 3598 ceph_decode_32_safe(&p, end, m->result, e_inval); 3599 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval); 3600 memcpy(&bad_replay_version, p, sizeof(bad_replay_version)); 3601 p += sizeof(bad_replay_version); 3602 ceph_decode_32_safe(&p, end, m->epoch, e_inval); 3603 3604 ceph_decode_32_safe(&p, end, m->num_ops, e_inval); 3605 if (m->num_ops > ARRAY_SIZE(m->outdata_len)) 3606 goto e_inval; 3607 3608 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op), 3609 e_inval); 3610 for (i = 0; i < m->num_ops; 
i++) { 3611 struct ceph_osd_op *op = p; 3612 3613 m->outdata_len[i] = le32_to_cpu(op->payload_len); 3614 p += sizeof(*op); 3615 } 3616 3617 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval); 3618 for (i = 0; i < m->num_ops; i++) 3619 ceph_decode_32_safe(&p, end, m->rval[i], e_inval); 3620 3621 if (version >= 5) { 3622 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval); 3623 memcpy(&m->replay_version, p, sizeof(m->replay_version)); 3624 p += sizeof(m->replay_version); 3625 ceph_decode_64_safe(&p, end, m->user_version, e_inval); 3626 } else { 3627 m->replay_version = bad_replay_version; /* struct */ 3628 m->user_version = le64_to_cpu(m->replay_version.version); 3629 } 3630 3631 if (version >= 6) { 3632 if (version >= 7) 3633 ceph_decode_8_safe(&p, end, decode_redir, e_inval); 3634 else 3635 decode_redir = 1; 3636 } else { 3637 decode_redir = 0; 3638 } 3639 3640 if (decode_redir) { 3641 ret = ceph_redirect_decode(&p, end, &m->redirect); 3642 if (ret) 3643 return ret; 3644 } else { 3645 ceph_oloc_init(&m->redirect.oloc); 3646 } 3647 3648 return 0; 3649 3650 e_inval: 3651 return -EINVAL; 3652 } 3653 3654 /* 3655 * Handle MOSDOpReply. Set ->r_result and call the callback if it is 3656 * specified. 3657 */ 3658 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) 3659 { 3660 struct ceph_osd_client *osdc = osd->o_osdc; 3661 struct ceph_osd_request *req; 3662 struct MOSDOpReply m; 3663 u64 tid = le64_to_cpu(msg->hdr.tid); 3664 u32 data_len = 0; 3665 int ret; 3666 int i; 3667 3668 dout("%s msg %p tid %llu\n", __func__, msg, tid); 3669 3670 down_read(&osdc->lock); 3671 if (!osd_registered(osd)) { 3672 dout("%s osd%d unknown\n", __func__, osd->o_osd); 3673 goto out_unlock_osdc; 3674 } 3675 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); 3676 3677 mutex_lock(&osd->lock); 3678 req = lookup_request(&osd->o_requests, tid); 3679 if (!req) { 3680 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid); 3681 goto out_unlock_session; 3682 } 3683 3684 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns; 3685 ret = decode_MOSDOpReply(msg, &m); 3686 m.redirect.oloc.pool_ns = NULL; 3687 if (ret) { 3688 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n", 3689 req->r_tid, ret); 3690 ceph_msg_dump(msg); 3691 goto fail_request; 3692 } 3693 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n", 3694 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed, 3695 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch), 3696 le64_to_cpu(m.replay_version.version), m.user_version); 3697 3698 if (m.retry_attempt >= 0) { 3699 if (m.retry_attempt != req->r_attempts - 1) { 3700 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n", 3701 req, req->r_tid, m.retry_attempt, 3702 req->r_attempts - 1); 3703 goto out_unlock_session; 3704 } 3705 } else { 3706 WARN_ON(1); /* MOSDOpReply v4 is assumed */ 3707 } 3708 3709 if (!ceph_oloc_empty(&m.redirect.oloc)) { 3710 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid, 3711 m.redirect.oloc.pool); 3712 unlink_request(osd, req); 3713 mutex_unlock(&osd->lock); 3714 3715 /* 3716 * Not ceph_oloc_copy() - changing pool_ns is not 3717 * supported. 
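 *
 * (m.redirect.oloc.pool_ns only borrowed req's pool_ns for decoding
 * and was reset to NULL right after, so just the pool id is taken
 * here.)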
3718 */ 3719 req->r_t.target_oloc.pool = m.redirect.oloc.pool; 3720 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | 3721 CEPH_OSD_FLAG_IGNORE_OVERLAY | 3722 CEPH_OSD_FLAG_IGNORE_CACHE; 3723 req->r_tid = 0; 3724 __submit_request(req, false); 3725 goto out_unlock_osdc; 3726 } 3727 3728 if (m.result == -EAGAIN) { 3729 dout("req %p tid %llu EAGAIN\n", req, req->r_tid); 3730 unlink_request(osd, req); 3731 mutex_unlock(&osd->lock); 3732 3733 /* 3734 * The object is missing on the replica or not (yet) 3735 * readable. Clear pgid to force a resend to the primary 3736 * via legacy_change. 3737 */ 3738 req->r_t.pgid.pool = 0; 3739 req->r_t.pgid.seed = 0; 3740 WARN_ON(!req->r_t.used_replica); 3741 req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS | 3742 CEPH_OSD_FLAG_LOCALIZE_READS); 3743 req->r_tid = 0; 3744 __submit_request(req, false); 3745 goto out_unlock_osdc; 3746 } 3747 3748 if (m.num_ops != req->r_num_ops) { 3749 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops, 3750 req->r_num_ops, req->r_tid); 3751 goto fail_request; 3752 } 3753 for (i = 0; i < req->r_num_ops; i++) { 3754 dout(" req %p tid %llu op %d rval %d len %u\n", req, 3755 req->r_tid, i, m.rval[i], m.outdata_len[i]); 3756 req->r_ops[i].rval = m.rval[i]; 3757 req->r_ops[i].outdata_len = m.outdata_len[i]; 3758 data_len += m.outdata_len[i]; 3759 } 3760 if (data_len != le32_to_cpu(msg->hdr.data_len)) { 3761 pr_err("sum of lens %u != %u for tid %llu\n", data_len, 3762 le32_to_cpu(msg->hdr.data_len), req->r_tid); 3763 goto fail_request; 3764 } 3765 dout("%s req %p tid %llu result %d data_len %u\n", __func__, 3766 req, req->r_tid, m.result, data_len); 3767 3768 /* 3769 * Since we only ever request ONDISK, we should only ever get 3770 * one (type of) reply back. 3771 */ 3772 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK)); 3773 req->r_result = m.result ?: data_len; 3774 finish_request(req); 3775 mutex_unlock(&osd->lock); 3776 up_read(&osdc->lock); 3777 3778 __complete_request(req); 3779 return; 3780 3781 fail_request: 3782 complete_request(req, -EIO); 3783 out_unlock_session: 3784 mutex_unlock(&osd->lock); 3785 out_unlock_osdc: 3786 up_read(&osdc->lock); 3787 } 3788 3789 static void set_pool_was_full(struct ceph_osd_client *osdc) 3790 { 3791 struct rb_node *n; 3792 3793 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { 3794 struct ceph_pg_pool_info *pi = 3795 rb_entry(n, struct ceph_pg_pool_info, node); 3796 3797 pi->was_full = __pool_full(pi); 3798 } 3799 } 3800 3801 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id) 3802 { 3803 struct ceph_pg_pool_info *pi; 3804 3805 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id); 3806 if (!pi) 3807 return false; 3808 3809 return pi->was_full && !__pool_full(pi); 3810 } 3811 3812 static enum calc_target_result 3813 recalc_linger_target(struct ceph_osd_linger_request *lreq) 3814 { 3815 struct ceph_osd_client *osdc = lreq->osdc; 3816 enum calc_target_result ct_res; 3817 3818 ct_res = calc_target(osdc, &lreq->t, true); 3819 if (ct_res == CALC_TARGET_NEED_RESEND) { 3820 struct ceph_osd *osd; 3821 3822 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3823 if (osd != lreq->osd) { 3824 unlink_linger(lreq->osd, lreq); 3825 link_linger(osd, lreq); 3826 } 3827 } 3828 3829 return ct_res; 3830 } 3831 3832 /* 3833 * Requeue requests whose mapping to an OSD has changed. 
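 *
 * @force_resend: resend everything on this session, e.g. one or more
 *                full maps were skipped over
 * @cleared_full: resend writes -- the map-wide full flag was cleared
 * @check_pool_cleared_full: also consult per-pool full flags
 *
 * Affected requests end up on @need_resend / @need_resend_linger for
 * kick_requests() to re-target and resend.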
3834 */ 3835 static void scan_requests(struct ceph_osd *osd, 3836 bool force_resend, 3837 bool cleared_full, 3838 bool check_pool_cleared_full, 3839 struct rb_root *need_resend, 3840 struct list_head *need_resend_linger) 3841 { 3842 struct ceph_osd_client *osdc = osd->o_osdc; 3843 struct rb_node *n; 3844 bool force_resend_writes; 3845 3846 for (n = rb_first(&osd->o_linger_requests); n; ) { 3847 struct ceph_osd_linger_request *lreq = 3848 rb_entry(n, struct ceph_osd_linger_request, node); 3849 enum calc_target_result ct_res; 3850 3851 n = rb_next(n); /* recalc_linger_target() */ 3852 3853 dout("%s lreq %p linger_id %llu\n", __func__, lreq, 3854 lreq->linger_id); 3855 ct_res = recalc_linger_target(lreq); 3856 switch (ct_res) { 3857 case CALC_TARGET_NO_ACTION: 3858 force_resend_writes = cleared_full || 3859 (check_pool_cleared_full && 3860 pool_cleared_full(osdc, lreq->t.base_oloc.pool)); 3861 if (!force_resend && !force_resend_writes) 3862 break; 3863 3864 /* fall through */ 3865 case CALC_TARGET_NEED_RESEND: 3866 cancel_linger_map_check(lreq); 3867 /* 3868 * scan_requests() for the previous epoch(s) 3869 * may have already added it to the list, since 3870 * it's not unlinked here. 3871 */ 3872 if (list_empty(&lreq->scan_item)) 3873 list_add_tail(&lreq->scan_item, need_resend_linger); 3874 break; 3875 case CALC_TARGET_POOL_DNE: 3876 list_del_init(&lreq->scan_item); 3877 check_linger_pool_dne(lreq); 3878 break; 3879 } 3880 } 3881 3882 for (n = rb_first(&osd->o_requests); n; ) { 3883 struct ceph_osd_request *req = 3884 rb_entry(n, struct ceph_osd_request, r_node); 3885 enum calc_target_result ct_res; 3886 3887 n = rb_next(n); /* unlink_request(), check_pool_dne() */ 3888 3889 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 3890 ct_res = calc_target(osdc, &req->r_t, false); 3891 switch (ct_res) { 3892 case CALC_TARGET_NO_ACTION: 3893 force_resend_writes = cleared_full || 3894 (check_pool_cleared_full && 3895 pool_cleared_full(osdc, req->r_t.base_oloc.pool)); 3896 if (!force_resend && 3897 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) || 3898 !force_resend_writes)) 3899 break; 3900 3901 /* fall through */ 3902 case CALC_TARGET_NEED_RESEND: 3903 cancel_map_check(req); 3904 unlink_request(osd, req); 3905 insert_request(need_resend, req); 3906 break; 3907 case CALC_TARGET_POOL_DNE: 3908 check_pool_dne(req); 3909 break; 3910 } 3911 } 3912 } 3913 3914 static int handle_one_map(struct ceph_osd_client *osdc, 3915 void *p, void *end, bool incremental, 3916 struct rb_root *need_resend, 3917 struct list_head *need_resend_linger) 3918 { 3919 struct ceph_osdmap *newmap; 3920 struct rb_node *n; 3921 bool skipped_map = false; 3922 bool was_full; 3923 3924 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); 3925 set_pool_was_full(osdc); 3926 3927 if (incremental) 3928 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap); 3929 else 3930 newmap = ceph_osdmap_decode(&p, end); 3931 if (IS_ERR(newmap)) 3932 return PTR_ERR(newmap); 3933 3934 if (newmap != osdc->osdmap) { 3935 /* 3936 * Preserve ->was_full before destroying the old map. 3937 * For pools that weren't in the old map, ->was_full 3938 * should be false. 
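 * pool_cleared_full() then evaluates to false for such pools, so
 * their writes aren't needlessly resent.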
3939 */ 3940 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) { 3941 struct ceph_pg_pool_info *pi = 3942 rb_entry(n, struct ceph_pg_pool_info, node); 3943 struct ceph_pg_pool_info *old_pi; 3944 3945 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id); 3946 if (old_pi) 3947 pi->was_full = old_pi->was_full; 3948 else 3949 WARN_ON(pi->was_full); 3950 } 3951 3952 if (osdc->osdmap->epoch && 3953 osdc->osdmap->epoch + 1 < newmap->epoch) { 3954 WARN_ON(incremental); 3955 skipped_map = true; 3956 } 3957 3958 ceph_osdmap_destroy(osdc->osdmap); 3959 osdc->osdmap = newmap; 3960 } 3961 3962 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); 3963 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true, 3964 need_resend, need_resend_linger); 3965 3966 for (n = rb_first(&osdc->osds); n; ) { 3967 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 3968 3969 n = rb_next(n); /* close_osd() */ 3970 3971 scan_requests(osd, skipped_map, was_full, true, need_resend, 3972 need_resend_linger); 3973 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || 3974 memcmp(&osd->o_con.peer_addr, 3975 ceph_osd_addr(osdc->osdmap, osd->o_osd), 3976 sizeof(struct ceph_entity_addr))) 3977 close_osd(osd); 3978 } 3979 3980 return 0; 3981 } 3982 3983 static void kick_requests(struct ceph_osd_client *osdc, 3984 struct rb_root *need_resend, 3985 struct list_head *need_resend_linger) 3986 { 3987 struct ceph_osd_linger_request *lreq, *nlreq; 3988 enum calc_target_result ct_res; 3989 struct rb_node *n; 3990 3991 /* make sure need_resend targets reflect latest map */ 3992 for (n = rb_first(need_resend); n; ) { 3993 struct ceph_osd_request *req = 3994 rb_entry(n, struct ceph_osd_request, r_node); 3995 3996 n = rb_next(n); 3997 3998 if (req->r_t.epoch < osdc->osdmap->epoch) { 3999 ct_res = calc_target(osdc, &req->r_t, false); 4000 if (ct_res == CALC_TARGET_POOL_DNE) { 4001 erase_request(need_resend, req); 4002 check_pool_dne(req); 4003 } 4004 } 4005 } 4006 4007 for (n = rb_first(need_resend); n; ) { 4008 struct ceph_osd_request *req = 4009 rb_entry(n, struct ceph_osd_request, r_node); 4010 struct ceph_osd *osd; 4011 4012 n = rb_next(n); 4013 erase_request(need_resend, req); /* before link_request() */ 4014 4015 osd = lookup_create_osd(osdc, req->r_t.osd, true); 4016 link_request(osd, req); 4017 if (!req->r_linger) { 4018 if (!osd_homeless(osd) && !req->r_t.paused) 4019 send_request(req); 4020 } else { 4021 cancel_linger_request(req); 4022 } 4023 } 4024 4025 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) { 4026 if (!osd_homeless(lreq->osd)) 4027 send_linger(lreq); 4028 4029 list_del_init(&lreq->scan_item); 4030 } 4031 } 4032 4033 /* 4034 * Process updated osd map. 4035 * 4036 * The message contains any number of incremental and full maps, normally 4037 * indicating some sort of topology change in the cluster. Kick requests 4038 * off to different OSDs as needed. 
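 * The front is laid out as: fsid, a count of incremental maps
 * followed by (epoch, length, blob) for each, then a count of
 * full maps in the same format.  Only the last full map is
 * considered, and only if it is newer than the map we have.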
4039 */ 4040 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) 4041 { 4042 void *p = msg->front.iov_base; 4043 void *const end = p + msg->front.iov_len; 4044 u32 nr_maps, maplen; 4045 u32 epoch; 4046 struct ceph_fsid fsid; 4047 struct rb_root need_resend = RB_ROOT; 4048 LIST_HEAD(need_resend_linger); 4049 bool handled_incremental = false; 4050 bool was_pauserd, was_pausewr; 4051 bool pauserd, pausewr; 4052 int err; 4053 4054 dout("%s have %u\n", __func__, osdc->osdmap->epoch); 4055 down_write(&osdc->lock); 4056 4057 /* verify fsid */ 4058 ceph_decode_need(&p, end, sizeof(fsid), bad); 4059 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 4060 if (ceph_check_fsid(osdc->client, &fsid) < 0) 4061 goto bad; 4062 4063 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); 4064 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || 4065 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 4066 have_pool_full(osdc); 4067 4068 /* incremental maps */ 4069 ceph_decode_32_safe(&p, end, nr_maps, bad); 4070 dout(" %d inc maps\n", nr_maps); 4071 while (nr_maps > 0) { 4072 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 4073 epoch = ceph_decode_32(&p); 4074 maplen = ceph_decode_32(&p); 4075 ceph_decode_need(&p, end, maplen, bad); 4076 if (osdc->osdmap->epoch && 4077 osdc->osdmap->epoch + 1 == epoch) { 4078 dout("applying incremental map %u len %d\n", 4079 epoch, maplen); 4080 err = handle_one_map(osdc, p, p + maplen, true, 4081 &need_resend, &need_resend_linger); 4082 if (err) 4083 goto bad; 4084 handled_incremental = true; 4085 } else { 4086 dout("ignoring incremental map %u len %d\n", 4087 epoch, maplen); 4088 } 4089 p += maplen; 4090 nr_maps--; 4091 } 4092 if (handled_incremental) 4093 goto done; 4094 4095 /* full maps */ 4096 ceph_decode_32_safe(&p, end, nr_maps, bad); 4097 dout(" %d full maps\n", nr_maps); 4098 while (nr_maps) { 4099 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 4100 epoch = ceph_decode_32(&p); 4101 maplen = ceph_decode_32(&p); 4102 ceph_decode_need(&p, end, maplen, bad); 4103 if (nr_maps > 1) { 4104 dout("skipping non-latest full map %u len %d\n", 4105 epoch, maplen); 4106 } else if (osdc->osdmap->epoch >= epoch) { 4107 dout("skipping full map %u len %d, " 4108 "older than our %u\n", epoch, maplen, 4109 osdc->osdmap->epoch); 4110 } else { 4111 dout("taking full map %u len %d\n", epoch, maplen); 4112 err = handle_one_map(osdc, p, p + maplen, false, 4113 &need_resend, &need_resend_linger); 4114 if (err) 4115 goto bad; 4116 } 4117 p += maplen; 4118 nr_maps--; 4119 } 4120 4121 done: 4122 /* 4123 * subscribe to subsequent osdmap updates if full to ensure 4124 * we find out when we are no longer full and stop returning 4125 * ENOSPC. 4126 */ 4127 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); 4128 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || 4129 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 4130 have_pool_full(osdc); 4131 if (was_pauserd || was_pausewr || pauserd || pausewr || 4132 osdc->osdmap->epoch < osdc->epoch_barrier) 4133 maybe_request_map(osdc); 4134 4135 kick_requests(osdc, &need_resend, &need_resend_linger); 4136 4137 ceph_osdc_abort_on_full(osdc); 4138 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP, 4139 osdc->osdmap->epoch); 4140 up_write(&osdc->lock); 4141 wake_up_all(&osdc->client->auth_wq); 4142 return; 4143 4144 bad: 4145 pr_err("osdc handle_map corrupt msg\n"); 4146 ceph_msg_dump(msg); 4147 up_write(&osdc->lock); 4148 } 4149 4150 /* 4151 * Resubmit requests pending on the given osd. 
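 * Backoffs are discarded first - they are scoped to the session
 * and don't survive a connection reset.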
4152 */ 4153 static void kick_osd_requests(struct ceph_osd *osd) 4154 { 4155 struct rb_node *n; 4156 4157 clear_backoffs(osd); 4158 4159 for (n = rb_first(&osd->o_requests); n; ) { 4160 struct ceph_osd_request *req = 4161 rb_entry(n, struct ceph_osd_request, r_node); 4162 4163 n = rb_next(n); /* cancel_linger_request() */ 4164 4165 if (!req->r_linger) { 4166 if (!req->r_t.paused) 4167 send_request(req); 4168 } else { 4169 cancel_linger_request(req); 4170 } 4171 } 4172 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) { 4173 struct ceph_osd_linger_request *lreq = 4174 rb_entry(n, struct ceph_osd_linger_request, node); 4175 4176 send_linger(lreq); 4177 } 4178 } 4179 4180 /* 4181 * If the osd connection drops, we need to resubmit all requests. 4182 */ 4183 static void osd_fault(struct ceph_connection *con) 4184 { 4185 struct ceph_osd *osd = con->private; 4186 struct ceph_osd_client *osdc = osd->o_osdc; 4187 4188 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); 4189 4190 down_write(&osdc->lock); 4191 if (!osd_registered(osd)) { 4192 dout("%s osd%d unknown\n", __func__, osd->o_osd); 4193 goto out_unlock; 4194 } 4195 4196 if (!reopen_osd(osd)) 4197 kick_osd_requests(osd); 4198 maybe_request_map(osdc); 4199 4200 out_unlock: 4201 up_write(&osdc->lock); 4202 } 4203 4204 struct MOSDBackoff { 4205 struct ceph_spg spgid; 4206 u32 map_epoch; 4207 u8 op; 4208 u64 id; 4209 struct ceph_hobject_id *begin; 4210 struct ceph_hobject_id *end; 4211 }; 4212 4213 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m) 4214 { 4215 void *p = msg->front.iov_base; 4216 void *const end = p + msg->front.iov_len; 4217 u8 struct_v; 4218 u32 struct_len; 4219 int ret; 4220 4221 ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len); 4222 if (ret) 4223 return ret; 4224 4225 ret = ceph_decode_pgid(&p, end, &m->spgid.pgid); 4226 if (ret) 4227 return ret; 4228 4229 ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval); 4230 ceph_decode_32_safe(&p, end, m->map_epoch, e_inval); 4231 ceph_decode_8_safe(&p, end, m->op, e_inval); 4232 ceph_decode_64_safe(&p, end, m->id, e_inval); 4233 4234 m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO); 4235 if (!m->begin) 4236 return -ENOMEM; 4237 4238 ret = decode_hoid(&p, end, m->begin); 4239 if (ret) { 4240 free_hoid(m->begin); 4241 return ret; 4242 } 4243 4244 m->end = kzalloc(sizeof(*m->end), GFP_NOIO); 4245 if (!m->end) { 4246 free_hoid(m->begin); 4247 return -ENOMEM; 4248 } 4249 4250 ret = decode_hoid(&p, end, m->end); 4251 if (ret) { 4252 free_hoid(m->begin); 4253 free_hoid(m->end); 4254 return ret; 4255 } 4256 4257 return 0; 4258 4259 e_inval: 4260 return -EINVAL; 4261 } 4262 4263 static struct ceph_msg *create_backoff_message( 4264 const struct ceph_osd_backoff *backoff, 4265 u32 map_epoch) 4266 { 4267 struct ceph_msg *msg; 4268 void *p, *end; 4269 int msg_size; 4270 4271 msg_size = CEPH_ENCODING_START_BLK_LEN + 4272 CEPH_PGID_ENCODING_LEN + 1; /* spgid */ 4273 msg_size += 4 + 1 + 8; /* map_epoch, op, id */ 4274 msg_size += CEPH_ENCODING_START_BLK_LEN + 4275 hoid_encoding_size(backoff->begin); 4276 msg_size += CEPH_ENCODING_START_BLK_LEN + 4277 hoid_encoding_size(backoff->end); 4278 4279 msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true); 4280 if (!msg) 4281 return NULL; 4282 4283 p = msg->front.iov_base; 4284 end = p + msg->front_alloc_len; 4285 4286 encode_spgid(&p, &backoff->spgid); 4287 ceph_encode_32(&p, map_epoch); 4288 ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK); 4289 ceph_encode_64(&p, backoff->id); 
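	/* the begin/end of the range being acked */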
4290 encode_hoid(&p, end, backoff->begin); 4291 encode_hoid(&p, end, backoff->end); 4292 BUG_ON(p != end); 4293 4294 msg->front.iov_len = p - msg->front.iov_base; 4295 msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */ 4296 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 4297 4298 return msg; 4299 } 4300 4301 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m) 4302 { 4303 struct ceph_spg_mapping *spg; 4304 struct ceph_osd_backoff *backoff; 4305 struct ceph_msg *msg; 4306 4307 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, 4308 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id); 4309 4310 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid); 4311 if (!spg) { 4312 spg = alloc_spg_mapping(); 4313 if (!spg) { 4314 pr_err("%s failed to allocate spg\n", __func__); 4315 return; 4316 } 4317 spg->spgid = m->spgid; /* struct */ 4318 insert_spg_mapping(&osd->o_backoff_mappings, spg); 4319 } 4320 4321 backoff = alloc_backoff(); 4322 if (!backoff) { 4323 pr_err("%s failed to allocate backoff\n", __func__); 4324 return; 4325 } 4326 backoff->spgid = m->spgid; /* struct */ 4327 backoff->id = m->id; 4328 backoff->begin = m->begin; 4329 m->begin = NULL; /* backoff now owns this */ 4330 backoff->end = m->end; 4331 m->end = NULL; /* ditto */ 4332 4333 insert_backoff(&spg->backoffs, backoff); 4334 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff); 4335 4336 /* 4337 * Ack with original backoff's epoch so that the OSD can 4338 * discard this if there was a PG split. 4339 */ 4340 msg = create_backoff_message(backoff, m->map_epoch); 4341 if (!msg) { 4342 pr_err("%s failed to allocate msg\n", __func__); 4343 return; 4344 } 4345 ceph_con_send(&osd->o_con, msg); 4346 } 4347 4348 static bool target_contained_by(const struct ceph_osd_request_target *t, 4349 const struct ceph_hobject_id *begin, 4350 const struct ceph_hobject_id *end) 4351 { 4352 struct ceph_hobject_id hoid; 4353 int cmp; 4354 4355 hoid_fill_from_target(&hoid, t); 4356 cmp = hoid_compare(&hoid, begin); 4357 return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0); 4358 } 4359 4360 static void handle_backoff_unblock(struct ceph_osd *osd, 4361 const struct MOSDBackoff *m) 4362 { 4363 struct ceph_spg_mapping *spg; 4364 struct ceph_osd_backoff *backoff; 4365 struct rb_node *n; 4366 4367 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, 4368 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id); 4369 4370 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id); 4371 if (!backoff) { 4372 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n", 4373 __func__, osd->o_osd, m->spgid.pgid.pool, 4374 m->spgid.pgid.seed, m->spgid.shard, m->id); 4375 return; 4376 } 4377 4378 if (hoid_compare(backoff->begin, m->begin) && 4379 hoid_compare(backoff->end, m->end)) { 4380 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n", 4381 __func__, osd->o_osd, m->spgid.pgid.pool, 4382 m->spgid.pgid.seed, m->spgid.shard, m->id); 4383 /* unblock it anyway... 
*/ 4384 } 4385 4386 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid); 4387 BUG_ON(!spg); 4388 4389 erase_backoff(&spg->backoffs, backoff); 4390 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); 4391 free_backoff(backoff); 4392 4393 if (RB_EMPTY_ROOT(&spg->backoffs)) { 4394 erase_spg_mapping(&osd->o_backoff_mappings, spg); 4395 free_spg_mapping(spg); 4396 } 4397 4398 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { 4399 struct ceph_osd_request *req = 4400 rb_entry(n, struct ceph_osd_request, r_node); 4401 4402 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) { 4403 /* 4404 * Match against @m, not @backoff -- the PG may 4405 * have split on the OSD. 4406 */ 4407 if (target_contained_by(&req->r_t, m->begin, m->end)) { 4408 /* 4409 * If no other installed backoff applies, 4410 * resend. 4411 */ 4412 send_request(req); 4413 } 4414 } 4415 } 4416 } 4417 4418 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg) 4419 { 4420 struct ceph_osd_client *osdc = osd->o_osdc; 4421 struct MOSDBackoff m; 4422 int ret; 4423 4424 down_read(&osdc->lock); 4425 if (!osd_registered(osd)) { 4426 dout("%s osd%d unknown\n", __func__, osd->o_osd); 4427 up_read(&osdc->lock); 4428 return; 4429 } 4430 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); 4431 4432 mutex_lock(&osd->lock); 4433 ret = decode_MOSDBackoff(msg, &m); 4434 if (ret) { 4435 pr_err("failed to decode MOSDBackoff: %d\n", ret); 4436 ceph_msg_dump(msg); 4437 goto out_unlock; 4438 } 4439 4440 switch (m.op) { 4441 case CEPH_OSD_BACKOFF_OP_BLOCK: 4442 handle_backoff_block(osd, &m); 4443 break; 4444 case CEPH_OSD_BACKOFF_OP_UNBLOCK: 4445 handle_backoff_unblock(osd, &m); 4446 break; 4447 default: 4448 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op); 4449 } 4450 4451 free_hoid(m.begin); 4452 free_hoid(m.end); 4453 4454 out_unlock: 4455 mutex_unlock(&osd->lock); 4456 up_read(&osdc->lock); 4457 } 4458 4459 /* 4460 * Process osd watch notifications 4461 */ 4462 static void handle_watch_notify(struct ceph_osd_client *osdc, 4463 struct ceph_msg *msg) 4464 { 4465 void *p = msg->front.iov_base; 4466 void *const end = p + msg->front.iov_len; 4467 struct ceph_osd_linger_request *lreq; 4468 struct linger_work *lwork; 4469 u8 proto_ver, opcode; 4470 u64 cookie, notify_id; 4471 u64 notifier_id = 0; 4472 s32 return_code = 0; 4473 void *payload = NULL; 4474 u32 payload_len = 0; 4475 4476 ceph_decode_8_safe(&p, end, proto_ver, bad); 4477 ceph_decode_8_safe(&p, end, opcode, bad); 4478 ceph_decode_64_safe(&p, end, cookie, bad); 4479 p += 8; /* skip ver */ 4480 ceph_decode_64_safe(&p, end, notify_id, bad); 4481 4482 if (proto_ver >= 1) { 4483 ceph_decode_32_safe(&p, end, payload_len, bad); 4484 ceph_decode_need(&p, end, payload_len, bad); 4485 payload = p; 4486 p += payload_len; 4487 } 4488 4489 if (le16_to_cpu(msg->hdr.version) >= 2) 4490 ceph_decode_32_safe(&p, end, return_code, bad); 4491 4492 if (le16_to_cpu(msg->hdr.version) >= 3) 4493 ceph_decode_64_safe(&p, end, notifier_id, bad); 4494 4495 down_read(&osdc->lock); 4496 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie); 4497 if (!lreq) { 4498 dout("%s opcode %d cookie %llu dne\n", __func__, opcode, 4499 cookie); 4500 goto out_unlock_osdc; 4501 } 4502 4503 mutex_lock(&lreq->lock); 4504 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__, 4505 opcode, cookie, lreq, lreq->is_watch); 4506 if (opcode == CEPH_WATCH_EVENT_DISCONNECT) { 4507 if (!lreq->last_error) { 4508 lreq->last_error = -ENOTCONN; 4509 queue_watch_error(lreq); 4510 } 4511 } 
else if (!lreq->is_watch) { 4512 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */ 4513 if (lreq->notify_id && lreq->notify_id != notify_id) { 4514 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq, 4515 lreq->notify_id, notify_id); 4516 } else if (!completion_done(&lreq->notify_finish_wait)) { 4517 struct ceph_msg_data *data = 4518 msg->num_data_items ? &msg->data[0] : NULL; 4519 4520 if (data) { 4521 if (lreq->preply_pages) { 4522 WARN_ON(data->type != 4523 CEPH_MSG_DATA_PAGES); 4524 *lreq->preply_pages = data->pages; 4525 *lreq->preply_len = data->length; 4526 data->own_pages = false; 4527 } 4528 } 4529 lreq->notify_finish_error = return_code; 4530 complete_all(&lreq->notify_finish_wait); 4531 } 4532 } else { 4533 /* CEPH_WATCH_EVENT_NOTIFY */ 4534 lwork = lwork_alloc(lreq, do_watch_notify); 4535 if (!lwork) { 4536 pr_err("failed to allocate notify-lwork\n"); 4537 goto out_unlock_lreq; 4538 } 4539 4540 lwork->notify.notify_id = notify_id; 4541 lwork->notify.notifier_id = notifier_id; 4542 lwork->notify.payload = payload; 4543 lwork->notify.payload_len = payload_len; 4544 lwork->notify.msg = ceph_msg_get(msg); 4545 lwork_queue(lwork); 4546 } 4547 4548 out_unlock_lreq: 4549 mutex_unlock(&lreq->lock); 4550 out_unlock_osdc: 4551 up_read(&osdc->lock); 4552 return; 4553 4554 bad: 4555 pr_err("osdc handle_watch_notify corrupt msg\n"); 4556 } 4557 4558 /* 4559 * Register request, send initial attempt. 4560 */ 4561 int ceph_osdc_start_request(struct ceph_osd_client *osdc, 4562 struct ceph_osd_request *req, 4563 bool nofail) 4564 { 4565 down_read(&osdc->lock); 4566 submit_request(req, false); 4567 up_read(&osdc->lock); 4568 4569 return 0; 4570 } 4571 EXPORT_SYMBOL(ceph_osdc_start_request); 4572 4573 /* 4574 * Unregister a registered request. The request is not completed: 4575 * ->r_result isn't set and __complete_request() isn't called. 4576 */ 4577 void ceph_osdc_cancel_request(struct ceph_osd_request *req) 4578 { 4579 struct ceph_osd_client *osdc = req->r_osdc; 4580 4581 down_write(&osdc->lock); 4582 if (req->r_osd) 4583 cancel_request(req); 4584 up_write(&osdc->lock); 4585 } 4586 EXPORT_SYMBOL(ceph_osdc_cancel_request); 4587 4588 /* 4589 * @timeout: in jiffies, 0 means "wait forever" 4590 */ 4591 static int wait_request_timeout(struct ceph_osd_request *req, 4592 unsigned long timeout) 4593 { 4594 long left; 4595 4596 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 4597 left = wait_for_completion_killable_timeout(&req->r_completion, 4598 ceph_timeout_jiffies(timeout)); 4599 if (left <= 0) { 4600 left = left ?: -ETIMEDOUT; 4601 ceph_osdc_cancel_request(req); 4602 } else { 4603 left = req->r_result; /* completed */ 4604 } 4605 4606 return left; 4607 } 4608 4609 /* 4610 * wait for a request to complete 4611 */ 4612 int ceph_osdc_wait_request(struct ceph_osd_client *osdc, 4613 struct ceph_osd_request *req) 4614 { 4615 return wait_request_timeout(req, 0); 4616 } 4617 EXPORT_SYMBOL(ceph_osdc_wait_request); 4618 4619 /* 4620 * sync - wait for all in-flight requests to flush. avoid starvation. 
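 * Snapshot last_tid, then wait on every in-flight write with a
 * tid at or below it, dropping the locks for each wait and
 * rescanning from the top afterwards.  Requests submitted after
 * the snapshot are deliberately not waited for.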
4621 */ 4622 void ceph_osdc_sync(struct ceph_osd_client *osdc) 4623 { 4624 struct rb_node *n, *p; 4625 u64 last_tid = atomic64_read(&osdc->last_tid); 4626 4627 again: 4628 down_read(&osdc->lock); 4629 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 4630 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 4631 4632 mutex_lock(&osd->lock); 4633 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) { 4634 struct ceph_osd_request *req = 4635 rb_entry(p, struct ceph_osd_request, r_node); 4636 4637 if (req->r_tid > last_tid) 4638 break; 4639 4640 if (!(req->r_flags & CEPH_OSD_FLAG_WRITE)) 4641 continue; 4642 4643 ceph_osdc_get_request(req); 4644 mutex_unlock(&osd->lock); 4645 up_read(&osdc->lock); 4646 dout("%s waiting on req %p tid %llu last_tid %llu\n", 4647 __func__, req, req->r_tid, last_tid); 4648 wait_for_completion(&req->r_completion); 4649 ceph_osdc_put_request(req); 4650 goto again; 4651 } 4652 4653 mutex_unlock(&osd->lock); 4654 } 4655 4656 up_read(&osdc->lock); 4657 dout("%s done last_tid %llu\n", __func__, last_tid); 4658 } 4659 EXPORT_SYMBOL(ceph_osdc_sync); 4660 4661 static struct ceph_osd_request * 4662 alloc_linger_request(struct ceph_osd_linger_request *lreq) 4663 { 4664 struct ceph_osd_request *req; 4665 4666 req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO); 4667 if (!req) 4668 return NULL; 4669 4670 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); 4671 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); 4672 return req; 4673 } 4674 4675 static struct ceph_osd_request * 4676 alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode) 4677 { 4678 struct ceph_osd_request *req; 4679 4680 req = alloc_linger_request(lreq); 4681 if (!req) 4682 return NULL; 4683 4684 /* 4685 * Pass 0 for cookie because we don't know it yet, it will be 4686 * filled in by linger_submit(). 4687 */ 4688 osd_req_op_watch_init(req, 0, 0, watch_opcode); 4689 4690 if (ceph_osdc_alloc_messages(req, GFP_NOIO)) { 4691 ceph_osdc_put_request(req); 4692 return NULL; 4693 } 4694 4695 return req; 4696 } 4697 4698 /* 4699 * Returns a handle, caller owns a ref. 4700 */ 4701 struct ceph_osd_linger_request * 4702 ceph_osdc_watch(struct ceph_osd_client *osdc, 4703 struct ceph_object_id *oid, 4704 struct ceph_object_locator *oloc, 4705 rados_watchcb2_t wcb, 4706 rados_watcherrcb_t errcb, 4707 void *data) 4708 { 4709 struct ceph_osd_linger_request *lreq; 4710 int ret; 4711 4712 lreq = linger_alloc(osdc); 4713 if (!lreq) 4714 return ERR_PTR(-ENOMEM); 4715 4716 lreq->is_watch = true; 4717 lreq->wcb = wcb; 4718 lreq->errcb = errcb; 4719 lreq->data = data; 4720 lreq->watch_valid_thru = jiffies; 4721 4722 ceph_oid_copy(&lreq->t.base_oid, oid); 4723 ceph_oloc_copy(&lreq->t.base_oloc, oloc); 4724 lreq->t.flags = CEPH_OSD_FLAG_WRITE; 4725 ktime_get_real_ts64(&lreq->mtime); 4726 4727 lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH); 4728 if (!lreq->reg_req) { 4729 ret = -ENOMEM; 4730 goto err_put_lreq; 4731 } 4732 4733 lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING); 4734 if (!lreq->ping_req) { 4735 ret = -ENOMEM; 4736 goto err_put_lreq; 4737 } 4738 4739 linger_submit(lreq); 4740 ret = linger_reg_commit_wait(lreq); 4741 if (ret) { 4742 linger_cancel(lreq); 4743 goto err_put_lreq; 4744 } 4745 4746 return lreq; 4747 4748 err_put_lreq: 4749 linger_put(lreq); 4750 return ERR_PTR(ret); 4751 } 4752 EXPORT_SYMBOL(ceph_osdc_watch); 4753 4754 /* 4755 * Releases a ref. 
4756 * 4757 * Times out after mount_timeout to preserve rbd unmap behaviour 4758 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap 4759 * with mount_timeout"). 4760 */ 4761 int ceph_osdc_unwatch(struct ceph_osd_client *osdc, 4762 struct ceph_osd_linger_request *lreq) 4763 { 4764 struct ceph_options *opts = osdc->client->options; 4765 struct ceph_osd_request *req; 4766 int ret; 4767 4768 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 4769 if (!req) 4770 return -ENOMEM; 4771 4772 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); 4773 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); 4774 req->r_flags = CEPH_OSD_FLAG_WRITE; 4775 ktime_get_real_ts64(&req->r_mtime); 4776 osd_req_op_watch_init(req, 0, lreq->linger_id, 4777 CEPH_OSD_WATCH_OP_UNWATCH); 4778 4779 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 4780 if (ret) 4781 goto out_put_req; 4782 4783 ceph_osdc_start_request(osdc, req, false); 4784 linger_cancel(lreq); 4785 linger_put(lreq); 4786 ret = wait_request_timeout(req, opts->mount_timeout); 4787 4788 out_put_req: 4789 ceph_osdc_put_request(req); 4790 return ret; 4791 } 4792 EXPORT_SYMBOL(ceph_osdc_unwatch); 4793 4794 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which, 4795 u64 notify_id, u64 cookie, void *payload, 4796 u32 payload_len) 4797 { 4798 struct ceph_osd_req_op *op; 4799 struct ceph_pagelist *pl; 4800 int ret; 4801 4802 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0); 4803 4804 pl = ceph_pagelist_alloc(GFP_NOIO); 4805 if (!pl) 4806 return -ENOMEM; 4807 4808 ret = ceph_pagelist_encode_64(pl, notify_id); 4809 ret |= ceph_pagelist_encode_64(pl, cookie); 4810 if (payload) { 4811 ret |= ceph_pagelist_encode_32(pl, payload_len); 4812 ret |= ceph_pagelist_append(pl, payload, payload_len); 4813 } else { 4814 ret |= ceph_pagelist_encode_32(pl, 0); 4815 } 4816 if (ret) { 4817 ceph_pagelist_release(pl); 4818 return -ENOMEM; 4819 } 4820 4821 ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl); 4822 op->indata_len = pl->length; 4823 return 0; 4824 } 4825 4826 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc, 4827 struct ceph_object_id *oid, 4828 struct ceph_object_locator *oloc, 4829 u64 notify_id, 4830 u64 cookie, 4831 void *payload, 4832 u32 payload_len) 4833 { 4834 struct ceph_osd_request *req; 4835 int ret; 4836 4837 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 4838 if (!req) 4839 return -ENOMEM; 4840 4841 ceph_oid_copy(&req->r_base_oid, oid); 4842 ceph_oloc_copy(&req->r_base_oloc, oloc); 4843 req->r_flags = CEPH_OSD_FLAG_READ; 4844 4845 ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload, 4846 payload_len); 4847 if (ret) 4848 goto out_put_req; 4849 4850 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 4851 if (ret) 4852 goto out_put_req; 4853 4854 ceph_osdc_start_request(osdc, req, false); 4855 ret = ceph_osdc_wait_request(osdc, req); 4856 4857 out_put_req: 4858 ceph_osdc_put_request(req); 4859 return ret; 4860 } 4861 EXPORT_SYMBOL(ceph_osdc_notify_ack); 4862 4863 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which, 4864 u64 cookie, u32 prot_ver, u32 timeout, 4865 void *payload, u32 payload_len) 4866 { 4867 struct ceph_osd_req_op *op; 4868 struct ceph_pagelist *pl; 4869 int ret; 4870 4871 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); 4872 op->notify.cookie = cookie; 4873 4874 pl = ceph_pagelist_alloc(GFP_NOIO); 4875 if (!pl) 4876 return -ENOMEM; 4877 4878 ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */ 4879 ret |= 
ceph_pagelist_encode_32(pl, timeout); 4880 ret |= ceph_pagelist_encode_32(pl, payload_len); 4881 ret |= ceph_pagelist_append(pl, payload, payload_len); 4882 if (ret) { 4883 ceph_pagelist_release(pl); 4884 return -ENOMEM; 4885 } 4886 4887 ceph_osd_data_pagelist_init(&op->notify.request_data, pl); 4888 op->indata_len = pl->length; 4889 return 0; 4890 } 4891 4892 /* 4893 * @timeout: in seconds 4894 * 4895 * @preply_{pages,len} are initialized both on success and error. 4896 * The caller is responsible for: 4897 * 4898 * ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len)) 4899 */ 4900 int ceph_osdc_notify(struct ceph_osd_client *osdc, 4901 struct ceph_object_id *oid, 4902 struct ceph_object_locator *oloc, 4903 void *payload, 4904 u32 payload_len, 4905 u32 timeout, 4906 struct page ***preply_pages, 4907 size_t *preply_len) 4908 { 4909 struct ceph_osd_linger_request *lreq; 4910 struct page **pages; 4911 int ret; 4912 4913 WARN_ON(!timeout); 4914 if (preply_pages) { 4915 *preply_pages = NULL; 4916 *preply_len = 0; 4917 } 4918 4919 lreq = linger_alloc(osdc); 4920 if (!lreq) 4921 return -ENOMEM; 4922 4923 lreq->preply_pages = preply_pages; 4924 lreq->preply_len = preply_len; 4925 4926 ceph_oid_copy(&lreq->t.base_oid, oid); 4927 ceph_oloc_copy(&lreq->t.base_oloc, oloc); 4928 lreq->t.flags = CEPH_OSD_FLAG_READ; 4929 4930 lreq->reg_req = alloc_linger_request(lreq); 4931 if (!lreq->reg_req) { 4932 ret = -ENOMEM; 4933 goto out_put_lreq; 4934 } 4935 4936 /* 4937 * Pass 0 for cookie because we don't know it yet, it will be 4938 * filled in by linger_submit(). 4939 */ 4940 ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout, 4941 payload, payload_len); 4942 if (ret) 4943 goto out_put_lreq; 4944 4945 /* for notify_id */ 4946 pages = ceph_alloc_page_vector(1, GFP_NOIO); 4947 if (IS_ERR(pages)) { 4948 ret = PTR_ERR(pages); 4949 goto out_put_lreq; 4950 } 4951 ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify, 4952 response_data), 4953 pages, PAGE_SIZE, 0, false, true); 4954 4955 ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO); 4956 if (ret) 4957 goto out_put_lreq; 4958 4959 linger_submit(lreq); 4960 ret = linger_reg_commit_wait(lreq); 4961 if (!ret) 4962 ret = linger_notify_finish_wait(lreq); 4963 else 4964 dout("lreq %p failed to initiate notify %d\n", lreq, ret); 4965 4966 linger_cancel(lreq); 4967 out_put_lreq: 4968 linger_put(lreq); 4969 return ret; 4970 } 4971 EXPORT_SYMBOL(ceph_osdc_notify); 4972 4973 /* 4974 * Return the number of milliseconds since the watch was last 4975 * confirmed, or an error. If there is an error, the watch is no 4976 * longer valid, and should be destroyed with ceph_osdc_unwatch(). 
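 * A notify that is queued but not yet serviced counts against
 * the age: the queue stamp of the oldest pending lwork is used
 * if it predates ->watch_valid_thru.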
4977 */ 4978 int ceph_osdc_watch_check(struct ceph_osd_client *osdc, 4979 struct ceph_osd_linger_request *lreq) 4980 { 4981 unsigned long stamp, age; 4982 int ret; 4983 4984 down_read(&osdc->lock); 4985 mutex_lock(&lreq->lock); 4986 stamp = lreq->watch_valid_thru; 4987 if (!list_empty(&lreq->pending_lworks)) { 4988 struct linger_work *lwork = 4989 list_first_entry(&lreq->pending_lworks, 4990 struct linger_work, 4991 pending_item); 4992 4993 if (time_before(lwork->queued_stamp, stamp)) 4994 stamp = lwork->queued_stamp; 4995 } 4996 age = jiffies - stamp; 4997 dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__, 4998 lreq, lreq->linger_id, age, lreq->last_error); 4999 /* we are truncating to msecs, so return a safe upper bound */ 5000 ret = lreq->last_error ?: 1 + jiffies_to_msecs(age); 5001 5002 mutex_unlock(&lreq->lock); 5003 up_read(&osdc->lock); 5004 return ret; 5005 } 5006 5007 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item) 5008 { 5009 u8 struct_v; 5010 u32 struct_len; 5011 int ret; 5012 5013 ret = ceph_start_decoding(p, end, 2, "watch_item_t", 5014 &struct_v, &struct_len); 5015 if (ret) 5016 goto bad; 5017 5018 ret = -EINVAL; 5019 ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad); 5020 ceph_decode_64_safe(p, end, item->cookie, bad); 5021 ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */ 5022 5023 if (struct_v >= 2) { 5024 ret = ceph_decode_entity_addr(p, end, &item->addr); 5025 if (ret) 5026 goto bad; 5027 } else { 5028 ret = 0; 5029 } 5030 5031 dout("%s %s%llu cookie %llu addr %s\n", __func__, 5032 ENTITY_NAME(item->name), item->cookie, 5033 ceph_pr_addr(&item->addr)); 5034 bad: 5035 return ret; 5036 } 5037 5038 static int decode_watchers(void **p, void *end, 5039 struct ceph_watch_item **watchers, 5040 u32 *num_watchers) 5041 { 5042 u8 struct_v; 5043 u32 struct_len; 5044 int i; 5045 int ret; 5046 5047 ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t", 5048 &struct_v, &struct_len); 5049 if (ret) 5050 return ret; 5051 5052 *num_watchers = ceph_decode_32(p); 5053 *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO); 5054 if (!*watchers) 5055 return -ENOMEM; 5056 5057 for (i = 0; i < *num_watchers; i++) { 5058 ret = decode_watcher(p, end, *watchers + i); 5059 if (ret) { 5060 kfree(*watchers); 5061 return ret; 5062 } 5063 } 5064 5065 return 0; 5066 } 5067 5068 /* 5069 * On success, the caller is responsible for: 5070 * 5071 * kfree(watchers); 5072 */ 5073 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, 5074 struct ceph_object_id *oid, 5075 struct ceph_object_locator *oloc, 5076 struct ceph_watch_item **watchers, 5077 u32 *num_watchers) 5078 { 5079 struct ceph_osd_request *req; 5080 struct page **pages; 5081 int ret; 5082 5083 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 5084 if (!req) 5085 return -ENOMEM; 5086 5087 ceph_oid_copy(&req->r_base_oid, oid); 5088 ceph_oloc_copy(&req->r_base_oloc, oloc); 5089 req->r_flags = CEPH_OSD_FLAG_READ; 5090 5091 pages = ceph_alloc_page_vector(1, GFP_NOIO); 5092 if (IS_ERR(pages)) { 5093 ret = PTR_ERR(pages); 5094 goto out_put_req; 5095 } 5096 5097 osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0); 5098 ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers, 5099 response_data), 5100 pages, PAGE_SIZE, 0, false, true); 5101 5102 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 5103 if (ret) 5104 goto out_put_req; 5105 5106 ceph_osdc_start_request(osdc, req, false); 5107 ret = ceph_osdc_wait_request(osdc, req); 5108 if 
(ret >= 0) { 5109 void *p = page_address(pages[0]); 5110 void *const end = p + req->r_ops[0].outdata_len; 5111 5112 ret = decode_watchers(&p, end, watchers, num_watchers); 5113 } 5114 5115 out_put_req: 5116 ceph_osdc_put_request(req); 5117 return ret; 5118 } 5119 EXPORT_SYMBOL(ceph_osdc_list_watchers); 5120 5121 /* 5122 * Call all pending notify callbacks - for use after a watch is 5123 * unregistered, to make sure no more callbacks for it will be invoked 5124 */ 5125 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc) 5126 { 5127 dout("%s osdc %p\n", __func__, osdc); 5128 flush_workqueue(osdc->notify_wq); 5129 } 5130 EXPORT_SYMBOL(ceph_osdc_flush_notifies); 5131 5132 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc) 5133 { 5134 down_read(&osdc->lock); 5135 maybe_request_map(osdc); 5136 up_read(&osdc->lock); 5137 } 5138 EXPORT_SYMBOL(ceph_osdc_maybe_request_map); 5139 5140 /* 5141 * Execute an OSD class method on an object. 5142 * 5143 * @flags: CEPH_OSD_FLAG_* 5144 * @resp_len: in/out param for reply length 5145 */ 5146 int ceph_osdc_call(struct ceph_osd_client *osdc, 5147 struct ceph_object_id *oid, 5148 struct ceph_object_locator *oloc, 5149 const char *class, const char *method, 5150 unsigned int flags, 5151 struct page *req_page, size_t req_len, 5152 struct page **resp_pages, size_t *resp_len) 5153 { 5154 struct ceph_osd_request *req; 5155 int ret; 5156 5157 if (req_len > PAGE_SIZE) 5158 return -E2BIG; 5159 5160 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO); 5161 if (!req) 5162 return -ENOMEM; 5163 5164 ceph_oid_copy(&req->r_base_oid, oid); 5165 ceph_oloc_copy(&req->r_base_oloc, oloc); 5166 req->r_flags = flags; 5167 5168 ret = osd_req_op_cls_init(req, 0, class, method); 5169 if (ret) 5170 goto out_put_req; 5171 5172 if (req_page) 5173 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len, 5174 0, false, false); 5175 if (resp_pages) 5176 osd_req_op_cls_response_data_pages(req, 0, resp_pages, 5177 *resp_len, 0, false, false); 5178 5179 ret = ceph_osdc_alloc_messages(req, GFP_NOIO); 5180 if (ret) 5181 goto out_put_req; 5182 5183 ceph_osdc_start_request(osdc, req, false); 5184 ret = ceph_osdc_wait_request(osdc, req); 5185 if (ret >= 0) { 5186 ret = req->r_ops[0].rval; 5187 if (resp_pages) 5188 *resp_len = req->r_ops[0].outdata_len; 5189 } 5190 5191 out_put_req: 5192 ceph_osdc_put_request(req); 5193 return ret; 5194 } 5195 EXPORT_SYMBOL(ceph_osdc_call); 5196 5197 /* 5198 * reset all osd connections 5199 */ 5200 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc) 5201 { 5202 struct rb_node *n; 5203 5204 down_write(&osdc->lock); 5205 for (n = rb_first(&osdc->osds); n; ) { 5206 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 5207 5208 n = rb_next(n); 5209 if (!reopen_osd(osd)) 5210 kick_osd_requests(osd); 5211 } 5212 up_write(&osdc->lock); 5213 } 5214 5215 /* 5216 * init, shutdown 5217 */ 5218 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) 5219 { 5220 int err; 5221 5222 dout("init\n"); 5223 osdc->client = client; 5224 init_rwsem(&osdc->lock); 5225 osdc->osds = RB_ROOT; 5226 INIT_LIST_HEAD(&osdc->osd_lru); 5227 spin_lock_init(&osdc->osd_lru_lock); 5228 osd_init(&osdc->homeless_osd); 5229 osdc->homeless_osd.o_osdc = osdc; 5230 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD; 5231 osdc->last_linger_id = CEPH_LINGER_ID_START; 5232 osdc->linger_requests = RB_ROOT; 5233 osdc->map_checks = RB_ROOT; 5234 osdc->linger_map_checks = RB_ROOT; 5235 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); 5236 
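	/* reaps OSD sessions that have been idle for osd_idle_ttl */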
INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); 5237 5238 err = -ENOMEM; 5239 osdc->osdmap = ceph_osdmap_alloc(); 5240 if (!osdc->osdmap) 5241 goto out; 5242 5243 osdc->req_mempool = mempool_create_slab_pool(10, 5244 ceph_osd_request_cache); 5245 if (!osdc->req_mempool) 5246 goto out_map; 5247 5248 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP, 5249 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op"); 5250 if (err < 0) 5251 goto out_mempool; 5252 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY, 5253 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, 5254 "osd_op_reply"); 5255 if (err < 0) 5256 goto out_msgpool; 5257 5258 err = -ENOMEM; 5259 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); 5260 if (!osdc->notify_wq) 5261 goto out_msgpool_reply; 5262 5263 osdc->completion_wq = create_singlethread_workqueue("ceph-completion"); 5264 if (!osdc->completion_wq) 5265 goto out_notify_wq; 5266 5267 schedule_delayed_work(&osdc->timeout_work, 5268 osdc->client->options->osd_keepalive_timeout); 5269 schedule_delayed_work(&osdc->osds_timeout_work, 5270 round_jiffies_relative(osdc->client->options->osd_idle_ttl)); 5271 5272 return 0; 5273 5274 out_notify_wq: 5275 destroy_workqueue(osdc->notify_wq); 5276 out_msgpool_reply: 5277 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 5278 out_msgpool: 5279 ceph_msgpool_destroy(&osdc->msgpool_op); 5280 out_mempool: 5281 mempool_destroy(osdc->req_mempool); 5282 out_map: 5283 ceph_osdmap_destroy(osdc->osdmap); 5284 out: 5285 return err; 5286 } 5287 5288 void ceph_osdc_stop(struct ceph_osd_client *osdc) 5289 { 5290 destroy_workqueue(osdc->completion_wq); 5291 destroy_workqueue(osdc->notify_wq); 5292 cancel_delayed_work_sync(&osdc->timeout_work); 5293 cancel_delayed_work_sync(&osdc->osds_timeout_work); 5294 5295 down_write(&osdc->lock); 5296 while (!RB_EMPTY_ROOT(&osdc->osds)) { 5297 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), 5298 struct ceph_osd, o_node); 5299 close_osd(osd); 5300 } 5301 up_write(&osdc->lock); 5302 WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1); 5303 osd_cleanup(&osdc->homeless_osd); 5304 5305 WARN_ON(!list_empty(&osdc->osd_lru)); 5306 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests)); 5307 WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks)); 5308 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks)); 5309 WARN_ON(atomic_read(&osdc->num_requests)); 5310 WARN_ON(atomic_read(&osdc->num_homeless)); 5311 5312 ceph_osdmap_destroy(osdc->osdmap); 5313 mempool_destroy(osdc->req_mempool); 5314 ceph_msgpool_destroy(&osdc->msgpool_op); 5315 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 5316 } 5317 5318 static int osd_req_op_copy_from_init(struct ceph_osd_request *req, 5319 u64 src_snapid, u64 src_version, 5320 struct ceph_object_id *src_oid, 5321 struct ceph_object_locator *src_oloc, 5322 u32 src_fadvise_flags, 5323 u32 dst_fadvise_flags, 5324 u32 truncate_seq, u64 truncate_size, 5325 u8 copy_from_flags) 5326 { 5327 struct ceph_osd_req_op *op; 5328 struct page **pages; 5329 void *p, *end; 5330 5331 pages = ceph_alloc_page_vector(1, GFP_KERNEL); 5332 if (IS_ERR(pages)) 5333 return PTR_ERR(pages); 5334 5335 op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2, 5336 dst_fadvise_flags); 5337 op->copy_from.snapid = src_snapid; 5338 op->copy_from.src_version = src_version; 5339 op->copy_from.flags = copy_from_flags; 5340 op->copy_from.src_fadvise_flags = src_fadvise_flags; 5341 5342 p = page_address(pages[0]); 5343 end = p + PAGE_SIZE; 5344 ceph_encode_string(&p, end, src_oid->name, src_oid->name_len); 5345 
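	/* source object locator, then the truncate hints */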
encode_oloc(&p, end, src_oloc); 5346 ceph_encode_32(&p, truncate_seq); 5347 ceph_encode_64(&p, truncate_size); 5348 op->indata_len = PAGE_SIZE - (end - p); 5349 5350 ceph_osd_data_pages_init(&op->copy_from.osd_data, pages, 5351 op->indata_len, 0, false, true); 5352 return 0; 5353 } 5354 5355 int ceph_osdc_copy_from(struct ceph_osd_client *osdc, 5356 u64 src_snapid, u64 src_version, 5357 struct ceph_object_id *src_oid, 5358 struct ceph_object_locator *src_oloc, 5359 u32 src_fadvise_flags, 5360 struct ceph_object_id *dst_oid, 5361 struct ceph_object_locator *dst_oloc, 5362 u32 dst_fadvise_flags, 5363 u32 truncate_seq, u64 truncate_size, 5364 u8 copy_from_flags) 5365 { 5366 struct ceph_osd_request *req; 5367 int ret; 5368 5369 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL); 5370 if (!req) 5371 return -ENOMEM; 5372 5373 req->r_flags = CEPH_OSD_FLAG_WRITE; 5374 5375 ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc); 5376 ceph_oid_copy(&req->r_t.base_oid, dst_oid); 5377 5378 ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid, 5379 src_oloc, src_fadvise_flags, 5380 dst_fadvise_flags, truncate_seq, 5381 truncate_size, copy_from_flags); 5382 if (ret) 5383 goto out; 5384 5385 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); 5386 if (ret) 5387 goto out; 5388 5389 ceph_osdc_start_request(osdc, req, false); 5390 ret = ceph_osdc_wait_request(osdc, req); 5391 5392 out: 5393 ceph_osdc_put_request(req); 5394 return ret; 5395 } 5396 EXPORT_SYMBOL(ceph_osdc_copy_from); 5397 5398 int __init ceph_osdc_setup(void) 5399 { 5400 size_t size = sizeof(struct ceph_osd_request) + 5401 CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op); 5402 5403 BUG_ON(ceph_osd_request_cache); 5404 ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size, 5405 0, 0, NULL); 5406 5407 return ceph_osd_request_cache ? 0 : -ENOMEM; 5408 } 5409 5410 void ceph_osdc_cleanup(void) 5411 { 5412 BUG_ON(!ceph_osd_request_cache); 5413 kmem_cache_destroy(ceph_osd_request_cache); 5414 ceph_osd_request_cache = NULL; 5415 } 5416 5417 /* 5418 * handle incoming message 5419 */ 5420 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) 5421 { 5422 struct ceph_osd *osd = con->private; 5423 struct ceph_osd_client *osdc = osd->o_osdc; 5424 int type = le16_to_cpu(msg->hdr.type); 5425 5426 switch (type) { 5427 case CEPH_MSG_OSD_MAP: 5428 ceph_osdc_handle_map(osdc, msg); 5429 break; 5430 case CEPH_MSG_OSD_OPREPLY: 5431 handle_reply(osd, msg); 5432 break; 5433 case CEPH_MSG_OSD_BACKOFF: 5434 handle_backoff(osd, msg); 5435 break; 5436 case CEPH_MSG_WATCH_NOTIFY: 5437 handle_watch_notify(osdc, msg); 5438 break; 5439 5440 default: 5441 pr_err("received unknown message type %d %s\n", type, 5442 ceph_msg_type_name(type)); 5443 } 5444 5445 ceph_msg_put(msg); 5446 } 5447 5448 /* 5449 * Lookup and return message for incoming reply. Don't try to do 5450 * anything about a larger than preallocated data portion of the 5451 * message at the moment - for now, just skip the message. 
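 * Setting *skip makes the messenger consume and discard the
 * incoming message instead of reading it into a too-small
 * buffer.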
5452 */ 5453 static struct ceph_msg *get_reply(struct ceph_connection *con, 5454 struct ceph_msg_header *hdr, 5455 int *skip) 5456 { 5457 struct ceph_osd *osd = con->private; 5458 struct ceph_osd_client *osdc = osd->o_osdc; 5459 struct ceph_msg *m = NULL; 5460 struct ceph_osd_request *req; 5461 int front_len = le32_to_cpu(hdr->front_len); 5462 int data_len = le32_to_cpu(hdr->data_len); 5463 u64 tid = le64_to_cpu(hdr->tid); 5464 5465 down_read(&osdc->lock); 5466 if (!osd_registered(osd)) { 5467 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd); 5468 *skip = 1; 5469 goto out_unlock_osdc; 5470 } 5471 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num)); 5472 5473 mutex_lock(&osd->lock); 5474 req = lookup_request(&osd->o_requests, tid); 5475 if (!req) { 5476 dout("%s osd%d tid %llu unknown, skipping\n", __func__, 5477 osd->o_osd, tid); 5478 *skip = 1; 5479 goto out_unlock_session; 5480 } 5481 5482 ceph_msg_revoke_incoming(req->r_reply); 5483 5484 if (front_len > req->r_reply->front_alloc_len) { 5485 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n", 5486 __func__, osd->o_osd, req->r_tid, front_len, 5487 req->r_reply->front_alloc_len); 5488 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS, 5489 false); 5490 if (!m) 5491 goto out_unlock_session; 5492 ceph_msg_put(req->r_reply); 5493 req->r_reply = m; 5494 } 5495 5496 if (data_len > req->r_reply->data_length) { 5497 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n", 5498 __func__, osd->o_osd, req->r_tid, data_len, 5499 req->r_reply->data_length); 5500 m = NULL; 5501 *skip = 1; 5502 goto out_unlock_session; 5503 } 5504 5505 m = ceph_msg_get(req->r_reply); 5506 dout("get_reply tid %lld %p\n", tid, m); 5507 5508 out_unlock_session: 5509 mutex_unlock(&osd->lock); 5510 out_unlock_osdc: 5511 up_read(&osdc->lock); 5512 return m; 5513 } 5514 5515 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr) 5516 { 5517 struct ceph_msg *m; 5518 int type = le16_to_cpu(hdr->type); 5519 u32 front_len = le32_to_cpu(hdr->front_len); 5520 u32 data_len = le32_to_cpu(hdr->data_len); 5521 5522 m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false); 5523 if (!m) 5524 return NULL; 5525 5526 if (data_len) { 5527 struct page **pages; 5528 5529 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), 5530 GFP_NOIO); 5531 if (IS_ERR(pages)) { 5532 ceph_msg_put(m); 5533 return NULL; 5534 } 5535 5536 ceph_msg_data_add_pages(m, pages, data_len, 0, true); 5537 } 5538 5539 return m; 5540 } 5541 5542 static struct ceph_msg *alloc_msg(struct ceph_connection *con, 5543 struct ceph_msg_header *hdr, 5544 int *skip) 5545 { 5546 struct ceph_osd *osd = con->private; 5547 int type = le16_to_cpu(hdr->type); 5548 5549 *skip = 0; 5550 switch (type) { 5551 case CEPH_MSG_OSD_MAP: 5552 case CEPH_MSG_OSD_BACKOFF: 5553 case CEPH_MSG_WATCH_NOTIFY: 5554 return alloc_msg_with_page_vector(hdr); 5555 case CEPH_MSG_OSD_OPREPLY: 5556 return get_reply(con, hdr, skip); 5557 default: 5558 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__, 5559 osd->o_osd, type); 5560 *skip = 1; 5561 return NULL; 5562 } 5563 } 5564 5565 /* 5566 * Wrappers to refcount containing ceph_osd struct 5567 */ 5568 static struct ceph_connection *get_osd_con(struct ceph_connection *con) 5569 { 5570 struct ceph_osd *osd = con->private; 5571 if (get_osd(osd)) 5572 return con; 5573 return NULL; 5574 } 5575 5576 static void put_osd_con(struct ceph_connection *con) 5577 { 5578 struct ceph_osd *osd = con->private; 5579 put_osd(osd); 5580 } 5581 5582 /* 5583 * 
authentication 5584 */ 5585 /* 5586 * Note: returned pointer is the address of a structure that's 5587 * managed separately. Caller must *not* attempt to free it. 5588 */ 5589 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, 5590 int *proto, int force_new) 5591 { 5592 struct ceph_osd *o = con->private; 5593 struct ceph_osd_client *osdc = o->o_osdc; 5594 struct ceph_auth_client *ac = osdc->client->monc.auth; 5595 struct ceph_auth_handshake *auth = &o->o_auth; 5596 5597 if (force_new && auth->authorizer) { 5598 ceph_auth_destroy_authorizer(auth->authorizer); 5599 auth->authorizer = NULL; 5600 } 5601 if (!auth->authorizer) { 5602 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, 5603 auth); 5604 if (ret) 5605 return ERR_PTR(ret); 5606 } else { 5607 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, 5608 auth); 5609 if (ret) 5610 return ERR_PTR(ret); 5611 } 5612 *proto = ac->protocol; 5613 5614 return auth; 5615 } 5616 5617 static int add_authorizer_challenge(struct ceph_connection *con, 5618 void *challenge_buf, int challenge_buf_len) 5619 { 5620 struct ceph_osd *o = con->private; 5621 struct ceph_osd_client *osdc = o->o_osdc; 5622 struct ceph_auth_client *ac = osdc->client->monc.auth; 5623 5624 return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer, 5625 challenge_buf, challenge_buf_len); 5626 } 5627 5628 static int verify_authorizer_reply(struct ceph_connection *con) 5629 { 5630 struct ceph_osd *o = con->private; 5631 struct ceph_osd_client *osdc = o->o_osdc; 5632 struct ceph_auth_client *ac = osdc->client->monc.auth; 5633 5634 return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer); 5635 } 5636 5637 static int invalidate_authorizer(struct ceph_connection *con) 5638 { 5639 struct ceph_osd *o = con->private; 5640 struct ceph_osd_client *osdc = o->o_osdc; 5641 struct ceph_auth_client *ac = osdc->client->monc.auth; 5642 5643 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); 5644 return ceph_monc_validate_auth(&osdc->client->monc); 5645 } 5646 5647 static void osd_reencode_message(struct ceph_msg *msg) 5648 { 5649 int type = le16_to_cpu(msg->hdr.type); 5650 5651 if (type == CEPH_MSG_OSD_OP) 5652 encode_request_finish(msg); 5653 } 5654 5655 static int osd_sign_message(struct ceph_msg *msg) 5656 { 5657 struct ceph_osd *o = msg->con->private; 5658 struct ceph_auth_handshake *auth = &o->o_auth; 5659 5660 return ceph_auth_sign_message(auth, msg); 5661 } 5662 5663 static int osd_check_message_signature(struct ceph_msg *msg) 5664 { 5665 struct ceph_osd *o = msg->con->private; 5666 struct ceph_auth_handshake *auth = &o->o_auth; 5667 5668 return ceph_auth_check_message_signature(auth, msg); 5669 } 5670 5671 static const struct ceph_connection_operations osd_con_ops = { 5672 .get = get_osd_con, 5673 .put = put_osd_con, 5674 .dispatch = dispatch, 5675 .get_authorizer = get_authorizer, 5676 .add_authorizer_challenge = add_authorizer_challenge, 5677 .verify_authorizer_reply = verify_authorizer_reply, 5678 .invalidate_authorizer = invalidate_authorizer, 5679 .alloc_msg = alloc_msg, 5680 .reencode_message = osd_reencode_message, 5681 .sign_message = osd_sign_message, 5682 .check_message_signature = osd_check_message_signature, 5683 .fault = osd_fault, 5684 }; 5685