// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to a distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices".  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * changes, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
		       u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	u32 xlen;

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
				      objoff, &xlen);
	*objlen = xlen;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
	return 0;
}
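/*
 * Illustrative sketch (not built): how calc_layout() clips a file extent
 * to a single object.  Assumes the default 4M object size with no
 * striping; the offsets below are hypothetical.
 */
#if 0
static void example_calc_layout(struct ceph_file_layout *layout)
{
	u64 off = 6 << 20;		/* file offset 6M */
	u64 len = 4 << 20;		/* want 4M */
	u64 objnum, objoff, objlen;

	calc_layout(layout, off, &len, &objnum, &objoff, &objlen);
	/*
	 * With 4M objects this lands in object 1 at offset 2M, and the
	 * extent is shortened to 2M at the object boundary.
	 */
	WARN_ON(objnum != 1 || objoff != 2 << 20 || len != 2 << 20);
}
#endif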
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
				   struct ceph_bio_iter *bio_pos,
				   u32 bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio_pos = *bio_pos;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
				     struct ceph_bvec_iter *bvec_pos,
				     u32 num_bvecs)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
	osd_data->bvec_pos = *bvec_pos;
	osd_data->num_bvecs = num_bvecs;
}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
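/*
 * Illustrative sketch (not built): attaching a caller-owned page vector
 * as the data buffer of a read extent op.  The op index and lengths are
 * hypothetical; with own_pages == false the pages are not consumed.
 */
#if 0
static void example_read_into_pages(struct ceph_osd_request *req,
				    struct page **pages, u64 len)
{
	/* op 0 was set up with osd_req_op_extent_init(CEPH_OSD_OP_READ) */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
					 false, false);
}
#endif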
#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
				    unsigned int which,
				    struct ceph_bio_iter *bio_pos,
				    u32 bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
				      unsigned int which,
				      struct bio_vec *bvecs, u32 num_bvecs,
				      u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
					 unsigned int which,
					 struct ceph_bvec_iter *bvec_pos)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
				       unsigned int which,
				       struct bio_vec *bvecs, u32 num_bvecs,
				       u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
	osd_req->r_ops[which].cls.indata_len += bytes;
	osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
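/*
 * Illustrative sketch (not built): the three data buffers of a
 * CEPH_OSD_OP_CALL op.  request_info carries the class/method names (set
 * up by osd_req_op_cls_init()), request_data carries the input blob and
 * is accounted in both cls.indata_len and indata_len, and response_data
 * receives the method's output.  The "lock"/"lock" class/method pair is
 * just an example; buffers and lengths are hypothetical.
 */
#if 0
static int example_cls_call(struct ceph_osd_request *req,
			    struct page **in_pages, u64 in_len,
			    struct page **out_pages, u64 out_len)
{
	int ret;

	ret = osd_req_op_cls_init(req, 0, "lock", "lock");
	if (ret)
		return ret;

	osd_req_op_cls_request_data_pages(req, 0, in_pages, in_len, 0,
					  false, false);
	osd_req_op_cls_response_data_pages(req, 0, out_pages, out_len, 0,
					   false, false);
	return 0;
}
#endif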
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	case CEPH_OSD_DATA_TYPE_BVECS:
		return osd_data->bvec_pos.iter.bi_size;
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		ceph_pagelist_release(osd_data->pagelist);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
				    unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	case CEPH_OSD_OP_COPY_FROM:
		ceph_osd_data_release(&op->copy_from.osd_data);
		break;
	default:
		break;
	}
}
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_private_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_private_item);

	target_init(&req->r_t);
}
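/*
 * Illustrative note (not built): a request comes back from
 * ceph_osdc_alloc_request() with a single reference; dropping it with
 * ceph_osdc_put_request() (which tolerates NULL) frees the request and
 * releases any op data buffers it owns.
 */
#if 0
static void example_request_lifetime(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
	if (!req)
		return;

	/* ... fill in oid/oloc and ops, submit, wait ... */

	ceph_osdc_put_request(req);	/* last ref, frees req */
}
#endif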
/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
				      int num_request_data_items,
				      int num_reply_data_items)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(req->r_request || req->r_reply);
	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
				       num_request_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
				    num_request_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
				       num_reply_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
				    num_reply_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}

static void get_num_data_items(struct ceph_osd_request *req,
			       int *num_request_data_items,
			       int *num_reply_data_items)
{
	struct ceph_osd_req_op *op;

	*num_request_data_items = 0;
	*num_reply_data_items = 0;

	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
		case CEPH_OSD_OP_NOTIFY_ACK:
		case CEPH_OSD_OP_COPY_FROM:
			*num_request_data_items += 1;
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
		case CEPH_OSD_OP_READ:
		case CEPH_OSD_OP_LIST_WATCHERS:
			*num_reply_data_items += 1;
			break;

		/* both */
		case CEPH_OSD_OP_NOTIFY:
			*num_request_data_items += 1;
			*num_reply_data_items += 1;
			break;
		case CEPH_OSD_OP_CALL:
			*num_request_data_items += 2;
			*num_reply_data_items += 1;
			break;

		default:
			WARN_ON(!osd_req_opcode_valid(op->op));
			break;
		}
	}
}

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	int num_request_data_items, num_reply_data_items;

	get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
					  num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
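/*
 * Illustrative sketch (not built) of the setup order implied above:
 * allocate the request, fill in oid/oloc and the ops, then allocate the
 * messages.  The object name and pool id are hypothetical.
 */
#if 0
static int example_stat_object(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
	if (!req)
		return -ENOMEM;

	req->r_base_oloc.pool = pool_id;
	ceph_oid_printf(&req->r_base_oid, "%s", "example_obj");
	osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret)
		goto out_put;

	/* ... ceph_osdc_start_request() / ceph_osdc_wait_request() ... */

out_put:
	ceph_osdc_put_request(req);
	return ret;
}
#endif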
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
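/*
 * Illustrative sketch (not built): initializing a write extent op and
 * then shrinking it.  For writes, indata_len tracks extent.length, so
 * osd_req_op_extent_update() adjusts both.  Values are hypothetical.
 */
#if 0
static void example_extent_op(struct ceph_osd_request *req)
{
	/* write 64K at object offset 0, no truncation interlock */
	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_WRITE, 0, 0x10000, 0, 0);

	/* shorten the same op to 32K; indata_len drops with it */
	osd_req_op_extent_update(req, 0, 0x8000);
}
#endif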
int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			const char *class, const char *method)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;
	int ret;

	op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ret = ceph_pagelist_append(pagelist, class, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ret = ceph_pagelist_append(pagelist, method, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;
	int ret;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ret = ceph_pagelist_append(pagelist, name, payload_len);
	if (ret)
		goto err_pagelist_free;

	op->xattr.value_len = size;
	ret = ceph_pagelist_append(pagelist, value, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
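/*
 * Illustrative sketch (not built): a CMPXATTR guard followed by a
 * SETXATTR.  The xattr name/values are hypothetical; the
 * CEPH_OSD_CMPXATTR_* constants come from rados.h.
 */
#if 0
static int example_xattr_ops(struct ceph_osd_request *req)
{
	int ret;

	/* op 0: fail unless the stored "version" xattr equals "1" */
	ret = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
				    "version", "1", 1,
				    CEPH_OSD_CMPXATTR_OP_EQ,
				    CEPH_OSD_CMPXATTR_MODE_STRING);
	if (ret)
		return ret;

	/* op 1: set it to "2" (cmp_op/cmp_mode unused for SETXATTR) */
	return osd_req_op_xattr_init(req, 1, CEPH_OSD_OP_SETXATTR,
				     "version", "2", 1, 0, 0);
}
#endif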
/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
		ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	case CEPH_OSD_OP_COPY_FROM:
		dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
		dst->copy_from.src_version =
			cpu_to_le64(src->copy_from.src_version);
		dst->copy_from.flags = src->copy_from.flags;
		dst->copy_from.src_fadvise_flags =
			cpu_to_le32(src->copy_from.src_fadvise_flags);
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	if (num_ops > 1)
		/*
		 * This is a special case for ceph_writepages_start(), but it
		 * also covers ceph_uninline_data().  If more multi-op request
		 * use cases emerge, we will need a separate helper.
		 */
		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
	else
		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
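/*
 * Illustrative sketch (not built): reading 16K from a file object via
 * ceph_osdc_new_request().  On return, len may have been shortened if
 * the extent crossed an object boundary.  vino/layout come from the
 * caller; the values are hypothetical.
 */
#if 0
static struct ceph_osd_request *
example_new_read(struct ceph_osd_client *osdc, struct ceph_file_layout *layout,
		 struct ceph_vino vino, u64 off)
{
	u64 len = 16 * 1024;

	return ceph_osdc_new_request(osdc, layout, vino, off, &len,
				     0, 1, CEPH_OSD_OP_READ,
				     CEPH_OSD_FLAG_READ, NULL, 0, 0, false);
}
#endif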
/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
			int (*fn)(struct ceph_osd_request *req, void *arg),
			void *arg)
{
	struct rb_node *n, *p;

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p);
			if (fn(req, arg))
				return;
		}
	}

	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
		struct ceph_osd_request *req =
		    rb_entry(p, struct ceph_osd_request, r_node);

		p = rb_next(p);
		if (fn(req, arg))
			return;
	}
}

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}
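/*
 * Illustrative sketch (not built): a callback for for_each_request()
 * above.  Returning 0 continues the walk over every in-flight request,
 * including those on the homeless session; non-zero stops it early.
 */
#if 0
static int example_count_request(struct ceph_osd_request *req, void *arg)
{
	int *count = arg;

	(*count)++;
	return 0;	/* keep iterating */
}
#endif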
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}
/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}
/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}
/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   struct ceph_connection *con,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change = false;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting, any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;

		t->osd = acting.primary;
	}

	if (unpaused || legacy_change || force_resend || split)
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
	     legacy_change, force_resend, split, ct_res, t->osd);
	return ct_res;
}

static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}
static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}

/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}

static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}
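/*
 * Illustrative note (not built): hoid_compare() above sorts hobjects the
 * way OSDs do -- is_max, pool and the bitwise-reversed hash are compared
 * before any names -- so a half-open backoff range [begin, end) can be
 * checked with two comparisons.  begin/end are assumed to have been
 * decoded from an MOSDBackoff message.
 */
#if 0
static bool example_hoid_in_range(const struct ceph_hobject_id *hoid,
				  const struct ceph_hobject_id *begin,
				  const struct ceph_hobject_id *end)
{
	return hoid_compare(hoid, begin) >= 0 && hoid_compare(hoid, end) < 0;
}
#endif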
/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);

static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}

/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)

static void clear_backoffs(struct ceph_osd *osd)
{
	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
		struct ceph_spg_mapping *spg =
		    rb_entry(rb_first(&osd->o_backoff_mappings),
			     struct ceph_spg_mapping, node);

		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
			struct ceph_osd_backoff *backoff =
			    rb_entry(rb_first(&spg->backoffs),
				     struct ceph_osd_backoff, spg_node);

			erase_backoff(&spg->backoffs, backoff);
			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
			free_backoff(backoff);
		}
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}
}

/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{
	hoid->key = NULL;
	hoid->key_len = 0;
	hoid->oid = t->target_oid.name;
	hoid->oid_len = t->target_oid.name_len;
	hoid->snapid = CEPH_NOSNAP;
	hoid->hash = t->pgid.seed;
	hoid->is_max = false;
	if (t->target_oloc.pool_ns) {
		hoid->nspace = t->target_oloc.pool_ns->str;
		hoid->nspace_len = t->target_oloc.pool_ns->len;
	} else {
		hoid->nspace = NULL;
		hoid->nspace_len = 0;
	}
	hoid->pool = t->target_oloc.pool;
	ceph_hoid_build_hash_cache(hoid);
}

static bool should_plug_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_hobject_id hoid;

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
	if (!spg)
		return false;

	hoid_fill_from_target(&hoid, &req->r_t);
	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
	if (!backoff)
		return false;

	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
	return true;
}
1939 */ 1940 static void setup_request_data(struct ceph_osd_request *req) 1941 { 1942 struct ceph_msg *request_msg = req->r_request; 1943 struct ceph_msg *reply_msg = req->r_reply; 1944 struct ceph_osd_req_op *op; 1945 1946 if (req->r_request->num_data_items || req->r_reply->num_data_items) 1947 return; 1948 1949 WARN_ON(request_msg->data_length || reply_msg->data_length); 1950 for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { 1951 switch (op->op) { 1952 /* request */ 1953 case CEPH_OSD_OP_WRITE: 1954 case CEPH_OSD_OP_WRITEFULL: 1955 WARN_ON(op->indata_len != op->extent.length); 1956 ceph_osdc_msg_data_add(request_msg, 1957 &op->extent.osd_data); 1958 break; 1959 case CEPH_OSD_OP_SETXATTR: 1960 case CEPH_OSD_OP_CMPXATTR: 1961 WARN_ON(op->indata_len != op->xattr.name_len + 1962 op->xattr.value_len); 1963 ceph_osdc_msg_data_add(request_msg, 1964 &op->xattr.osd_data); 1965 break; 1966 case CEPH_OSD_OP_NOTIFY_ACK: 1967 ceph_osdc_msg_data_add(request_msg, 1968 &op->notify_ack.request_data); 1969 break; 1970 case CEPH_OSD_OP_COPY_FROM: 1971 ceph_osdc_msg_data_add(request_msg, 1972 &op->copy_from.osd_data); 1973 break; 1974 1975 /* reply */ 1976 case CEPH_OSD_OP_STAT: 1977 ceph_osdc_msg_data_add(reply_msg, 1978 &op->raw_data_in); 1979 break; 1980 case CEPH_OSD_OP_READ: 1981 ceph_osdc_msg_data_add(reply_msg, 1982 &op->extent.osd_data); 1983 break; 1984 case CEPH_OSD_OP_LIST_WATCHERS: 1985 ceph_osdc_msg_data_add(reply_msg, 1986 &op->list_watchers.response_data); 1987 break; 1988 1989 /* both */ 1990 case CEPH_OSD_OP_CALL: 1991 WARN_ON(op->indata_len != op->cls.class_len + 1992 op->cls.method_len + 1993 op->cls.indata_len); 1994 ceph_osdc_msg_data_add(request_msg, 1995 &op->cls.request_info); 1996 /* optional, can be NONE */ 1997 ceph_osdc_msg_data_add(request_msg, 1998 &op->cls.request_data); 1999 /* optional, can be NONE */ 2000 ceph_osdc_msg_data_add(reply_msg, 2001 &op->cls.response_data); 2002 break; 2003 case CEPH_OSD_OP_NOTIFY: 2004 ceph_osdc_msg_data_add(request_msg, 2005 &op->notify.request_data); 2006 ceph_osdc_msg_data_add(reply_msg, 2007 &op->notify.response_data); 2008 break; 2009 } 2010 } 2011 } 2012 2013 static void encode_pgid(void **p, const struct ceph_pg *pgid) 2014 { 2015 ceph_encode_8(p, 1); 2016 ceph_encode_64(p, pgid->pool); 2017 ceph_encode_32(p, pgid->seed); 2018 ceph_encode_32(p, -1); /* preferred */ 2019 } 2020 2021 static void encode_spgid(void **p, const struct ceph_spg *spgid) 2022 { 2023 ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1); 2024 encode_pgid(p, &spgid->pgid); 2025 ceph_encode_8(p, spgid->shard); 2026 } 2027 2028 static void encode_oloc(void **p, void *end, 2029 const struct ceph_object_locator *oloc) 2030 { 2031 ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc)); 2032 ceph_encode_64(p, oloc->pool); 2033 ceph_encode_32(p, -1); /* preferred */ 2034 ceph_encode_32(p, 0); /* key len */ 2035 if (oloc->pool_ns) 2036 ceph_encode_string(p, end, oloc->pool_ns->str, 2037 oloc->pool_ns->len); 2038 else 2039 ceph_encode_32(p, 0); 2040 } 2041 2042 static void encode_request_partial(struct ceph_osd_request *req, 2043 struct ceph_msg *msg) 2044 { 2045 void *p = msg->front.iov_base; 2046 void *const end = p + msg->front_alloc_len; 2047 u32 data_len = 0; 2048 int i; 2049 2050 if (req->r_flags & CEPH_OSD_FLAG_WRITE) { 2051 /* snapshots aren't writeable */ 2052 WARN_ON(req->r_snapid != CEPH_NOSNAP); 2053 } else { 2054 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec || 2055 req->r_data_offset || req->r_snapc); 2056 } 2057 2058 
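/*
 * MOSDOp v8 front layout, in the order encoded below: spgid, raw hash,
 * osdmap epoch, flags, reqid, trace, client_inc, mtime, oloc, oid,
 * ops vector, snapid, snap context, retry_attempt.  The peer features
 * are appended later, in encode_request_finish().
 */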
setup_request_data(req); 2059 2060 encode_spgid(&p, &req->r_t.spgid); /* actual spg */ 2061 ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */ 2062 ceph_encode_32(&p, req->r_osdc->osdmap->epoch); 2063 ceph_encode_32(&p, req->r_flags); 2064 2065 /* reqid */ 2066 ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid)); 2067 memset(p, 0, sizeof(struct ceph_osd_reqid)); 2068 p += sizeof(struct ceph_osd_reqid); 2069 2070 /* trace */ 2071 memset(p, 0, sizeof(struct ceph_blkin_trace_info)); 2072 p += sizeof(struct ceph_blkin_trace_info); 2073 2074 ceph_encode_32(&p, 0); /* client_inc, always 0 */ 2075 ceph_encode_timespec64(p, &req->r_mtime); 2076 p += sizeof(struct ceph_timespec); 2077 2078 encode_oloc(&p, end, &req->r_t.target_oloc); 2079 ceph_encode_string(&p, end, req->r_t.target_oid.name, 2080 req->r_t.target_oid.name_len); 2081 2082 /* ops, can imply data */ 2083 ceph_encode_16(&p, req->r_num_ops); 2084 for (i = 0; i < req->r_num_ops; i++) { 2085 data_len += osd_req_encode_op(p, &req->r_ops[i]); 2086 p += sizeof(struct ceph_osd_op); 2087 } 2088 2089 ceph_encode_64(&p, req->r_snapid); /* snapid */ 2090 if (req->r_snapc) { 2091 ceph_encode_64(&p, req->r_snapc->seq); 2092 ceph_encode_32(&p, req->r_snapc->num_snaps); 2093 for (i = 0; i < req->r_snapc->num_snaps; i++) 2094 ceph_encode_64(&p, req->r_snapc->snaps[i]); 2095 } else { 2096 ceph_encode_64(&p, 0); /* snap_seq */ 2097 ceph_encode_32(&p, 0); /* snaps len */ 2098 } 2099 2100 ceph_encode_32(&p, req->r_attempts); /* retry_attempt */ 2101 BUG_ON(p > end - 8); /* space for features */ 2102 2103 msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */ 2104 /* front_len is finalized in encode_request_finish() */ 2105 msg->front.iov_len = p - msg->front.iov_base; 2106 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2107 msg->hdr.data_len = cpu_to_le32(data_len); 2108 /* 2109 * The header "data_off" is a hint to the receiver allowing it 2110 * to align received data into its buffers such that there's no 2111 * need to re-copy it before writing it to disk (direct I/O). 2112 */ 2113 msg->hdr.data_off = cpu_to_le16(req->r_data_offset); 2114 2115 dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg, 2116 req->r_t.target_oid.name, req->r_t.target_oid.name_len); 2117 } 2118 2119 static void encode_request_finish(struct ceph_msg *msg) 2120 { 2121 void *p = msg->front.iov_base; 2122 void *const partial_end = p + msg->front.iov_len; 2123 void *const end = p + msg->front_alloc_len; 2124 2125 if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) { 2126 /* luminous OSD -- encode features and be done */ 2127 p = partial_end; 2128 ceph_encode_64(&p, msg->con->peer_features); 2129 } else { 2130 struct { 2131 char spgid[CEPH_ENCODING_START_BLK_LEN + 2132 CEPH_PGID_ENCODING_LEN + 1]; 2133 __le32 hash; 2134 __le32 epoch; 2135 __le32 flags; 2136 char reqid[CEPH_ENCODING_START_BLK_LEN + 2137 sizeof(struct ceph_osd_reqid)]; 2138 char trace[sizeof(struct ceph_blkin_trace_info)]; 2139 __le32 client_inc; 2140 struct ceph_timespec mtime; 2141 } __packed head; 2142 struct ceph_pg pgid; 2143 void *oloc, *oid, *tail; 2144 int oloc_len, oid_len, tail_len; 2145 int len; 2146 2147 /* 2148 * Pre-luminous OSD -- reencode v8 into v4 using @head 2149 * as a temporary buffer. Encode the raw PG; the rest 2150 * is just a matter of moving oloc, oid and tail blobs 2151 * around. 
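 * The resulting v4 order is: client_inc, osdmap epoch, flags, mtime,
 * reassert_version (zeroed), oloc, raw pgid, oid, then the tail (ops,
 * snapid, snap context, retry_attempt) unchanged.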
2152 */ 2153 memcpy(&head, p, sizeof(head)); 2154 p += sizeof(head); 2155 2156 oloc = p; 2157 p += CEPH_ENCODING_START_BLK_LEN; 2158 pgid.pool = ceph_decode_64(&p); 2159 p += 4 + 4; /* preferred, key len */ 2160 len = ceph_decode_32(&p); 2161 p += len; /* nspace */ 2162 oloc_len = p - oloc; 2163 2164 oid = p; 2165 len = ceph_decode_32(&p); 2166 p += len; 2167 oid_len = p - oid; 2168 2169 tail = p; 2170 tail_len = partial_end - p; 2171 2172 p = msg->front.iov_base; 2173 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc)); 2174 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch)); 2175 ceph_encode_copy(&p, &head.flags, sizeof(head.flags)); 2176 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime)); 2177 2178 /* reassert_version */ 2179 memset(p, 0, sizeof(struct ceph_eversion)); 2180 p += sizeof(struct ceph_eversion); 2181 2182 BUG_ON(p >= oloc); 2183 memmove(p, oloc, oloc_len); 2184 p += oloc_len; 2185 2186 pgid.seed = le32_to_cpu(head.hash); 2187 encode_pgid(&p, &pgid); /* raw pg */ 2188 2189 BUG_ON(p >= oid); 2190 memmove(p, oid, oid_len); 2191 p += oid_len; 2192 2193 /* tail -- ops, snapid, snapc, retry_attempt */ 2194 BUG_ON(p >= tail); 2195 memmove(p, tail, tail_len); 2196 p += tail_len; 2197 2198 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */ 2199 } 2200 2201 BUG_ON(p > end); 2202 msg->front.iov_len = p - msg->front.iov_base; 2203 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2204 2205 dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg, 2206 le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len), 2207 le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len), 2208 le16_to_cpu(msg->hdr.version)); 2209 } 2210 2211 /* 2212 * @req has to be assigned a tid and registered. 2213 */ 2214 static void send_request(struct ceph_osd_request *req) 2215 { 2216 struct ceph_osd *osd = req->r_osd; 2217 2218 verify_osd_locked(osd); 2219 WARN_ON(osd->o_osd != req->r_t.osd); 2220 2221 /* backoff? */ 2222 if (should_plug_request(req)) 2223 return; 2224 2225 /* 2226 * We may have a previously queued request message hanging 2227 * around. Cancel it to avoid corrupting the msgr. 
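 * (r_sent is nonzero if the message was handed to the msgr during this
 * OSD session incarnation -- it may still be sitting on the out queue
 * when we come here to re-encode and resend.)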
2228 */ 2229 if (req->r_sent) 2230 ceph_msg_revoke(req->r_request); 2231 2232 req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR; 2233 if (req->r_attempts) 2234 req->r_flags |= CEPH_OSD_FLAG_RETRY; 2235 else 2236 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY); 2237 2238 encode_request_partial(req, req->r_request); 2239 2240 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n", 2241 __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed, 2242 req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed, 2243 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags, 2244 req->r_attempts); 2245 2246 req->r_t.paused = false; 2247 req->r_stamp = jiffies; 2248 req->r_attempts++; 2249 2250 req->r_sent = osd->o_incarnation; 2251 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 2252 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request)); 2253 } 2254 2255 static void maybe_request_map(struct ceph_osd_client *osdc) 2256 { 2257 bool continuous = false; 2258 2259 verify_osdc_locked(osdc); 2260 WARN_ON(!osdc->osdmap->epoch); 2261 2262 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2263 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) || 2264 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2265 dout("%s osdc %p continuous\n", __func__, osdc); 2266 continuous = true; 2267 } else { 2268 dout("%s osdc %p onetime\n", __func__, osdc); 2269 } 2270 2271 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP, 2272 osdc->osdmap->epoch + 1, continuous)) 2273 ceph_monc_renew_subs(&osdc->client->monc); 2274 } 2275 2276 static void complete_request(struct ceph_osd_request *req, int err); 2277 static void send_map_check(struct ceph_osd_request *req); 2278 2279 static void __submit_request(struct ceph_osd_request *req, bool wrlocked) 2280 { 2281 struct ceph_osd_client *osdc = req->r_osdc; 2282 struct ceph_osd *osd; 2283 enum calc_target_result ct_res; 2284 int err = 0; 2285 bool need_send = false; 2286 bool promoted = false; 2287 2288 WARN_ON(req->r_tid); 2289 dout("%s req %p wrlocked %d\n", __func__, req, wrlocked); 2290 2291 again: 2292 ct_res = calc_target(osdc, &req->r_t, NULL, false); 2293 if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked) 2294 goto promote; 2295 2296 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked); 2297 if (IS_ERR(osd)) { 2298 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked); 2299 goto promote; 2300 } 2301 2302 if (osdc->abort_err) { 2303 dout("req %p abort_err %d\n", req, osdc->abort_err); 2304 err = osdc->abort_err; 2305 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) { 2306 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch, 2307 osdc->epoch_barrier); 2308 req->r_t.paused = true; 2309 maybe_request_map(osdc); 2310 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2311 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2312 dout("req %p pausewr\n", req); 2313 req->r_t.paused = true; 2314 maybe_request_map(osdc); 2315 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && 2316 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 2317 dout("req %p pauserd\n", req); 2318 req->r_t.paused = true; 2319 maybe_request_map(osdc); 2320 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2321 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | 2322 CEPH_OSD_FLAG_FULL_FORCE)) && 2323 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2324 pool_full(osdc, req->r_t.base_oloc.pool))) { 2325 dout("req %p full/pool_full\n", req); 2326 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { 2327 err = -ENOSPC; 2328 } else { 2329 pr_warn_ratelimited("FULL or reached pool quota\n"); 2330 
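/* block the request until the full flag or pool quota condition clears */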
req->r_t.paused = true; 2331 maybe_request_map(osdc); 2332 } 2333 } else if (!osd_homeless(osd)) { 2334 need_send = true; 2335 } else { 2336 maybe_request_map(osdc); 2337 } 2338 2339 mutex_lock(&osd->lock); 2340 /* 2341 * Assign the tid atomically with send_request() to protect 2342 * multiple writes to the same object from racing with each 2343 * other, resulting in out of order ops on the OSDs. 2344 */ 2345 req->r_tid = atomic64_inc_return(&osdc->last_tid); 2346 link_request(osd, req); 2347 if (need_send) 2348 send_request(req); 2349 else if (err) 2350 complete_request(req, err); 2351 mutex_unlock(&osd->lock); 2352 2353 if (!err && ct_res == CALC_TARGET_POOL_DNE) 2354 send_map_check(req); 2355 2356 if (promoted) 2357 downgrade_write(&osdc->lock); 2358 return; 2359 2360 promote: 2361 up_read(&osdc->lock); 2362 down_write(&osdc->lock); 2363 wrlocked = true; 2364 promoted = true; 2365 goto again; 2366 } 2367 2368 static void account_request(struct ceph_osd_request *req) 2369 { 2370 WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK)); 2371 WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE))); 2372 2373 req->r_flags |= CEPH_OSD_FLAG_ONDISK; 2374 atomic_inc(&req->r_osdc->num_requests); 2375 2376 req->r_start_stamp = jiffies; 2377 } 2378 2379 static void submit_request(struct ceph_osd_request *req, bool wrlocked) 2380 { 2381 ceph_osdc_get_request(req); 2382 account_request(req); 2383 __submit_request(req, wrlocked); 2384 } 2385 2386 static void finish_request(struct ceph_osd_request *req) 2387 { 2388 struct ceph_osd_client *osdc = req->r_osdc; 2389 2390 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid)); 2391 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2392 2393 if (req->r_osd) 2394 unlink_request(req->r_osd, req); 2395 atomic_dec(&osdc->num_requests); 2396 2397 /* 2398 * If an OSD has failed or returned and a request has been sent 2399 * twice, it's possible to get a reply and end up here while the 2400 * request message is queued for delivery. We will ignore the 2401 * reply, so not a big deal, but better to try and catch it. 2402 */ 2403 ceph_msg_revoke(req->r_request); 2404 ceph_msg_revoke_incoming(req->r_reply); 2405 } 2406 2407 static void __complete_request(struct ceph_osd_request *req) 2408 { 2409 dout("%s req %p tid %llu cb %ps result %d\n", __func__, req, 2410 req->r_tid, req->r_callback, req->r_result); 2411 2412 if (req->r_callback) 2413 req->r_callback(req); 2414 complete_all(&req->r_completion); 2415 ceph_osdc_put_request(req); 2416 } 2417 2418 static void complete_request_workfn(struct work_struct *work) 2419 { 2420 struct ceph_osd_request *req = 2421 container_of(work, struct ceph_osd_request, r_complete_work); 2422 2423 __complete_request(req); 2424 } 2425 2426 /* 2427 * This is open-coded in handle_reply(). 
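 * handle_reply() calls finish_request() and __complete_request()
 * directly instead of bouncing through the completion workqueue.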
2428 */ 2429 static void complete_request(struct ceph_osd_request *req, int err) 2430 { 2431 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2432 2433 req->r_result = err; 2434 finish_request(req); 2435 2436 INIT_WORK(&req->r_complete_work, complete_request_workfn); 2437 queue_work(req->r_osdc->completion_wq, &req->r_complete_work); 2438 } 2439 2440 static void cancel_map_check(struct ceph_osd_request *req) 2441 { 2442 struct ceph_osd_client *osdc = req->r_osdc; 2443 struct ceph_osd_request *lookup_req; 2444 2445 verify_osdc_wrlocked(osdc); 2446 2447 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2448 if (!lookup_req) 2449 return; 2450 2451 WARN_ON(lookup_req != req); 2452 erase_request_mc(&osdc->map_checks, req); 2453 ceph_osdc_put_request(req); 2454 } 2455 2456 static void cancel_request(struct ceph_osd_request *req) 2457 { 2458 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2459 2460 cancel_map_check(req); 2461 finish_request(req); 2462 complete_all(&req->r_completion); 2463 ceph_osdc_put_request(req); 2464 } 2465 2466 static void abort_request(struct ceph_osd_request *req, int err) 2467 { 2468 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2469 2470 cancel_map_check(req); 2471 complete_request(req, err); 2472 } 2473 2474 static int abort_fn(struct ceph_osd_request *req, void *arg) 2475 { 2476 int err = *(int *)arg; 2477 2478 abort_request(req, err); 2479 return 0; /* continue iteration */ 2480 } 2481 2482 /* 2483 * Abort all in-flight requests with @err and arrange for all future 2484 * requests to be failed immediately. 2485 */ 2486 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err) 2487 { 2488 dout("%s osdc %p err %d\n", __func__, osdc, err); 2489 down_write(&osdc->lock); 2490 for_each_request(osdc, abort_fn, &err); 2491 osdc->abort_err = err; 2492 up_write(&osdc->lock); 2493 } 2494 EXPORT_SYMBOL(ceph_osdc_abort_requests); 2495 2496 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc) 2497 { 2498 down_write(&osdc->lock); 2499 osdc->abort_err = 0; 2500 up_write(&osdc->lock); 2501 } 2502 EXPORT_SYMBOL(ceph_osdc_clear_abort_err); 2503 2504 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2505 { 2506 if (likely(eb > osdc->epoch_barrier)) { 2507 dout("updating epoch_barrier from %u to %u\n", 2508 osdc->epoch_barrier, eb); 2509 osdc->epoch_barrier = eb; 2510 /* Request map if we're not to the barrier yet */ 2511 if (eb > osdc->osdmap->epoch) 2512 maybe_request_map(osdc); 2513 } 2514 } 2515 2516 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2517 { 2518 down_read(&osdc->lock); 2519 if (unlikely(eb > osdc->epoch_barrier)) { 2520 up_read(&osdc->lock); 2521 down_write(&osdc->lock); 2522 update_epoch_barrier(osdc, eb); 2523 up_write(&osdc->lock); 2524 } else { 2525 up_read(&osdc->lock); 2526 } 2527 } 2528 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier); 2529 2530 /* 2531 * We can end up releasing caps as a result of abort_request(). 2532 * In that case, we probably want to ensure that the cap release message 2533 * has an updated epoch barrier in it, so set the epoch barrier prior to 2534 * aborting the first request. 
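 * abort_on_full_fn() below does exactly that: the barrier is bumped to
 * the current map epoch once, before the first victim is aborted.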
2535 */ 2536 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg) 2537 { 2538 struct ceph_osd_client *osdc = req->r_osdc; 2539 bool *victims = arg; 2540 2541 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2542 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2543 pool_full(osdc, req->r_t.base_oloc.pool))) { 2544 if (!*victims) { 2545 update_epoch_barrier(osdc, osdc->osdmap->epoch); 2546 *victims = true; 2547 } 2548 abort_request(req, -ENOSPC); 2549 } 2550 2551 return 0; /* continue iteration */ 2552 } 2553 2554 /* 2555 * Drop all pending requests that are stalled waiting on a full condition to 2556 * clear, and complete them with ENOSPC as the return code. Set the 2557 * osdc->epoch_barrier to the latest map epoch that we've seen if any were 2558 * cancelled. 2559 */ 2560 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc) 2561 { 2562 bool victims = false; 2563 2564 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) && 2565 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) 2566 for_each_request(osdc, abort_on_full_fn, &victims); 2567 } 2568 2569 static void check_pool_dne(struct ceph_osd_request *req) 2570 { 2571 struct ceph_osd_client *osdc = req->r_osdc; 2572 struct ceph_osdmap *map = osdc->osdmap; 2573 2574 verify_osdc_wrlocked(osdc); 2575 WARN_ON(!map->epoch); 2576 2577 if (req->r_attempts) { 2578 /* 2579 * We sent a request earlier, which means that 2580 * previously the pool existed, and now it does not 2581 * (i.e., it was deleted). 2582 */ 2583 req->r_map_dne_bound = map->epoch; 2584 dout("%s req %p tid %llu pool disappeared\n", __func__, req, 2585 req->r_tid); 2586 } else { 2587 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__, 2588 req, req->r_tid, req->r_map_dne_bound, map->epoch); 2589 } 2590 2591 if (req->r_map_dne_bound) { 2592 if (map->epoch >= req->r_map_dne_bound) { 2593 /* we had a new enough map */ 2594 pr_info_ratelimited("tid %llu pool does not exist\n", 2595 req->r_tid); 2596 complete_request(req, -ENOENT); 2597 } 2598 } else { 2599 send_map_check(req); 2600 } 2601 } 2602 2603 static void map_check_cb(struct ceph_mon_generic_request *greq) 2604 { 2605 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 2606 struct ceph_osd_request *req; 2607 u64 tid = greq->private_data; 2608 2609 WARN_ON(greq->result || !greq->u.newest); 2610 2611 down_write(&osdc->lock); 2612 req = lookup_request_mc(&osdc->map_checks, tid); 2613 if (!req) { 2614 dout("%s tid %llu dne\n", __func__, tid); 2615 goto out_unlock; 2616 } 2617 2618 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__, 2619 req, req->r_tid, req->r_map_dne_bound, greq->u.newest); 2620 if (!req->r_map_dne_bound) 2621 req->r_map_dne_bound = greq->u.newest; 2622 erase_request_mc(&osdc->map_checks, req); 2623 check_pool_dne(req); 2624 2625 ceph_osdc_put_request(req); 2626 out_unlock: 2627 up_write(&osdc->lock); 2628 } 2629 2630 static void send_map_check(struct ceph_osd_request *req) 2631 { 2632 struct ceph_osd_client *osdc = req->r_osdc; 2633 struct ceph_osd_request *lookup_req; 2634 int ret; 2635 2636 verify_osdc_wrlocked(osdc); 2637 2638 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2639 if (lookup_req) { 2640 WARN_ON(lookup_req != req); 2641 return; 2642 } 2643 2644 ceph_osdc_get_request(req); 2645 insert_request_mc(&osdc->map_checks, req); 2646 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 2647 map_check_cb, req->r_tid); 2648 WARN_ON(ret); 2649 } 2650 2651 /* 2652 * lingering requests, watch/notify v2 infrastructure 
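 *
 * A linger request stays registered with the cluster until it is
 * cancelled: reg_req (re)establishes the watch or notify and ping_req
 * keeps a committed watch alive from handle_timeout().  A rough
 * lifecycle, as driven by the watch/notify entry points (sketch):
 *
 *   lreq = linger_alloc(osdc);
 *   linger_submit(lreq);    // register, link to an OSD, send_linger()
 *   ... callbacks delivered through osdc->notify_wq ...
 *   linger_cancel(lreq);
 *   linger_put(lreq);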
2653 */ 2654 static void linger_release(struct kref *kref) 2655 { 2656 struct ceph_osd_linger_request *lreq = 2657 container_of(kref, struct ceph_osd_linger_request, kref); 2658 2659 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq, 2660 lreq->reg_req, lreq->ping_req); 2661 WARN_ON(!RB_EMPTY_NODE(&lreq->node)); 2662 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node)); 2663 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node)); 2664 WARN_ON(!list_empty(&lreq->scan_item)); 2665 WARN_ON(!list_empty(&lreq->pending_lworks)); 2666 WARN_ON(lreq->osd); 2667 2668 if (lreq->reg_req) 2669 ceph_osdc_put_request(lreq->reg_req); 2670 if (lreq->ping_req) 2671 ceph_osdc_put_request(lreq->ping_req); 2672 target_destroy(&lreq->t); 2673 kfree(lreq); 2674 } 2675 2676 static void linger_put(struct ceph_osd_linger_request *lreq) 2677 { 2678 if (lreq) 2679 kref_put(&lreq->kref, linger_release); 2680 } 2681 2682 static struct ceph_osd_linger_request * 2683 linger_get(struct ceph_osd_linger_request *lreq) 2684 { 2685 kref_get(&lreq->kref); 2686 return lreq; 2687 } 2688 2689 static struct ceph_osd_linger_request * 2690 linger_alloc(struct ceph_osd_client *osdc) 2691 { 2692 struct ceph_osd_linger_request *lreq; 2693 2694 lreq = kzalloc(sizeof(*lreq), GFP_NOIO); 2695 if (!lreq) 2696 return NULL; 2697 2698 kref_init(&lreq->kref); 2699 mutex_init(&lreq->lock); 2700 RB_CLEAR_NODE(&lreq->node); 2701 RB_CLEAR_NODE(&lreq->osdc_node); 2702 RB_CLEAR_NODE(&lreq->mc_node); 2703 INIT_LIST_HEAD(&lreq->scan_item); 2704 INIT_LIST_HEAD(&lreq->pending_lworks); 2705 init_completion(&lreq->reg_commit_wait); 2706 init_completion(&lreq->notify_finish_wait); 2707 2708 lreq->osdc = osdc; 2709 target_init(&lreq->t); 2710 2711 dout("%s lreq %p\n", __func__, lreq); 2712 return lreq; 2713 } 2714 2715 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node) 2716 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node) 2717 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node) 2718 2719 /* 2720 * Create linger request <-> OSD session relation. 2721 * 2722 * @lreq has to be registered, @osd may be homeless. 
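 * (a homeless @osd means no up OSD currently serves the target; such
 * lreqs are parked on the homeless session and counted in num_homeless
 * so that handle_timeout() keeps requesting new osdmaps)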
2723 */ 2724 static void link_linger(struct ceph_osd *osd, 2725 struct ceph_osd_linger_request *lreq) 2726 { 2727 verify_osd_locked(osd); 2728 WARN_ON(!lreq->linger_id || lreq->osd); 2729 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2730 osd->o_osd, lreq, lreq->linger_id); 2731 2732 if (!osd_homeless(osd)) 2733 __remove_osd_from_lru(osd); 2734 else 2735 atomic_inc(&osd->o_osdc->num_homeless); 2736 2737 get_osd(osd); 2738 insert_linger(&osd->o_linger_requests, lreq); 2739 lreq->osd = osd; 2740 } 2741 2742 static void unlink_linger(struct ceph_osd *osd, 2743 struct ceph_osd_linger_request *lreq) 2744 { 2745 verify_osd_locked(osd); 2746 WARN_ON(lreq->osd != osd); 2747 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2748 osd->o_osd, lreq, lreq->linger_id); 2749 2750 lreq->osd = NULL; 2751 erase_linger(&osd->o_linger_requests, lreq); 2752 put_osd(osd); 2753 2754 if (!osd_homeless(osd)) 2755 maybe_move_osd_to_lru(osd); 2756 else 2757 atomic_dec(&osd->o_osdc->num_homeless); 2758 } 2759 2760 static bool __linger_registered(struct ceph_osd_linger_request *lreq) 2761 { 2762 verify_osdc_locked(lreq->osdc); 2763 2764 return !RB_EMPTY_NODE(&lreq->osdc_node); 2765 } 2766 2767 static bool linger_registered(struct ceph_osd_linger_request *lreq) 2768 { 2769 struct ceph_osd_client *osdc = lreq->osdc; 2770 bool registered; 2771 2772 down_read(&osdc->lock); 2773 registered = __linger_registered(lreq); 2774 up_read(&osdc->lock); 2775 2776 return registered; 2777 } 2778 2779 static void linger_register(struct ceph_osd_linger_request *lreq) 2780 { 2781 struct ceph_osd_client *osdc = lreq->osdc; 2782 2783 verify_osdc_wrlocked(osdc); 2784 WARN_ON(lreq->linger_id); 2785 2786 linger_get(lreq); 2787 lreq->linger_id = ++osdc->last_linger_id; 2788 insert_linger_osdc(&osdc->linger_requests, lreq); 2789 } 2790 2791 static void linger_unregister(struct ceph_osd_linger_request *lreq) 2792 { 2793 struct ceph_osd_client *osdc = lreq->osdc; 2794 2795 verify_osdc_wrlocked(osdc); 2796 2797 erase_linger_osdc(&osdc->linger_requests, lreq); 2798 linger_put(lreq); 2799 } 2800 2801 static void cancel_linger_request(struct ceph_osd_request *req) 2802 { 2803 struct ceph_osd_linger_request *lreq = req->r_priv; 2804 2805 WARN_ON(!req->r_linger); 2806 cancel_request(req); 2807 linger_put(lreq); 2808 } 2809 2810 struct linger_work { 2811 struct work_struct work; 2812 struct ceph_osd_linger_request *lreq; 2813 struct list_head pending_item; 2814 unsigned long queued_stamp; 2815 2816 union { 2817 struct { 2818 u64 notify_id; 2819 u64 notifier_id; 2820 void *payload; /* points into @msg front */ 2821 size_t payload_len; 2822 2823 struct ceph_msg *msg; /* for ceph_msg_put() */ 2824 } notify; 2825 struct { 2826 int err; 2827 } error; 2828 }; 2829 }; 2830 2831 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq, 2832 work_func_t workfn) 2833 { 2834 struct linger_work *lwork; 2835 2836 lwork = kzalloc(sizeof(*lwork), GFP_NOIO); 2837 if (!lwork) 2838 return NULL; 2839 2840 INIT_WORK(&lwork->work, workfn); 2841 INIT_LIST_HEAD(&lwork->pending_item); 2842 lwork->lreq = linger_get(lreq); 2843 2844 return lwork; 2845 } 2846 2847 static void lwork_free(struct linger_work *lwork) 2848 { 2849 struct ceph_osd_linger_request *lreq = lwork->lreq; 2850 2851 mutex_lock(&lreq->lock); 2852 list_del(&lwork->pending_item); 2853 mutex_unlock(&lreq->lock); 2854 2855 linger_put(lreq); 2856 kfree(lwork); 2857 } 2858 2859 static void lwork_queue(struct linger_work *lwork) 2860 { 2861 struct 
ceph_osd_linger_request *lreq = lwork->lreq; 2862 struct ceph_osd_client *osdc = lreq->osdc; 2863 2864 verify_lreq_locked(lreq); 2865 WARN_ON(!list_empty(&lwork->pending_item)); 2866 2867 lwork->queued_stamp = jiffies; 2868 list_add_tail(&lwork->pending_item, &lreq->pending_lworks); 2869 queue_work(osdc->notify_wq, &lwork->work); 2870 } 2871 2872 static void do_watch_notify(struct work_struct *w) 2873 { 2874 struct linger_work *lwork = container_of(w, struct linger_work, work); 2875 struct ceph_osd_linger_request *lreq = lwork->lreq; 2876 2877 if (!linger_registered(lreq)) { 2878 dout("%s lreq %p not registered\n", __func__, lreq); 2879 goto out; 2880 } 2881 2882 WARN_ON(!lreq->is_watch); 2883 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n", 2884 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id, 2885 lwork->notify.payload_len); 2886 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id, 2887 lwork->notify.notifier_id, lwork->notify.payload, 2888 lwork->notify.payload_len); 2889 2890 out: 2891 ceph_msg_put(lwork->notify.msg); 2892 lwork_free(lwork); 2893 } 2894 2895 static void do_watch_error(struct work_struct *w) 2896 { 2897 struct linger_work *lwork = container_of(w, struct linger_work, work); 2898 struct ceph_osd_linger_request *lreq = lwork->lreq; 2899 2900 if (!linger_registered(lreq)) { 2901 dout("%s lreq %p not registered\n", __func__, lreq); 2902 goto out; 2903 } 2904 2905 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err); 2906 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err); 2907 2908 out: 2909 lwork_free(lwork); 2910 } 2911 2912 static void queue_watch_error(struct ceph_osd_linger_request *lreq) 2913 { 2914 struct linger_work *lwork; 2915 2916 lwork = lwork_alloc(lreq, do_watch_error); 2917 if (!lwork) { 2918 pr_err("failed to allocate error-lwork\n"); 2919 return; 2920 } 2921 2922 lwork->error.err = lreq->last_error; 2923 lwork_queue(lwork); 2924 } 2925 2926 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq, 2927 int result) 2928 { 2929 if (!completion_done(&lreq->reg_commit_wait)) { 2930 lreq->reg_commit_error = (result <= 0 ? result : 0); 2931 complete_all(&lreq->reg_commit_wait); 2932 } 2933 } 2934 2935 static void linger_commit_cb(struct ceph_osd_request *req) 2936 { 2937 struct ceph_osd_linger_request *lreq = req->r_priv; 2938 2939 mutex_lock(&lreq->lock); 2940 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq, 2941 lreq->linger_id, req->r_result); 2942 linger_reg_commit_complete(lreq, req->r_result); 2943 lreq->committed = true; 2944 2945 if (!lreq->is_watch) { 2946 struct ceph_osd_data *osd_data = 2947 osd_req_op_data(req, 0, notify, response_data); 2948 void *p = page_address(osd_data->pages[0]); 2949 2950 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY || 2951 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 2952 2953 /* make note of the notify_id */ 2954 if (req->r_ops[0].outdata_len >= sizeof(u64)) { 2955 lreq->notify_id = ceph_decode_64(&p); 2956 dout("lreq %p notify_id %llu\n", lreq, 2957 lreq->notify_id); 2958 } else { 2959 dout("lreq %p no notify_id\n", lreq); 2960 } 2961 } 2962 2963 mutex_unlock(&lreq->lock); 2964 linger_put(lreq); 2965 } 2966 2967 static int normalize_watch_error(int err) 2968 { 2969 /* 2970 * Translate ENOENT -> ENOTCONN so that a delete->disconnection 2971 * notification and a failure to reconnect because we raced with 2972 * the delete appear the same to the user. 
2973 */ 2974 if (err == -ENOENT) 2975 err = -ENOTCONN; 2976 2977 return err; 2978 } 2979 2980 static void linger_reconnect_cb(struct ceph_osd_request *req) 2981 { 2982 struct ceph_osd_linger_request *lreq = req->r_priv; 2983 2984 mutex_lock(&lreq->lock); 2985 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__, 2986 lreq, lreq->linger_id, req->r_result, lreq->last_error); 2987 if (req->r_result < 0) { 2988 if (!lreq->last_error) { 2989 lreq->last_error = normalize_watch_error(req->r_result); 2990 queue_watch_error(lreq); 2991 } 2992 } 2993 2994 mutex_unlock(&lreq->lock); 2995 linger_put(lreq); 2996 } 2997 2998 static void send_linger(struct ceph_osd_linger_request *lreq) 2999 { 3000 struct ceph_osd_request *req = lreq->reg_req; 3001 struct ceph_osd_req_op *op = &req->r_ops[0]; 3002 3003 verify_osdc_wrlocked(req->r_osdc); 3004 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3005 3006 if (req->r_osd) 3007 cancel_linger_request(req); 3008 3009 request_reinit(req); 3010 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); 3011 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); 3012 req->r_flags = lreq->t.flags; 3013 req->r_mtime = lreq->mtime; 3014 3015 mutex_lock(&lreq->lock); 3016 if (lreq->is_watch && lreq->committed) { 3017 WARN_ON(op->op != CEPH_OSD_OP_WATCH || 3018 op->watch.cookie != lreq->linger_id); 3019 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT; 3020 op->watch.gen = ++lreq->register_gen; 3021 dout("lreq %p reconnect register_gen %u\n", lreq, 3022 op->watch.gen); 3023 req->r_callback = linger_reconnect_cb; 3024 } else { 3025 if (!lreq->is_watch) 3026 lreq->notify_id = 0; 3027 else 3028 WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH); 3029 dout("lreq %p register\n", lreq); 3030 req->r_callback = linger_commit_cb; 3031 } 3032 mutex_unlock(&lreq->lock); 3033 3034 req->r_priv = linger_get(lreq); 3035 req->r_linger = true; 3036 3037 submit_request(req, true); 3038 } 3039 3040 static void linger_ping_cb(struct ceph_osd_request *req) 3041 { 3042 struct ceph_osd_linger_request *lreq = req->r_priv; 3043 3044 mutex_lock(&lreq->lock); 3045 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n", 3046 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent, 3047 lreq->last_error); 3048 if (lreq->register_gen == req->r_ops[0].watch.gen) { 3049 if (!req->r_result) { 3050 lreq->watch_valid_thru = lreq->ping_sent; 3051 } else if (!lreq->last_error) { 3052 lreq->last_error = normalize_watch_error(req->r_result); 3053 queue_watch_error(lreq); 3054 } 3055 } else { 3056 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq, 3057 lreq->register_gen, req->r_ops[0].watch.gen); 3058 } 3059 3060 mutex_unlock(&lreq->lock); 3061 linger_put(lreq); 3062 } 3063 3064 static void send_linger_ping(struct ceph_osd_linger_request *lreq) 3065 { 3066 struct ceph_osd_client *osdc = lreq->osdc; 3067 struct ceph_osd_request *req = lreq->ping_req; 3068 struct ceph_osd_req_op *op = &req->r_ops[0]; 3069 3070 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 3071 dout("%s PAUSERD\n", __func__); 3072 return; 3073 } 3074 3075 lreq->ping_sent = jiffies; 3076 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n", 3077 __func__, lreq, lreq->linger_id, lreq->ping_sent, 3078 lreq->register_gen); 3079 3080 if (req->r_osd) 3081 cancel_linger_request(req); 3082 3083 request_reinit(req); 3084 target_copy(&req->r_t, &lreq->t); 3085 3086 WARN_ON(op->op != CEPH_OSD_OP_WATCH || 3087 op->watch.cookie != lreq->linger_id || 3088 op->watch.op != 
CEPH_OSD_WATCH_OP_PING); 3089 op->watch.gen = lreq->register_gen; 3090 req->r_callback = linger_ping_cb; 3091 req->r_priv = linger_get(lreq); 3092 req->r_linger = true; 3093 3094 ceph_osdc_get_request(req); 3095 account_request(req); 3096 req->r_tid = atomic64_inc_return(&osdc->last_tid); 3097 link_request(lreq->osd, req); 3098 send_request(req); 3099 } 3100 3101 static void linger_submit(struct ceph_osd_linger_request *lreq) 3102 { 3103 struct ceph_osd_client *osdc = lreq->osdc; 3104 struct ceph_osd *osd; 3105 3106 down_write(&osdc->lock); 3107 linger_register(lreq); 3108 if (lreq->is_watch) { 3109 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id; 3110 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id; 3111 } else { 3112 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id; 3113 } 3114 3115 calc_target(osdc, &lreq->t, NULL, false); 3116 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3117 link_linger(osd, lreq); 3118 3119 send_linger(lreq); 3120 up_write(&osdc->lock); 3121 } 3122 3123 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq) 3124 { 3125 struct ceph_osd_client *osdc = lreq->osdc; 3126 struct ceph_osd_linger_request *lookup_lreq; 3127 3128 verify_osdc_wrlocked(osdc); 3129 3130 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3131 lreq->linger_id); 3132 if (!lookup_lreq) 3133 return; 3134 3135 WARN_ON(lookup_lreq != lreq); 3136 erase_linger_mc(&osdc->linger_map_checks, lreq); 3137 linger_put(lreq); 3138 } 3139 3140 /* 3141 * @lreq has to be both registered and linked. 3142 */ 3143 static void __linger_cancel(struct ceph_osd_linger_request *lreq) 3144 { 3145 if (lreq->is_watch && lreq->ping_req->r_osd) 3146 cancel_linger_request(lreq->ping_req); 3147 if (lreq->reg_req->r_osd) 3148 cancel_linger_request(lreq->reg_req); 3149 cancel_linger_map_check(lreq); 3150 unlink_linger(lreq->osd, lreq); 3151 linger_unregister(lreq); 3152 } 3153 3154 static void linger_cancel(struct ceph_osd_linger_request *lreq) 3155 { 3156 struct ceph_osd_client *osdc = lreq->osdc; 3157 3158 down_write(&osdc->lock); 3159 if (__linger_registered(lreq)) 3160 __linger_cancel(lreq); 3161 up_write(&osdc->lock); 3162 } 3163 3164 static void send_linger_map_check(struct ceph_osd_linger_request *lreq); 3165 3166 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq) 3167 { 3168 struct ceph_osd_client *osdc = lreq->osdc; 3169 struct ceph_osdmap *map = osdc->osdmap; 3170 3171 verify_osdc_wrlocked(osdc); 3172 WARN_ON(!map->epoch); 3173 3174 if (lreq->register_gen) { 3175 lreq->map_dne_bound = map->epoch; 3176 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__, 3177 lreq, lreq->linger_id); 3178 } else { 3179 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n", 3180 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3181 map->epoch); 3182 } 3183 3184 if (lreq->map_dne_bound) { 3185 if (map->epoch >= lreq->map_dne_bound) { 3186 /* we had a new enough map */ 3187 pr_info("linger_id %llu pool does not exist\n", 3188 lreq->linger_id); 3189 linger_reg_commit_complete(lreq, -ENOENT); 3190 __linger_cancel(lreq); 3191 } 3192 } else { 3193 send_linger_map_check(lreq); 3194 } 3195 } 3196 3197 static void linger_map_check_cb(struct ceph_mon_generic_request *greq) 3198 { 3199 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 3200 struct ceph_osd_linger_request *lreq; 3201 u64 linger_id = greq->private_data; 3202 3203 WARN_ON(greq->result || !greq->u.newest); 3204 3205 down_write(&osdc->lock); 3206 lreq = 
lookup_linger_mc(&osdc->linger_map_checks, linger_id); 3207 if (!lreq) { 3208 dout("%s linger_id %llu dne\n", __func__, linger_id); 3209 goto out_unlock; 3210 } 3211 3212 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n", 3213 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3214 greq->u.newest); 3215 if (!lreq->map_dne_bound) 3216 lreq->map_dne_bound = greq->u.newest; 3217 erase_linger_mc(&osdc->linger_map_checks, lreq); 3218 check_linger_pool_dne(lreq); 3219 3220 linger_put(lreq); 3221 out_unlock: 3222 up_write(&osdc->lock); 3223 } 3224 3225 static void send_linger_map_check(struct ceph_osd_linger_request *lreq) 3226 { 3227 struct ceph_osd_client *osdc = lreq->osdc; 3228 struct ceph_osd_linger_request *lookup_lreq; 3229 int ret; 3230 3231 verify_osdc_wrlocked(osdc); 3232 3233 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3234 lreq->linger_id); 3235 if (lookup_lreq) { 3236 WARN_ON(lookup_lreq != lreq); 3237 return; 3238 } 3239 3240 linger_get(lreq); 3241 insert_linger_mc(&osdc->linger_map_checks, lreq); 3242 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 3243 linger_map_check_cb, lreq->linger_id); 3244 WARN_ON(ret); 3245 } 3246 3247 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) 3248 { 3249 int ret; 3250 3251 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3252 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait); 3253 return ret ?: lreq->reg_commit_error; 3254 } 3255 3256 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq) 3257 { 3258 int ret; 3259 3260 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3261 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait); 3262 return ret ?: lreq->notify_finish_error; 3263 } 3264 3265 /* 3266 * Timeout callback, called every N seconds. When one or more OSD 3267 * requests have been active for more than N seconds, we send a keepalive 3268 * (tag + timestamp) to the OSD to ensure that any communications channel 3269 * reset is detected. 3270 */ 3271 static void handle_timeout(struct work_struct *work) 3272 { 3273 struct ceph_osd_client *osdc = 3274 container_of(work, struct ceph_osd_client, timeout_work.work); 3275 struct ceph_options *opts = osdc->client->options; 3276 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; 3277 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout; 3278 LIST_HEAD(slow_osds); 3279 struct rb_node *n, *p; 3280 3281 dout("%s osdc %p\n", __func__, osdc); 3282 down_write(&osdc->lock); 3283 3284 /* 3285 * ping osds that are a bit slow. this ensures that if there 3286 * is a break in the TCP connection we will notice, and reopen 3287 * a connection with that osd (from the fault callback).
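 *
 * committed, error-free watches get an explicit WATCH_OP_PING via
 * send_linger_ping(); laggy regular requests only earn their session
 * a ceph_con_keepalive() below.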
3288 */ 3289 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 3290 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 3291 bool found = false; 3292 3293 for (p = rb_first(&osd->o_requests); p; ) { 3294 struct ceph_osd_request *req = 3295 rb_entry(p, struct ceph_osd_request, r_node); 3296 3297 p = rb_next(p); /* abort_request() */ 3298 3299 if (time_before(req->r_stamp, cutoff)) { 3300 dout(" req %p tid %llu on osd%d is laggy\n", 3301 req, req->r_tid, osd->o_osd); 3302 found = true; 3303 } 3304 if (opts->osd_request_timeout && 3305 time_before(req->r_start_stamp, expiry_cutoff)) { 3306 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3307 req->r_tid, osd->o_osd); 3308 abort_request(req, -ETIMEDOUT); 3309 } 3310 } 3311 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { 3312 struct ceph_osd_linger_request *lreq = 3313 rb_entry(p, struct ceph_osd_linger_request, node); 3314 3315 dout(" lreq %p linger_id %llu is served by osd%d\n", 3316 lreq, lreq->linger_id, osd->o_osd); 3317 found = true; 3318 3319 mutex_lock(&lreq->lock); 3320 if (lreq->is_watch && lreq->committed && !lreq->last_error) 3321 send_linger_ping(lreq); 3322 mutex_unlock(&lreq->lock); 3323 } 3324 3325 if (found) 3326 list_move_tail(&osd->o_keepalive_item, &slow_osds); 3327 } 3328 3329 if (opts->osd_request_timeout) { 3330 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { 3331 struct ceph_osd_request *req = 3332 rb_entry(p, struct ceph_osd_request, r_node); 3333 3334 p = rb_next(p); /* abort_request() */ 3335 3336 if (time_before(req->r_start_stamp, expiry_cutoff)) { 3337 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3338 req->r_tid, osdc->homeless_osd.o_osd); 3339 abort_request(req, -ETIMEDOUT); 3340 } 3341 } 3342 } 3343 3344 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) 3345 maybe_request_map(osdc); 3346 3347 while (!list_empty(&slow_osds)) { 3348 struct ceph_osd *osd = list_first_entry(&slow_osds, 3349 struct ceph_osd, 3350 o_keepalive_item); 3351 list_del_init(&osd->o_keepalive_item); 3352 ceph_con_keepalive(&osd->o_con); 3353 } 3354 3355 up_write(&osdc->lock); 3356 schedule_delayed_work(&osdc->timeout_work, 3357 osdc->client->options->osd_keepalive_timeout); 3358 } 3359 3360 static void handle_osds_timeout(struct work_struct *work) 3361 { 3362 struct ceph_osd_client *osdc = 3363 container_of(work, struct ceph_osd_client, 3364 osds_timeout_work.work); 3365 unsigned long delay = osdc->client->options->osd_idle_ttl / 4; 3366 struct ceph_osd *osd, *nosd; 3367 3368 dout("%s osdc %p\n", __func__, osdc); 3369 down_write(&osdc->lock); 3370 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 3371 if (time_before(jiffies, osd->lru_ttl)) 3372 break; 3373 3374 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); 3375 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); 3376 close_osd(osd); 3377 } 3378 3379 up_write(&osdc->lock); 3380 schedule_delayed_work(&osdc->osds_timeout_work, 3381 round_jiffies_relative(delay)); 3382 } 3383 3384 static int ceph_oloc_decode(void **p, void *end, 3385 struct ceph_object_locator *oloc) 3386 { 3387 u8 struct_v, struct_cv; 3388 u32 len; 3389 void *struct_end; 3390 int ret = 0; 3391 3392 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3393 struct_v = ceph_decode_8(p); 3394 struct_cv = ceph_decode_8(p); 3395 if (struct_v < 3) { 3396 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n", 3397 struct_v, struct_cv); 3398 goto e_inval; 3399 } 3400 if (struct_cv > 6) { 3401 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n", 3402 struct_v, struct_cv); 
3403 goto e_inval; 3404 } 3405 len = ceph_decode_32(p); 3406 ceph_decode_need(p, end, len, e_inval); 3407 struct_end = *p + len; 3408 3409 oloc->pool = ceph_decode_64(p); 3410 *p += 4; /* skip preferred */ 3411 3412 len = ceph_decode_32(p); 3413 if (len > 0) { 3414 pr_warn("ceph_object_locator::key is set\n"); 3415 goto e_inval; 3416 } 3417 3418 if (struct_v >= 5) { 3419 bool changed = false; 3420 3421 len = ceph_decode_32(p); 3422 if (len > 0) { 3423 ceph_decode_need(p, end, len, e_inval); 3424 if (!oloc->pool_ns || 3425 ceph_compare_string(oloc->pool_ns, *p, len)) 3426 changed = true; 3427 *p += len; 3428 } else { 3429 if (oloc->pool_ns) 3430 changed = true; 3431 } 3432 if (changed) { 3433 /* redirect changes namespace */ 3434 pr_warn("ceph_object_locator::nspace is changed\n"); 3435 goto e_inval; 3436 } 3437 } 3438 3439 if (struct_v >= 6) { 3440 s64 hash = ceph_decode_64(p); 3441 if (hash != -1) { 3442 pr_warn("ceph_object_locator::hash is set\n"); 3443 goto e_inval; 3444 } 3445 } 3446 3447 /* skip the rest */ 3448 *p = struct_end; 3449 out: 3450 return ret; 3451 3452 e_inval: 3453 ret = -EINVAL; 3454 goto out; 3455 } 3456 3457 static int ceph_redirect_decode(void **p, void *end, 3458 struct ceph_request_redirect *redir) 3459 { 3460 u8 struct_v, struct_cv; 3461 u32 len; 3462 void *struct_end; 3463 int ret; 3464 3465 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3466 struct_v = ceph_decode_8(p); 3467 struct_cv = ceph_decode_8(p); 3468 if (struct_cv > 1) { 3469 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n", 3470 struct_v, struct_cv); 3471 goto e_inval; 3472 } 3473 len = ceph_decode_32(p); 3474 ceph_decode_need(p, end, len, e_inval); 3475 struct_end = *p + len; 3476 3477 ret = ceph_oloc_decode(p, end, &redir->oloc); 3478 if (ret) 3479 goto out; 3480 3481 len = ceph_decode_32(p); 3482 if (len > 0) { 3483 pr_warn("ceph_request_redirect::object_name is set\n"); 3484 goto e_inval; 3485 } 3486 3487 len = ceph_decode_32(p); 3488 *p += len; /* skip osd_instructions */ 3489 3490 /* skip the rest */ 3491 *p = struct_end; 3492 out: 3493 return ret; 3494 3495 e_inval: 3496 ret = -EINVAL; 3497 goto out; 3498 } 3499 3500 struct MOSDOpReply { 3501 struct ceph_pg pgid; 3502 u64 flags; 3503 int result; 3504 u32 epoch; 3505 int num_ops; 3506 u32 outdata_len[CEPH_OSD_MAX_OPS]; 3507 s32 rval[CEPH_OSD_MAX_OPS]; 3508 int retry_attempt; 3509 struct ceph_eversion replay_version; 3510 u64 user_version; 3511 struct ceph_request_redirect redirect; 3512 }; 3513 3514 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m) 3515 { 3516 void *p = msg->front.iov_base; 3517 void *const end = p + msg->front.iov_len; 3518 u16 version = le16_to_cpu(msg->hdr.version); 3519 struct ceph_eversion bad_replay_version; 3520 u8 decode_redir; 3521 u32 len; 3522 int ret; 3523 int i; 3524 3525 ceph_decode_32_safe(&p, end, len, e_inval); 3526 ceph_decode_need(&p, end, len, e_inval); 3527 p += len; /* skip oid */ 3528 3529 ret = ceph_decode_pgid(&p, end, &m->pgid); 3530 if (ret) 3531 return ret; 3532 3533 ceph_decode_64_safe(&p, end, m->flags, e_inval); 3534 ceph_decode_32_safe(&p, end, m->result, e_inval); 3535 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval); 3536 memcpy(&bad_replay_version, p, sizeof(bad_replay_version)); 3537 p += sizeof(bad_replay_version); 3538 ceph_decode_32_safe(&p, end, m->epoch, e_inval); 3539 3540 ceph_decode_32_safe(&p, end, m->num_ops, e_inval); 3541 if (m->num_ops > ARRAY_SIZE(m->outdata_len)) 3542 goto e_inval; 3543 3544 ceph_decode_need(&p, end, m->num_ops 
* sizeof(struct ceph_osd_op), 3545 e_inval); 3546 for (i = 0; i < m->num_ops; i++) { 3547 struct ceph_osd_op *op = p; 3548 3549 m->outdata_len[i] = le32_to_cpu(op->payload_len); 3550 p += sizeof(*op); 3551 } 3552 3553 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval); 3554 for (i = 0; i < m->num_ops; i++) 3555 ceph_decode_32_safe(&p, end, m->rval[i], e_inval); 3556 3557 if (version >= 5) { 3558 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval); 3559 memcpy(&m->replay_version, p, sizeof(m->replay_version)); 3560 p += sizeof(m->replay_version); 3561 ceph_decode_64_safe(&p, end, m->user_version, e_inval); 3562 } else { 3563 m->replay_version = bad_replay_version; /* struct */ 3564 m->user_version = le64_to_cpu(m->replay_version.version); 3565 } 3566 3567 if (version >= 6) { 3568 if (version >= 7) 3569 ceph_decode_8_safe(&p, end, decode_redir, e_inval); 3570 else 3571 decode_redir = 1; 3572 } else { 3573 decode_redir = 0; 3574 } 3575 3576 if (decode_redir) { 3577 ret = ceph_redirect_decode(&p, end, &m->redirect); 3578 if (ret) 3579 return ret; 3580 } else { 3581 ceph_oloc_init(&m->redirect.oloc); 3582 } 3583 3584 return 0; 3585 3586 e_inval: 3587 return -EINVAL; 3588 } 3589 3590 /* 3591 * Handle MOSDOpReply. Set ->r_result and call the callback if it is 3592 * specified. 3593 */ 3594 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) 3595 { 3596 struct ceph_osd_client *osdc = osd->o_osdc; 3597 struct ceph_osd_request *req; 3598 struct MOSDOpReply m; 3599 u64 tid = le64_to_cpu(msg->hdr.tid); 3600 u32 data_len = 0; 3601 int ret; 3602 int i; 3603 3604 dout("%s msg %p tid %llu\n", __func__, msg, tid); 3605 3606 down_read(&osdc->lock); 3607 if (!osd_registered(osd)) { 3608 dout("%s osd%d unknown\n", __func__, osd->o_osd); 3609 goto out_unlock_osdc; 3610 } 3611 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); 3612 3613 mutex_lock(&osd->lock); 3614 req = lookup_request(&osd->o_requests, tid); 3615 if (!req) { 3616 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid); 3617 goto out_unlock_session; 3618 } 3619 3620 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns; 3621 ret = decode_MOSDOpReply(msg, &m); 3622 m.redirect.oloc.pool_ns = NULL; 3623 if (ret) { 3624 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n", 3625 req->r_tid, ret); 3626 ceph_msg_dump(msg); 3627 goto fail_request; 3628 } 3629 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n", 3630 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed, 3631 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch), 3632 le64_to_cpu(m.replay_version.version), m.user_version); 3633 3634 if (m.retry_attempt >= 0) { 3635 if (m.retry_attempt != req->r_attempts - 1) { 3636 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n", 3637 req, req->r_tid, m.retry_attempt, 3638 req->r_attempts - 1); 3639 goto out_unlock_session; 3640 } 3641 } else { 3642 WARN_ON(1); /* MOSDOpReply v4 is assumed */ 3643 } 3644 3645 if (!ceph_oloc_empty(&m.redirect.oloc)) { 3646 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid, 3647 m.redirect.oloc.pool); 3648 unlink_request(osd, req); 3649 mutex_unlock(&osd->lock); 3650 3651 /* 3652 * Not ceph_oloc_copy() - changing pool_ns is not 3653 * supported. 
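 * Only the pool is taken from the redirect; the borrowed pool_ns
 * pointer was already cleared right after decode_MOSDOpReply().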
3654 */ 3655 req->r_t.target_oloc.pool = m.redirect.oloc.pool; 3656 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED; 3657 req->r_tid = 0; 3658 __submit_request(req, false); 3659 goto out_unlock_osdc; 3660 } 3661 3662 if (m.num_ops != req->r_num_ops) { 3663 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops, 3664 req->r_num_ops, req->r_tid); 3665 goto fail_request; 3666 } 3667 for (i = 0; i < req->r_num_ops; i++) { 3668 dout(" req %p tid %llu op %d rval %d len %u\n", req, 3669 req->r_tid, i, m.rval[i], m.outdata_len[i]); 3670 req->r_ops[i].rval = m.rval[i]; 3671 req->r_ops[i].outdata_len = m.outdata_len[i]; 3672 data_len += m.outdata_len[i]; 3673 } 3674 if (data_len != le32_to_cpu(msg->hdr.data_len)) { 3675 pr_err("sum of lens %u != %u for tid %llu\n", data_len, 3676 le32_to_cpu(msg->hdr.data_len), req->r_tid); 3677 goto fail_request; 3678 } 3679 dout("%s req %p tid %llu result %d data_len %u\n", __func__, 3680 req, req->r_tid, m.result, data_len); 3681 3682 /* 3683 * Since we only ever request ONDISK, we should only ever get 3684 * one (type of) reply back. 3685 */ 3686 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK)); 3687 req->r_result = m.result ?: data_len; 3688 finish_request(req); 3689 mutex_unlock(&osd->lock); 3690 up_read(&osdc->lock); 3691 3692 __complete_request(req); 3693 return; 3694 3695 fail_request: 3696 complete_request(req, -EIO); 3697 out_unlock_session: 3698 mutex_unlock(&osd->lock); 3699 out_unlock_osdc: 3700 up_read(&osdc->lock); 3701 } 3702 3703 static void set_pool_was_full(struct ceph_osd_client *osdc) 3704 { 3705 struct rb_node *n; 3706 3707 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { 3708 struct ceph_pg_pool_info *pi = 3709 rb_entry(n, struct ceph_pg_pool_info, node); 3710 3711 pi->was_full = __pool_full(pi); 3712 } 3713 } 3714 3715 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id) 3716 { 3717 struct ceph_pg_pool_info *pi; 3718 3719 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id); 3720 if (!pi) 3721 return false; 3722 3723 return pi->was_full && !__pool_full(pi); 3724 } 3725 3726 static enum calc_target_result 3727 recalc_linger_target(struct ceph_osd_linger_request *lreq) 3728 { 3729 struct ceph_osd_client *osdc = lreq->osdc; 3730 enum calc_target_result ct_res; 3731 3732 ct_res = calc_target(osdc, &lreq->t, NULL, true); 3733 if (ct_res == CALC_TARGET_NEED_RESEND) { 3734 struct ceph_osd *osd; 3735 3736 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3737 if (osd != lreq->osd) { 3738 unlink_linger(lreq->osd, lreq); 3739 link_linger(osd, lreq); 3740 } 3741 } 3742 3743 return ct_res; 3744 } 3745 3746 /* 3747 * Requeue requests whose mapping to an OSD has changed. 
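 * @force_resend: resend everything regardless of mapping changes (set
 * when one or more map epochs were skipped).  @cleared_full and
 * @check_pool_cleared_full additionally force a resend of writes that
 * may have been blocked on a map- or pool-level full condition.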
3748 */ 3749 static void scan_requests(struct ceph_osd *osd, 3750 bool force_resend, 3751 bool cleared_full, 3752 bool check_pool_cleared_full, 3753 struct rb_root *need_resend, 3754 struct list_head *need_resend_linger) 3755 { 3756 struct ceph_osd_client *osdc = osd->o_osdc; 3757 struct rb_node *n; 3758 bool force_resend_writes; 3759 3760 for (n = rb_first(&osd->o_linger_requests); n; ) { 3761 struct ceph_osd_linger_request *lreq = 3762 rb_entry(n, struct ceph_osd_linger_request, node); 3763 enum calc_target_result ct_res; 3764 3765 n = rb_next(n); /* recalc_linger_target() */ 3766 3767 dout("%s lreq %p linger_id %llu\n", __func__, lreq, 3768 lreq->linger_id); 3769 ct_res = recalc_linger_target(lreq); 3770 switch (ct_res) { 3771 case CALC_TARGET_NO_ACTION: 3772 force_resend_writes = cleared_full || 3773 (check_pool_cleared_full && 3774 pool_cleared_full(osdc, lreq->t.base_oloc.pool)); 3775 if (!force_resend && !force_resend_writes) 3776 break; 3777 3778 /* fall through */ 3779 case CALC_TARGET_NEED_RESEND: 3780 cancel_linger_map_check(lreq); 3781 /* 3782 * scan_requests() for the previous epoch(s) 3783 * may have already added it to the list, since 3784 * it's not unlinked here. 3785 */ 3786 if (list_empty(&lreq->scan_item)) 3787 list_add_tail(&lreq->scan_item, need_resend_linger); 3788 break; 3789 case CALC_TARGET_POOL_DNE: 3790 list_del_init(&lreq->scan_item); 3791 check_linger_pool_dne(lreq); 3792 break; 3793 } 3794 } 3795 3796 for (n = rb_first(&osd->o_requests); n; ) { 3797 struct ceph_osd_request *req = 3798 rb_entry(n, struct ceph_osd_request, r_node); 3799 enum calc_target_result ct_res; 3800 3801 n = rb_next(n); /* unlink_request(), check_pool_dne() */ 3802 3803 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 3804 ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con, 3805 false); 3806 switch (ct_res) { 3807 case CALC_TARGET_NO_ACTION: 3808 force_resend_writes = cleared_full || 3809 (check_pool_cleared_full && 3810 pool_cleared_full(osdc, req->r_t.base_oloc.pool)); 3811 if (!force_resend && 3812 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) || 3813 !force_resend_writes)) 3814 break; 3815 3816 /* fall through */ 3817 case CALC_TARGET_NEED_RESEND: 3818 cancel_map_check(req); 3819 unlink_request(osd, req); 3820 insert_request(need_resend, req); 3821 break; 3822 case CALC_TARGET_POOL_DNE: 3823 check_pool_dne(req); 3824 break; 3825 } 3826 } 3827 } 3828 3829 static int handle_one_map(struct ceph_osd_client *osdc, 3830 void *p, void *end, bool incremental, 3831 struct rb_root *need_resend, 3832 struct list_head *need_resend_linger) 3833 { 3834 struct ceph_osdmap *newmap; 3835 struct rb_node *n; 3836 bool skipped_map = false; 3837 bool was_full; 3838 3839 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); 3840 set_pool_was_full(osdc); 3841 3842 if (incremental) 3843 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap); 3844 else 3845 newmap = ceph_osdmap_decode(&p, end); 3846 if (IS_ERR(newmap)) 3847 return PTR_ERR(newmap); 3848 3849 if (newmap != osdc->osdmap) { 3850 /* 3851 * Preserve ->was_full before destroying the old map. 3852 * For pools that weren't in the old map, ->was_full 3853 * should be false. 
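 * pool_cleared_full() later compares ->was_full against the new map
 * to detect a full -> non-full transition for each pool.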
static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osdmap *newmap;
	struct rb_node *n;
	bool skipped_map = false;
	bool was_full;

	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	set_pool_was_full(osdc);

	if (incremental)
		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);

	if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
			struct ceph_pg_pool_info *pi =
			    rb_entry(n, struct ceph_pg_pool_info, node);
			struct ceph_pg_pool_info *old_pi;

			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}

		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}

	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
		      need_resend, need_resend_linger);

	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n); /* close_osd() */

		scan_requests(osd, skipped_map, was_full, true, need_resend,
			      need_resend_linger);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}

static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_linger_request *lreq, *nlreq;
	enum calc_target_result ct_res;
	struct rb_node *n;

	/* make sure need_resend targets reflect latest map */
	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n);

		if (req->r_t.epoch < osdc->osdmap->epoch) {
			ct_res = calc_target(osdc, &req->r_t, NULL, false);
			if (ct_res == CALC_TARGET_POOL_DNE) {
				erase_request(need_resend, req);
				check_pool_dne(req);
			}
		}
	}

	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		struct ceph_osd *osd;

		n = rb_next(n);
		erase_request(need_resend, req); /* before link_request() */

		osd = lookup_create_osd(osdc, req->r_t.osd, true);
		link_request(osd, req);
		if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}

	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}
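/*
 * Rough shape of the OSDMAP message payload decoded below (a sketch,
 * not a normative wire-format description):
 *
 *	fsid
 *	u32 nr_inc_maps
 *	{ u32 epoch, u32 len, byte data[len] } * nr_inc_maps
 *	u32 nr_full_maps
 *	{ u32 epoch, u32 len, byte data[len] } * nr_full_maps
 */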
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster. Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_fsid fsid;
	struct rb_root need_resend = RB_ROOT;
	LIST_HEAD(need_resend_linger);
	bool handled_incremental = false;
	bool was_pauserd, was_pausewr;
	bool pauserd, pausewr;
	int err;

	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
	down_write(&osdc->lock);

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;

	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		      have_pool_full(osdc);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend,
					     &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend,
					     &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		  have_pool_full(osdc);
	if (was_pauserd || was_pausewr || pauserd || pausewr ||
	    osdc->osdmap->epoch < osdc->epoch_barrier)
		maybe_request_map(osdc);

	kick_requests(osdc, &need_resend, &need_resend_linger);

	ceph_osdc_abort_on_full(osdc);
	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			  osdc->osdmap->epoch);
	up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}
/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
	struct rb_node *n;

	clear_backoffs(osd);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* cancel_linger_request() */

		if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	down_write(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}

	if (!reopen_osd(osd))
		kick_osd_requests(osd);
	maybe_request_map(osdc);

out_unlock:
	up_write(&osdc->lock);
}

struct MOSDBackoff {
	struct ceph_spg spgid;
	u32 map_epoch;
	u8 op;
	u64 id;
	struct ceph_hobject_id *begin;
	struct ceph_hobject_id *end;
};

static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
	if (ret)
		return ret;

	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
	if (ret)
		return ret;

	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
	ceph_decode_8_safe(&p, end, m->op, e_inval);
	ceph_decode_64_safe(&p, end, m->id, e_inval);

	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
	if (!m->begin)
		return -ENOMEM;

	ret = decode_hoid(&p, end, m->begin);
	if (ret) {
		free_hoid(m->begin);
		return ret;
	}

	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
	if (!m->end) {
		free_hoid(m->begin);
		return -ENOMEM;
	}

	ret = decode_hoid(&p, end, m->end);
	if (ret) {
		free_hoid(m->begin);
		free_hoid(m->end);
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
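/*
 * The ACK_BLOCK reply built below mirrors the layout just decoded:
 * spgid, map_epoch, op, id, then the two hobject_id bounds.  msg_size
 * is computed from the same fields so that BUG_ON(p != end) can verify
 * the encoding exactly filled the buffer.
 */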
static struct ceph_msg *create_backoff_message(
				const struct ceph_osd_backoff *backoff,
				u32 map_epoch)
{
	struct ceph_msg *msg;
	void *p, *end;
	int msg_size;

	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->begin);
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->end);

	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
	if (!msg)
		return NULL;

	p = msg->front.iov_base;
	end = p + msg->front_alloc_len;

	encode_spgid(&p, &backoff->spgid);
	ceph_encode_32(&p, map_epoch);
	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
	ceph_encode_64(&p, backoff->id);
	encode_hoid(&p, end, backoff->begin);
	encode_hoid(&p, end, backoff->end);
	BUG_ON(p != end);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}

static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_msg *msg;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
	if (!spg) {
		spg = alloc_spg_mapping();
		if (!spg) {
			pr_err("%s failed to allocate spg\n", __func__);
			return;
		}
		spg->spgid = m->spgid; /* struct */
		insert_spg_mapping(&osd->o_backoff_mappings, spg);
	}

	backoff = alloc_backoff();
	if (!backoff) {
		pr_err("%s failed to allocate backoff\n", __func__);
		return;
	}
	backoff->spgid = m->spgid; /* struct */
	backoff->id = m->id;
	backoff->begin = m->begin;
	m->begin = NULL; /* backoff now owns this */
	backoff->end = m->end;
	m->end = NULL;   /* ditto */

	insert_backoff(&spg->backoffs, backoff);
	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);

	/*
	 * Ack with original backoff's epoch so that the OSD can
	 * discard this if there was a PG split.
	 */
	msg = create_backoff_message(backoff, m->map_epoch);
	if (!msg) {
		pr_err("%s failed to allocate msg\n", __func__);
		return;
	}
	ceph_con_send(&osd->o_con, msg);
}

static bool target_contained_by(const struct ceph_osd_request_target *t,
				const struct ceph_hobject_id *begin,
				const struct ceph_hobject_id *end)
{
	struct ceph_hobject_id hoid;
	int cmp;

	hoid_fill_from_target(&hoid, t);
	cmp = hoid_compare(&hoid, begin);
	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
}
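/*
 * Note: target_contained_by() above treats [begin, end) as half-open:
 * a hoid equal to begin is contained, one equal to end is not.
 */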
static void handle_backoff_unblock(struct ceph_osd *osd,
				   const struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct rb_node *n;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
	if (!backoff) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		return;
	}

	if (hoid_compare(backoff->begin, m->begin) &&
	    hoid_compare(backoff->end, m->end)) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		/* unblock it anyway... */
	}

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
	BUG_ON(!spg);

	erase_backoff(&spg->backoffs, backoff);
	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
	free_backoff(backoff);

	if (RB_EMPTY_ROOT(&spg->backoffs)) {
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}

	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
			/*
			 * Match against @m, not @backoff -- the PG may
			 * have split on the OSD.
			 */
			if (target_contained_by(&req->r_t, m->begin, m->end)) {
				/*
				 * If no other installed backoff applies,
				 * resend.
				 */
				send_request(req);
			}
		}
	}
}

static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct MOSDBackoff m;
	int ret;

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		up_read(&osdc->lock);
		return;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	ret = decode_MOSDBackoff(msg, &m);
	if (ret) {
		pr_err("failed to decode MOSDBackoff: %d\n", ret);
		ceph_msg_dump(msg);
		goto out_unlock;
	}

	switch (m.op) {
	case CEPH_OSD_BACKOFF_OP_BLOCK:
		handle_backoff_block(osd, &m);
		break;
	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
		handle_backoff_unblock(osd, &m);
		break;
	default:
		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
	}

	free_hoid(m.begin);
	free_hoid(m.end);

out_unlock:
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);
}
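/*
 * Rough shape of the WATCH_NOTIFY payload decoded below (sketch):
 *
 *	u8  proto_ver
 *	u8  opcode
 *	u64 cookie
 *	u64 ver (skipped)
 *	u64 notify_id
 *	u32 payload_len, byte payload[payload_len]	(proto_ver >= 1)
 *	s32 return_code					(hdr.version >= 2)
 *	u64 notifier_id					(hdr.version >= 3)
 */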
/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8; /* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    msg->num_data_items ? &msg->data[0] : NULL;

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
				} else {
					ceph_release_page_vector(data->pages,
					       calc_pages_for(0, data->length));
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister a registered request. The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);

/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
						ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
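/*
 * Typical submit-and-wait usage of the two helpers above (sketch,
 * compiled out; request setup and error handling trimmed):
 */
#if 0
static int example_submit_and_wait(struct ceph_osd_client *osdc,
				   struct ceph_osd_request *req)
{
	int ret;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req); /* r_result or error */
	ceph_osdc_put_request(req);
	return ret;
}
#endif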
/*
 * sync - wait for all in-flight requests to flush. avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);

static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	return req;
}

static struct ceph_osd_request *
alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
{
	struct ceph_osd_request *req;

	req = alloc_linger_request(lreq);
	if (!req)
		return NULL;

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	osd_req_op_watch_init(req, 0, 0, watch_opcode);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}

/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
	struct ceph_osd_linger_request *lreq;
	int ret;

	lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);

	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&lreq->mtime);

	lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);
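/*
 * Watch lifecycle as seen by a user of this API (sketch, compiled
 * out): my_watch_cb/my_watch_err are hypothetical callbacks.
 */
#if 0
static void example_watch_cycle(struct ceph_osd_client *osdc,
				struct ceph_object_id *oid,
				struct ceph_object_locator *oloc)
{
	struct ceph_osd_linger_request *handle;

	handle = ceph_osdc_watch(osdc, oid, oloc, my_watch_cb, my_watch_err,
				 NULL);
	if (IS_ERR(handle))
		return;

	/* ... notifications arrive via my_watch_cb() ... */

	ceph_osdc_unwatch(osdc, handle);	/* drops the ref */
}
#endif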
/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);

static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 u32 payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
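/*
 * Like NOTIFY_ACK above, the NOTIFY op below carries its arguments as
 * a pagelist-encoded blob in the op's request_data rather than in op
 * fields: { prot_ver, timeout, payload_len, payload }.
 */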
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     u32 payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
				     payload, payload_len);
	if (ret)
		goto out_put_lreq;

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
	if (ret)
		goto out_put_lreq;

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
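/*
 * Caller-side contract for ceph_osdc_notify() (sketch, compiled out):
 * reply pages, if any, must be released with the page-vector helper
 * sized from the returned length, per the comment above.
 */
#if 0
static int example_notify(struct ceph_osd_client *osdc,
			  struct ceph_object_id *oid,
			  struct ceph_object_locator *oloc)
{
	char payload[] = "ping";	/* arbitrary example payload */
	struct page **reply_pages = NULL;
	size_t reply_len = 0;
	int ret;

	ret = ceph_osdc_notify(osdc, oid, oloc, payload, sizeof(payload),
			       5 /* seconds */, &reply_pages, &reply_len);
	if (reply_pages)
		ceph_release_page_vector(reply_pages,
					 calc_pages_for(0, reply_len));
	return ret;
}
#endif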
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error. If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}

static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		goto bad;

	ret = -EINVAL;
	ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
	ceph_decode_64_safe(p, end, item->cookie, bad);
	ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */

	if (struct_v >= 2) {
		ret = ceph_decode_entity_addr(p, end, &item->addr);
		if (ret)
			goto bad;
	} else {
		ret = 0;
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr));
bad:
	return ret;
}

static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}
/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);

/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);

/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page **resp_pages, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE)
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = osd_req_op_cls_init(req, 0, class, method);
	if (ret)
		goto out_put_req;

	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_pages)
		osd_req_op_cls_response_data_pages(req, 0, resp_pages,
						   *resp_len, 0, false, false);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_pages)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);

/*
 * reset all osd connections
 */
void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	down_write(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n);
		if (!reopen_osd(osd))
			kick_osd_requests(osd);
	}
	up_write(&osdc->lock);
}
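/*
 * Typical use of ceph_osdc_call() above (sketch, compiled out): invoke
 * a cls method with no request payload and a one-page reply.  The
 * "lock"/"get_info" class and method names are illustrative only.
 */
#if 0
static int example_cls_call(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc)
{
	struct page *reply_page;
	size_t reply_len = PAGE_SIZE;	/* in: capacity, out: actual */
	int ret;

	reply_page = alloc_page(GFP_NOIO);
	if (!reply_page)
		return -ENOMEM;

	ret = ceph_osdc_call(osdc, oid, oloc, "lock", "get_info",
			     CEPH_OSD_FLAG_READ, NULL, 0,
			     &reply_page, &reply_len);
	__free_page(reply_page);
	return ret;
}
#endif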
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
	if (!osdc->completion_wq)
		goto out_notify_wq;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_notify_wq:
	destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	destroy_workqueue(osdc->completion_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
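/*
 * Note the teardown order in ceph_osdc_stop() above: the workqueues
 * and delayed works are stopped first, so nothing can queue new work
 * while the OSD sessions are being closed and the rbtrees emptied.
 */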
/*
 * Read some contiguous pages. If we cross a stripe boundary, shorten
 * *plen. Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec64 *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
					 false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
				     u64 src_snapid, u64 src_version,
				     struct ceph_object_id *src_oid,
				     struct ceph_object_locator *src_oloc,
				     u32 src_fadvise_flags,
				     u32 dst_fadvise_flags,
				     u8 copy_from_flags)
{
	struct ceph_osd_req_op *op;
	struct page **pages;
	void *p, *end;

	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM,
			      dst_fadvise_flags);
	op->copy_from.snapid = src_snapid;
	op->copy_from.src_version = src_version;
	op->copy_from.flags = copy_from_flags;
	op->copy_from.src_fadvise_flags = src_fadvise_flags;

	p = page_address(pages[0]);
	end = p + PAGE_SIZE;
	ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
	encode_oloc(&p, end, src_oloc);
	op->indata_len = PAGE_SIZE - (end - p);

	ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
				 op->indata_len, 0, false, true);
	return 0;
}
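/*
 * The COPY_FROM op above carries the source object reference (oid
 * string + oloc) as page-backed op data; indata_len ends up being the
 * number of bytes actually encoded into that page.
 */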
int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
			u64 src_snapid, u64 src_version,
			struct ceph_object_id *src_oid,
			struct ceph_object_locator *src_oloc,
			u32 src_fadvise_flags,
			struct ceph_object_id *dst_oid,
			struct ceph_object_locator *dst_oloc,
			u32 dst_fadvise_flags,
			u8 copy_from_flags)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->r_flags = CEPH_OSD_FLAG_WRITE;

	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
	ceph_oid_copy(&req->r_t.base_oid, dst_oid);

	ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
					src_oloc, src_fadvise_flags,
					dst_fadvise_flags, copy_from_flags);
	if (ret)
		goto out;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_copy_from);

int __init ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}
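/*
 * Incoming OPREPLY messages are matched to an in-flight request by tid
 * in get_reply() below; other message types get a freshly allocated
 * message with a page vector sized from hdr->data_len.
 */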
/*
 * Lookup and return message for incoming reply. Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}

/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;
		struct ceph_osd_data osd_data;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
					 false);
		ceph_osdc_msg_data_add(m, &osd_data);
	}

	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	if (get_osd(osd))
		return con;
	return NULL;
}
static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	put_osd(osd);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately. Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
					    challenge_buf, challenge_buf_len);
}

static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.add_authorizer_challenge = add_authorizer_challenge,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
	.fault = osd_fault,
};