// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices".  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
		       u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	u32 xlen;

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
				      objoff, &xlen);
	*objlen = xlen;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
	return 0;
}
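
/*
 * Worked example (illustrative, assuming the default "simple" layout
 * of 4M objects with object_size == stripe_unit and stripe_count 1):
 * a request for off=3M len=2M maps to objnum=0, objoff=3M, xlen=1M.
 * Since objlen (1M) < orig_len (2M), *plen is shortened to 1M and the
 * caller is expected to issue a follow-up request for the remaining
 * 1M, which lands at objnum=1, objoff=0.
 */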

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
				   struct ceph_bio_iter *bio_pos,
				   u32 bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio_pos = *bio_pos;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
				     struct ceph_bvec_iter *bvec_pos,
				     u32 num_bvecs)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
	osd_data->bvec_pos = *bvec_pos;
	osd_data->num_bvecs = num_bvecs;
}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
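
/*
 * Illustrative sketch (not compiled here): attaching a page vector to
 * the extent op of a 2-page read request.  "req" and "pages" are
 * assumed to have been set up by the caller, e.g. via
 * ceph_alloc_page_vector(2, GFP_NOFS):
 *
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, 2 * PAGE_SIZE,
 *			       0, 0);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, 2 * PAGE_SIZE, 0,
 *					 false, false);
 */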

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
				    unsigned int which,
				    struct ceph_bio_iter *bio_pos,
				    u32 bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
				      unsigned int which,
				      struct bio_vec *bvecs, u32 num_bvecs,
				      u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
					 unsigned int which,
					 struct ceph_bvec_iter *bvec_pos)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
				       unsigned int which,
				       struct bio_vec *bvecs, u32 num_bvecs,
				       u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
	osd_req->r_ops[which].cls.indata_len += bytes;
	osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				 pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
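
/*
 * Illustrative sketch (not compiled here): invoking an object class
 * method, here the hypothetical "foo"/"bar", with marshalled input in
 * in_pages and a page reserved for the reply.  "req", "in_pages",
 * "in_len" and "out_pages" are assumed to come from the caller:
 *
 *	ret = osd_req_op_cls_init(req, 0, "foo", "bar");
 *	osd_req_op_cls_request_data_pages(req, 0, in_pages, in_len, 0,
 *					  false, false);
 *	osd_req_op_cls_response_data_pages(req, 0, out_pages, PAGE_SIZE, 0,
 *					   false, false);
 */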

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	case CEPH_OSD_DATA_TYPE_BVECS:
		return osd_data->bvec_pos.iter.bi_size;
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		ceph_pagelist_release(osd_data->pagelist);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
				    unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		ceph_osd_data_release(&op->copy_from.osd_data);
		break;
	default:
		break;
	}
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_private_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_private_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
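
/*
 * For example, an oloc in pool 2 with no namespace encodes to
 * 8 + 4 + 4 + 4 = 20 bytes (pool, preferred, key len, namespace len);
 * a 4-byte pool namespace string would bring that to 24.  Compare
 * encode_oloc() below, which emits exactly these fields.
 */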

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
				      int num_request_data_items,
				      int num_reply_data_items)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(req->r_request || req->r_reply);
	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
				       num_request_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
				    num_request_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
				       num_reply_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
				    num_reply_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}

static void get_num_data_items(struct ceph_osd_request *req,
			       int *num_request_data_items,
			       int *num_reply_data_items)
{
	struct ceph_osd_req_op *op;

	*num_request_data_items = 0;
	*num_reply_data_items = 0;

	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
		case CEPH_OSD_OP_NOTIFY_ACK:
		case CEPH_OSD_OP_COPY_FROM2:
			*num_request_data_items += 1;
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
		case CEPH_OSD_OP_READ:
		case CEPH_OSD_OP_LIST_WATCHERS:
			*num_reply_data_items += 1;
			break;

		/* both */
		case CEPH_OSD_OP_NOTIFY:
			*num_request_data_items += 1;
			*num_reply_data_items += 1;
			break;
		case CEPH_OSD_OP_CALL:
			*num_request_data_items += 2;
			*num_reply_data_items += 1;
			break;

		default:
			WARN_ON(!osd_req_opcode_valid(op->op));
			break;
		}
	}
}

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	int num_request_data_items, num_reply_data_items;

	get_num_data_items(req, &num_request_data_items,
			   &num_reply_data_items);
	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
					  num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return; /* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);
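
/*
 * For instance (illustrative): if a write op was set up for 4096 bytes
 * but the caller later decides to send only 2048, then
 *
 *	osd_req_op_extent_update(req, 0, 2048);
 *
 * shrinks op->extent.length and op->indata_len by 2048 each.  Growing
 * an op is not supported -- note the BUG_ON(length > previous) above.
 */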

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			const char *class, const char *method)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;
	int ret;

	op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ret = ceph_pagelist_append(pagelist, class, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ret = ceph_pagelist_append(pagelist, method, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;
	int ret;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ret = ceph_pagelist_append(pagelist, name, payload_len);
	if (ret)
		goto err_pagelist_free;

	op->xattr.value_len = size;
	ret = ceph_pagelist_append(pagelist, value, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
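
/*
 * Illustrative sketch (not compiled here): setting an xattr on an
 * object.  A plain set passes 0 for the compare op/mode; a guarded
 * set would use CEPH_OSD_OP_CMPXATTR with CEPH_OSD_CMPXATTR_* values:
 *
 *	ret = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_SETXATTR,
 *				    "user.version", buf, buf_len, 0, 0);
 */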

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

/*
 * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_*
 */
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size,
				u32 flags)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;
	op->alloc_hint.flags = flags;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment, false);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
		ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
		dst->copy_from.src_version =
			cpu_to_le64(src->copy_from.src_version);
		dst->copy_from.flags = src->copy_from.flags;
		dst->copy_from.src_fadvise_flags =
			cpu_to_le32(src->copy_from.src_fadvise_flags);
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
		       ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	if (num_ops > 1)
		/*
		 * This is a special case for ceph_writepages_start(), but it
		 * also covers ceph_uninline_data().  If more multi-op request
		 * use cases emerge, we will need a separate helper.
		 */
		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
	else
		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
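
/*
 * Putting it together -- a minimal lifecycle sketch (illustrative only,
 * error handling elided; "osdc", "layout", "vino" and "pages" are
 * assumed to come from the caller, e.g. a ceph inode):
 *
 *	u64 len = PAGE_SIZE;
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, 0, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, 0, false);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *					 false, false);
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */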

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
			int (*fn)(struct ceph_osd_request *req, void *arg),
			void *arg)
{
	struct rb_node *n, *p;

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p);
			if (fn(req, arg))
				return;
		}
	}

	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
		struct ceph_osd_request *req =
		    rb_entry(p, struct ceph_osd_request, r_node);

		p = rb_next(p);
		if (fn(req, arg))
			return;
	}
}
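
/*
 * Illustrative sketch (not part of this file): a callback that counts
 * requests and aborts the walk early once a limit is hit.  A non-zero
 * return from @fn stops the iteration:
 *
 *	static int count_cb(struct ceph_osd_request *req, void *arg)
 *	{
 *		int *count = arg;
 *
 *		return ++(*count) >= 100;
 *	}
 *
 *	...
 *	int count = 0;
 *	for_each_request(osdc, count_cb, &count);
 */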

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}

static int pick_random_replica(const struct ceph_osds *acting)
{
	int i = prandom_u32() % acting->size;

	dout("%s picked osd%d, primary osd%d\n", __func__,
	     acting->osds[i], acting->primary);
	return i;
}

/*
 * Picks the closest replica based on client's location given by
 * crush_location option.  Prefers the primary if the locality is
 * the same.
 */
static int pick_closest_replica(struct ceph_osd_client *osdc,
				const struct ceph_osds *acting)
{
	struct ceph_options *opt = osdc->client->options;
	int best_i, best_locality;
	int i = 0, locality;

	do {
		locality = ceph_get_crush_locality(osdc->osdmap,
						   acting->osds[i],
						   &opt->crush_locs);
		if (i == 0 ||
		    (locality >= 0 && best_locality < 0) ||
		    (locality >= 0 && best_locality >= 0 &&
		     locality < best_locality)) {
			best_i = i;
			best_locality = locality;
		}
	} while (++i < acting->size);

	dout("%s picked osd%d with locality %d, primary osd%d\n", __func__,
	     acting->osds[best_i], best_locality, acting->primary);
	return best_i;
}
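
/*
 * For example (illustrative): with crush_location "datacenter:dc1" and
 * acting = [osd1 (dc2), osd2 (dc1), osd3 (dc1)], osd2 and osd3 report
 * a better (smaller, non-negative) locality than osd1, and osd2 wins
 * as the first of the tied candidates.  Per the selection logic above,
 * a negative locality means no CRUSH ancestor matched the client's
 * location, and ties never displace an earlier pick -- which is how
 * the primary (i == 0) is preferred on equal locality.
 */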

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool is_read = t->flags & CEPH_OSD_FLAG_READ;
	bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change = false;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (is_read && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (is_write && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc,
				    &pgid);
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting,
					  t->used_replica || any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;

		if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
				 CEPH_OSD_FLAG_LOCALIZE_READS)) &&
		    !is_write && pi->type == CEPH_POOL_TYPE_REP &&
		    acting.size > 1) {
			int pos;

			WARN_ON(!is_read || acting.osds[0] != acting.primary);
			if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
				pos = pick_random_replica(&acting);
			} else {
				pos = pick_closest_replica(osdc, &acting);
			}
			t->osd = acting.osds[pos];
			t->used_replica = pos > 0;
		} else {
			t->osd = acting.primary;
			t->used_replica = false;
		}
	}

	if (unpaused || legacy_change || force_resend || split)
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
	     legacy_change, force_resend, split, ct_res, t->osd);
	return ct_res;
}
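
/*
 * For example (illustrative): if a pool's pg_num grows from 8 to 16,
 * an object whose raw hash mapped to PG x.4 under the old mask may now
 * map to PG x.c.  ceph_pg_is_split() detects this, the target is
 * refreshed from the new map, and CALC_TARGET_NEED_RESEND tells the
 * caller to re-send the request to the (possibly unchanged) target OSD.
 */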

static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}

static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}
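
/*
 * Example ordering (illustrative): within one pool, hoids compare
 * first by the bitwise (reversed-hash) key, so an object with
 * hash_reverse_bits 0x10 sorts before one with 0x20 regardless of
 * name; namespace, key/oid and snapid only break ties.  is_max sorts
 * after everything else, which is what lets a backoff's [begin, end)
 * containment check in lookup_containing_backoff() below express
 * "everything from X onwards" ranges.
 */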

/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}

static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}
1899 */ 1900 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare, 1901 RB_BYVAL, spg_node); 1902 1903 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root, 1904 const struct ceph_hobject_id *hoid) 1905 { 1906 struct rb_node *n = root->rb_node; 1907 1908 while (n) { 1909 struct ceph_osd_backoff *cur = 1910 rb_entry(n, struct ceph_osd_backoff, spg_node); 1911 int cmp; 1912 1913 cmp = hoid_compare(hoid, cur->begin); 1914 if (cmp < 0) { 1915 n = n->rb_left; 1916 } else if (cmp > 0) { 1917 if (hoid_compare(hoid, cur->end) < 0) 1918 return cur; 1919 1920 n = n->rb_right; 1921 } else { 1922 return cur; 1923 } 1924 } 1925 1926 return NULL; 1927 } 1928 1929 /* 1930 * Each backoff has a unique id within its OSD session. 1931 */ 1932 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node) 1933 1934 static void clear_backoffs(struct ceph_osd *osd) 1935 { 1936 while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) { 1937 struct ceph_spg_mapping *spg = 1938 rb_entry(rb_first(&osd->o_backoff_mappings), 1939 struct ceph_spg_mapping, node); 1940 1941 while (!RB_EMPTY_ROOT(&spg->backoffs)) { 1942 struct ceph_osd_backoff *backoff = 1943 rb_entry(rb_first(&spg->backoffs), 1944 struct ceph_osd_backoff, spg_node); 1945 1946 erase_backoff(&spg->backoffs, backoff); 1947 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); 1948 free_backoff(backoff); 1949 } 1950 erase_spg_mapping(&osd->o_backoff_mappings, spg); 1951 free_spg_mapping(spg); 1952 } 1953 } 1954 1955 /* 1956 * Set up a temporary, non-owning view into @t. 1957 */ 1958 static void hoid_fill_from_target(struct ceph_hobject_id *hoid, 1959 const struct ceph_osd_request_target *t) 1960 { 1961 hoid->key = NULL; 1962 hoid->key_len = 0; 1963 hoid->oid = t->target_oid.name; 1964 hoid->oid_len = t->target_oid.name_len; 1965 hoid->snapid = CEPH_NOSNAP; 1966 hoid->hash = t->pgid.seed; 1967 hoid->is_max = false; 1968 if (t->target_oloc.pool_ns) { 1969 hoid->nspace = t->target_oloc.pool_ns->str; 1970 hoid->nspace_len = t->target_oloc.pool_ns->len; 1971 } else { 1972 hoid->nspace = NULL; 1973 hoid->nspace_len = 0; 1974 } 1975 hoid->pool = t->target_oloc.pool; 1976 ceph_hoid_build_hash_cache(hoid); 1977 } 1978 1979 static bool should_plug_request(struct ceph_osd_request *req) 1980 { 1981 struct ceph_osd *osd = req->r_osd; 1982 struct ceph_spg_mapping *spg; 1983 struct ceph_osd_backoff *backoff; 1984 struct ceph_hobject_id hoid; 1985 1986 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid); 1987 if (!spg) 1988 return false; 1989 1990 hoid_fill_from_target(&hoid, &req->r_t); 1991 backoff = lookup_containing_backoff(&spg->backoffs, &hoid); 1992 if (!backoff) 1993 return false; 1994 1995 dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n", 1996 __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool, 1997 backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id); 1998 return true; 1999 } 2000 2001 /* 2002 * Keep get_num_data_items() in sync with this function. 
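 *
 * Each op's data item is hung off exactly one message: outgoing
 * payloads (WRITE, SETXATTR, ...) off the request, incoming buffers
 * (READ, STAT, ...) off the reply, and CALL/NOTIFY off both.  The
 * num_data_items check below makes this a no-op on resend.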
2003 */ 2004 static void setup_request_data(struct ceph_osd_request *req) 2005 { 2006 struct ceph_msg *request_msg = req->r_request; 2007 struct ceph_msg *reply_msg = req->r_reply; 2008 struct ceph_osd_req_op *op; 2009 2010 if (req->r_request->num_data_items || req->r_reply->num_data_items) 2011 return; 2012 2013 WARN_ON(request_msg->data_length || reply_msg->data_length); 2014 for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { 2015 switch (op->op) { 2016 /* request */ 2017 case CEPH_OSD_OP_WRITE: 2018 case CEPH_OSD_OP_WRITEFULL: 2019 WARN_ON(op->indata_len != op->extent.length); 2020 ceph_osdc_msg_data_add(request_msg, 2021 &op->extent.osd_data); 2022 break; 2023 case CEPH_OSD_OP_SETXATTR: 2024 case CEPH_OSD_OP_CMPXATTR: 2025 WARN_ON(op->indata_len != op->xattr.name_len + 2026 op->xattr.value_len); 2027 ceph_osdc_msg_data_add(request_msg, 2028 &op->xattr.osd_data); 2029 break; 2030 case CEPH_OSD_OP_NOTIFY_ACK: 2031 ceph_osdc_msg_data_add(request_msg, 2032 &op->notify_ack.request_data); 2033 break; 2034 case CEPH_OSD_OP_COPY_FROM2: 2035 ceph_osdc_msg_data_add(request_msg, 2036 &op->copy_from.osd_data); 2037 break; 2038 2039 /* reply */ 2040 case CEPH_OSD_OP_STAT: 2041 ceph_osdc_msg_data_add(reply_msg, 2042 &op->raw_data_in); 2043 break; 2044 case CEPH_OSD_OP_READ: 2045 ceph_osdc_msg_data_add(reply_msg, 2046 &op->extent.osd_data); 2047 break; 2048 case CEPH_OSD_OP_LIST_WATCHERS: 2049 ceph_osdc_msg_data_add(reply_msg, 2050 &op->list_watchers.response_data); 2051 break; 2052 2053 /* both */ 2054 case CEPH_OSD_OP_CALL: 2055 WARN_ON(op->indata_len != op->cls.class_len + 2056 op->cls.method_len + 2057 op->cls.indata_len); 2058 ceph_osdc_msg_data_add(request_msg, 2059 &op->cls.request_info); 2060 /* optional, can be NONE */ 2061 ceph_osdc_msg_data_add(request_msg, 2062 &op->cls.request_data); 2063 /* optional, can be NONE */ 2064 ceph_osdc_msg_data_add(reply_msg, 2065 &op->cls.response_data); 2066 break; 2067 case CEPH_OSD_OP_NOTIFY: 2068 ceph_osdc_msg_data_add(request_msg, 2069 &op->notify.request_data); 2070 ceph_osdc_msg_data_add(reply_msg, 2071 &op->notify.response_data); 2072 break; 2073 } 2074 } 2075 } 2076 2077 static void encode_pgid(void **p, const struct ceph_pg *pgid) 2078 { 2079 ceph_encode_8(p, 1); 2080 ceph_encode_64(p, pgid->pool); 2081 ceph_encode_32(p, pgid->seed); 2082 ceph_encode_32(p, -1); /* preferred */ 2083 } 2084 2085 static void encode_spgid(void **p, const struct ceph_spg *spgid) 2086 { 2087 ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1); 2088 encode_pgid(p, &spgid->pgid); 2089 ceph_encode_8(p, spgid->shard); 2090 } 2091 2092 static void encode_oloc(void **p, void *end, 2093 const struct ceph_object_locator *oloc) 2094 { 2095 ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc)); 2096 ceph_encode_64(p, oloc->pool); 2097 ceph_encode_32(p, -1); /* preferred */ 2098 ceph_encode_32(p, 0); /* key len */ 2099 if (oloc->pool_ns) 2100 ceph_encode_string(p, end, oloc->pool_ns->str, 2101 oloc->pool_ns->len); 2102 else 2103 ceph_encode_32(p, 0); 2104 } 2105 2106 static void encode_request_partial(struct ceph_osd_request *req, 2107 struct ceph_msg *msg) 2108 { 2109 void *p = msg->front.iov_base; 2110 void *const end = p + msg->front_alloc_len; 2111 u32 data_len = 0; 2112 int i; 2113 2114 if (req->r_flags & CEPH_OSD_FLAG_WRITE) { 2115 /* snapshots aren't writeable */ 2116 WARN_ON(req->r_snapid != CEPH_NOSNAP); 2117 } else { 2118 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec || 2119 req->r_data_offset || req->r_snapc); 2120 } 2121 2122 
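	/*
	 * Front layout encoded below (MOSDOp v8): spgid, raw hash,
	 * osdmap epoch, flags, reqid, blkin trace, client_inc, mtime,
	 * oloc, oid, ops, snapid, snap context, retry_attempt.  The
	 * data payloads themselves are hung off the messages by
	 * setup_request_data().
	 */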
setup_request_data(req); 2123 2124 encode_spgid(&p, &req->r_t.spgid); /* actual spg */ 2125 ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */ 2126 ceph_encode_32(&p, req->r_osdc->osdmap->epoch); 2127 ceph_encode_32(&p, req->r_flags); 2128 2129 /* reqid */ 2130 ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid)); 2131 memset(p, 0, sizeof(struct ceph_osd_reqid)); 2132 p += sizeof(struct ceph_osd_reqid); 2133 2134 /* trace */ 2135 memset(p, 0, sizeof(struct ceph_blkin_trace_info)); 2136 p += sizeof(struct ceph_blkin_trace_info); 2137 2138 ceph_encode_32(&p, 0); /* client_inc, always 0 */ 2139 ceph_encode_timespec64(p, &req->r_mtime); 2140 p += sizeof(struct ceph_timespec); 2141 2142 encode_oloc(&p, end, &req->r_t.target_oloc); 2143 ceph_encode_string(&p, end, req->r_t.target_oid.name, 2144 req->r_t.target_oid.name_len); 2145 2146 /* ops, can imply data */ 2147 ceph_encode_16(&p, req->r_num_ops); 2148 for (i = 0; i < req->r_num_ops; i++) { 2149 data_len += osd_req_encode_op(p, &req->r_ops[i]); 2150 p += sizeof(struct ceph_osd_op); 2151 } 2152 2153 ceph_encode_64(&p, req->r_snapid); /* snapid */ 2154 if (req->r_snapc) { 2155 ceph_encode_64(&p, req->r_snapc->seq); 2156 ceph_encode_32(&p, req->r_snapc->num_snaps); 2157 for (i = 0; i < req->r_snapc->num_snaps; i++) 2158 ceph_encode_64(&p, req->r_snapc->snaps[i]); 2159 } else { 2160 ceph_encode_64(&p, 0); /* snap_seq */ 2161 ceph_encode_32(&p, 0); /* snaps len */ 2162 } 2163 2164 ceph_encode_32(&p, req->r_attempts); /* retry_attempt */ 2165 BUG_ON(p > end - 8); /* space for features */ 2166 2167 msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */ 2168 /* front_len is finalized in encode_request_finish() */ 2169 msg->front.iov_len = p - msg->front.iov_base; 2170 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2171 msg->hdr.data_len = cpu_to_le32(data_len); 2172 /* 2173 * The header "data_off" is a hint to the receiver allowing it 2174 * to align received data into its buffers such that there's no 2175 * need to re-copy it before writing it to disk (direct I/O). 2176 */ 2177 msg->hdr.data_off = cpu_to_le16(req->r_data_offset); 2178 2179 dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg, 2180 req->r_t.target_oid.name, req->r_t.target_oid.name_len); 2181 } 2182 2183 static void encode_request_finish(struct ceph_msg *msg) 2184 { 2185 void *p = msg->front.iov_base; 2186 void *const partial_end = p + msg->front.iov_len; 2187 void *const end = p + msg->front_alloc_len; 2188 2189 if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) { 2190 /* luminous OSD -- encode features and be done */ 2191 p = partial_end; 2192 ceph_encode_64(&p, msg->con->peer_features); 2193 } else { 2194 struct { 2195 char spgid[CEPH_ENCODING_START_BLK_LEN + 2196 CEPH_PGID_ENCODING_LEN + 1]; 2197 __le32 hash; 2198 __le32 epoch; 2199 __le32 flags; 2200 char reqid[CEPH_ENCODING_START_BLK_LEN + 2201 sizeof(struct ceph_osd_reqid)]; 2202 char trace[sizeof(struct ceph_blkin_trace_info)]; 2203 __le32 client_inc; 2204 struct ceph_timespec mtime; 2205 } __packed head; 2206 struct ceph_pg pgid; 2207 void *oloc, *oid, *tail; 2208 int oloc_len, oid_len, tail_len; 2209 int len; 2210 2211 /* 2212 * Pre-luminous OSD -- reencode v8 into v4 using @head 2213 * as a temporary buffer. Encode the raw PG; the rest 2214 * is just a matter of moving oloc, oid and tail blobs 2215 * around. 
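 *
 * Roughly:
 *
 *	v8 front: spgid hash epoch flags reqid trace client_inc mtime
 *		  oloc oid tail
 *	v4 front: client_inc epoch flags mtime reassert_version oloc
 *		  (raw) pgid oid tail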
2216 */ 2217 memcpy(&head, p, sizeof(head)); 2218 p += sizeof(head); 2219 2220 oloc = p; 2221 p += CEPH_ENCODING_START_BLK_LEN; 2222 pgid.pool = ceph_decode_64(&p); 2223 p += 4 + 4; /* preferred, key len */ 2224 len = ceph_decode_32(&p); 2225 p += len; /* nspace */ 2226 oloc_len = p - oloc; 2227 2228 oid = p; 2229 len = ceph_decode_32(&p); 2230 p += len; 2231 oid_len = p - oid; 2232 2233 tail = p; 2234 tail_len = partial_end - p; 2235 2236 p = msg->front.iov_base; 2237 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc)); 2238 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch)); 2239 ceph_encode_copy(&p, &head.flags, sizeof(head.flags)); 2240 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime)); 2241 2242 /* reassert_version */ 2243 memset(p, 0, sizeof(struct ceph_eversion)); 2244 p += sizeof(struct ceph_eversion); 2245 2246 BUG_ON(p >= oloc); 2247 memmove(p, oloc, oloc_len); 2248 p += oloc_len; 2249 2250 pgid.seed = le32_to_cpu(head.hash); 2251 encode_pgid(&p, &pgid); /* raw pg */ 2252 2253 BUG_ON(p >= oid); 2254 memmove(p, oid, oid_len); 2255 p += oid_len; 2256 2257 /* tail -- ops, snapid, snapc, retry_attempt */ 2258 BUG_ON(p >= tail); 2259 memmove(p, tail, tail_len); 2260 p += tail_len; 2261 2262 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */ 2263 } 2264 2265 BUG_ON(p > end); 2266 msg->front.iov_len = p - msg->front.iov_base; 2267 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2268 2269 dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg, 2270 le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len), 2271 le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len), 2272 le16_to_cpu(msg->hdr.version)); 2273 } 2274 2275 /* 2276 * @req has to be assigned a tid and registered. 2277 */ 2278 static void send_request(struct ceph_osd_request *req) 2279 { 2280 struct ceph_osd *osd = req->r_osd; 2281 2282 verify_osd_locked(osd); 2283 WARN_ON(osd->o_osd != req->r_t.osd); 2284 2285 /* backoff? */ 2286 if (should_plug_request(req)) 2287 return; 2288 2289 /* 2290 * We may have a previously queued request message hanging 2291 * around. Cancel it to avoid corrupting the msgr. 
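 *
 * ceph_msg_revoke() pulls the message off the connection's out queue
 * if it hasn't hit the wire yet, so the re-encoded request below
 * can't race with a stale copy of itself.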
2292 */ 2293 if (req->r_sent) 2294 ceph_msg_revoke(req->r_request); 2295 2296 req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR; 2297 if (req->r_attempts) 2298 req->r_flags |= CEPH_OSD_FLAG_RETRY; 2299 else 2300 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY); 2301 2302 encode_request_partial(req, req->r_request); 2303 2304 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n", 2305 __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed, 2306 req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed, 2307 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags, 2308 req->r_attempts); 2309 2310 req->r_t.paused = false; 2311 req->r_stamp = jiffies; 2312 req->r_attempts++; 2313 2314 req->r_sent = osd->o_incarnation; 2315 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 2316 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request)); 2317 } 2318 2319 static void maybe_request_map(struct ceph_osd_client *osdc) 2320 { 2321 bool continuous = false; 2322 2323 verify_osdc_locked(osdc); 2324 WARN_ON(!osdc->osdmap->epoch); 2325 2326 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2327 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) || 2328 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2329 dout("%s osdc %p continuous\n", __func__, osdc); 2330 continuous = true; 2331 } else { 2332 dout("%s osdc %p onetime\n", __func__, osdc); 2333 } 2334 2335 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP, 2336 osdc->osdmap->epoch + 1, continuous)) 2337 ceph_monc_renew_subs(&osdc->client->monc); 2338 } 2339 2340 static void complete_request(struct ceph_osd_request *req, int err); 2341 static void send_map_check(struct ceph_osd_request *req); 2342 2343 static void __submit_request(struct ceph_osd_request *req, bool wrlocked) 2344 { 2345 struct ceph_osd_client *osdc = req->r_osdc; 2346 struct ceph_osd *osd; 2347 enum calc_target_result ct_res; 2348 int err = 0; 2349 bool need_send = false; 2350 bool promoted = false; 2351 2352 WARN_ON(req->r_tid); 2353 dout("%s req %p wrlocked %d\n", __func__, req, wrlocked); 2354 2355 again: 2356 ct_res = calc_target(osdc, &req->r_t, false); 2357 if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked) 2358 goto promote; 2359 2360 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked); 2361 if (IS_ERR(osd)) { 2362 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked); 2363 goto promote; 2364 } 2365 2366 if (osdc->abort_err) { 2367 dout("req %p abort_err %d\n", req, osdc->abort_err); 2368 err = osdc->abort_err; 2369 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) { 2370 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch, 2371 osdc->epoch_barrier); 2372 req->r_t.paused = true; 2373 maybe_request_map(osdc); 2374 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2375 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { 2376 dout("req %p pausewr\n", req); 2377 req->r_t.paused = true; 2378 maybe_request_map(osdc); 2379 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && 2380 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 2381 dout("req %p pauserd\n", req); 2382 req->r_t.paused = true; 2383 maybe_request_map(osdc); 2384 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2385 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | 2386 CEPH_OSD_FLAG_FULL_FORCE)) && 2387 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2388 pool_full(osdc, req->r_t.base_oloc.pool))) { 2389 dout("req %p full/pool_full\n", req); 2390 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { 2391 err = -ENOSPC; 2392 } else { 2393 pr_warn_ratelimited("FULL or reached pool quota\n"); 2394 
req->r_t.paused = true; 2395 maybe_request_map(osdc); 2396 } 2397 } else if (!osd_homeless(osd)) { 2398 need_send = true; 2399 } else { 2400 maybe_request_map(osdc); 2401 } 2402 2403 mutex_lock(&osd->lock); 2404 /* 2405 * Assign the tid atomically with send_request() to protect 2406 * multiple writes to the same object from racing with each 2407 * other, resulting in out of order ops on the OSDs. 2408 */ 2409 req->r_tid = atomic64_inc_return(&osdc->last_tid); 2410 link_request(osd, req); 2411 if (need_send) 2412 send_request(req); 2413 else if (err) 2414 complete_request(req, err); 2415 mutex_unlock(&osd->lock); 2416 2417 if (!err && ct_res == CALC_TARGET_POOL_DNE) 2418 send_map_check(req); 2419 2420 if (promoted) 2421 downgrade_write(&osdc->lock); 2422 return; 2423 2424 promote: 2425 up_read(&osdc->lock); 2426 down_write(&osdc->lock); 2427 wrlocked = true; 2428 promoted = true; 2429 goto again; 2430 } 2431 2432 static void account_request(struct ceph_osd_request *req) 2433 { 2434 struct ceph_osd_client *osdc = req->r_osdc; 2435 2436 WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK)); 2437 WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE))); 2438 2439 req->r_flags |= CEPH_OSD_FLAG_ONDISK; 2440 req->r_flags |= osdc->client->options->osd_req_flags; 2441 atomic_inc(&osdc->num_requests); 2442 2443 req->r_start_stamp = jiffies; 2444 req->r_start_latency = ktime_get(); 2445 } 2446 2447 static void submit_request(struct ceph_osd_request *req, bool wrlocked) 2448 { 2449 ceph_osdc_get_request(req); 2450 account_request(req); 2451 __submit_request(req, wrlocked); 2452 } 2453 2454 static void finish_request(struct ceph_osd_request *req) 2455 { 2456 struct ceph_osd_client *osdc = req->r_osdc; 2457 2458 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid)); 2459 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2460 2461 req->r_end_latency = ktime_get(); 2462 2463 if (req->r_osd) 2464 unlink_request(req->r_osd, req); 2465 atomic_dec(&osdc->num_requests); 2466 2467 /* 2468 * If an OSD has failed or returned and a request has been sent 2469 * twice, it's possible to get a reply and end up here while the 2470 * request message is queued for delivery. We will ignore the 2471 * reply, so not a big deal, but better to try and catch it. 2472 */ 2473 ceph_msg_revoke(req->r_request); 2474 ceph_msg_revoke_incoming(req->r_reply); 2475 } 2476 2477 static void __complete_request(struct ceph_osd_request *req) 2478 { 2479 dout("%s req %p tid %llu cb %ps result %d\n", __func__, req, 2480 req->r_tid, req->r_callback, req->r_result); 2481 2482 if (req->r_callback) 2483 req->r_callback(req); 2484 complete_all(&req->r_completion); 2485 ceph_osdc_put_request(req); 2486 } 2487 2488 static void complete_request_workfn(struct work_struct *work) 2489 { 2490 struct ceph_osd_request *req = 2491 container_of(work, struct ceph_osd_request, r_complete_work); 2492 2493 __complete_request(req); 2494 } 2495 2496 /* 2497 * This is open-coded in handle_reply(). 
2498 */ 2499 static void complete_request(struct ceph_osd_request *req, int err) 2500 { 2501 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2502 2503 req->r_result = err; 2504 finish_request(req); 2505 2506 INIT_WORK(&req->r_complete_work, complete_request_workfn); 2507 queue_work(req->r_osdc->completion_wq, &req->r_complete_work); 2508 } 2509 2510 static void cancel_map_check(struct ceph_osd_request *req) 2511 { 2512 struct ceph_osd_client *osdc = req->r_osdc; 2513 struct ceph_osd_request *lookup_req; 2514 2515 verify_osdc_wrlocked(osdc); 2516 2517 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2518 if (!lookup_req) 2519 return; 2520 2521 WARN_ON(lookup_req != req); 2522 erase_request_mc(&osdc->map_checks, req); 2523 ceph_osdc_put_request(req); 2524 } 2525 2526 static void cancel_request(struct ceph_osd_request *req) 2527 { 2528 dout("%s req %p tid %llu\n", __func__, req, req->r_tid); 2529 2530 cancel_map_check(req); 2531 finish_request(req); 2532 complete_all(&req->r_completion); 2533 ceph_osdc_put_request(req); 2534 } 2535 2536 static void abort_request(struct ceph_osd_request *req, int err) 2537 { 2538 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err); 2539 2540 cancel_map_check(req); 2541 complete_request(req, err); 2542 } 2543 2544 static int abort_fn(struct ceph_osd_request *req, void *arg) 2545 { 2546 int err = *(int *)arg; 2547 2548 abort_request(req, err); 2549 return 0; /* continue iteration */ 2550 } 2551 2552 /* 2553 * Abort all in-flight requests with @err and arrange for all future 2554 * requests to be failed immediately. 2555 */ 2556 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err) 2557 { 2558 dout("%s osdc %p err %d\n", __func__, osdc, err); 2559 down_write(&osdc->lock); 2560 for_each_request(osdc, abort_fn, &err); 2561 osdc->abort_err = err; 2562 up_write(&osdc->lock); 2563 } 2564 EXPORT_SYMBOL(ceph_osdc_abort_requests); 2565 2566 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc) 2567 { 2568 down_write(&osdc->lock); 2569 osdc->abort_err = 0; 2570 up_write(&osdc->lock); 2571 } 2572 EXPORT_SYMBOL(ceph_osdc_clear_abort_err); 2573 2574 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2575 { 2576 if (likely(eb > osdc->epoch_barrier)) { 2577 dout("updating epoch_barrier from %u to %u\n", 2578 osdc->epoch_barrier, eb); 2579 osdc->epoch_barrier = eb; 2580 /* Request map if we're not to the barrier yet */ 2581 if (eb > osdc->osdmap->epoch) 2582 maybe_request_map(osdc); 2583 } 2584 } 2585 2586 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) 2587 { 2588 down_read(&osdc->lock); 2589 if (unlikely(eb > osdc->epoch_barrier)) { 2590 up_read(&osdc->lock); 2591 down_write(&osdc->lock); 2592 update_epoch_barrier(osdc, eb); 2593 up_write(&osdc->lock); 2594 } else { 2595 up_read(&osdc->lock); 2596 } 2597 } 2598 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier); 2599 2600 /* 2601 * We can end up releasing caps as a result of abort_request(). 2602 * In that case, we probably want to ensure that the cap release message 2603 * has an updated epoch barrier in it, so set the epoch barrier prior to 2604 * aborting the first request. 
2605 */ 2606 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg) 2607 { 2608 struct ceph_osd_client *osdc = req->r_osdc; 2609 bool *victims = arg; 2610 2611 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && 2612 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2613 pool_full(osdc, req->r_t.base_oloc.pool))) { 2614 if (!*victims) { 2615 update_epoch_barrier(osdc, osdc->osdmap->epoch); 2616 *victims = true; 2617 } 2618 abort_request(req, -ENOSPC); 2619 } 2620 2621 return 0; /* continue iteration */ 2622 } 2623 2624 /* 2625 * Drop all pending requests that are stalled waiting on a full condition to 2626 * clear, and complete them with ENOSPC as the return code. Set the 2627 * osdc->epoch_barrier to the latest map epoch that we've seen if any were 2628 * cancelled. 2629 */ 2630 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc) 2631 { 2632 bool victims = false; 2633 2634 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) && 2635 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) 2636 for_each_request(osdc, abort_on_full_fn, &victims); 2637 } 2638 2639 static void check_pool_dne(struct ceph_osd_request *req) 2640 { 2641 struct ceph_osd_client *osdc = req->r_osdc; 2642 struct ceph_osdmap *map = osdc->osdmap; 2643 2644 verify_osdc_wrlocked(osdc); 2645 WARN_ON(!map->epoch); 2646 2647 if (req->r_attempts) { 2648 /* 2649 * We sent a request earlier, which means that 2650 * previously the pool existed, and now it does not 2651 * (i.e., it was deleted). 2652 */ 2653 req->r_map_dne_bound = map->epoch; 2654 dout("%s req %p tid %llu pool disappeared\n", __func__, req, 2655 req->r_tid); 2656 } else { 2657 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__, 2658 req, req->r_tid, req->r_map_dne_bound, map->epoch); 2659 } 2660 2661 if (req->r_map_dne_bound) { 2662 if (map->epoch >= req->r_map_dne_bound) { 2663 /* we had a new enough map */ 2664 pr_info_ratelimited("tid %llu pool does not exist\n", 2665 req->r_tid); 2666 complete_request(req, -ENOENT); 2667 } 2668 } else { 2669 send_map_check(req); 2670 } 2671 } 2672 2673 static void map_check_cb(struct ceph_mon_generic_request *greq) 2674 { 2675 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 2676 struct ceph_osd_request *req; 2677 u64 tid = greq->private_data; 2678 2679 WARN_ON(greq->result || !greq->u.newest); 2680 2681 down_write(&osdc->lock); 2682 req = lookup_request_mc(&osdc->map_checks, tid); 2683 if (!req) { 2684 dout("%s tid %llu dne\n", __func__, tid); 2685 goto out_unlock; 2686 } 2687 2688 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__, 2689 req, req->r_tid, req->r_map_dne_bound, greq->u.newest); 2690 if (!req->r_map_dne_bound) 2691 req->r_map_dne_bound = greq->u.newest; 2692 erase_request_mc(&osdc->map_checks, req); 2693 check_pool_dne(req); 2694 2695 ceph_osdc_put_request(req); 2696 out_unlock: 2697 up_write(&osdc->lock); 2698 } 2699 2700 static void send_map_check(struct ceph_osd_request *req) 2701 { 2702 struct ceph_osd_client *osdc = req->r_osdc; 2703 struct ceph_osd_request *lookup_req; 2704 int ret; 2705 2706 verify_osdc_wrlocked(osdc); 2707 2708 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid); 2709 if (lookup_req) { 2710 WARN_ON(lookup_req != req); 2711 return; 2712 } 2713 2714 ceph_osdc_get_request(req); 2715 insert_request_mc(&osdc->map_checks, req); 2716 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 2717 map_check_cb, req->r_tid); 2718 WARN_ON(ret); 2719 } 2720 2721 /* 2722 * lingering requests, watch/notify v2 infrastructure 
2723 */ 2724 static void linger_release(struct kref *kref) 2725 { 2726 struct ceph_osd_linger_request *lreq = 2727 container_of(kref, struct ceph_osd_linger_request, kref); 2728 2729 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq, 2730 lreq->reg_req, lreq->ping_req); 2731 WARN_ON(!RB_EMPTY_NODE(&lreq->node)); 2732 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node)); 2733 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node)); 2734 WARN_ON(!list_empty(&lreq->scan_item)); 2735 WARN_ON(!list_empty(&lreq->pending_lworks)); 2736 WARN_ON(lreq->osd); 2737 2738 if (lreq->reg_req) 2739 ceph_osdc_put_request(lreq->reg_req); 2740 if (lreq->ping_req) 2741 ceph_osdc_put_request(lreq->ping_req); 2742 target_destroy(&lreq->t); 2743 kfree(lreq); 2744 } 2745 2746 static void linger_put(struct ceph_osd_linger_request *lreq) 2747 { 2748 if (lreq) 2749 kref_put(&lreq->kref, linger_release); 2750 } 2751 2752 static struct ceph_osd_linger_request * 2753 linger_get(struct ceph_osd_linger_request *lreq) 2754 { 2755 kref_get(&lreq->kref); 2756 return lreq; 2757 } 2758 2759 static struct ceph_osd_linger_request * 2760 linger_alloc(struct ceph_osd_client *osdc) 2761 { 2762 struct ceph_osd_linger_request *lreq; 2763 2764 lreq = kzalloc(sizeof(*lreq), GFP_NOIO); 2765 if (!lreq) 2766 return NULL; 2767 2768 kref_init(&lreq->kref); 2769 mutex_init(&lreq->lock); 2770 RB_CLEAR_NODE(&lreq->node); 2771 RB_CLEAR_NODE(&lreq->osdc_node); 2772 RB_CLEAR_NODE(&lreq->mc_node); 2773 INIT_LIST_HEAD(&lreq->scan_item); 2774 INIT_LIST_HEAD(&lreq->pending_lworks); 2775 init_completion(&lreq->reg_commit_wait); 2776 init_completion(&lreq->notify_finish_wait); 2777 2778 lreq->osdc = osdc; 2779 target_init(&lreq->t); 2780 2781 dout("%s lreq %p\n", __func__, lreq); 2782 return lreq; 2783 } 2784 2785 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node) 2786 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node) 2787 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node) 2788 2789 /* 2790 * Create linger request <-> OSD session relation. 2791 * 2792 * @lreq has to be registered, @osd may be homeless. 
2793 */ 2794 static void link_linger(struct ceph_osd *osd, 2795 struct ceph_osd_linger_request *lreq) 2796 { 2797 verify_osd_locked(osd); 2798 WARN_ON(!lreq->linger_id || lreq->osd); 2799 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2800 osd->o_osd, lreq, lreq->linger_id); 2801 2802 if (!osd_homeless(osd)) 2803 __remove_osd_from_lru(osd); 2804 else 2805 atomic_inc(&osd->o_osdc->num_homeless); 2806 2807 get_osd(osd); 2808 insert_linger(&osd->o_linger_requests, lreq); 2809 lreq->osd = osd; 2810 } 2811 2812 static void unlink_linger(struct ceph_osd *osd, 2813 struct ceph_osd_linger_request *lreq) 2814 { 2815 verify_osd_locked(osd); 2816 WARN_ON(lreq->osd != osd); 2817 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, 2818 osd->o_osd, lreq, lreq->linger_id); 2819 2820 lreq->osd = NULL; 2821 erase_linger(&osd->o_linger_requests, lreq); 2822 put_osd(osd); 2823 2824 if (!osd_homeless(osd)) 2825 maybe_move_osd_to_lru(osd); 2826 else 2827 atomic_dec(&osd->o_osdc->num_homeless); 2828 } 2829 2830 static bool __linger_registered(struct ceph_osd_linger_request *lreq) 2831 { 2832 verify_osdc_locked(lreq->osdc); 2833 2834 return !RB_EMPTY_NODE(&lreq->osdc_node); 2835 } 2836 2837 static bool linger_registered(struct ceph_osd_linger_request *lreq) 2838 { 2839 struct ceph_osd_client *osdc = lreq->osdc; 2840 bool registered; 2841 2842 down_read(&osdc->lock); 2843 registered = __linger_registered(lreq); 2844 up_read(&osdc->lock); 2845 2846 return registered; 2847 } 2848 2849 static void linger_register(struct ceph_osd_linger_request *lreq) 2850 { 2851 struct ceph_osd_client *osdc = lreq->osdc; 2852 2853 verify_osdc_wrlocked(osdc); 2854 WARN_ON(lreq->linger_id); 2855 2856 linger_get(lreq); 2857 lreq->linger_id = ++osdc->last_linger_id; 2858 insert_linger_osdc(&osdc->linger_requests, lreq); 2859 } 2860 2861 static void linger_unregister(struct ceph_osd_linger_request *lreq) 2862 { 2863 struct ceph_osd_client *osdc = lreq->osdc; 2864 2865 verify_osdc_wrlocked(osdc); 2866 2867 erase_linger_osdc(&osdc->linger_requests, lreq); 2868 linger_put(lreq); 2869 } 2870 2871 static void cancel_linger_request(struct ceph_osd_request *req) 2872 { 2873 struct ceph_osd_linger_request *lreq = req->r_priv; 2874 2875 WARN_ON(!req->r_linger); 2876 cancel_request(req); 2877 linger_put(lreq); 2878 } 2879 2880 struct linger_work { 2881 struct work_struct work; 2882 struct ceph_osd_linger_request *lreq; 2883 struct list_head pending_item; 2884 unsigned long queued_stamp; 2885 2886 union { 2887 struct { 2888 u64 notify_id; 2889 u64 notifier_id; 2890 void *payload; /* points into @msg front */ 2891 size_t payload_len; 2892 2893 struct ceph_msg *msg; /* for ceph_msg_put() */ 2894 } notify; 2895 struct { 2896 int err; 2897 } error; 2898 }; 2899 }; 2900 2901 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq, 2902 work_func_t workfn) 2903 { 2904 struct linger_work *lwork; 2905 2906 lwork = kzalloc(sizeof(*lwork), GFP_NOIO); 2907 if (!lwork) 2908 return NULL; 2909 2910 INIT_WORK(&lwork->work, workfn); 2911 INIT_LIST_HEAD(&lwork->pending_item); 2912 lwork->lreq = linger_get(lreq); 2913 2914 return lwork; 2915 } 2916 2917 static void lwork_free(struct linger_work *lwork) 2918 { 2919 struct ceph_osd_linger_request *lreq = lwork->lreq; 2920 2921 mutex_lock(&lreq->lock); 2922 list_del(&lwork->pending_item); 2923 mutex_unlock(&lreq->lock); 2924 2925 linger_put(lreq); 2926 kfree(lwork); 2927 } 2928 2929 static void lwork_queue(struct linger_work *lwork) 2930 { 2931 struct 
ceph_osd_linger_request *lreq = lwork->lreq; 2932 struct ceph_osd_client *osdc = lreq->osdc; 2933 2934 verify_lreq_locked(lreq); 2935 WARN_ON(!list_empty(&lwork->pending_item)); 2936 2937 lwork->queued_stamp = jiffies; 2938 list_add_tail(&lwork->pending_item, &lreq->pending_lworks); 2939 queue_work(osdc->notify_wq, &lwork->work); 2940 } 2941 2942 static void do_watch_notify(struct work_struct *w) 2943 { 2944 struct linger_work *lwork = container_of(w, struct linger_work, work); 2945 struct ceph_osd_linger_request *lreq = lwork->lreq; 2946 2947 if (!linger_registered(lreq)) { 2948 dout("%s lreq %p not registered\n", __func__, lreq); 2949 goto out; 2950 } 2951 2952 WARN_ON(!lreq->is_watch); 2953 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n", 2954 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id, 2955 lwork->notify.payload_len); 2956 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id, 2957 lwork->notify.notifier_id, lwork->notify.payload, 2958 lwork->notify.payload_len); 2959 2960 out: 2961 ceph_msg_put(lwork->notify.msg); 2962 lwork_free(lwork); 2963 } 2964 2965 static void do_watch_error(struct work_struct *w) 2966 { 2967 struct linger_work *lwork = container_of(w, struct linger_work, work); 2968 struct ceph_osd_linger_request *lreq = lwork->lreq; 2969 2970 if (!linger_registered(lreq)) { 2971 dout("%s lreq %p not registered\n", __func__, lreq); 2972 goto out; 2973 } 2974 2975 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err); 2976 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err); 2977 2978 out: 2979 lwork_free(lwork); 2980 } 2981 2982 static void queue_watch_error(struct ceph_osd_linger_request *lreq) 2983 { 2984 struct linger_work *lwork; 2985 2986 lwork = lwork_alloc(lreq, do_watch_error); 2987 if (!lwork) { 2988 pr_err("failed to allocate error-lwork\n"); 2989 return; 2990 } 2991 2992 lwork->error.err = lreq->last_error; 2993 lwork_queue(lwork); 2994 } 2995 2996 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq, 2997 int result) 2998 { 2999 if (!completion_done(&lreq->reg_commit_wait)) { 3000 lreq->reg_commit_error = (result <= 0 ? result : 0); 3001 complete_all(&lreq->reg_commit_wait); 3002 } 3003 } 3004 3005 static void linger_commit_cb(struct ceph_osd_request *req) 3006 { 3007 struct ceph_osd_linger_request *lreq = req->r_priv; 3008 3009 mutex_lock(&lreq->lock); 3010 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq, 3011 lreq->linger_id, req->r_result); 3012 linger_reg_commit_complete(lreq, req->r_result); 3013 lreq->committed = true; 3014 3015 if (!lreq->is_watch) { 3016 struct ceph_osd_data *osd_data = 3017 osd_req_op_data(req, 0, notify, response_data); 3018 void *p = page_address(osd_data->pages[0]); 3019 3020 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY || 3021 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 3022 3023 /* make note of the notify_id */ 3024 if (req->r_ops[0].outdata_len >= sizeof(u64)) { 3025 lreq->notify_id = ceph_decode_64(&p); 3026 dout("lreq %p notify_id %llu\n", lreq, 3027 lreq->notify_id); 3028 } else { 3029 dout("lreq %p no notify_id\n", lreq); 3030 } 3031 } 3032 3033 mutex_unlock(&lreq->lock); 3034 linger_put(lreq); 3035 } 3036 3037 static int normalize_watch_error(int err) 3038 { 3039 /* 3040 * Translate ENOENT -> ENOTCONN so that a delete->disconnection 3041 * notification and a failure to reconnect because we raced with 3042 * the delete appear the same to the user. 
3043 */ 3044 if (err == -ENOENT) 3045 err = -ENOTCONN; 3046 3047 return err; 3048 } 3049 3050 static void linger_reconnect_cb(struct ceph_osd_request *req) 3051 { 3052 struct ceph_osd_linger_request *lreq = req->r_priv; 3053 3054 mutex_lock(&lreq->lock); 3055 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__, 3056 lreq, lreq->linger_id, req->r_result, lreq->last_error); 3057 if (req->r_result < 0) { 3058 if (!lreq->last_error) { 3059 lreq->last_error = normalize_watch_error(req->r_result); 3060 queue_watch_error(lreq); 3061 } 3062 } 3063 3064 mutex_unlock(&lreq->lock); 3065 linger_put(lreq); 3066 } 3067 3068 static void send_linger(struct ceph_osd_linger_request *lreq) 3069 { 3070 struct ceph_osd_request *req = lreq->reg_req; 3071 struct ceph_osd_req_op *op = &req->r_ops[0]; 3072 3073 verify_osdc_wrlocked(req->r_osdc); 3074 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3075 3076 if (req->r_osd) 3077 cancel_linger_request(req); 3078 3079 request_reinit(req); 3080 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); 3081 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); 3082 req->r_flags = lreq->t.flags; 3083 req->r_mtime = lreq->mtime; 3084 3085 mutex_lock(&lreq->lock); 3086 if (lreq->is_watch && lreq->committed) { 3087 WARN_ON(op->op != CEPH_OSD_OP_WATCH || 3088 op->watch.cookie != lreq->linger_id); 3089 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT; 3090 op->watch.gen = ++lreq->register_gen; 3091 dout("lreq %p reconnect register_gen %u\n", lreq, 3092 op->watch.gen); 3093 req->r_callback = linger_reconnect_cb; 3094 } else { 3095 if (!lreq->is_watch) 3096 lreq->notify_id = 0; 3097 else 3098 WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH); 3099 dout("lreq %p register\n", lreq); 3100 req->r_callback = linger_commit_cb; 3101 } 3102 mutex_unlock(&lreq->lock); 3103 3104 req->r_priv = linger_get(lreq); 3105 req->r_linger = true; 3106 3107 submit_request(req, true); 3108 } 3109 3110 static void linger_ping_cb(struct ceph_osd_request *req) 3111 { 3112 struct ceph_osd_linger_request *lreq = req->r_priv; 3113 3114 mutex_lock(&lreq->lock); 3115 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n", 3116 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent, 3117 lreq->last_error); 3118 if (lreq->register_gen == req->r_ops[0].watch.gen) { 3119 if (!req->r_result) { 3120 lreq->watch_valid_thru = lreq->ping_sent; 3121 } else if (!lreq->last_error) { 3122 lreq->last_error = normalize_watch_error(req->r_result); 3123 queue_watch_error(lreq); 3124 } 3125 } else { 3126 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq, 3127 lreq->register_gen, req->r_ops[0].watch.gen); 3128 } 3129 3130 mutex_unlock(&lreq->lock); 3131 linger_put(lreq); 3132 } 3133 3134 static void send_linger_ping(struct ceph_osd_linger_request *lreq) 3135 { 3136 struct ceph_osd_client *osdc = lreq->osdc; 3137 struct ceph_osd_request *req = lreq->ping_req; 3138 struct ceph_osd_req_op *op = &req->r_ops[0]; 3139 3140 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { 3141 dout("%s PAUSERD\n", __func__); 3142 return; 3143 } 3144 3145 lreq->ping_sent = jiffies; 3146 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n", 3147 __func__, lreq, lreq->linger_id, lreq->ping_sent, 3148 lreq->register_gen); 3149 3150 if (req->r_osd) 3151 cancel_linger_request(req); 3152 3153 request_reinit(req); 3154 target_copy(&req->r_t, &lreq->t); 3155 3156 WARN_ON(op->op != CEPH_OSD_OP_WATCH || 3157 op->watch.cookie != lreq->linger_id || 3158 op->watch.op != 
CEPH_OSD_WATCH_OP_PING); 3159 op->watch.gen = lreq->register_gen; 3160 req->r_callback = linger_ping_cb; 3161 req->r_priv = linger_get(lreq); 3162 req->r_linger = true; 3163 3164 ceph_osdc_get_request(req); 3165 account_request(req); 3166 req->r_tid = atomic64_inc_return(&osdc->last_tid); 3167 link_request(lreq->osd, req); 3168 send_request(req); 3169 } 3170 3171 static void linger_submit(struct ceph_osd_linger_request *lreq) 3172 { 3173 struct ceph_osd_client *osdc = lreq->osdc; 3174 struct ceph_osd *osd; 3175 3176 down_write(&osdc->lock); 3177 linger_register(lreq); 3178 if (lreq->is_watch) { 3179 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id; 3180 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id; 3181 } else { 3182 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id; 3183 } 3184 3185 calc_target(osdc, &lreq->t, false); 3186 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3187 link_linger(osd, lreq); 3188 3189 send_linger(lreq); 3190 up_write(&osdc->lock); 3191 } 3192 3193 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq) 3194 { 3195 struct ceph_osd_client *osdc = lreq->osdc; 3196 struct ceph_osd_linger_request *lookup_lreq; 3197 3198 verify_osdc_wrlocked(osdc); 3199 3200 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3201 lreq->linger_id); 3202 if (!lookup_lreq) 3203 return; 3204 3205 WARN_ON(lookup_lreq != lreq); 3206 erase_linger_mc(&osdc->linger_map_checks, lreq); 3207 linger_put(lreq); 3208 } 3209 3210 /* 3211 * @lreq has to be both registered and linked. 3212 */ 3213 static void __linger_cancel(struct ceph_osd_linger_request *lreq) 3214 { 3215 if (lreq->is_watch && lreq->ping_req->r_osd) 3216 cancel_linger_request(lreq->ping_req); 3217 if (lreq->reg_req->r_osd) 3218 cancel_linger_request(lreq->reg_req); 3219 cancel_linger_map_check(lreq); 3220 unlink_linger(lreq->osd, lreq); 3221 linger_unregister(lreq); 3222 } 3223 3224 static void linger_cancel(struct ceph_osd_linger_request *lreq) 3225 { 3226 struct ceph_osd_client *osdc = lreq->osdc; 3227 3228 down_write(&osdc->lock); 3229 if (__linger_registered(lreq)) 3230 __linger_cancel(lreq); 3231 up_write(&osdc->lock); 3232 } 3233 3234 static void send_linger_map_check(struct ceph_osd_linger_request *lreq); 3235 3236 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq) 3237 { 3238 struct ceph_osd_client *osdc = lreq->osdc; 3239 struct ceph_osdmap *map = osdc->osdmap; 3240 3241 verify_osdc_wrlocked(osdc); 3242 WARN_ON(!map->epoch); 3243 3244 if (lreq->register_gen) { 3245 lreq->map_dne_bound = map->epoch; 3246 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__, 3247 lreq, lreq->linger_id); 3248 } else { 3249 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n", 3250 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3251 map->epoch); 3252 } 3253 3254 if (lreq->map_dne_bound) { 3255 if (map->epoch >= lreq->map_dne_bound) { 3256 /* we had a new enough map */ 3257 pr_info("linger_id %llu pool does not exist\n", 3258 lreq->linger_id); 3259 linger_reg_commit_complete(lreq, -ENOENT); 3260 __linger_cancel(lreq); 3261 } 3262 } else { 3263 send_linger_map_check(lreq); 3264 } 3265 } 3266 3267 static void linger_map_check_cb(struct ceph_mon_generic_request *greq) 3268 { 3269 struct ceph_osd_client *osdc = &greq->monc->client->osdc; 3270 struct ceph_osd_linger_request *lreq; 3271 u64 linger_id = greq->private_data; 3272 3273 WARN_ON(greq->result || !greq->u.newest); 3274 3275 down_write(&osdc->lock); 3276 lreq = lookup_linger_mc(&osdc->linger_map_checks, 
linger_id); 3277 if (!lreq) { 3278 dout("%s linger_id %llu dne\n", __func__, linger_id); 3279 goto out_unlock; 3280 } 3281 3282 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n", 3283 __func__, lreq, lreq->linger_id, lreq->map_dne_bound, 3284 greq->u.newest); 3285 if (!lreq->map_dne_bound) 3286 lreq->map_dne_bound = greq->u.newest; 3287 erase_linger_mc(&osdc->linger_map_checks, lreq); 3288 check_linger_pool_dne(lreq); 3289 3290 linger_put(lreq); 3291 out_unlock: 3292 up_write(&osdc->lock); 3293 } 3294 3295 static void send_linger_map_check(struct ceph_osd_linger_request *lreq) 3296 { 3297 struct ceph_osd_client *osdc = lreq->osdc; 3298 struct ceph_osd_linger_request *lookup_lreq; 3299 int ret; 3300 3301 verify_osdc_wrlocked(osdc); 3302 3303 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks, 3304 lreq->linger_id); 3305 if (lookup_lreq) { 3306 WARN_ON(lookup_lreq != lreq); 3307 return; 3308 } 3309 3310 linger_get(lreq); 3311 insert_linger_mc(&osdc->linger_map_checks, lreq); 3312 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap", 3313 linger_map_check_cb, lreq->linger_id); 3314 WARN_ON(ret); 3315 } 3316 3317 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) 3318 { 3319 int ret; 3320 3321 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3322 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait); 3323 return ret ?: lreq->reg_commit_error; 3324 } 3325 3326 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq) 3327 { 3328 int ret; 3329 3330 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); 3331 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait); 3332 return ret ?: lreq->notify_finish_error; 3333 } 3334 3335 /* 3336 * Timeout callback, called every N seconds. When one or more OSD 3337 * requests have been active for more than N seconds, we send a keepalive 3338 * (tag + timestamp) to the OSD to ensure any communications channel 3339 * reset is detected. 3340 */ 3341 static void handle_timeout(struct work_struct *work) 3342 { 3343 struct ceph_osd_client *osdc = 3344 container_of(work, struct ceph_osd_client, timeout_work.work); 3345 struct ceph_options *opts = osdc->client->options; 3346 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; 3347 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout; 3348 LIST_HEAD(slow_osds); 3349 struct rb_node *n, *p; 3350 3351 dout("%s osdc %p\n", __func__, osdc); 3352 down_write(&osdc->lock); 3353 3354 /* 3355 * ping osds that are a bit slow. this ensures that if there 3356 * is a break in the TCP connection we will notice, and reopen 3357 * a connection with that osd (from the fault callback).
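 *
 * The same sweep aborts requests older than osd_request_timeout with
 * -ETIMEDOUT and sends pings for committed watches.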
3358 */ 3359 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { 3360 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 3361 bool found = false; 3362 3363 for (p = rb_first(&osd->o_requests); p; ) { 3364 struct ceph_osd_request *req = 3365 rb_entry(p, struct ceph_osd_request, r_node); 3366 3367 p = rb_next(p); /* abort_request() */ 3368 3369 if (time_before(req->r_stamp, cutoff)) { 3370 dout(" req %p tid %llu on osd%d is laggy\n", 3371 req, req->r_tid, osd->o_osd); 3372 found = true; 3373 } 3374 if (opts->osd_request_timeout && 3375 time_before(req->r_start_stamp, expiry_cutoff)) { 3376 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3377 req->r_tid, osd->o_osd); 3378 abort_request(req, -ETIMEDOUT); 3379 } 3380 } 3381 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { 3382 struct ceph_osd_linger_request *lreq = 3383 rb_entry(p, struct ceph_osd_linger_request, node); 3384 3385 dout(" lreq %p linger_id %llu is served by osd%d\n", 3386 lreq, lreq->linger_id, osd->o_osd); 3387 found = true; 3388 3389 mutex_lock(&lreq->lock); 3390 if (lreq->is_watch && lreq->committed && !lreq->last_error) 3391 send_linger_ping(lreq); 3392 mutex_unlock(&lreq->lock); 3393 } 3394 3395 if (found) 3396 list_move_tail(&osd->o_keepalive_item, &slow_osds); 3397 } 3398 3399 if (opts->osd_request_timeout) { 3400 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) { 3401 struct ceph_osd_request *req = 3402 rb_entry(p, struct ceph_osd_request, r_node); 3403 3404 p = rb_next(p); /* abort_request() */ 3405 3406 if (time_before(req->r_start_stamp, expiry_cutoff)) { 3407 pr_err_ratelimited("tid %llu on osd%d timeout\n", 3408 req->r_tid, osdc->homeless_osd.o_osd); 3409 abort_request(req, -ETIMEDOUT); 3410 } 3411 } 3412 } 3413 3414 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) 3415 maybe_request_map(osdc); 3416 3417 while (!list_empty(&slow_osds)) { 3418 struct ceph_osd *osd = list_first_entry(&slow_osds, 3419 struct ceph_osd, 3420 o_keepalive_item); 3421 list_del_init(&osd->o_keepalive_item); 3422 ceph_con_keepalive(&osd->o_con); 3423 } 3424 3425 up_write(&osdc->lock); 3426 schedule_delayed_work(&osdc->timeout_work, 3427 osdc->client->options->osd_keepalive_timeout); 3428 } 3429 3430 static void handle_osds_timeout(struct work_struct *work) 3431 { 3432 struct ceph_osd_client *osdc = 3433 container_of(work, struct ceph_osd_client, 3434 osds_timeout_work.work); 3435 unsigned long delay = osdc->client->options->osd_idle_ttl / 4; 3436 struct ceph_osd *osd, *nosd; 3437 3438 dout("%s osdc %p\n", __func__, osdc); 3439 down_write(&osdc->lock); 3440 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 3441 if (time_before(jiffies, osd->lru_ttl)) 3442 break; 3443 3444 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); 3445 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); 3446 close_osd(osd); 3447 } 3448 3449 up_write(&osdc->lock); 3450 schedule_delayed_work(&osdc->osds_timeout_work, 3451 round_jiffies_relative(delay)); 3452 } 3453 3454 static int ceph_oloc_decode(void **p, void *end, 3455 struct ceph_object_locator *oloc) 3456 { 3457 u8 struct_v, struct_cv; 3458 u32 len; 3459 void *struct_end; 3460 int ret = 0; 3461 3462 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3463 struct_v = ceph_decode_8(p); 3464 struct_cv = ceph_decode_8(p); 3465 if (struct_v < 3) { 3466 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n", 3467 struct_v, struct_cv); 3468 goto e_inval; 3469 } 3470 if (struct_cv > 6) { 3471 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n", 3472 struct_v, struct_cv); 
3473 goto e_inval; 3474 } 3475 len = ceph_decode_32(p); 3476 ceph_decode_need(p, end, len, e_inval); 3477 struct_end = *p + len; 3478 3479 oloc->pool = ceph_decode_64(p); 3480 *p += 4; /* skip preferred */ 3481 3482 len = ceph_decode_32(p); 3483 if (len > 0) { 3484 pr_warn("ceph_object_locator::key is set\n"); 3485 goto e_inval; 3486 } 3487 3488 if (struct_v >= 5) { 3489 bool changed = false; 3490 3491 len = ceph_decode_32(p); 3492 if (len > 0) { 3493 ceph_decode_need(p, end, len, e_inval); 3494 if (!oloc->pool_ns || 3495 ceph_compare_string(oloc->pool_ns, *p, len)) 3496 changed = true; 3497 *p += len; 3498 } else { 3499 if (oloc->pool_ns) 3500 changed = true; 3501 } 3502 if (changed) { 3503 /* redirect changes namespace */ 3504 pr_warn("ceph_object_locator::nspace is changed\n"); 3505 goto e_inval; 3506 } 3507 } 3508 3509 if (struct_v >= 6) { 3510 s64 hash = ceph_decode_64(p); 3511 if (hash != -1) { 3512 pr_warn("ceph_object_locator::hash is set\n"); 3513 goto e_inval; 3514 } 3515 } 3516 3517 /* skip the rest */ 3518 *p = struct_end; 3519 out: 3520 return ret; 3521 3522 e_inval: 3523 ret = -EINVAL; 3524 goto out; 3525 } 3526 3527 static int ceph_redirect_decode(void **p, void *end, 3528 struct ceph_request_redirect *redir) 3529 { 3530 u8 struct_v, struct_cv; 3531 u32 len; 3532 void *struct_end; 3533 int ret; 3534 3535 ceph_decode_need(p, end, 1 + 1 + 4, e_inval); 3536 struct_v = ceph_decode_8(p); 3537 struct_cv = ceph_decode_8(p); 3538 if (struct_cv > 1) { 3539 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n", 3540 struct_v, struct_cv); 3541 goto e_inval; 3542 } 3543 len = ceph_decode_32(p); 3544 ceph_decode_need(p, end, len, e_inval); 3545 struct_end = *p + len; 3546 3547 ret = ceph_oloc_decode(p, end, &redir->oloc); 3548 if (ret) 3549 goto out; 3550 3551 len = ceph_decode_32(p); 3552 if (len > 0) { 3553 pr_warn("ceph_request_redirect::object_name is set\n"); 3554 goto e_inval; 3555 } 3556 3557 /* skip the rest */ 3558 *p = struct_end; 3559 out: 3560 return ret; 3561 3562 e_inval: 3563 ret = -EINVAL; 3564 goto out; 3565 } 3566 3567 struct MOSDOpReply { 3568 struct ceph_pg pgid; 3569 u64 flags; 3570 int result; 3571 u32 epoch; 3572 int num_ops; 3573 u32 outdata_len[CEPH_OSD_MAX_OPS]; 3574 s32 rval[CEPH_OSD_MAX_OPS]; 3575 int retry_attempt; 3576 struct ceph_eversion replay_version; 3577 u64 user_version; 3578 struct ceph_request_redirect redirect; 3579 }; 3580 3581 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m) 3582 { 3583 void *p = msg->front.iov_base; 3584 void *const end = p + msg->front.iov_len; 3585 u16 version = le16_to_cpu(msg->hdr.version); 3586 struct ceph_eversion bad_replay_version; 3587 u8 decode_redir; 3588 u32 len; 3589 int ret; 3590 int i; 3591 3592 ceph_decode_32_safe(&p, end, len, e_inval); 3593 ceph_decode_need(&p, end, len, e_inval); 3594 p += len; /* skip oid */ 3595 3596 ret = ceph_decode_pgid(&p, end, &m->pgid); 3597 if (ret) 3598 return ret; 3599 3600 ceph_decode_64_safe(&p, end, m->flags, e_inval); 3601 ceph_decode_32_safe(&p, end, m->result, e_inval); 3602 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval); 3603 memcpy(&bad_replay_version, p, sizeof(bad_replay_version)); 3604 p += sizeof(bad_replay_version); 3605 ceph_decode_32_safe(&p, end, m->epoch, e_inval); 3606 3607 ceph_decode_32_safe(&p, end, m->num_ops, e_inval); 3608 if (m->num_ops > ARRAY_SIZE(m->outdata_len)) 3609 goto e_inval; 3610 3611 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op), 3612 e_inval); 3613 for (i = 0; i < m->num_ops; 
i++) { 3614 struct ceph_osd_op *op = p; 3615 3616 m->outdata_len[i] = le32_to_cpu(op->payload_len); 3617 p += sizeof(*op); 3618 } 3619 3620 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval); 3621 for (i = 0; i < m->num_ops; i++) 3622 ceph_decode_32_safe(&p, end, m->rval[i], e_inval); 3623 3624 if (version >= 5) { 3625 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval); 3626 memcpy(&m->replay_version, p, sizeof(m->replay_version)); 3627 p += sizeof(m->replay_version); 3628 ceph_decode_64_safe(&p, end, m->user_version, e_inval); 3629 } else { 3630 m->replay_version = bad_replay_version; /* struct */ 3631 m->user_version = le64_to_cpu(m->replay_version.version); 3632 } 3633 3634 if (version >= 6) { 3635 if (version >= 7) 3636 ceph_decode_8_safe(&p, end, decode_redir, e_inval); 3637 else 3638 decode_redir = 1; 3639 } else { 3640 decode_redir = 0; 3641 } 3642 3643 if (decode_redir) { 3644 ret = ceph_redirect_decode(&p, end, &m->redirect); 3645 if (ret) 3646 return ret; 3647 } else { 3648 ceph_oloc_init(&m->redirect.oloc); 3649 } 3650 3651 return 0; 3652 3653 e_inval: 3654 return -EINVAL; 3655 } 3656 3657 /* 3658 * Handle MOSDOpReply. Set ->r_result and call the callback if it is 3659 * specified. 3660 */ 3661 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) 3662 { 3663 struct ceph_osd_client *osdc = osd->o_osdc; 3664 struct ceph_osd_request *req; 3665 struct MOSDOpReply m; 3666 u64 tid = le64_to_cpu(msg->hdr.tid); 3667 u32 data_len = 0; 3668 int ret; 3669 int i; 3670 3671 dout("%s msg %p tid %llu\n", __func__, msg, tid); 3672 3673 down_read(&osdc->lock); 3674 if (!osd_registered(osd)) { 3675 dout("%s osd%d unknown\n", __func__, osd->o_osd); 3676 goto out_unlock_osdc; 3677 } 3678 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); 3679 3680 mutex_lock(&osd->lock); 3681 req = lookup_request(&osd->o_requests, tid); 3682 if (!req) { 3683 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid); 3684 goto out_unlock_session; 3685 } 3686 3687 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns; 3688 ret = decode_MOSDOpReply(msg, &m); 3689 m.redirect.oloc.pool_ns = NULL; 3690 if (ret) { 3691 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n", 3692 req->r_tid, ret); 3693 ceph_msg_dump(msg); 3694 goto fail_request; 3695 } 3696 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n", 3697 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed, 3698 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch), 3699 le64_to_cpu(m.replay_version.version), m.user_version); 3700 3701 if (m.retry_attempt >= 0) { 3702 if (m.retry_attempt != req->r_attempts - 1) { 3703 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n", 3704 req, req->r_tid, m.retry_attempt, 3705 req->r_attempts - 1); 3706 goto out_unlock_session; 3707 } 3708 } else { 3709 WARN_ON(1); /* MOSDOpReply v4 is assumed */ 3710 } 3711 3712 if (!ceph_oloc_empty(&m.redirect.oloc)) { 3713 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid, 3714 m.redirect.oloc.pool); 3715 unlink_request(osd, req); 3716 mutex_unlock(&osd->lock); 3717 3718 /* 3719 * Not ceph_oloc_copy() - changing pool_ns is not 3720 * supported. 
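 *
 * Only the pool is taken from the redirect; the request is then
 * resubmitted with IGNORE_OVERLAY and IGNORE_CACHE set so that it
 * maps straight to the redirect target.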
3721 */ 3722 req->r_t.target_oloc.pool = m.redirect.oloc.pool; 3723 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | 3724 CEPH_OSD_FLAG_IGNORE_OVERLAY | 3725 CEPH_OSD_FLAG_IGNORE_CACHE; 3726 req->r_tid = 0; 3727 __submit_request(req, false); 3728 goto out_unlock_osdc; 3729 } 3730 3731 if (m.result == -EAGAIN) { 3732 dout("req %p tid %llu EAGAIN\n", req, req->r_tid); 3733 unlink_request(osd, req); 3734 mutex_unlock(&osd->lock); 3735 3736 /* 3737 * The object is missing on the replica or not (yet) 3738 * readable. Clear pgid to force a resend to the primary 3739 * via legacy_change. 3740 */ 3741 req->r_t.pgid.pool = 0; 3742 req->r_t.pgid.seed = 0; 3743 WARN_ON(!req->r_t.used_replica); 3744 req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS | 3745 CEPH_OSD_FLAG_LOCALIZE_READS); 3746 req->r_tid = 0; 3747 __submit_request(req, false); 3748 goto out_unlock_osdc; 3749 } 3750 3751 if (m.num_ops != req->r_num_ops) { 3752 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops, 3753 req->r_num_ops, req->r_tid); 3754 goto fail_request; 3755 } 3756 for (i = 0; i < req->r_num_ops; i++) { 3757 dout(" req %p tid %llu op %d rval %d len %u\n", req, 3758 req->r_tid, i, m.rval[i], m.outdata_len[i]); 3759 req->r_ops[i].rval = m.rval[i]; 3760 req->r_ops[i].outdata_len = m.outdata_len[i]; 3761 data_len += m.outdata_len[i]; 3762 } 3763 if (data_len != le32_to_cpu(msg->hdr.data_len)) { 3764 pr_err("sum of lens %u != %u for tid %llu\n", data_len, 3765 le32_to_cpu(msg->hdr.data_len), req->r_tid); 3766 goto fail_request; 3767 } 3768 dout("%s req %p tid %llu result %d data_len %u\n", __func__, 3769 req, req->r_tid, m.result, data_len); 3770 3771 /* 3772 * Since we only ever request ONDISK, we should only ever get 3773 * one (type of) reply back. 3774 */ 3775 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK)); 3776 req->r_result = m.result ?: data_len; 3777 finish_request(req); 3778 mutex_unlock(&osd->lock); 3779 up_read(&osdc->lock); 3780 3781 __complete_request(req); 3782 return; 3783 3784 fail_request: 3785 complete_request(req, -EIO); 3786 out_unlock_session: 3787 mutex_unlock(&osd->lock); 3788 out_unlock_osdc: 3789 up_read(&osdc->lock); 3790 } 3791 3792 static void set_pool_was_full(struct ceph_osd_client *osdc) 3793 { 3794 struct rb_node *n; 3795 3796 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { 3797 struct ceph_pg_pool_info *pi = 3798 rb_entry(n, struct ceph_pg_pool_info, node); 3799 3800 pi->was_full = __pool_full(pi); 3801 } 3802 } 3803 3804 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id) 3805 { 3806 struct ceph_pg_pool_info *pi; 3807 3808 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id); 3809 if (!pi) 3810 return false; 3811 3812 return pi->was_full && !__pool_full(pi); 3813 } 3814 3815 static enum calc_target_result 3816 recalc_linger_target(struct ceph_osd_linger_request *lreq) 3817 { 3818 struct ceph_osd_client *osdc = lreq->osdc; 3819 enum calc_target_result ct_res; 3820 3821 ct_res = calc_target(osdc, &lreq->t, true); 3822 if (ct_res == CALC_TARGET_NEED_RESEND) { 3823 struct ceph_osd *osd; 3824 3825 osd = lookup_create_osd(osdc, lreq->t.osd, true); 3826 if (osd != lreq->osd) { 3827 unlink_linger(lreq->osd, lreq); 3828 link_linger(osd, lreq); 3829 } 3830 } 3831 3832 return ct_res; 3833 } 3834 3835 /* 3836 * Requeue requests whose mapping to an OSD has changed. 
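 *
 * Three calc_target() results are handled below: CALC_TARGET_NO_ACTION
 * (resend anyway if @force_resend or a relevant full flag cleared),
 * CALC_TARGET_NEED_RESEND (unlink and collect on @need_resend or
 * @need_resend_linger) and CALC_TARGET_POOL_DNE (kick off or finish a
 * map check).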
/*
 * Requeue requests whose mapping to an OSD has changed.
 */
static void scan_requests(struct ceph_osd *osd,
			  bool force_resend,
			  bool cleared_full,
			  bool check_pool_cleared_full,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;
	bool force_resend_writes;

	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* recalc_linger_target() */

		dout("%s lreq %p linger_id %llu\n", __func__, lreq,
		     lreq->linger_id);
		ct_res = recalc_linger_target(lreq);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, lreq->t.base_oloc.pool));
			if (!force_resend && !force_resend_writes)
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_linger_map_check(lreq);
			/*
			 * scan_requests() for the previous epoch(s)
			 * may have already added it to the list, since
			 * it's not unlinked here.
			 */
			if (list_empty(&lreq->scan_item))
				list_add_tail(&lreq->scan_item, need_resend_linger);
			break;
		case CALC_TARGET_POOL_DNE:
			list_del_init(&lreq->scan_item);
			check_linger_pool_dne(lreq);
			break;
		}
	}

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		enum calc_target_result ct_res;

		n = rb_next(n); /* unlink_request(), check_pool_dne() */

		dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
		ct_res = calc_target(osdc, &req->r_t, false);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
			force_resend_writes = cleared_full ||
			    (check_pool_cleared_full &&
			     pool_cleared_full(osdc, req->r_t.base_oloc.pool));
			if (!force_resend &&
			    (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
			     !force_resend_writes))
				break;

			/* fall through */
		case CALC_TARGET_NEED_RESEND:
			cancel_map_check(req);
			unlink_request(osd, req);
			insert_request(need_resend, req);
			break;
		case CALC_TARGET_POOL_DNE:
			check_pool_dne(req);
			break;
		}
	}
}

static int handle_one_map(struct ceph_osd_client *osdc,
			  void *p, void *end, bool incremental,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osdmap *newmap;
	struct rb_node *n;
	bool skipped_map = false;
	bool was_full;

	was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	set_pool_was_full(osdc);

	if (incremental)
		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);

	if (newmap != osdc->osdmap) {
		/*
		 * Preserve ->was_full before destroying the old map.
		 * For pools that weren't in the old map, ->was_full
		 * should be false.
		 */
		for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
			struct ceph_pg_pool_info *pi =
			    rb_entry(n, struct ceph_pg_pool_info, node);
			struct ceph_pg_pool_info *old_pi;

			old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}

		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}

	was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
	scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
		      need_resend, need_resend_linger);

	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n); /* close_osd() */

		scan_requests(osd, skipped_map, was_full, true, need_resend,
			      need_resend_linger);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}

static void kick_requests(struct ceph_osd_client *osdc,
			  struct rb_root *need_resend,
			  struct list_head *need_resend_linger)
{
	struct ceph_osd_linger_request *lreq, *nlreq;
	enum calc_target_result ct_res;
	struct rb_node *n;

	/* make sure need_resend targets reflect latest map */
	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n);

		if (req->r_t.epoch < osdc->osdmap->epoch) {
			ct_res = calc_target(osdc, &req->r_t, false);
			if (ct_res == CALC_TARGET_POOL_DNE) {
				erase_request(need_resend, req);
				check_pool_dne(req);
			}
		}
	}

	for (n = rb_first(need_resend); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);
		struct ceph_osd *osd;

		n = rb_next(n);
		erase_request(need_resend, req); /* before link_request() */

		osd = lookup_create_osd(osdc, req->r_t.osd, true);
		link_request(osd, req);
		if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}

	list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}

/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_fsid fsid;
	struct rb_root need_resend = RB_ROOT;
	LIST_HEAD(need_resend_linger);
	bool handled_incremental = false;
	bool was_pauserd, was_pausewr;
	bool pauserd, pausewr;
	int err;

	dout("%s have %u\n", __func__, osdc->osdmap->epoch);
	down_write(&osdc->lock);

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;

	was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		      have_pool_full(osdc);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (osdc->osdmap->epoch &&
		    osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		  ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		  have_pool_full(osdc);
	if (was_pauserd || was_pausewr || pauserd || pausewr ||
	    osdc->osdmap->epoch < osdc->epoch_barrier)
		maybe_request_map(osdc);

	kick_requests(osdc, &need_resend, &need_resend_linger);

	ceph_osdc_abort_on_full(osdc);
	ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			  osdc->osdmap->epoch);
	up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}

/*
 * Resubmit requests pending on the given osd.
 */
static void kick_osd_requests(struct ceph_osd *osd)
{
	struct rb_node *n;

	clear_backoffs(osd);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* cancel_linger_request() */

		if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_fault(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	down_write(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}

	if (!reopen_osd(osd))
		kick_osd_requests(osd);
	maybe_request_map(osdc);

out_unlock:
	up_write(&osdc->lock);
}

struct MOSDBackoff {
	struct ceph_spg spgid;
	u32 map_epoch;
	u8 op;
	u64 id;
	struct ceph_hobject_id *begin;
	struct ceph_hobject_id *end;
};

static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
	if (ret)
		return ret;

	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
	if (ret)
		return ret;

	ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
	ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
	ceph_decode_8_safe(&p, end, m->op, e_inval);
	ceph_decode_64_safe(&p, end, m->id, e_inval);

	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
	if (!m->begin)
		return -ENOMEM;

	ret = decode_hoid(&p, end, m->begin);
	if (ret) {
		free_hoid(m->begin);
		return ret;
	}

	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
	if (!m->end) {
		free_hoid(m->begin);
		return -ENOMEM;
	}

	ret = decode_hoid(&p, end, m->end);
	if (ret) {
		free_hoid(m->begin);
		free_hoid(m->end);
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

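/*
 * Wire format handled by decode_MOSDBackoff() above and
 * create_backoff_message() below:
 *
 *   spgid      - encoding block: pgid + shard
 *   map_epoch  - u32
 *   op         - u8, CEPH_OSD_BACKOFF_OP_*
 *   id         - u64
 *   begin, end - hobject_t encoding blocks bounding the backed-off
 *                object range
 */
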
static struct ceph_msg *create_backoff_message(
				const struct ceph_osd_backoff *backoff,
				u32 map_epoch)
{
	struct ceph_msg *msg;
	void *p, *end;
	int msg_size;

	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 1 + 8; /* map_epoch, op, id */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->begin);
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			hoid_encoding_size(backoff->end);

	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
	if (!msg)
		return NULL;

	p = msg->front.iov_base;
	end = p + msg->front_alloc_len;

	encode_spgid(&p, &backoff->spgid);
	ceph_encode_32(&p, map_epoch);
	ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
	ceph_encode_64(&p, backoff->id);
	encode_hoid(&p, end, backoff->begin);
	encode_hoid(&p, end, backoff->end);
	BUG_ON(p != end);

	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}

static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_msg *msg;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
	if (!spg) {
		spg = alloc_spg_mapping();
		if (!spg) {
			pr_err("%s failed to allocate spg\n", __func__);
			return;
		}
		spg->spgid = m->spgid; /* struct */
		insert_spg_mapping(&osd->o_backoff_mappings, spg);
	}

	backoff = alloc_backoff();
	if (!backoff) {
		pr_err("%s failed to allocate backoff\n", __func__);
		return;
	}
	backoff->spgid = m->spgid; /* struct */
	backoff->id = m->id;
	backoff->begin = m->begin;
	m->begin = NULL; /* backoff now owns this */
	backoff->end = m->end;
	m->end = NULL;   /* ditto */

	insert_backoff(&spg->backoffs, backoff);
	insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);

	/*
	 * Ack with original backoff's epoch so that the OSD can
	 * discard this if there was a PG split.
	 */
	msg = create_backoff_message(backoff, m->map_epoch);
	if (!msg) {
		pr_err("%s failed to allocate msg\n", __func__);
		return;
	}
	ceph_con_send(&osd->o_con, msg);
}

static bool target_contained_by(const struct ceph_osd_request_target *t,
				const struct ceph_hobject_id *begin,
				const struct ceph_hobject_id *end)
{
	struct ceph_hobject_id hoid;
	int cmp;

	hoid_fill_from_target(&hoid, t);
	cmp = hoid_compare(&hoid, begin);
	return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
}

static void handle_backoff_unblock(struct ceph_osd *osd,
				   const struct MOSDBackoff *m)
{
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct rb_node *n;

	dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
	     m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);

	backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
	if (!backoff) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		return;
	}

	if (hoid_compare(backoff->begin, m->begin) &&
	    hoid_compare(backoff->end, m->end)) {
		pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
		       __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		/* unblock it anyway... */
	}

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
	BUG_ON(!spg);

	erase_backoff(&spg->backoffs, backoff);
	erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
	free_backoff(backoff);

	if (RB_EMPTY_ROOT(&spg->backoffs)) {
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}

	for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
			/*
			 * Match against @m, not @backoff -- the PG may
			 * have split on the OSD.
			 */
			if (target_contained_by(&req->r_t, m->begin, m->end)) {
				/*
				 * If no other installed backoff applies,
				 * resend.
				 */
				send_request(req);
			}
		}
	}
}

static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct MOSDBackoff m;
	int ret;

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		up_read(&osdc->lock);
		return;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));

	mutex_lock(&osd->lock);
	ret = decode_MOSDBackoff(msg, &m);
	if (ret) {
		pr_err("failed to decode MOSDBackoff: %d\n", ret);
		ceph_msg_dump(msg);
		goto out_unlock;
	}

	switch (m.op) {
	case CEPH_OSD_BACKOFF_OP_BLOCK:
		handle_backoff_block(osd, &m);
		break;
	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
		handle_backoff_unblock(osd, &m);
		break;
	default:
		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
	}

	free_hoid(m.begin);
	free_hoid(m.end);

out_unlock:
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);
}

/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8; /* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    msg->num_data_items ? &msg->data[0] : NULL;

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
					data->own_pages = false;
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * Unregister a registered request.  The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);

/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
						ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);

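/*
 * A minimal sketch (not compiled) of the request lifecycle exercised by
 * the helpers above: allocate, target, submit, wait, put.  The STAT op
 * and GFP_NOIO are illustrative choices; @oid and @oloc are assumed to
 * be set up by the caller.
 */
#if 0
static int example_stat(struct ceph_osd_client *osdc,
			struct ceph_object_id *oid,
			struct ceph_object_locator *oloc)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req); /* cancels on timeout/signal */

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
#endif
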
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);

static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	return req;
}

static struct ceph_osd_request *
alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
{
	struct ceph_osd_request *req;

	req = alloc_linger_request(lreq);
	if (!req)
		return NULL;

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	osd_req_op_watch_init(req, 0, 0, watch_opcode);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}

/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
	struct ceph_osd_linger_request *lreq;
	int ret;

	lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);

	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&lreq->mtime);

	lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);

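/*
 * A minimal sketch (not compiled) of establishing and tearing down a
 * watch with the API above.  example_wcb()/example_errcb() are
 * hypothetical callbacks whose signatures follow rados_watchcb2_t and
 * rados_watcherrcb_t.
 */
#if 0
static void example_wcb(void *arg, u64 notify_id, u64 cookie,
			u64 notifier_id, void *payload, size_t payload_len)
{
	/* runs from the notify workqueue; ack via ceph_osdc_notify_ack() */
}

static void example_errcb(void *arg, u64 cookie, int err)
{
	pr_err("watch cookie %llu lost: %d\n", cookie, err);
}

static int example_watch(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc)
{
	struct ceph_osd_linger_request *handle;

	handle = ceph_osdc_watch(osdc, oid, oloc, example_wcb, example_errcb,
				 NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... watch is registered and kept alive by ping requests ... */

	return ceph_osdc_unwatch(osdc, handle); /* releases the ref */
}
#endif
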
/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);

static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 u32 payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);

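/*
 * A minimal sketch (not compiled) of acking a notify from a watch
 * callback so the notifier does not have to wait out the notify
 * timeout.  struct example_ctx is hypothetical glue carrying the
 * target of the watch.
 */
#if 0
struct example_ctx {
	struct ceph_osd_client *osdc;
	struct ceph_object_id oid;
	struct ceph_object_locator oloc;
};

static void example_ack_wcb(void *arg, u64 notify_id, u64 cookie,
			    u64 notifier_id, void *payload, size_t payload_len)
{
	struct example_ctx *ctx = arg;

	ceph_osdc_notify_ack(ctx->osdc, &ctx->oid, &ctx->oloc,
			     notify_id, cookie, NULL, 0);
}
#endif
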
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     u32 payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
				     payload, payload_len);
	if (ret)
		goto out_put_lreq;

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
	if (ret)
		goto out_put_lreq;

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);

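/*
 * A minimal sketch (not compiled) of sending a notify and honouring
 * the reply-page contract documented above.  The payload contents and
 * the 10 second timeout are illustrative.
 */
#if 0
static int example_notify(struct ceph_osd_client *osdc,
			  struct ceph_object_id *oid,
			  struct ceph_object_locator *oloc)
{
	struct page **reply_pages = NULL;
	size_t reply_len = 0;
	char payload[] = "ping";
	int ret;

	ret = ceph_osdc_notify(osdc, oid, oloc, payload, sizeof(payload),
			       10, &reply_pages, &reply_len);

	/* initialized on success and error alike, so always release */
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
	return ret;
}
#endif
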
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}

static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		goto bad;

	ret = -EINVAL;
	ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
	ceph_decode_64_safe(p, end, item->cookie, bad);
	ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */

	if (struct_v >= 2) {
		ret = ceph_decode_entity_addr(p, end, &item->addr);
		if (ret)
			goto bad;
	} else {
		ret = 0;
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr));
bad:
	return ret;
}

static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}

/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);

/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);

/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page **resp_pages, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE)
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = osd_req_op_cls_init(req, 0, class, method);
	if (ret)
		goto out_put_req;

	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_pages)
		osd_req_op_cls_response_data_pages(req, 0, resp_pages,
						   *resp_len, 0, false, false);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_pages)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);

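/*
 * A minimal sketch (not compiled) of invoking a cls method through
 * ceph_osdc_call().  The "myclass"/"mymethod" names are hypothetical;
 * a real caller would use whatever its cls plugin exposes (rbd invokes
 * "rbd" class methods this way).
 */
#if 0
static int example_call(struct ceph_osd_client *osdc,
			struct ceph_object_id *oid,
			struct ceph_object_locator *oloc)
{
	struct page *reply_page;
	size_t reply_len = PAGE_SIZE; /* in: buffer size, out: reply length */
	int ret;

	reply_page = alloc_page(GFP_NOIO);
	if (!reply_page)
		return -ENOMEM;

	ret = ceph_osdc_call(osdc, oid, oloc, "myclass", "mymethod",
			     CEPH_OSD_FLAG_READ, NULL, 0,
			     &reply_page, &reply_len);
	if (ret >= 0) {
		/* ret is the op rval; reply_page holds reply_len bytes */
	}

	__free_page(reply_page);
	return ret;
}
#endif
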
/*
 * reset all osd connections
 */
void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	down_write(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n);
		if (!reopen_osd(osd))
			kick_osd_requests(osd);
	}
	up_write(&osdc->lock);
}

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
	if (!osdc->completion_wq)
		goto out_notify_wq;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_notify_wq:
	destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	destroy_workqueue(osdc->completion_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
				     u64 src_snapid, u64 src_version,
				     struct ceph_object_id *src_oid,
				     struct ceph_object_locator *src_oloc,
				     u32 src_fadvise_flags,
				     u32 dst_fadvise_flags,
				     u32 truncate_seq, u64 truncate_size,
				     u8 copy_from_flags)
{
	struct ceph_osd_req_op *op;
	struct page **pages;
	void *p, *end;

	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
			      dst_fadvise_flags);
	op->copy_from.snapid = src_snapid;
	op->copy_from.src_version = src_version;
	op->copy_from.flags = copy_from_flags;
	op->copy_from.src_fadvise_flags = src_fadvise_flags;

	p = page_address(pages[0]);
	end = p + PAGE_SIZE;
	ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
	encode_oloc(&p, end, src_oloc);
	ceph_encode_32(&p, truncate_seq);
	ceph_encode_64(&p, truncate_size);
	op->indata_len = PAGE_SIZE - (end - p);

	ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
				 op->indata_len, 0, false, true);
	return 0;
}

int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
			u64 src_snapid, u64 src_version,
			struct ceph_object_id *src_oid,
			struct ceph_object_locator *src_oloc,
			u32 src_fadvise_flags,
			struct ceph_object_id *dst_oid,
			struct ceph_object_locator *dst_oloc,
			u32 dst_fadvise_flags,
			u32 truncate_seq, u64 truncate_size,
			u8 copy_from_flags)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->r_flags = CEPH_OSD_FLAG_WRITE;

	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
	ceph_oid_copy(&req->r_t.base_oid, dst_oid);

	ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
					src_oloc, src_fadvise_flags,
					dst_fadvise_flags, truncate_seq,
					truncate_size, copy_from_flags);
	if (ret)
		goto out;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_copy_from);

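/*
 * A minimal sketch (not compiled) of a plain object-to-object copy
 * using the API above: head object as source (CEPH_NOSNAP), no source
 * version pin, no fadvise hints, no truncation, no copy-from flags.
 */
#if 0
static int example_copy(struct ceph_osd_client *osdc,
			struct ceph_object_id *src_oid,
			struct ceph_object_locator *src_oloc,
			struct ceph_object_id *dst_oid,
			struct ceph_object_locator *dst_oloc)
{
	return ceph_osdc_copy_from(osdc, CEPH_NOSNAP, 0, src_oid, src_oloc, 0,
				   dst_oid, dst_oloc, 0, 0, 0, 0);
}
#endif
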
int __init ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}

/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}

static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_msg_data_add_pages(m, pages, data_len, 0, true);
	}

	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	put_osd(osd);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
					    challenge_buf, challenge_buf_len);
}

static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.add_authorizer_challenge = add_authorizer_challenge,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
	.fault = osd_fault,
};