#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;

static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}
EXPORT_SYMBOL(ceph_osd_data_pages_init);

void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}
EXPORT_SYMBOL(ceph_osd_data_pagelist_init);

#ifdef CONFIG_BLOCK
void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
EXPORT_SYMBOL(ceph_osd_data_bio_init);
#endif /* CONFIG_BLOCK */

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type != CEPH_OSD_DATA_TYPE_PAGES)
		return;

	if (osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
}

/*
 * requests
 */
void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req;

	req = container_of(kref, struct ceph_osd_request, r_kref);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply) {
		ceph_msg_revoke_incoming(req->r_reply);
		ceph_msg_put(req->r_reply);
	}

	ceph_osd_data_release(&req->r_data_in);
	ceph_osd_data_release(&req->r_data_out);

	ceph_put_snap_context(req->r_snapc);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	size_t msg_size;

	BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX);
	BUG_ON(num_ops > CEPH_OSD_MAX_OP);

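	/*
	 * Rough sizing of the request front; the actual encoding of
	 * these fields is done in ceph_osdc_build_request().
	 */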
	msg_size = 4 + 4 + 8 + 8 + 4 + 8;
	msg_size += 2 + 4 + 8 + 4 + 4;	/* oloc */
	msg_size += 1 + 8 + 4 + 4;	/* pg_t */
	msg_size += 4 + MAX_OBJ_NAME_SIZE;
	msg_size += 2 + num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8;	/* snapid */
	msg_size += 8;	/* snap_seq */
	msg_size += 8 * (snapc ? snapc->num_snaps : 0);	/* snaps */
	msg_size += 4;

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), gfp_flags);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	INIT_LIST_HEAD(&req->r_linger_item);
	INIT_LIST_HEAD(&req->r_linger_osd);
	INIT_LIST_HEAD(&req->r_req_lru_item);
	INIT_LIST_HEAD(&req->r_osd_item);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	ceph_osd_data_init(&req->r_data_in);
	ceph_osd_data_init(&req->r_data_out);

	/* create request message; allow space for oid */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	memset(msg->front.iov_base, 0, msg->front.iov_len);

	req->r_request = msg;

	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_STAT:
	case CEPH_OSD_OP_MAPEXT:
	case CEPH_OSD_OP_MASKTRUNC:
	case CEPH_OSD_OP_SPARSE_READ:
	case CEPH_OSD_OP_NOTIFY:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_ASSERT_VER:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_APPEND:
	case CEPH_OSD_OP_STARTSYNC:
	case CEPH_OSD_OP_SETTRUNC:
	case CEPH_OSD_OP_TRIMTRUNC:
	case CEPH_OSD_OP_TMAPUP:
	case CEPH_OSD_OP_TMAPPUT:
	case CEPH_OSD_OP_TMAPGET:
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_ROLLBACK:
	case CEPH_OSD_OP_WATCH:
	case CEPH_OSD_OP_OMAPGETKEYS:
	case CEPH_OSD_OP_OMAPGETVALS:
	case CEPH_OSD_OP_OMAPGETHEADER:
	case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
	case CEPH_OSD_OP_OMAPSETVALS:
	case CEPH_OSD_OP_OMAPSETHEADER:
	case CEPH_OSD_OP_OMAPCLEAR:
	case CEPH_OSD_OP_OMAPRMKEYS:
	case CEPH_OSD_OP_OMAP_CMP:
	case CEPH_OSD_OP_CLONERANGE:
	case CEPH_OSD_OP_ASSERT_SRC_VERSION:
	case CEPH_OSD_OP_SRC_CMPXATTR:
	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_GETXATTRS:
	case CEPH_OSD_OP_CMPXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_SETXATTRS:
	case CEPH_OSD_OP_RESETXATTRS:
	case CEPH_OSD_OP_RMXATTR:
	case CEPH_OSD_OP_PULL:
	case CEPH_OSD_OP_PUSH:
	case CEPH_OSD_OP_BALANCEREADS:
	case CEPH_OSD_OP_UNBALANCEREADS:
	case CEPH_OSD_OP_SCRUB:
	case CEPH_OSD_OP_SCRUB_RESERVE:
	case CEPH_OSD_OP_SCRUB_UNRESERVE:
	case CEPH_OSD_OP_SCRUB_STOP:
	case CEPH_OSD_OP_SCRUB_MAP:
	case CEPH_OSD_OP_WRLOCK:
	case CEPH_OSD_OP_WRUNLOCK:
	case CEPH_OSD_OP_RDLOCK:
	case CEPH_OSD_OP_RDUNLOCK:
	case CEPH_OSD_OP_UPLOCK:
	case CEPH_OSD_OP_DNLOCK:
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_PGLS:
	case CEPH_OSD_OP_PGLS_FILTER:
		return true;
	default:
		return false;
	}
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		u16 opcode)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;

	return op;
}

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE)
		payload_len += length;

	op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	op->payload_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
				unsigned int which,
				struct ceph_osd_data *osd_data)
{
	BUG_ON(which >= osd_req->r_num_ops);
	osd_req->r_ops[which].extent.osd_data = osd_data;
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

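/*
 * The class and method names and the request data are appended to a
 * pagelist that becomes the request's outgoing data
 * (op->cls.request_info points at r_data_out).
 */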
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			u16 opcode, const char *class, const char *method,
			const void *request_data, size_t request_data_size)
{
	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	op->cls.request_data = request_data;
	BUG_ON(request_data_size > (size_t) U32_MAX);
	op->cls.request_data_len = (u32) request_data_size;
	ceph_pagelist_append(pagelist, request_data, request_data_size);
	payload_len += request_data_size;

	op->cls.request_info = &osd_req->r_data_out;
	ceph_osd_data_pagelist_init(op->cls.request_info, pagelist);

	op->cls.argc = 0;	/* currently unused */

	op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

void osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
				unsigned int which,
				struct ceph_osd_data *response_data)
{
	BUG_ON(which >= osd_req->r_num_ops);
	osd_req->r_ops[which].cls.response_data = response_data;
}
EXPORT_SYMBOL(osd_req_op_cls_response_data);

void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
			   unsigned int which, u16 opcode,
			   u64 cookie, u64 version, int flag)
{
	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode);

	BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);

	op->watch.cookie = cookie;
	/* op->watch.ver = version; */	/* XXX 3847 */
	op->watch.ver = cpu_to_le64(version);
	if (opcode == CEPH_OSD_OP_WATCH && flag)
		op->watch.flag = (u8)1;
}
EXPORT_SYMBOL(osd_req_op_watch_init);

static u64 osd_req_encode_op(struct ceph_osd_request *req,
			     struct ceph_osd_op *dst, unsigned int which)
{
	struct ceph_osd_req_op *src;
	u64 request_data_len = 0;

	BUG_ON(which >= req->r_num_ops);
	src = &req->r_ops[which];
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		if (src->op == CEPH_OSD_OP_WRITE)
			request_data_len = src->extent.length;
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		if (src->op == CEPH_OSD_OP_WRITE)
			WARN_ON(src->extent.osd_data != &req->r_data_out);
		else
			WARN_ON(src->extent.osd_data != &req->r_data_in);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.request_data_len);
		WARN_ON(src->cls.response_data != &req->r_data_in);
		WARN_ON(src->cls.request_info != &req->r_data_out);
		BUG_ON(src->cls.request_info->type !=
					CEPH_OSD_DATA_TYPE_PAGELIST);
		request_data_len = src->cls.request_info->pagelist->length;
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(src->watch.ver);
		dst->watch.flag = src->watch.flag;
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}
	dst->op = cpu_to_le16(src->op);
	dst->payload_len = cpu_to_le32(src->payload_len);

	return request_data_len;
}

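/*
 * ceph_osdc_build_request() encodes the static parts of the request
 * front and records pointers (r_request_osdmap_epoch, r_request_flags,
 * r_request_pool, r_request_pgid, r_request_attempts,
 * r_request_reassert_version) into it; __send_request() rewrites those
 * fields each time the request is (re)sent.
 */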
/*
 * build new request AND message
 *
 */
void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
				struct ceph_snap_context *snapc, u64 snap_id,
				struct timespec *mtime)
{
	struct ceph_msg *msg = req->r_request;
	void *p;
	size_t msg_size;
	int flags = req->r_flags;
	u64 data_len;
	unsigned int i;

	req->r_snapid = snap_id;
	req->r_snapc = ceph_get_snap_context(snapc);

	/* encode request */
	msg->hdr.version = cpu_to_le16(4);

	p = msg->front.iov_base;
	ceph_encode_32(&p, 1);	/* client_inc is always 1 */
	req->r_request_osdmap_epoch = p;
	p += 4;
	req->r_request_flags = p;
	p += 4;
	if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(p, mtime);
	p += sizeof(struct ceph_timespec);
	req->r_request_reassert_version = p;
	p += sizeof(struct ceph_eversion);	/* will get filled in */

	/* oloc */
	ceph_encode_8(&p, 4);
	ceph_encode_8(&p, 4);
	ceph_encode_32(&p, 8 + 4 + 4);
	req->r_request_pool = p;
	p += 8;
	ceph_encode_32(&p, -1);	/* preferred */
	ceph_encode_32(&p, 0);	/* key len */

	ceph_encode_8(&p, 1);
	req->r_request_pgid = p;
	p += 8 + 4;
	ceph_encode_32(&p, -1);	/* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	/* ops--can imply data */
	ceph_encode_16(&p, (u16)req->r_num_ops);
	data_len = 0;
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(req, p, i);
		p += sizeof(struct ceph_osd_op);
	}

	/* snaps */
	ceph_encode_64(&p, req->r_snapid);
	ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
	ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
	if (req->r_snapc) {
		for (i = 0; i < snapc->num_snaps; i++) {
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
		}
	}

	req->r_request_attempts = p;
	p += 4;

	/* data */
	if (flags & CEPH_OSD_FLAG_WRITE) {
		u16 data_off;

		/*
		 * The header "data_off" is a hint to the receiver
		 * allowing it to align received data into its
		 * buffers such that there's no need to re-copy
		 * it before writing it to disk (direct I/O).
		 */
		data_off = (u16) (off & 0xffff);
		req->r_request->hdr.data_off = cpu_to_le16(data_off);
	}
	req->r_request->hdr.data_len = cpu_to_le32(data_len);

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);

	dout("build_request msg_size was %d\n", (int)msg_size);
}
EXPORT_SYMBOL(ceph_osdc_build_request);

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	struct ceph_osd_data *osd_data;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	u32 object_size;
	u64 object_base;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);
	osd_data = opcode == CEPH_OSD_OP_WRITE ? &req->r_data_out
					       : &req->r_data_in;

	req->r_flags = flags;

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r < 0) {
		ceph_osdc_put_request(req);
		return ERR_PTR(r);
	}

	object_size = le32_to_cpu(layout->fl_object_size);
	object_base = off - objoff;
	if (truncate_size <= object_base) {
		truncate_size = 0;
	} else {
		truncate_size -= object_base;
		if (truncate_size > object_size)
			truncate_size = object_size;
	}

	osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
				truncate_size, truncate_seq);
	osd_req_op_extent_osd_data(req, 0, osd_data);

	/*
	 * A second op in the ops array means the caller wants to
	 * also include a 'startsync' command so that the
	 * osd will flush data quickly.
	 */
	if (num_ops > 1)
		osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);

	req->r_file_layout = *layout;	/* keep a copy */

	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx",
		vino.ino, objnum);
	req->r_oid_len = strlen(req->r_oid);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);

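/*
 * Typical read usage (a sketch only; error handling omitted and the
 * page vector is assumed to have been allocated by the caller):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, truncate_seq, truncate_size, false);
 *	ceph_osd_data_pages_init(&req->r_data_in, pages, len, 0, false, false);
 *	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
 *	ceph_osdc_start_request(osdc, req, false);
 *	ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */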
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}

/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	LIST_HEAD(resend);
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	err = __reset_osd(osdc, osd);
	if (err)
		return;
	/*
	 * Build up a list of requests to resend by traversing the
	 * osd's list of requests.  Requests for a given object are
	 * sent in tid order, and that is also the order they're
	 * kept on this list.  Therefore all requests that are in
	 * flight will be found first, followed by all requests that
	 * have not yet been sent.  And to resend requests while
	 * preserving this order we will want to put any sent
	 * requests back on the front of the osd client's unsent
	 * list.
	 *
	 * So we build a separate ordered list of already-sent
	 * requests for the affected osd and splice it onto the
	 * front of the osd client's unsent list.  Once we've seen a
	 * request that has not yet been sent we're done.  Those
	 * requests are already sitting right where they belong.
	 */
	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		if (!req->r_sent)
			break;
		list_move_tail(&req->r_req_lru_item, &resend);
		dout("requeueing %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}
	list_splice(&resend, &osdc->req_unsent);

	/*
	 * Linger requests are re-registered before sending, which
	 * sets up a new tid for each.  We add them to the unsent
	 * list at the end to keep things in tid order.
	 */
	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add_tail(&req->r_req_lru_item, &osdc->req_unsent);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	osd->o_osd = onum;
	RB_CLEAR_NODE(&osd->o_node);
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_linger_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

		ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
		kfree(osd);
	}
}

/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
	dout("%s %p\n", __func__, osdc);
	mutex_lock(&osdc->request_mutex);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);

		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
			!ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;
	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}

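/*
 * __unregister_request() drops the tree reference taken by
 * __register_request() and revokes the request message if it is still
 * queued on the osd's connection.
 */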
/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		dout("__unregister_request %p tid %lld not registered\n",
			req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_msg_revoke(req->r_request);
		req->r_sent = 0;
	}
}

static void __register_linger_request(struct ceph_osd_client *osdc,
				    struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	if (req->r_osd)
		list_add_tail(&req->r_linger_osd,
			      &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}

void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave it on the in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
				ceph_file_layout_pg_pool(req->r_file_layout));
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	req->r_pgid = pgid;

	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
	     req->r_tid, pgid.pool, pgid.seed, o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;   /* osd or pg changed */

out:
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	void *p;

	dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
		req, req->r_tid, req->r_osd->o_osd, req->r_flags,
		(unsigned long long)req->r_pgid.pool, req->r_pgid.seed);

	/* fill in message content that changes each time we send it */
	put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
	put_unaligned_le32(req->r_flags, req->r_request_flags);
	put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
	p = req->r_request_pgid;
	ceph_encode_64(&p, req->r_pgid.pool);
	ceph_encode_32(&p, req->r_pgid.seed);
	put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
	memcpy(req->r_request_reassert_version, &req->r_reassert_version,
	       sizeof(req->r_reassert_version));

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
}

/*
 * Send any requests in the queue (req_unsent).
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;
	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
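/*
 * The reply front is decoded in the order it was encoded by the osd:
 * object name, pgid, flags/result/reassert epoch+version/osdmap epoch,
 * the per-op vector (from which the payload lengths are taken), the
 * retry attempt count, and finally the per-op result codes.
 */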
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	void *p, *end;
	struct ceph_osd_request *req;
	u64 tid;
	int object_len;
	unsigned int numops;
	int payload_len, flags;
	s32 result;
	s32 retry_attempt;
	struct ceph_pg pg;
	int err;
	u32 reassert_epoch;
	u64 reassert_version;
	u32 osdmap_epoch;
	int already_completed;
	u32 bytes;
	unsigned int i;

	tid = le64_to_cpu(msg->hdr.tid);
	dout("handle_reply %p tid %llu\n", msg, tid);

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 4, bad);
	object_len = ceph_decode_32(&p);
	ceph_decode_need(&p, end, object_len, bad);
	p += object_len;

	err = ceph_decode_pgid(&p, end, &pg);
	if (err)
		goto bad;

	ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
	flags = ceph_decode_64(&p);
	result = ceph_decode_32(&p);
	reassert_epoch = ceph_decode_32(&p);
	reassert_version = ceph_decode_64(&p);
	osdmap_epoch = ceph_decode_32(&p);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		goto bad_mutex;
	}
	ceph_osdc_get_request(req);

	dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
	     req, result);

	ceph_decode_need(&p, end, 4, bad);
	numops = ceph_decode_32(&p);
	if (numops > CEPH_OSD_MAX_OP)
		goto bad_put;
	if (numops != req->r_num_ops)
		goto bad_put;
	payload_len = 0;
	ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
	for (i = 0; i < numops; i++) {
		struct ceph_osd_op *op = p;
		int len;

		len = le32_to_cpu(op->payload_len);
		req->r_reply_op_len[i] = len;
		dout(" op %d has %d bytes\n", i, len);
		payload_len += len;
		p += sizeof(*op);
	}
	bytes = le32_to_cpu(msg->hdr.data_len);
	if (payload_len != bytes) {
		pr_warning("sum of op payload lens %d != data_len %d",
			   payload_len, bytes);
		goto bad_put;
	}

	ceph_decode_need(&p, end, 4 + numops * 4, bad);
	retry_attempt = ceph_decode_32(&p);
	for (i = 0; i < numops; i++)
		req->r_reply_op_result[i] = ceph_decode_32(&p);

	if (!req->r_got_reply) {

		req->r_result = result;
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
		req->r_reassert_version.version = cpu_to_le64(reassert_version);

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	already_completed = req->r_completed;
	req->r_completed = 1;
	mutex_unlock(&osdc->request_mutex);
	if (already_completed)
		goto done;

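	/*
	 * Wake the caller: either invoke its callback or complete
	 * r_completion.  An ONDISK reply additionally completes
	 * r_safe_completion via complete_request().
	 */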
	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad_put:
	ceph_osdc_put_request(req);
bad_mutex:
	mutex_unlock(&osdc->request_mutex);
bad:
	pr_err("corrupt osd_op_reply got %d %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
	ceph_msg_dump(msg);
}

static void reset_changed_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *p, *n;

	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap,
					 osd->o_osd),
			   sizeof(struct ceph_entity_addr)) != 0)
			__reset_osd(osdc, osd);
	}
}

/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; ) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		p = rb_next(p);

		/*
		 * For linger requests that have not yet been
		 * registered, move them to the linger list; they'll
		 * be sent to the osd in the loop below.  Unregister
		 * the request before re-registering it as a linger
		 * request to ensure the __map_request() below
		 * will decide it needs to be sent.
		 */
		if (req->r_linger && list_empty(&req->r_linger_item)) {
			dout("%p tid %llu restart on osd%d\n",
			     req, req->r_tid,
			     req->r_osd ? req->r_osd->o_osd : -1);
			__unregister_request(osdc, req);
			__register_linger_request(osdc, req);
			continue;
		}

		err = __map_request(osdc, req, force_resend);
		if (err < 0)
			continue;	/* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++;	/* request a newer map */
		} else if (err > 0) {
			if (!req->r_linger) {
				dout("%p tid %llu requeued on osd%d\n", req,
				     req->r_tid,
				     req->r_osd ? req->r_osd->o_osd : -1);
				req->r_flags |= CEPH_OSD_FLAG_RETRY;
			}
		}
	}

	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
				 r_linger_item) {
		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

		err = __map_request(osdc, req, force_resend);
		dout("__map_request returned %d\n", err);
		if (err == 0)
			continue;	/* no change and no osd was specified */
		if (err < 0)
			continue;	/* hrm! */
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;	/* request a newer map */
			continue;
		}

		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		__register_request(osdc, req);
		__unregister_linger_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
	reset_changed_osds(osdc);
}


/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  &osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
			kick_requests(osdc, 0);
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			int skipped_map = 0;

			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap) {
				if (oldmap->epoch + 1 < newmap->epoch)
					skipped_map = 1;
				ceph_osdmap_destroy(oldmap);
			}
			kick_requests(osdc, skipped_map);
		}
		p += maplen;
		nr_maps--;
	}

done:
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
	return;
}

/*
 * watch/notify callback event infrastructure
 *
 * These callbacks are used both for watch and notify operations.
 */
static void __release_event(struct kref *kref)
{
	struct ceph_osd_event *event =
		container_of(kref, struct ceph_osd_event, kref);

	dout("__release_event %p\n", event);
	kfree(event);
}

static void get_event(struct ceph_osd_event *event)
{
	kref_get(&event->kref);
}

void ceph_osdc_put_event(struct ceph_osd_event *event)
{
	kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);

static void __insert_event(struct ceph_osd_client *osdc,
			     struct ceph_osd_event *new)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (new->cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (new->cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &osdc->event_tree);
}

static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
					   u64 cookie)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			return event;
	}
	return NULL;
}

static void __remove_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	if (!RB_EMPTY_NODE(&event->node)) {
		dout("__remove_event removed %p\n", event);
		rb_erase(&event->node, &osdc->event_tree);
		ceph_osdc_put_event(event);
	} else {
		dout("__remove_event didn't remove %p\n", event);
	}
}

int ceph_osdc_create_event(struct ceph_osd_client *osdc,
			   void (*event_cb)(u64, u64, u8, void *),
			   void *data, struct ceph_osd_event **pevent)
{
	struct ceph_osd_event *event;

	event = kmalloc(sizeof(*event), GFP_NOIO);
	if (!event)
		return -ENOMEM;

	dout("create_event %p\n", event);
	event->cb = event_cb;
	event->one_shot = 0;
	event->data = data;
	event->osdc = osdc;
	INIT_LIST_HEAD(&event->osd_node);
	RB_CLEAR_NODE(&event->node);
	kref_init(&event->kref);   /* one ref for us */
	kref_get(&event->kref);    /* one ref for the caller */

	spin_lock(&osdc->event_lock);
	event->cookie = ++osdc->event_count;
	__insert_event(osdc, event);
	spin_unlock(&osdc->event_lock);

	*pevent = event;
	return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);

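/*
 * A newly created event holds two references: one for the event tree
 * and one for the caller.  ceph_osdc_cancel_event() drops both, via
 * __remove_event() and the final put below.
 */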
void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	dout("cancel_event %p\n", event);
	spin_lock(&osdc->event_lock);
	__remove_event(event);
	spin_unlock(&osdc->event_lock);
	ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);


static void do_event_work(struct work_struct *work)
{
	struct ceph_osd_event_work *event_work =
		container_of(work, struct ceph_osd_event_work, work);
	struct ceph_osd_event *event = event_work->event;
	u64 ver = event_work->ver;
	u64 notify_id = event_work->notify_id;
	u8 opcode = event_work->opcode;

	dout("do_event_work completing %p\n", event);
	event->cb(ver, notify_id, opcode, event->data);
	dout("do_event_work completed %p\n", event);
	ceph_osdc_put_event(event);
	kfree(event_work);
}


/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p, *end;
	u8 proto_ver;
	u64 cookie, ver, notify_id;
	u8 opcode;
	struct ceph_osd_event *event;
	struct ceph_osd_event_work *event_work;

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	ceph_decode_64_safe(&p, end, ver, bad);
	ceph_decode_64_safe(&p, end, notify_id, bad);

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event) {
		BUG_ON(event->one_shot);
		get_event(event);
	}
	spin_unlock(&osdc->event_lock);
	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
	     cookie, ver, event);
	if (event) {
		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
		if (!event_work) {
			dout("ERROR: could not allocate event_work\n");
			goto done_err;
		}
		INIT_WORK(&event_work->work, do_event_work);
		event_work->event = event;
		event_work->ver = ver;
		event_work->notify_id = notify_id;
		event_work->opcode = opcode;
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			goto done_err;
		}
	}

	return;

done_err:
	ceph_osdc_put_event(event);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
	return;
}

static void ceph_osdc_msg_data_set(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_set_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_set_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_set_bio(msg, osd_data->bio, length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

/*
 * Register request, send initial attempt.
 */
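/*
 * Note on @nofail: if mapping the request fails and @nofail is true,
 * the error is not returned; the request stays registered and will be
 * retried when a usable osdmap arrives.
 */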
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	/* Set up response incoming data and request outgoing data fields */

	ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in);
	ceph_osdc_msg_data_set(req->r_request, &req->r_data_out);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__register_request(osdc, req);
	WARN_ON(req->r_sent);
	rc = __map_request(osdc, req, 0);
	if (rc < 0) {
		if (nofail) {
			dout("osdc_start_request failed map, "
			     "will retry %lld\n", req->r_tid);
			rc = 0;
		}
		goto out_unlock;
	}
	if (req->r_osd == NULL) {
		dout("send_request %p no up osds in pg\n", req);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	} else {
		__send_queued(osdc);
	}
	rc = 0;
out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		__unregister_request(osdc, req);
		mutex_unlock(&osdc->request_mutex);
		complete_request(req);
		dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
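/*
 * Typical call pattern (illustrative sketch only; "req" would come from
 * ceph_osdc_new_request() and already be built):
 *
 *	rc = ceph_osdc_start_request(osdc, req, false);
 *	if (!rc)
 *		rc = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 *
 * which is exactly the shape ceph_osdc_readpages()/ceph_osdc_writepages()
 * below use.
 */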
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	osdc->osdmap = NULL;
	init_rwsem(&osdc->map_sem);
	init_completion(&osdc->map_waiters);
	osdc->last_requested_map = 0;
	mutex_init(&osdc->request_mutex);
	osdc->last_tid = 0;
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	osdc->requests = RB_ROOT;
	INIT_LIST_HEAD(&osdc->req_lru);
	INIT_LIST_HEAD(&osdc->req_unsent);
	INIT_LIST_HEAD(&osdc->req_notarget);
	INIT_LIST_HEAD(&osdc->req_linger);
	osdc->num_requests = 0;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
	spin_lock_init(&osdc->event_lock);
	osdc->event_tree = RB_ROOT;
	osdc->event_count = 0;

	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));

	err = -ENOMEM;
	osdc->req_mempool = mempool_create_kmalloc_pool(10,
					sizeof(struct ceph_osd_request));
	if (!osdc->req_mempool)
		goto out;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				OSD_OP_FRONT_LEN, 10, true,
				"osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				OSD_OPREPLY_FRONT_LEN, 10, true,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq) {
		/*
		 * create_singlethread_workqueue() returns NULL on failure,
		 * never an ERR_PTR, so pick the errno ourselves and also
		 * tear down the reply msgpool we just set up.
		 */
		err = -ENOMEM;
		goto out_msgpool_reply;
	}
	return 0;

out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	remove_all_osds(osdc);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
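/*
 * Lifecycle sketch (illustrative only; in practice the libceph core calls
 * these from ceph_create_client()/ceph_destroy_client()):
 *
 *	err = ceph_osdc_init(&client->osdc, client);
 *	if (err)
 *		goto fail;
 *	...
 *	ceph_osdc_sync(&client->osdc);	// let in-flight writes reach disk
 *	ceph_osdc_stop(&client->osdc);
 */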
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */

	ceph_osd_data_pages_init(&req->r_data_in, pages, *plen, page_align,
				 false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	BUG_ON(vino.snap != CEPH_NOSNAP);	/* snapshots aren't writeable */
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	ceph_osd_data_pages_init(&req->r_data_out, pages, len, page_align,
				 false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime);

	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	if (!osd)
		goto out;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
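/*
 * Caller-side sketch for the synchronous helpers above (illustrative only;
 * fs/ceph is the real user, and the inode/page names here are hypothetical).
 * Reading one page worth of file data at "off" could look roughly like:
 *
 *	u64 len = PAGE_SIZE;
 *	struct page *page = ...;
 *
 *	rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
 *				 off, &len, truncate_seq, truncate_size,
 *				 &page, 1, off & ~PAGE_MASK);
 *	// rc is bytes read (possibly short at an object boundary) or -errno
 */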
/*
 * lookup and return message for incoming reply.  set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		*skip = 1;
		m = NULL;
		dout("get_reply unknown tid %llu from osd%d\n", tid,
		     osd->o_osd);
		goto out;
	}

	if (req->r_reply->con)
		dout("%s revoking msg %p from old con %p\n", __func__,
		     req->r_reply, req->r_reply->con);
	ceph_msg_revoke_incoming(req->r_reply);

	if (front > req->r_reply->front.iov_len) {
		pr_warning("get_reply front %d > preallocated %d\n",
			   front, (int)req->r_reply->front.iov_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
		if (!m)
			goto out;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		struct ceph_osd_data *osd_data = &req->r_data_in;

		if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
			if (osd_data->pages &&
			    unlikely(osd_data->length < data_len)) {

				pr_warning("tid %lld reply has %d bytes "
					   "we had only %llu bytes ready\n",
					   tid, data_len, osd_data->length);
				*skip = 1;
				ceph_msg_put(m);
				m = NULL;
				goto out;
			}
		}
	}
	*skip = 0;
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);
	int front = le32_to_cpu(hdr->front_len);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_WATCH_NOTIFY:
		return ceph_msg_new(type, front, GFP_NOFS, false);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
			osd->o_osd);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;

	put_osd(osd);
}
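/*
 * Flow note (descriptive only): the messenger invokes ->alloc_msg() when a
 * message header arrives, so OSD_OPREPLY frames are steered via get_reply()
 * into the r_reply message preallocated by the matching request (or skipped
 * when no request with that tid is found), and the fully read message is
 * then handed to ->dispatch() above.
 */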
/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}


static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.fault = osd_reset,
};