1 2 #include <linux/ceph/ceph_debug.h> 3 4 #include <linux/module.h> 5 #include <linux/slab.h> 6 #include <asm/div64.h> 7 8 #include <linux/ceph/libceph.h> 9 #include <linux/ceph/osdmap.h> 10 #include <linux/ceph/decode.h> 11 #include <linux/crush/hash.h> 12 #include <linux/crush/mapper.h> 13 14 char *ceph_osdmap_state_str(char *str, int len, int state) 15 { 16 if (!len) 17 return str; 18 19 if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP)) 20 snprintf(str, len, "exists, up"); 21 else if (state & CEPH_OSD_EXISTS) 22 snprintf(str, len, "exists"); 23 else if (state & CEPH_OSD_UP) 24 snprintf(str, len, "up"); 25 else 26 snprintf(str, len, "doesn't exist"); 27 28 return str; 29 } 30 31 /* maps */ 32 33 static int calc_bits_of(unsigned int t) 34 { 35 int b = 0; 36 while (t) { 37 t = t >> 1; 38 b++; 39 } 40 return b; 41 } 42 43 /* 44 * the foo_mask is the smallest value 2^n-1 that is >= foo. 45 */ 46 static void calc_pg_masks(struct ceph_pg_pool_info *pi) 47 { 48 pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1; 49 pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1; 50 } 51 52 /* 53 * decode crush map 54 */ 55 static int crush_decode_uniform_bucket(void **p, void *end, 56 struct crush_bucket_uniform *b) 57 { 58 dout("crush_decode_uniform_bucket %p to %p\n", *p, end); 59 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); 60 b->item_weight = ceph_decode_32(p); 61 return 0; 62 bad: 63 return -EINVAL; 64 } 65 66 static int crush_decode_list_bucket(void **p, void *end, 67 struct crush_bucket_list *b) 68 { 69 int j; 70 dout("crush_decode_list_bucket %p to %p\n", *p, end); 71 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 72 if (b->item_weights == NULL) 73 return -ENOMEM; 74 b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 75 if (b->sum_weights == NULL) 76 return -ENOMEM; 77 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 78 for (j = 0; j < b->h.size; j++) { 79 b->item_weights[j] = ceph_decode_32(p); 80 b->sum_weights[j] = ceph_decode_32(p); 81 } 82 return 0; 83 bad: 84 return -EINVAL; 85 } 86 87 static int crush_decode_tree_bucket(void **p, void *end, 88 struct crush_bucket_tree *b) 89 { 90 int j; 91 dout("crush_decode_tree_bucket %p to %p\n", *p, end); 92 ceph_decode_8_safe(p, end, b->num_nodes, bad); 93 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS); 94 if (b->node_weights == NULL) 95 return -ENOMEM; 96 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad); 97 for (j = 0; j < b->num_nodes; j++) 98 b->node_weights[j] = ceph_decode_32(p); 99 return 0; 100 bad: 101 return -EINVAL; 102 } 103 104 static int crush_decode_straw_bucket(void **p, void *end, 105 struct crush_bucket_straw *b) 106 { 107 int j; 108 dout("crush_decode_straw_bucket %p to %p\n", *p, end); 109 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 110 if (b->item_weights == NULL) 111 return -ENOMEM; 112 b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 113 if (b->straws == NULL) 114 return -ENOMEM; 115 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 116 for (j = 0; j < b->h.size; j++) { 117 b->item_weights[j] = ceph_decode_32(p); 118 b->straws[j] = ceph_decode_32(p); 119 } 120 return 0; 121 bad: 122 return -EINVAL; 123 } 124 125 static int crush_decode_straw2_bucket(void **p, void *end, 126 struct crush_bucket_straw2 *b) 127 { 128 int j; 129 dout("crush_decode_straw2_bucket %p to %p\n", *p, end); 130 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 131 if (b->item_weights == NULL) 132 return -ENOMEM; 133 ceph_decode_need(p, end, b->h.size * sizeof(u32), bad); 134 for (j = 0; j < b->h.size; j++) 135 b->item_weights[j] = ceph_decode_32(p); 136 return 0; 137 bad: 138 return -EINVAL; 139 } 140 141 static int skip_name_map(void **p, void *end) 142 { 143 int len; 144 ceph_decode_32_safe(p, end, len ,bad); 145 while (len--) { 146 int strlen; 147 *p += sizeof(u32); 148 ceph_decode_32_safe(p, end, strlen, bad); 149 *p += strlen; 150 } 151 return 0; 152 bad: 153 return -EINVAL; 154 } 155 156 static struct crush_map *crush_decode(void *pbyval, void *end) 157 { 158 struct crush_map *c; 159 int err = -EINVAL; 160 int i, j; 161 void **p = &pbyval; 162 void *start = pbyval; 163 u32 magic; 164 u32 num_name_maps; 165 166 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p)); 167 168 c = kzalloc(sizeof(*c), GFP_NOFS); 169 if (c == NULL) 170 return ERR_PTR(-ENOMEM); 171 172 /* set tunables to default values */ 173 c->choose_local_tries = 2; 174 c->choose_local_fallback_tries = 5; 175 c->choose_total_tries = 19; 176 c->chooseleaf_descend_once = 0; 177 178 ceph_decode_need(p, end, 4*sizeof(u32), bad); 179 magic = ceph_decode_32(p); 180 if (magic != CRUSH_MAGIC) { 181 pr_err("crush_decode magic %x != current %x\n", 182 (unsigned int)magic, (unsigned int)CRUSH_MAGIC); 183 goto bad; 184 } 185 c->max_buckets = ceph_decode_32(p); 186 c->max_rules = ceph_decode_32(p); 187 c->max_devices = ceph_decode_32(p); 188 189 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS); 190 if (c->buckets == NULL) 191 goto badmem; 192 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS); 193 if (c->rules == NULL) 194 goto badmem; 195 196 /* buckets */ 197 for (i = 0; i < c->max_buckets; i++) { 198 int size = 0; 199 u32 alg; 200 struct crush_bucket *b; 201 202 ceph_decode_32_safe(p, end, alg, bad); 203 if (alg == 0) { 204 c->buckets[i] = NULL; 205 continue; 206 } 207 dout("crush_decode bucket %d off %x %p to %p\n", 208 i, (int)(*p-start), *p, end); 209 210 switch (alg) { 211 case CRUSH_BUCKET_UNIFORM: 212 size = sizeof(struct crush_bucket_uniform); 213 break; 214 case CRUSH_BUCKET_LIST: 215 size = sizeof(struct crush_bucket_list); 216 break; 217 case CRUSH_BUCKET_TREE: 218 size = sizeof(struct crush_bucket_tree); 219 break; 220 case CRUSH_BUCKET_STRAW: 221 size = sizeof(struct crush_bucket_straw); 222 break; 223 case CRUSH_BUCKET_STRAW2: 224 size = sizeof(struct crush_bucket_straw2); 225 break; 226 default: 227 err = -EINVAL; 228 goto bad; 229 } 230 BUG_ON(size == 0); 231 b = c->buckets[i] = kzalloc(size, GFP_NOFS); 232 if (b == NULL) 233 goto badmem; 234 235 ceph_decode_need(p, end, 4*sizeof(u32), bad); 236 b->id = ceph_decode_32(p); 237 b->type = ceph_decode_16(p); 238 b->alg = ceph_decode_8(p); 239 b->hash = ceph_decode_8(p); 240 b->weight = ceph_decode_32(p); 241 b->size = ceph_decode_32(p); 242 243 dout("crush_decode bucket size %d off %x %p to %p\n", 244 b->size, (int)(*p-start), *p, end); 245 246 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS); 247 if (b->items == NULL) 248 goto badmem; 249 b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS); 250 if (b->perm == NULL) 251 goto badmem; 252 b->perm_n = 0; 253 254 ceph_decode_need(p, end, b->size*sizeof(u32), bad); 255 for (j = 0; j < b->size; j++) 256 b->items[j] = ceph_decode_32(p); 257 258 switch (b->alg) { 259 case CRUSH_BUCKET_UNIFORM: 260 err = crush_decode_uniform_bucket(p, end, 261 (struct crush_bucket_uniform *)b); 262 if (err < 0) 263 goto bad; 264 break; 265 case CRUSH_BUCKET_LIST: 266 err = crush_decode_list_bucket(p, end, 267 (struct crush_bucket_list *)b); 268 if (err < 0) 269 goto bad; 270 break; 271 case CRUSH_BUCKET_TREE: 272 err = crush_decode_tree_bucket(p, end, 273 (struct crush_bucket_tree *)b); 274 if (err < 0) 275 goto bad; 276 break; 277 case CRUSH_BUCKET_STRAW: 278 err = crush_decode_straw_bucket(p, end, 279 (struct crush_bucket_straw *)b); 280 if (err < 0) 281 goto bad; 282 break; 283 case CRUSH_BUCKET_STRAW2: 284 err = crush_decode_straw2_bucket(p, end, 285 (struct crush_bucket_straw2 *)b); 286 if (err < 0) 287 goto bad; 288 break; 289 } 290 } 291 292 /* rules */ 293 dout("rule vec is %p\n", c->rules); 294 for (i = 0; i < c->max_rules; i++) { 295 u32 yes; 296 struct crush_rule *r; 297 298 ceph_decode_32_safe(p, end, yes, bad); 299 if (!yes) { 300 dout("crush_decode NO rule %d off %x %p to %p\n", 301 i, (int)(*p-start), *p, end); 302 c->rules[i] = NULL; 303 continue; 304 } 305 306 dout("crush_decode rule %d off %x %p to %p\n", 307 i, (int)(*p-start), *p, end); 308 309 /* len */ 310 ceph_decode_32_safe(p, end, yes, bad); 311 #if BITS_PER_LONG == 32 312 err = -EINVAL; 313 if (yes > (ULONG_MAX - sizeof(*r)) 314 / sizeof(struct crush_rule_step)) 315 goto bad; 316 #endif 317 r = c->rules[i] = kmalloc(sizeof(*r) + 318 yes*sizeof(struct crush_rule_step), 319 GFP_NOFS); 320 if (r == NULL) 321 goto badmem; 322 dout(" rule %d is at %p\n", i, r); 323 r->len = yes; 324 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */ 325 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad); 326 for (j = 0; j < r->len; j++) { 327 r->steps[j].op = ceph_decode_32(p); 328 r->steps[j].arg1 = ceph_decode_32(p); 329 r->steps[j].arg2 = ceph_decode_32(p); 330 } 331 } 332 333 /* ignore trailing name maps. */ 334 for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) { 335 err = skip_name_map(p, end); 336 if (err < 0) 337 goto done; 338 } 339 340 /* tunables */ 341 ceph_decode_need(p, end, 3*sizeof(u32), done); 342 c->choose_local_tries = ceph_decode_32(p); 343 c->choose_local_fallback_tries = ceph_decode_32(p); 344 c->choose_total_tries = ceph_decode_32(p); 345 dout("crush decode tunable choose_local_tries = %d\n", 346 c->choose_local_tries); 347 dout("crush decode tunable choose_local_fallback_tries = %d\n", 348 c->choose_local_fallback_tries); 349 dout("crush decode tunable choose_total_tries = %d\n", 350 c->choose_total_tries); 351 352 ceph_decode_need(p, end, sizeof(u32), done); 353 c->chooseleaf_descend_once = ceph_decode_32(p); 354 dout("crush decode tunable chooseleaf_descend_once = %d\n", 355 c->chooseleaf_descend_once); 356 357 ceph_decode_need(p, end, sizeof(u8), done); 358 c->chooseleaf_vary_r = ceph_decode_8(p); 359 dout("crush decode tunable chooseleaf_vary_r = %d\n", 360 c->chooseleaf_vary_r); 361 362 /* skip straw_calc_version, allowed_bucket_algs */ 363 ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done); 364 *p += sizeof(u8) + sizeof(u32); 365 366 ceph_decode_need(p, end, sizeof(u8), done); 367 c->chooseleaf_stable = ceph_decode_8(p); 368 dout("crush decode tunable chooseleaf_stable = %d\n", 369 c->chooseleaf_stable); 370 371 done: 372 dout("crush_decode success\n"); 373 return c; 374 375 badmem: 376 err = -ENOMEM; 377 bad: 378 dout("crush_decode fail %d\n", err); 379 crush_destroy(c); 380 return ERR_PTR(err); 381 } 382 383 int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs) 384 { 385 if (lhs->pool < rhs->pool) 386 return -1; 387 if (lhs->pool > rhs->pool) 388 return 1; 389 if (lhs->seed < rhs->seed) 390 return -1; 391 if (lhs->seed > rhs->seed) 392 return 1; 393 394 return 0; 395 } 396 397 /* 398 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid 399 * to a set of osds) and primary_temp (explicit primary setting) 400 */ 401 static int __insert_pg_mapping(struct ceph_pg_mapping *new, 402 struct rb_root *root) 403 { 404 struct rb_node **p = &root->rb_node; 405 struct rb_node *parent = NULL; 406 struct ceph_pg_mapping *pg = NULL; 407 int c; 408 409 dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new); 410 while (*p) { 411 parent = *p; 412 pg = rb_entry(parent, struct ceph_pg_mapping, node); 413 c = ceph_pg_compare(&new->pgid, &pg->pgid); 414 if (c < 0) 415 p = &(*p)->rb_left; 416 else if (c > 0) 417 p = &(*p)->rb_right; 418 else 419 return -EEXIST; 420 } 421 422 rb_link_node(&new->node, parent, p); 423 rb_insert_color(&new->node, root); 424 return 0; 425 } 426 427 static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, 428 struct ceph_pg pgid) 429 { 430 struct rb_node *n = root->rb_node; 431 struct ceph_pg_mapping *pg; 432 int c; 433 434 while (n) { 435 pg = rb_entry(n, struct ceph_pg_mapping, node); 436 c = ceph_pg_compare(&pgid, &pg->pgid); 437 if (c < 0) { 438 n = n->rb_left; 439 } else if (c > 0) { 440 n = n->rb_right; 441 } else { 442 dout("__lookup_pg_mapping %lld.%x got %p\n", 443 pgid.pool, pgid.seed, pg); 444 return pg; 445 } 446 } 447 return NULL; 448 } 449 450 static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid) 451 { 452 struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid); 453 454 if (pg) { 455 dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed, 456 pg); 457 rb_erase(&pg->node, root); 458 kfree(pg); 459 return 0; 460 } 461 dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed); 462 return -ENOENT; 463 } 464 465 /* 466 * rbtree of pg pool info 467 */ 468 static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new) 469 { 470 struct rb_node **p = &root->rb_node; 471 struct rb_node *parent = NULL; 472 struct ceph_pg_pool_info *pi = NULL; 473 474 while (*p) { 475 parent = *p; 476 pi = rb_entry(parent, struct ceph_pg_pool_info, node); 477 if (new->id < pi->id) 478 p = &(*p)->rb_left; 479 else if (new->id > pi->id) 480 p = &(*p)->rb_right; 481 else 482 return -EEXIST; 483 } 484 485 rb_link_node(&new->node, parent, p); 486 rb_insert_color(&new->node, root); 487 return 0; 488 } 489 490 static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id) 491 { 492 struct ceph_pg_pool_info *pi; 493 struct rb_node *n = root->rb_node; 494 495 while (n) { 496 pi = rb_entry(n, struct ceph_pg_pool_info, node); 497 if (id < pi->id) 498 n = n->rb_left; 499 else if (id > pi->id) 500 n = n->rb_right; 501 else 502 return pi; 503 } 504 return NULL; 505 } 506 507 struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id) 508 { 509 return __lookup_pg_pool(&map->pg_pools, id); 510 } 511 512 const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id) 513 { 514 struct ceph_pg_pool_info *pi; 515 516 if (id == CEPH_NOPOOL) 517 return NULL; 518 519 if (WARN_ON_ONCE(id > (u64) INT_MAX)) 520 return NULL; 521 522 pi = __lookup_pg_pool(&map->pg_pools, (int) id); 523 524 return pi ? pi->name : NULL; 525 } 526 EXPORT_SYMBOL(ceph_pg_pool_name_by_id); 527 528 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) 529 { 530 struct rb_node *rbp; 531 532 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) { 533 struct ceph_pg_pool_info *pi = 534 rb_entry(rbp, struct ceph_pg_pool_info, node); 535 if (pi->name && strcmp(pi->name, name) == 0) 536 return pi->id; 537 } 538 return -ENOENT; 539 } 540 EXPORT_SYMBOL(ceph_pg_poolid_by_name); 541 542 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) 543 { 544 rb_erase(&pi->node, root); 545 kfree(pi->name); 546 kfree(pi); 547 } 548 549 static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) 550 { 551 u8 ev, cv; 552 unsigned len, num; 553 void *pool_end; 554 555 ceph_decode_need(p, end, 2 + 4, bad); 556 ev = ceph_decode_8(p); /* encoding version */ 557 cv = ceph_decode_8(p); /* compat version */ 558 if (ev < 5) { 559 pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv); 560 return -EINVAL; 561 } 562 if (cv > 9) { 563 pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv); 564 return -EINVAL; 565 } 566 len = ceph_decode_32(p); 567 ceph_decode_need(p, end, len, bad); 568 pool_end = *p + len; 569 570 pi->type = ceph_decode_8(p); 571 pi->size = ceph_decode_8(p); 572 pi->crush_ruleset = ceph_decode_8(p); 573 pi->object_hash = ceph_decode_8(p); 574 575 pi->pg_num = ceph_decode_32(p); 576 pi->pgp_num = ceph_decode_32(p); 577 578 *p += 4 + 4; /* skip lpg* */ 579 *p += 4; /* skip last_change */ 580 *p += 8 + 4; /* skip snap_seq, snap_epoch */ 581 582 /* skip snaps */ 583 num = ceph_decode_32(p); 584 while (num--) { 585 *p += 8; /* snapid key */ 586 *p += 1 + 1; /* versions */ 587 len = ceph_decode_32(p); 588 *p += len; 589 } 590 591 /* skip removed_snaps */ 592 num = ceph_decode_32(p); 593 *p += num * (8 + 8); 594 595 *p += 8; /* skip auid */ 596 pi->flags = ceph_decode_64(p); 597 *p += 4; /* skip crash_replay_interval */ 598 599 if (ev >= 7) 600 pi->min_size = ceph_decode_8(p); 601 else 602 pi->min_size = pi->size - pi->size / 2; 603 604 if (ev >= 8) 605 *p += 8 + 8; /* skip quota_max_* */ 606 607 if (ev >= 9) { 608 /* skip tiers */ 609 num = ceph_decode_32(p); 610 *p += num * 8; 611 612 *p += 8; /* skip tier_of */ 613 *p += 1; /* skip cache_mode */ 614 615 pi->read_tier = ceph_decode_64(p); 616 pi->write_tier = ceph_decode_64(p); 617 } else { 618 pi->read_tier = -1; 619 pi->write_tier = -1; 620 } 621 622 if (ev >= 10) { 623 /* skip properties */ 624 num = ceph_decode_32(p); 625 while (num--) { 626 len = ceph_decode_32(p); 627 *p += len; /* key */ 628 len = ceph_decode_32(p); 629 *p += len; /* val */ 630 } 631 } 632 633 if (ev >= 11) { 634 /* skip hit_set_params */ 635 *p += 1 + 1; /* versions */ 636 len = ceph_decode_32(p); 637 *p += len; 638 639 *p += 4; /* skip hit_set_period */ 640 *p += 4; /* skip hit_set_count */ 641 } 642 643 if (ev >= 12) 644 *p += 4; /* skip stripe_width */ 645 646 if (ev >= 13) { 647 *p += 8; /* skip target_max_bytes */ 648 *p += 8; /* skip target_max_objects */ 649 *p += 4; /* skip cache_target_dirty_ratio_micro */ 650 *p += 4; /* skip cache_target_full_ratio_micro */ 651 *p += 4; /* skip cache_min_flush_age */ 652 *p += 4; /* skip cache_min_evict_age */ 653 } 654 655 if (ev >= 14) { 656 /* skip erasure_code_profile */ 657 len = ceph_decode_32(p); 658 *p += len; 659 } 660 661 if (ev >= 15) 662 pi->last_force_request_resend = ceph_decode_32(p); 663 else 664 pi->last_force_request_resend = 0; 665 666 /* ignore the rest */ 667 668 *p = pool_end; 669 calc_pg_masks(pi); 670 return 0; 671 672 bad: 673 return -EINVAL; 674 } 675 676 static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map) 677 { 678 struct ceph_pg_pool_info *pi; 679 u32 num, len; 680 u64 pool; 681 682 ceph_decode_32_safe(p, end, num, bad); 683 dout(" %d pool names\n", num); 684 while (num--) { 685 ceph_decode_64_safe(p, end, pool, bad); 686 ceph_decode_32_safe(p, end, len, bad); 687 dout(" pool %llu len %d\n", pool, len); 688 ceph_decode_need(p, end, len, bad); 689 pi = __lookup_pg_pool(&map->pg_pools, pool); 690 if (pi) { 691 char *name = kstrndup(*p, len, GFP_NOFS); 692 693 if (!name) 694 return -ENOMEM; 695 kfree(pi->name); 696 pi->name = name; 697 dout(" name is %s\n", pi->name); 698 } 699 *p += len; 700 } 701 return 0; 702 703 bad: 704 return -EINVAL; 705 } 706 707 /* 708 * osd map 709 */ 710 struct ceph_osdmap *ceph_osdmap_alloc(void) 711 { 712 struct ceph_osdmap *map; 713 714 map = kzalloc(sizeof(*map), GFP_NOIO); 715 if (!map) 716 return NULL; 717 718 map->pg_pools = RB_ROOT; 719 map->pool_max = -1; 720 map->pg_temp = RB_ROOT; 721 map->primary_temp = RB_ROOT; 722 mutex_init(&map->crush_scratch_mutex); 723 724 return map; 725 } 726 727 void ceph_osdmap_destroy(struct ceph_osdmap *map) 728 { 729 dout("osdmap_destroy %p\n", map); 730 if (map->crush) 731 crush_destroy(map->crush); 732 while (!RB_EMPTY_ROOT(&map->pg_temp)) { 733 struct ceph_pg_mapping *pg = 734 rb_entry(rb_first(&map->pg_temp), 735 struct ceph_pg_mapping, node); 736 rb_erase(&pg->node, &map->pg_temp); 737 kfree(pg); 738 } 739 while (!RB_EMPTY_ROOT(&map->primary_temp)) { 740 struct ceph_pg_mapping *pg = 741 rb_entry(rb_first(&map->primary_temp), 742 struct ceph_pg_mapping, node); 743 rb_erase(&pg->node, &map->primary_temp); 744 kfree(pg); 745 } 746 while (!RB_EMPTY_ROOT(&map->pg_pools)) { 747 struct ceph_pg_pool_info *pi = 748 rb_entry(rb_first(&map->pg_pools), 749 struct ceph_pg_pool_info, node); 750 __remove_pg_pool(&map->pg_pools, pi); 751 } 752 kfree(map->osd_state); 753 kfree(map->osd_weight); 754 kfree(map->osd_addr); 755 kfree(map->osd_primary_affinity); 756 kfree(map); 757 } 758 759 /* 760 * Adjust max_osd value, (re)allocate arrays. 761 * 762 * The new elements are properly initialized. 763 */ 764 static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) 765 { 766 u8 *state; 767 u32 *weight; 768 struct ceph_entity_addr *addr; 769 int i; 770 771 state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS); 772 if (!state) 773 return -ENOMEM; 774 map->osd_state = state; 775 776 weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS); 777 if (!weight) 778 return -ENOMEM; 779 map->osd_weight = weight; 780 781 addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS); 782 if (!addr) 783 return -ENOMEM; 784 map->osd_addr = addr; 785 786 for (i = map->max_osd; i < max; i++) { 787 map->osd_state[i] = 0; 788 map->osd_weight[i] = CEPH_OSD_OUT; 789 memset(map->osd_addr + i, 0, sizeof(*map->osd_addr)); 790 } 791 792 if (map->osd_primary_affinity) { 793 u32 *affinity; 794 795 affinity = krealloc(map->osd_primary_affinity, 796 max*sizeof(*affinity), GFP_NOFS); 797 if (!affinity) 798 return -ENOMEM; 799 map->osd_primary_affinity = affinity; 800 801 for (i = map->max_osd; i < max; i++) 802 map->osd_primary_affinity[i] = 803 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 804 } 805 806 map->max_osd = max; 807 808 return 0; 809 } 810 811 #define OSDMAP_WRAPPER_COMPAT_VER 7 812 #define OSDMAP_CLIENT_DATA_COMPAT_VER 1 813 814 /* 815 * Return 0 or error. On success, *v is set to 0 for old (v6) osdmaps, 816 * to struct_v of the client_data section for new (v7 and above) 817 * osdmaps. 818 */ 819 static int get_osdmap_client_data_v(void **p, void *end, 820 const char *prefix, u8 *v) 821 { 822 u8 struct_v; 823 824 ceph_decode_8_safe(p, end, struct_v, e_inval); 825 if (struct_v >= 7) { 826 u8 struct_compat; 827 828 ceph_decode_8_safe(p, end, struct_compat, e_inval); 829 if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) { 830 pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n", 831 struct_v, struct_compat, 832 OSDMAP_WRAPPER_COMPAT_VER, prefix); 833 return -EINVAL; 834 } 835 *p += 4; /* ignore wrapper struct_len */ 836 837 ceph_decode_8_safe(p, end, struct_v, e_inval); 838 ceph_decode_8_safe(p, end, struct_compat, e_inval); 839 if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) { 840 pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n", 841 struct_v, struct_compat, 842 OSDMAP_CLIENT_DATA_COMPAT_VER, prefix); 843 return -EINVAL; 844 } 845 *p += 4; /* ignore client data struct_len */ 846 } else { 847 u16 version; 848 849 *p -= 1; 850 ceph_decode_16_safe(p, end, version, e_inval); 851 if (version < 6) { 852 pr_warn("got v %d < 6 of %s ceph_osdmap\n", 853 version, prefix); 854 return -EINVAL; 855 } 856 857 /* old osdmap enconding */ 858 struct_v = 0; 859 } 860 861 *v = struct_v; 862 return 0; 863 864 e_inval: 865 return -EINVAL; 866 } 867 868 static int __decode_pools(void **p, void *end, struct ceph_osdmap *map, 869 bool incremental) 870 { 871 u32 n; 872 873 ceph_decode_32_safe(p, end, n, e_inval); 874 while (n--) { 875 struct ceph_pg_pool_info *pi; 876 u64 pool; 877 int ret; 878 879 ceph_decode_64_safe(p, end, pool, e_inval); 880 881 pi = __lookup_pg_pool(&map->pg_pools, pool); 882 if (!incremental || !pi) { 883 pi = kzalloc(sizeof(*pi), GFP_NOFS); 884 if (!pi) 885 return -ENOMEM; 886 887 pi->id = pool; 888 889 ret = __insert_pg_pool(&map->pg_pools, pi); 890 if (ret) { 891 kfree(pi); 892 return ret; 893 } 894 } 895 896 ret = decode_pool(p, end, pi); 897 if (ret) 898 return ret; 899 } 900 901 return 0; 902 903 e_inval: 904 return -EINVAL; 905 } 906 907 static int decode_pools(void **p, void *end, struct ceph_osdmap *map) 908 { 909 return __decode_pools(p, end, map, false); 910 } 911 912 static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map) 913 { 914 return __decode_pools(p, end, map, true); 915 } 916 917 static int __decode_pg_temp(void **p, void *end, struct ceph_osdmap *map, 918 bool incremental) 919 { 920 u32 n; 921 922 ceph_decode_32_safe(p, end, n, e_inval); 923 while (n--) { 924 struct ceph_pg pgid; 925 u32 len, i; 926 int ret; 927 928 ret = ceph_decode_pgid(p, end, &pgid); 929 if (ret) 930 return ret; 931 932 ceph_decode_32_safe(p, end, len, e_inval); 933 934 ret = __remove_pg_mapping(&map->pg_temp, pgid); 935 BUG_ON(!incremental && ret != -ENOENT); 936 937 if (!incremental || len > 0) { 938 struct ceph_pg_mapping *pg; 939 940 ceph_decode_need(p, end, len*sizeof(u32), e_inval); 941 942 if (len > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) 943 return -EINVAL; 944 945 pg = kzalloc(sizeof(*pg) + len*sizeof(u32), GFP_NOFS); 946 if (!pg) 947 return -ENOMEM; 948 949 pg->pgid = pgid; 950 pg->pg_temp.len = len; 951 for (i = 0; i < len; i++) 952 pg->pg_temp.osds[i] = ceph_decode_32(p); 953 954 ret = __insert_pg_mapping(pg, &map->pg_temp); 955 if (ret) { 956 kfree(pg); 957 return ret; 958 } 959 } 960 } 961 962 return 0; 963 964 e_inval: 965 return -EINVAL; 966 } 967 968 static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map) 969 { 970 return __decode_pg_temp(p, end, map, false); 971 } 972 973 static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map) 974 { 975 return __decode_pg_temp(p, end, map, true); 976 } 977 978 static int __decode_primary_temp(void **p, void *end, struct ceph_osdmap *map, 979 bool incremental) 980 { 981 u32 n; 982 983 ceph_decode_32_safe(p, end, n, e_inval); 984 while (n--) { 985 struct ceph_pg pgid; 986 u32 osd; 987 int ret; 988 989 ret = ceph_decode_pgid(p, end, &pgid); 990 if (ret) 991 return ret; 992 993 ceph_decode_32_safe(p, end, osd, e_inval); 994 995 ret = __remove_pg_mapping(&map->primary_temp, pgid); 996 BUG_ON(!incremental && ret != -ENOENT); 997 998 if (!incremental || osd != (u32)-1) { 999 struct ceph_pg_mapping *pg; 1000 1001 pg = kzalloc(sizeof(*pg), GFP_NOFS); 1002 if (!pg) 1003 return -ENOMEM; 1004 1005 pg->pgid = pgid; 1006 pg->primary_temp.osd = osd; 1007 1008 ret = __insert_pg_mapping(pg, &map->primary_temp); 1009 if (ret) { 1010 kfree(pg); 1011 return ret; 1012 } 1013 } 1014 } 1015 1016 return 0; 1017 1018 e_inval: 1019 return -EINVAL; 1020 } 1021 1022 static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map) 1023 { 1024 return __decode_primary_temp(p, end, map, false); 1025 } 1026 1027 static int decode_new_primary_temp(void **p, void *end, 1028 struct ceph_osdmap *map) 1029 { 1030 return __decode_primary_temp(p, end, map, true); 1031 } 1032 1033 u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd) 1034 { 1035 BUG_ON(osd >= map->max_osd); 1036 1037 if (!map->osd_primary_affinity) 1038 return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1039 1040 return map->osd_primary_affinity[osd]; 1041 } 1042 1043 static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff) 1044 { 1045 BUG_ON(osd >= map->max_osd); 1046 1047 if (!map->osd_primary_affinity) { 1048 int i; 1049 1050 map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32), 1051 GFP_NOFS); 1052 if (!map->osd_primary_affinity) 1053 return -ENOMEM; 1054 1055 for (i = 0; i < map->max_osd; i++) 1056 map->osd_primary_affinity[i] = 1057 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1058 } 1059 1060 map->osd_primary_affinity[osd] = aff; 1061 1062 return 0; 1063 } 1064 1065 static int decode_primary_affinity(void **p, void *end, 1066 struct ceph_osdmap *map) 1067 { 1068 u32 len, i; 1069 1070 ceph_decode_32_safe(p, end, len, e_inval); 1071 if (len == 0) { 1072 kfree(map->osd_primary_affinity); 1073 map->osd_primary_affinity = NULL; 1074 return 0; 1075 } 1076 if (len != map->max_osd) 1077 goto e_inval; 1078 1079 ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval); 1080 1081 for (i = 0; i < map->max_osd; i++) { 1082 int ret; 1083 1084 ret = set_primary_affinity(map, i, ceph_decode_32(p)); 1085 if (ret) 1086 return ret; 1087 } 1088 1089 return 0; 1090 1091 e_inval: 1092 return -EINVAL; 1093 } 1094 1095 static int decode_new_primary_affinity(void **p, void *end, 1096 struct ceph_osdmap *map) 1097 { 1098 u32 n; 1099 1100 ceph_decode_32_safe(p, end, n, e_inval); 1101 while (n--) { 1102 u32 osd, aff; 1103 int ret; 1104 1105 ceph_decode_32_safe(p, end, osd, e_inval); 1106 ceph_decode_32_safe(p, end, aff, e_inval); 1107 1108 ret = set_primary_affinity(map, osd, aff); 1109 if (ret) 1110 return ret; 1111 1112 pr_info("osd%d primary-affinity 0x%x\n", osd, aff); 1113 } 1114 1115 return 0; 1116 1117 e_inval: 1118 return -EINVAL; 1119 } 1120 1121 /* 1122 * decode a full map. 1123 */ 1124 static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map) 1125 { 1126 u8 struct_v; 1127 u32 epoch = 0; 1128 void *start = *p; 1129 u32 max; 1130 u32 len, i; 1131 int err; 1132 1133 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); 1134 1135 err = get_osdmap_client_data_v(p, end, "full", &struct_v); 1136 if (err) 1137 goto bad; 1138 1139 /* fsid, epoch, created, modified */ 1140 ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) + 1141 sizeof(map->created) + sizeof(map->modified), e_inval); 1142 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); 1143 epoch = map->epoch = ceph_decode_32(p); 1144 ceph_decode_copy(p, &map->created, sizeof(map->created)); 1145 ceph_decode_copy(p, &map->modified, sizeof(map->modified)); 1146 1147 /* pools */ 1148 err = decode_pools(p, end, map); 1149 if (err) 1150 goto bad; 1151 1152 /* pool_name */ 1153 err = decode_pool_names(p, end, map); 1154 if (err) 1155 goto bad; 1156 1157 ceph_decode_32_safe(p, end, map->pool_max, e_inval); 1158 1159 ceph_decode_32_safe(p, end, map->flags, e_inval); 1160 1161 /* max_osd */ 1162 ceph_decode_32_safe(p, end, max, e_inval); 1163 1164 /* (re)alloc osd arrays */ 1165 err = osdmap_set_max_osd(map, max); 1166 if (err) 1167 goto bad; 1168 1169 /* osd_state, osd_weight, osd_addrs->client_addr */ 1170 ceph_decode_need(p, end, 3*sizeof(u32) + 1171 map->max_osd*(1 + sizeof(*map->osd_weight) + 1172 sizeof(*map->osd_addr)), e_inval); 1173 1174 if (ceph_decode_32(p) != map->max_osd) 1175 goto e_inval; 1176 1177 ceph_decode_copy(p, map->osd_state, map->max_osd); 1178 1179 if (ceph_decode_32(p) != map->max_osd) 1180 goto e_inval; 1181 1182 for (i = 0; i < map->max_osd; i++) 1183 map->osd_weight[i] = ceph_decode_32(p); 1184 1185 if (ceph_decode_32(p) != map->max_osd) 1186 goto e_inval; 1187 1188 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); 1189 for (i = 0; i < map->max_osd; i++) 1190 ceph_decode_addr(&map->osd_addr[i]); 1191 1192 /* pg_temp */ 1193 err = decode_pg_temp(p, end, map); 1194 if (err) 1195 goto bad; 1196 1197 /* primary_temp */ 1198 if (struct_v >= 1) { 1199 err = decode_primary_temp(p, end, map); 1200 if (err) 1201 goto bad; 1202 } 1203 1204 /* primary_affinity */ 1205 if (struct_v >= 2) { 1206 err = decode_primary_affinity(p, end, map); 1207 if (err) 1208 goto bad; 1209 } else { 1210 /* XXX can this happen? */ 1211 kfree(map->osd_primary_affinity); 1212 map->osd_primary_affinity = NULL; 1213 } 1214 1215 /* crush */ 1216 ceph_decode_32_safe(p, end, len, e_inval); 1217 map->crush = crush_decode(*p, min(*p + len, end)); 1218 if (IS_ERR(map->crush)) { 1219 err = PTR_ERR(map->crush); 1220 map->crush = NULL; 1221 goto bad; 1222 } 1223 *p += len; 1224 1225 /* ignore the rest */ 1226 *p = end; 1227 1228 dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); 1229 return 0; 1230 1231 e_inval: 1232 err = -EINVAL; 1233 bad: 1234 pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n", 1235 err, epoch, (int)(*p - start), *p, start, end); 1236 print_hex_dump(KERN_DEBUG, "osdmap: ", 1237 DUMP_PREFIX_OFFSET, 16, 1, 1238 start, end - start, true); 1239 return err; 1240 } 1241 1242 /* 1243 * Allocate and decode a full map. 1244 */ 1245 struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end) 1246 { 1247 struct ceph_osdmap *map; 1248 int ret; 1249 1250 map = ceph_osdmap_alloc(); 1251 if (!map) 1252 return ERR_PTR(-ENOMEM); 1253 1254 ret = osdmap_decode(p, end, map); 1255 if (ret) { 1256 ceph_osdmap_destroy(map); 1257 return ERR_PTR(ret); 1258 } 1259 1260 return map; 1261 } 1262 1263 /* 1264 * decode and apply an incremental map update. 1265 */ 1266 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, 1267 struct ceph_osdmap *map) 1268 { 1269 struct crush_map *newcrush = NULL; 1270 struct ceph_fsid fsid; 1271 u32 epoch = 0; 1272 struct ceph_timespec modified; 1273 s32 len; 1274 u64 pool; 1275 __s64 new_pool_max; 1276 __s32 new_flags, max; 1277 void *start = *p; 1278 int err; 1279 u8 struct_v; 1280 1281 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); 1282 1283 err = get_osdmap_client_data_v(p, end, "inc", &struct_v); 1284 if (err) 1285 goto bad; 1286 1287 /* fsid, epoch, modified, new_pool_max, new_flags */ 1288 ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) + 1289 sizeof(u64) + sizeof(u32), e_inval); 1290 ceph_decode_copy(p, &fsid, sizeof(fsid)); 1291 epoch = ceph_decode_32(p); 1292 BUG_ON(epoch != map->epoch+1); 1293 ceph_decode_copy(p, &modified, sizeof(modified)); 1294 new_pool_max = ceph_decode_64(p); 1295 new_flags = ceph_decode_32(p); 1296 1297 /* full map? */ 1298 ceph_decode_32_safe(p, end, len, e_inval); 1299 if (len > 0) { 1300 dout("apply_incremental full map len %d, %p to %p\n", 1301 len, *p, end); 1302 return ceph_osdmap_decode(p, min(*p+len, end)); 1303 } 1304 1305 /* new crush? */ 1306 ceph_decode_32_safe(p, end, len, e_inval); 1307 if (len > 0) { 1308 newcrush = crush_decode(*p, min(*p+len, end)); 1309 if (IS_ERR(newcrush)) { 1310 err = PTR_ERR(newcrush); 1311 newcrush = NULL; 1312 goto bad; 1313 } 1314 *p += len; 1315 } 1316 1317 /* new flags? */ 1318 if (new_flags >= 0) 1319 map->flags = new_flags; 1320 if (new_pool_max >= 0) 1321 map->pool_max = new_pool_max; 1322 1323 /* new max? */ 1324 ceph_decode_32_safe(p, end, max, e_inval); 1325 if (max >= 0) { 1326 err = osdmap_set_max_osd(map, max); 1327 if (err) 1328 goto bad; 1329 } 1330 1331 map->epoch++; 1332 map->modified = modified; 1333 if (newcrush) { 1334 if (map->crush) 1335 crush_destroy(map->crush); 1336 map->crush = newcrush; 1337 newcrush = NULL; 1338 } 1339 1340 /* new_pools */ 1341 err = decode_new_pools(p, end, map); 1342 if (err) 1343 goto bad; 1344 1345 /* new_pool_names */ 1346 err = decode_pool_names(p, end, map); 1347 if (err) 1348 goto bad; 1349 1350 /* old_pool */ 1351 ceph_decode_32_safe(p, end, len, e_inval); 1352 while (len--) { 1353 struct ceph_pg_pool_info *pi; 1354 1355 ceph_decode_64_safe(p, end, pool, e_inval); 1356 pi = __lookup_pg_pool(&map->pg_pools, pool); 1357 if (pi) 1358 __remove_pg_pool(&map->pg_pools, pi); 1359 } 1360 1361 /* new_up */ 1362 ceph_decode_32_safe(p, end, len, e_inval); 1363 while (len--) { 1364 u32 osd; 1365 struct ceph_entity_addr addr; 1366 ceph_decode_32_safe(p, end, osd, e_inval); 1367 ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval); 1368 ceph_decode_addr(&addr); 1369 pr_info("osd%d up\n", osd); 1370 BUG_ON(osd >= map->max_osd); 1371 map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS; 1372 map->osd_addr[osd] = addr; 1373 } 1374 1375 /* new_state */ 1376 ceph_decode_32_safe(p, end, len, e_inval); 1377 while (len--) { 1378 u32 osd; 1379 u8 xorstate; 1380 ceph_decode_32_safe(p, end, osd, e_inval); 1381 xorstate = **(u8 **)p; 1382 (*p)++; /* clean flag */ 1383 if (xorstate == 0) 1384 xorstate = CEPH_OSD_UP; 1385 if (xorstate & CEPH_OSD_UP) 1386 pr_info("osd%d down\n", osd); 1387 if (osd < map->max_osd) 1388 map->osd_state[osd] ^= xorstate; 1389 } 1390 1391 /* new_weight */ 1392 ceph_decode_32_safe(p, end, len, e_inval); 1393 while (len--) { 1394 u32 osd, off; 1395 ceph_decode_need(p, end, sizeof(u32)*2, e_inval); 1396 osd = ceph_decode_32(p); 1397 off = ceph_decode_32(p); 1398 pr_info("osd%d weight 0x%x %s\n", osd, off, 1399 off == CEPH_OSD_IN ? "(in)" : 1400 (off == CEPH_OSD_OUT ? "(out)" : "")); 1401 if (osd < map->max_osd) 1402 map->osd_weight[osd] = off; 1403 } 1404 1405 /* new_pg_temp */ 1406 err = decode_new_pg_temp(p, end, map); 1407 if (err) 1408 goto bad; 1409 1410 /* new_primary_temp */ 1411 if (struct_v >= 1) { 1412 err = decode_new_primary_temp(p, end, map); 1413 if (err) 1414 goto bad; 1415 } 1416 1417 /* new_primary_affinity */ 1418 if (struct_v >= 2) { 1419 err = decode_new_primary_affinity(p, end, map); 1420 if (err) 1421 goto bad; 1422 } 1423 1424 /* ignore the rest */ 1425 *p = end; 1426 1427 dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); 1428 return map; 1429 1430 e_inval: 1431 err = -EINVAL; 1432 bad: 1433 pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n", 1434 err, epoch, (int)(*p - start), *p, start, end); 1435 print_hex_dump(KERN_DEBUG, "osdmap: ", 1436 DUMP_PREFIX_OFFSET, 16, 1, 1437 start, end - start, true); 1438 if (newcrush) 1439 crush_destroy(newcrush); 1440 return ERR_PTR(err); 1441 } 1442 1443 void ceph_oid_copy(struct ceph_object_id *dest, 1444 const struct ceph_object_id *src) 1445 { 1446 WARN_ON(!ceph_oid_empty(dest)); 1447 1448 if (src->name != src->inline_name) { 1449 /* very rare, see ceph_object_id definition */ 1450 dest->name = kmalloc(src->name_len + 1, 1451 GFP_NOIO | __GFP_NOFAIL); 1452 } 1453 1454 memcpy(dest->name, src->name, src->name_len + 1); 1455 dest->name_len = src->name_len; 1456 } 1457 EXPORT_SYMBOL(ceph_oid_copy); 1458 1459 static __printf(2, 0) 1460 int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap) 1461 { 1462 int len; 1463 1464 WARN_ON(!ceph_oid_empty(oid)); 1465 1466 len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap); 1467 if (len >= sizeof(oid->inline_name)) 1468 return len; 1469 1470 oid->name_len = len; 1471 return 0; 1472 } 1473 1474 /* 1475 * If oid doesn't fit into inline buffer, BUG. 1476 */ 1477 void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...) 1478 { 1479 va_list ap; 1480 1481 va_start(ap, fmt); 1482 BUG_ON(oid_printf_vargs(oid, fmt, ap)); 1483 va_end(ap); 1484 } 1485 EXPORT_SYMBOL(ceph_oid_printf); 1486 1487 static __printf(3, 0) 1488 int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp, 1489 const char *fmt, va_list ap) 1490 { 1491 va_list aq; 1492 int len; 1493 1494 va_copy(aq, ap); 1495 len = oid_printf_vargs(oid, fmt, aq); 1496 va_end(aq); 1497 1498 if (len) { 1499 char *external_name; 1500 1501 external_name = kmalloc(len + 1, gfp); 1502 if (!external_name) 1503 return -ENOMEM; 1504 1505 oid->name = external_name; 1506 WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len); 1507 oid->name_len = len; 1508 } 1509 1510 return 0; 1511 } 1512 1513 /* 1514 * If oid doesn't fit into inline buffer, allocate. 1515 */ 1516 int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, 1517 const char *fmt, ...) 1518 { 1519 va_list ap; 1520 int ret; 1521 1522 va_start(ap, fmt); 1523 ret = oid_aprintf_vargs(oid, gfp, fmt, ap); 1524 va_end(ap); 1525 1526 return ret; 1527 } 1528 EXPORT_SYMBOL(ceph_oid_aprintf); 1529 1530 void ceph_oid_destroy(struct ceph_object_id *oid) 1531 { 1532 if (oid->name != oid->inline_name) 1533 kfree(oid->name); 1534 } 1535 EXPORT_SYMBOL(ceph_oid_destroy); 1536 1537 /* 1538 * osds only 1539 */ 1540 static bool __osds_equal(const struct ceph_osds *lhs, 1541 const struct ceph_osds *rhs) 1542 { 1543 if (lhs->size == rhs->size && 1544 !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0]))) 1545 return true; 1546 1547 return false; 1548 } 1549 1550 /* 1551 * osds + primary 1552 */ 1553 static bool osds_equal(const struct ceph_osds *lhs, 1554 const struct ceph_osds *rhs) 1555 { 1556 if (__osds_equal(lhs, rhs) && 1557 lhs->primary == rhs->primary) 1558 return true; 1559 1560 return false; 1561 } 1562 1563 static bool osds_valid(const struct ceph_osds *set) 1564 { 1565 /* non-empty set */ 1566 if (set->size > 0 && set->primary >= 0) 1567 return true; 1568 1569 /* empty can_shift_osds set */ 1570 if (!set->size && set->primary == -1) 1571 return true; 1572 1573 /* empty !can_shift_osds set - all NONE */ 1574 if (set->size > 0 && set->primary == -1) { 1575 int i; 1576 1577 for (i = 0; i < set->size; i++) { 1578 if (set->osds[i] != CRUSH_ITEM_NONE) 1579 break; 1580 } 1581 if (i == set->size) 1582 return true; 1583 } 1584 1585 return false; 1586 } 1587 1588 void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src) 1589 { 1590 memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0])); 1591 dest->size = src->size; 1592 dest->primary = src->primary; 1593 } 1594 1595 static bool is_split(const struct ceph_pg *pgid, 1596 u32 old_pg_num, 1597 u32 new_pg_num) 1598 { 1599 int old_bits = calc_bits_of(old_pg_num); 1600 int old_mask = (1 << old_bits) - 1; 1601 int n; 1602 1603 WARN_ON(pgid->seed >= old_pg_num); 1604 if (new_pg_num <= old_pg_num) 1605 return false; 1606 1607 for (n = 1; ; n++) { 1608 int next_bit = n << (old_bits - 1); 1609 u32 s = next_bit | pgid->seed; 1610 1611 if (s < old_pg_num || s == pgid->seed) 1612 continue; 1613 if (s >= new_pg_num) 1614 break; 1615 1616 s = ceph_stable_mod(s, old_pg_num, old_mask); 1617 if (s == pgid->seed) 1618 return true; 1619 } 1620 1621 return false; 1622 } 1623 1624 bool ceph_is_new_interval(const struct ceph_osds *old_acting, 1625 const struct ceph_osds *new_acting, 1626 const struct ceph_osds *old_up, 1627 const struct ceph_osds *new_up, 1628 int old_size, 1629 int new_size, 1630 int old_min_size, 1631 int new_min_size, 1632 u32 old_pg_num, 1633 u32 new_pg_num, 1634 bool old_sort_bitwise, 1635 bool new_sort_bitwise, 1636 const struct ceph_pg *pgid) 1637 { 1638 return !osds_equal(old_acting, new_acting) || 1639 !osds_equal(old_up, new_up) || 1640 old_size != new_size || 1641 old_min_size != new_min_size || 1642 is_split(pgid, old_pg_num, new_pg_num) || 1643 old_sort_bitwise != new_sort_bitwise; 1644 } 1645 1646 static int calc_pg_rank(int osd, const struct ceph_osds *acting) 1647 { 1648 int i; 1649 1650 for (i = 0; i < acting->size; i++) { 1651 if (acting->osds[i] == osd) 1652 return i; 1653 } 1654 1655 return -1; 1656 } 1657 1658 static bool primary_changed(const struct ceph_osds *old_acting, 1659 const struct ceph_osds *new_acting) 1660 { 1661 if (!old_acting->size && !new_acting->size) 1662 return false; /* both still empty */ 1663 1664 if (!old_acting->size ^ !new_acting->size) 1665 return true; /* was empty, now not, or vice versa */ 1666 1667 if (old_acting->primary != new_acting->primary) 1668 return true; /* primary changed */ 1669 1670 if (calc_pg_rank(old_acting->primary, old_acting) != 1671 calc_pg_rank(new_acting->primary, new_acting)) 1672 return true; 1673 1674 return false; /* same primary (tho replicas may have changed) */ 1675 } 1676 1677 bool ceph_osds_changed(const struct ceph_osds *old_acting, 1678 const struct ceph_osds *new_acting, 1679 bool any_change) 1680 { 1681 if (primary_changed(old_acting, new_acting)) 1682 return true; 1683 1684 if (any_change && !__osds_equal(old_acting, new_acting)) 1685 return true; 1686 1687 return false; 1688 } 1689 1690 /* 1691 * calculate file layout from given offset, length. 1692 * fill in correct oid, logical length, and object extent 1693 * offset, length. 1694 * 1695 * for now, we write only a single su, until we can 1696 * pass a stride back to the caller. 1697 */ 1698 int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, 1699 u64 off, u64 len, 1700 u64 *ono, 1701 u64 *oxoff, u64 *oxlen) 1702 { 1703 u32 osize = le32_to_cpu(layout->fl_object_size); 1704 u32 su = le32_to_cpu(layout->fl_stripe_unit); 1705 u32 sc = le32_to_cpu(layout->fl_stripe_count); 1706 u32 bl, stripeno, stripepos, objsetno; 1707 u32 su_per_object; 1708 u64 t, su_offset; 1709 1710 dout("mapping %llu~%llu osize %u fl_su %u\n", off, len, 1711 osize, su); 1712 if (su == 0 || sc == 0) 1713 goto invalid; 1714 su_per_object = osize / su; 1715 if (su_per_object == 0) 1716 goto invalid; 1717 dout("osize %u / su %u = su_per_object %u\n", osize, su, 1718 su_per_object); 1719 1720 if ((su & ~PAGE_MASK) != 0) 1721 goto invalid; 1722 1723 /* bl = *off / su; */ 1724 t = off; 1725 do_div(t, su); 1726 bl = t; 1727 dout("off %llu / su %u = bl %u\n", off, su, bl); 1728 1729 stripeno = bl / sc; 1730 stripepos = bl % sc; 1731 objsetno = stripeno / su_per_object; 1732 1733 *ono = objsetno * sc + stripepos; 1734 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono); 1735 1736 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ 1737 t = off; 1738 su_offset = do_div(t, su); 1739 *oxoff = su_offset + (stripeno % su_per_object) * su; 1740 1741 /* 1742 * Calculate the length of the extent being written to the selected 1743 * object. This is the minimum of the full length requested (len) or 1744 * the remainder of the current stripe being written to. 1745 */ 1746 *oxlen = min_t(u64, len, su - su_offset); 1747 1748 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); 1749 return 0; 1750 1751 invalid: 1752 dout(" invalid layout\n"); 1753 *ono = 0; 1754 *oxoff = 0; 1755 *oxlen = 0; 1756 return -EINVAL; 1757 } 1758 EXPORT_SYMBOL(ceph_calc_file_object_mapping); 1759 1760 /* 1761 * Map an object into a PG. 1762 * 1763 * Should only be called with target_oid and target_oloc (as opposed to 1764 * base_oid and base_oloc), since tiering isn't taken into account. 1765 */ 1766 int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, 1767 struct ceph_object_id *oid, 1768 struct ceph_object_locator *oloc, 1769 struct ceph_pg *raw_pgid) 1770 { 1771 struct ceph_pg_pool_info *pi; 1772 1773 pi = ceph_pg_pool_by_id(osdmap, oloc->pool); 1774 if (!pi) 1775 return -ENOENT; 1776 1777 raw_pgid->pool = oloc->pool; 1778 raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, 1779 oid->name_len); 1780 1781 dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name, 1782 raw_pgid->pool, raw_pgid->seed); 1783 return 0; 1784 } 1785 EXPORT_SYMBOL(ceph_object_locator_to_pg); 1786 1787 /* 1788 * Map a raw PG (full precision ps) into an actual PG. 1789 */ 1790 static void raw_pg_to_pg(struct ceph_pg_pool_info *pi, 1791 const struct ceph_pg *raw_pgid, 1792 struct ceph_pg *pgid) 1793 { 1794 pgid->pool = raw_pgid->pool; 1795 pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num, 1796 pi->pg_num_mask); 1797 } 1798 1799 /* 1800 * Map a raw PG (full precision ps) into a placement ps (placement 1801 * seed). Include pool id in that value so that different pools don't 1802 * use the same seeds. 1803 */ 1804 static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi, 1805 const struct ceph_pg *raw_pgid) 1806 { 1807 if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) { 1808 /* hash pool id and seed so that pool PGs do not overlap */ 1809 return crush_hash32_2(CRUSH_HASH_RJENKINS1, 1810 ceph_stable_mod(raw_pgid->seed, 1811 pi->pgp_num, 1812 pi->pgp_num_mask), 1813 raw_pgid->pool); 1814 } else { 1815 /* 1816 * legacy behavior: add ps and pool together. this is 1817 * not a great approach because the PGs from each pool 1818 * will overlap on top of each other: 0.5 == 1.4 == 1819 * 2.3 == ... 1820 */ 1821 return ceph_stable_mod(raw_pgid->seed, pi->pgp_num, 1822 pi->pgp_num_mask) + 1823 (unsigned)raw_pgid->pool; 1824 } 1825 } 1826 1827 static int do_crush(struct ceph_osdmap *map, int ruleno, int x, 1828 int *result, int result_max, 1829 const __u32 *weight, int weight_max) 1830 { 1831 int r; 1832 1833 BUG_ON(result_max > CEPH_PG_MAX_SIZE); 1834 1835 mutex_lock(&map->crush_scratch_mutex); 1836 r = crush_do_rule(map->crush, ruleno, x, result, result_max, 1837 weight, weight_max, map->crush_scratch_ary); 1838 mutex_unlock(&map->crush_scratch_mutex); 1839 1840 return r; 1841 } 1842 1843 /* 1844 * Calculate raw set (CRUSH output) for given PG. The result may 1845 * contain nonexistent OSDs. ->primary is undefined for a raw set. 1846 * 1847 * Placement seed (CRUSH input) is returned through @ppps. 1848 */ 1849 static void pg_to_raw_osds(struct ceph_osdmap *osdmap, 1850 struct ceph_pg_pool_info *pi, 1851 const struct ceph_pg *raw_pgid, 1852 struct ceph_osds *raw, 1853 u32 *ppps) 1854 { 1855 u32 pps = raw_pg_to_pps(pi, raw_pgid); 1856 int ruleno; 1857 int len; 1858 1859 ceph_osds_init(raw); 1860 if (ppps) 1861 *ppps = pps; 1862 1863 ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type, 1864 pi->size); 1865 if (ruleno < 0) { 1866 pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n", 1867 pi->id, pi->crush_ruleset, pi->type, pi->size); 1868 return; 1869 } 1870 1871 len = do_crush(osdmap, ruleno, pps, raw->osds, 1872 min_t(int, pi->size, ARRAY_SIZE(raw->osds)), 1873 osdmap->osd_weight, osdmap->max_osd); 1874 if (len < 0) { 1875 pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n", 1876 len, ruleno, pi->id, pi->crush_ruleset, pi->type, 1877 pi->size); 1878 return; 1879 } 1880 1881 raw->size = len; 1882 } 1883 1884 /* 1885 * Given raw set, calculate up set and up primary. By definition of an 1886 * up set, the result won't contain nonexistent or down OSDs. 1887 * 1888 * This is done in-place - on return @set is the up set. If it's 1889 * empty, ->primary will remain undefined. 1890 */ 1891 static void raw_to_up_osds(struct ceph_osdmap *osdmap, 1892 struct ceph_pg_pool_info *pi, 1893 struct ceph_osds *set) 1894 { 1895 int i; 1896 1897 /* ->primary is undefined for a raw set */ 1898 BUG_ON(set->primary != -1); 1899 1900 if (ceph_can_shift_osds(pi)) { 1901 int removed = 0; 1902 1903 /* shift left */ 1904 for (i = 0; i < set->size; i++) { 1905 if (ceph_osd_is_down(osdmap, set->osds[i])) { 1906 removed++; 1907 continue; 1908 } 1909 if (removed) 1910 set->osds[i - removed] = set->osds[i]; 1911 } 1912 set->size -= removed; 1913 if (set->size > 0) 1914 set->primary = set->osds[0]; 1915 } else { 1916 /* set down/dne devices to NONE */ 1917 for (i = set->size - 1; i >= 0; i--) { 1918 if (ceph_osd_is_down(osdmap, set->osds[i])) 1919 set->osds[i] = CRUSH_ITEM_NONE; 1920 else 1921 set->primary = set->osds[i]; 1922 } 1923 } 1924 } 1925 1926 static void apply_primary_affinity(struct ceph_osdmap *osdmap, 1927 struct ceph_pg_pool_info *pi, 1928 u32 pps, 1929 struct ceph_osds *up) 1930 { 1931 int i; 1932 int pos = -1; 1933 1934 /* 1935 * Do we have any non-default primary_affinity values for these 1936 * osds? 1937 */ 1938 if (!osdmap->osd_primary_affinity) 1939 return; 1940 1941 for (i = 0; i < up->size; i++) { 1942 int osd = up->osds[i]; 1943 1944 if (osd != CRUSH_ITEM_NONE && 1945 osdmap->osd_primary_affinity[osd] != 1946 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) { 1947 break; 1948 } 1949 } 1950 if (i == up->size) 1951 return; 1952 1953 /* 1954 * Pick the primary. Feed both the seed (for the pg) and the 1955 * osd into the hash/rng so that a proportional fraction of an 1956 * osd's pgs get rejected as primary. 1957 */ 1958 for (i = 0; i < up->size; i++) { 1959 int osd = up->osds[i]; 1960 u32 aff; 1961 1962 if (osd == CRUSH_ITEM_NONE) 1963 continue; 1964 1965 aff = osdmap->osd_primary_affinity[osd]; 1966 if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY && 1967 (crush_hash32_2(CRUSH_HASH_RJENKINS1, 1968 pps, osd) >> 16) >= aff) { 1969 /* 1970 * We chose not to use this primary. Note it 1971 * anyway as a fallback in case we don't pick 1972 * anyone else, but keep looking. 1973 */ 1974 if (pos < 0) 1975 pos = i; 1976 } else { 1977 pos = i; 1978 break; 1979 } 1980 } 1981 if (pos < 0) 1982 return; 1983 1984 up->primary = up->osds[pos]; 1985 1986 if (ceph_can_shift_osds(pi) && pos > 0) { 1987 /* move the new primary to the front */ 1988 for (i = pos; i > 0; i--) 1989 up->osds[i] = up->osds[i - 1]; 1990 up->osds[0] = up->primary; 1991 } 1992 } 1993 1994 /* 1995 * Get pg_temp and primary_temp mappings for given PG. 1996 * 1997 * Note that a PG may have none, only pg_temp, only primary_temp or 1998 * both pg_temp and primary_temp mappings. This means @temp isn't 1999 * always a valid OSD set on return: in the "only primary_temp" case, 2000 * @temp will have its ->primary >= 0 but ->size == 0. 2001 */ 2002 static void get_temp_osds(struct ceph_osdmap *osdmap, 2003 struct ceph_pg_pool_info *pi, 2004 const struct ceph_pg *raw_pgid, 2005 struct ceph_osds *temp) 2006 { 2007 struct ceph_pg pgid; 2008 struct ceph_pg_mapping *pg; 2009 int i; 2010 2011 raw_pg_to_pg(pi, raw_pgid, &pgid); 2012 ceph_osds_init(temp); 2013 2014 /* pg_temp? */ 2015 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); 2016 if (pg) { 2017 for (i = 0; i < pg->pg_temp.len; i++) { 2018 if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) { 2019 if (ceph_can_shift_osds(pi)) 2020 continue; 2021 2022 temp->osds[temp->size++] = CRUSH_ITEM_NONE; 2023 } else { 2024 temp->osds[temp->size++] = pg->pg_temp.osds[i]; 2025 } 2026 } 2027 2028 /* apply pg_temp's primary */ 2029 for (i = 0; i < temp->size; i++) { 2030 if (temp->osds[i] != CRUSH_ITEM_NONE) { 2031 temp->primary = temp->osds[i]; 2032 break; 2033 } 2034 } 2035 } 2036 2037 /* primary_temp? */ 2038 pg = __lookup_pg_mapping(&osdmap->primary_temp, pgid); 2039 if (pg) 2040 temp->primary = pg->primary_temp.osd; 2041 } 2042 2043 /* 2044 * Map a PG to its acting set as well as its up set. 2045 * 2046 * Acting set is used for data mapping purposes, while up set can be 2047 * recorded for detecting interval changes and deciding whether to 2048 * resend a request. 2049 */ 2050 void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap, 2051 const struct ceph_pg *raw_pgid, 2052 struct ceph_osds *up, 2053 struct ceph_osds *acting) 2054 { 2055 struct ceph_pg_pool_info *pi; 2056 u32 pps; 2057 2058 pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool); 2059 if (!pi) { 2060 ceph_osds_init(up); 2061 ceph_osds_init(acting); 2062 goto out; 2063 } 2064 2065 pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps); 2066 raw_to_up_osds(osdmap, pi, up); 2067 apply_primary_affinity(osdmap, pi, pps, up); 2068 get_temp_osds(osdmap, pi, raw_pgid, acting); 2069 if (!acting->size) { 2070 memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0])); 2071 acting->size = up->size; 2072 if (acting->primary == -1) 2073 acting->primary = up->primary; 2074 } 2075 out: 2076 WARN_ON(!osds_valid(up) || !osds_valid(acting)); 2077 } 2078 2079 /* 2080 * Return acting primary for given PG, or -1 if none. 2081 */ 2082 int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, 2083 const struct ceph_pg *raw_pgid) 2084 { 2085 struct ceph_osds up, acting; 2086 2087 ceph_pg_to_up_acting_osds(osdmap, raw_pgid, &up, &acting); 2088 return acting.primary; 2089 } 2090 EXPORT_SYMBOL(ceph_pg_to_acting_primary); 2091