1 2 #include <linux/ceph/ceph_debug.h> 3 4 #include <linux/module.h> 5 #include <linux/slab.h> 6 #include <asm/div64.h> 7 8 #include <linux/ceph/libceph.h> 9 #include <linux/ceph/osdmap.h> 10 #include <linux/ceph/decode.h> 11 #include <linux/crush/hash.h> 12 #include <linux/crush/mapper.h> 13 14 char *ceph_osdmap_state_str(char *str, int len, int state) 15 { 16 if (!len) 17 return str; 18 19 if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP)) 20 snprintf(str, len, "exists, up"); 21 else if (state & CEPH_OSD_EXISTS) 22 snprintf(str, len, "exists"); 23 else if (state & CEPH_OSD_UP) 24 snprintf(str, len, "up"); 25 else 26 snprintf(str, len, "doesn't exist"); 27 28 return str; 29 } 30 31 /* maps */ 32 33 static int calc_bits_of(unsigned int t) 34 { 35 int b = 0; 36 while (t) { 37 t = t >> 1; 38 b++; 39 } 40 return b; 41 } 42 43 /* 44 * the foo_mask is the smallest value 2^n-1 that is >= foo. 45 */ 46 static void calc_pg_masks(struct ceph_pg_pool_info *pi) 47 { 48 pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1; 49 pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1; 50 } 51 52 /* 53 * decode crush map 54 */ 55 static int crush_decode_uniform_bucket(void **p, void *end, 56 struct crush_bucket_uniform *b) 57 { 58 dout("crush_decode_uniform_bucket %p to %p\n", *p, end); 59 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); 60 b->item_weight = ceph_decode_32(p); 61 return 0; 62 bad: 63 return -EINVAL; 64 } 65 66 static int crush_decode_list_bucket(void **p, void *end, 67 struct crush_bucket_list *b) 68 { 69 int j; 70 dout("crush_decode_list_bucket %p to %p\n", *p, end); 71 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 72 if (b->item_weights == NULL) 73 return -ENOMEM; 74 b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 75 if (b->sum_weights == NULL) 76 return -ENOMEM; 77 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 78 for (j = 0; j < b->h.size; j++) { 79 b->item_weights[j] = ceph_decode_32(p); 80 b->sum_weights[j] = ceph_decode_32(p); 81 } 82 return 0; 83 bad: 84 return -EINVAL; 85 } 86 87 static int crush_decode_tree_bucket(void **p, void *end, 88 struct crush_bucket_tree *b) 89 { 90 int j; 91 dout("crush_decode_tree_bucket %p to %p\n", *p, end); 92 ceph_decode_32_safe(p, end, b->num_nodes, bad); 93 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS); 94 if (b->node_weights == NULL) 95 return -ENOMEM; 96 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad); 97 for (j = 0; j < b->num_nodes; j++) 98 b->node_weights[j] = ceph_decode_32(p); 99 return 0; 100 bad: 101 return -EINVAL; 102 } 103 104 static int crush_decode_straw_bucket(void **p, void *end, 105 struct crush_bucket_straw *b) 106 { 107 int j; 108 dout("crush_decode_straw_bucket %p to %p\n", *p, end); 109 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 110 if (b->item_weights == NULL) 111 return -ENOMEM; 112 b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 113 if (b->straws == NULL) 114 return -ENOMEM; 115 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 116 for (j = 0; j < b->h.size; j++) { 117 b->item_weights[j] = ceph_decode_32(p); 118 b->straws[j] = ceph_decode_32(p); 119 } 120 return 0; 121 bad: 122 return -EINVAL; 123 } 124 125 static int skip_name_map(void **p, void *end) 126 { 127 int len; 128 ceph_decode_32_safe(p, end, len ,bad); 129 while (len--) { 130 int strlen; 131 *p += sizeof(u32); 132 ceph_decode_32_safe(p, end, strlen, bad); 133 *p += strlen; 134 } 135 return 0; 136 bad: 137 return -EINVAL; 138 } 139 140 static struct crush_map *crush_decode(void *pbyval, void *end) 141 { 142 struct crush_map *c; 143 int err = -EINVAL; 144 int i, j; 145 void **p = &pbyval; 146 void *start = pbyval; 147 u32 magic; 148 u32 num_name_maps; 149 150 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p)); 151 152 c = kzalloc(sizeof(*c), GFP_NOFS); 153 if (c == NULL) 154 return ERR_PTR(-ENOMEM); 155 156 /* set tunables to default values */ 157 c->choose_local_tries = 2; 158 c->choose_local_fallback_tries = 5; 159 c->choose_total_tries = 19; 160 c->chooseleaf_descend_once = 0; 161 162 ceph_decode_need(p, end, 4*sizeof(u32), bad); 163 magic = ceph_decode_32(p); 164 if (magic != CRUSH_MAGIC) { 165 pr_err("crush_decode magic %x != current %x\n", 166 (unsigned int)magic, (unsigned int)CRUSH_MAGIC); 167 goto bad; 168 } 169 c->max_buckets = ceph_decode_32(p); 170 c->max_rules = ceph_decode_32(p); 171 c->max_devices = ceph_decode_32(p); 172 173 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS); 174 if (c->buckets == NULL) 175 goto badmem; 176 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS); 177 if (c->rules == NULL) 178 goto badmem; 179 180 /* buckets */ 181 for (i = 0; i < c->max_buckets; i++) { 182 int size = 0; 183 u32 alg; 184 struct crush_bucket *b; 185 186 ceph_decode_32_safe(p, end, alg, bad); 187 if (alg == 0) { 188 c->buckets[i] = NULL; 189 continue; 190 } 191 dout("crush_decode bucket %d off %x %p to %p\n", 192 i, (int)(*p-start), *p, end); 193 194 switch (alg) { 195 case CRUSH_BUCKET_UNIFORM: 196 size = sizeof(struct crush_bucket_uniform); 197 break; 198 case CRUSH_BUCKET_LIST: 199 size = sizeof(struct crush_bucket_list); 200 break; 201 case CRUSH_BUCKET_TREE: 202 size = sizeof(struct crush_bucket_tree); 203 break; 204 case CRUSH_BUCKET_STRAW: 205 size = sizeof(struct crush_bucket_straw); 206 break; 207 default: 208 err = -EINVAL; 209 goto bad; 210 } 211 BUG_ON(size == 0); 212 b = c->buckets[i] = kzalloc(size, GFP_NOFS); 213 if (b == NULL) 214 goto badmem; 215 216 ceph_decode_need(p, end, 4*sizeof(u32), bad); 217 b->id = ceph_decode_32(p); 218 b->type = ceph_decode_16(p); 219 b->alg = ceph_decode_8(p); 220 b->hash = ceph_decode_8(p); 221 b->weight = ceph_decode_32(p); 222 b->size = ceph_decode_32(p); 223 224 dout("crush_decode bucket size %d off %x %p to %p\n", 225 b->size, (int)(*p-start), *p, end); 226 227 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS); 228 if (b->items == NULL) 229 goto badmem; 230 b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS); 231 if (b->perm == NULL) 232 goto badmem; 233 b->perm_n = 0; 234 235 ceph_decode_need(p, end, b->size*sizeof(u32), bad); 236 for (j = 0; j < b->size; j++) 237 b->items[j] = ceph_decode_32(p); 238 239 switch (b->alg) { 240 case CRUSH_BUCKET_UNIFORM: 241 err = crush_decode_uniform_bucket(p, end, 242 (struct crush_bucket_uniform *)b); 243 if (err < 0) 244 goto bad; 245 break; 246 case CRUSH_BUCKET_LIST: 247 err = crush_decode_list_bucket(p, end, 248 (struct crush_bucket_list *)b); 249 if (err < 0) 250 goto bad; 251 break; 252 case CRUSH_BUCKET_TREE: 253 err = crush_decode_tree_bucket(p, end, 254 (struct crush_bucket_tree *)b); 255 if (err < 0) 256 goto bad; 257 break; 258 case CRUSH_BUCKET_STRAW: 259 err = crush_decode_straw_bucket(p, end, 260 (struct crush_bucket_straw *)b); 261 if (err < 0) 262 goto bad; 263 break; 264 } 265 } 266 267 /* rules */ 268 dout("rule vec is %p\n", c->rules); 269 for (i = 0; i < c->max_rules; i++) { 270 u32 yes; 271 struct crush_rule *r; 272 273 ceph_decode_32_safe(p, end, yes, bad); 274 if (!yes) { 275 dout("crush_decode NO rule %d off %x %p to %p\n", 276 i, (int)(*p-start), *p, end); 277 c->rules[i] = NULL; 278 continue; 279 } 280 281 dout("crush_decode rule %d off %x %p to %p\n", 282 i, (int)(*p-start), *p, end); 283 284 /* len */ 285 ceph_decode_32_safe(p, end, yes, bad); 286 #if BITS_PER_LONG == 32 287 err = -EINVAL; 288 if (yes > (ULONG_MAX - sizeof(*r)) 289 / sizeof(struct crush_rule_step)) 290 goto bad; 291 #endif 292 r = c->rules[i] = kmalloc(sizeof(*r) + 293 yes*sizeof(struct crush_rule_step), 294 GFP_NOFS); 295 if (r == NULL) 296 goto badmem; 297 dout(" rule %d is at %p\n", i, r); 298 r->len = yes; 299 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */ 300 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad); 301 for (j = 0; j < r->len; j++) { 302 r->steps[j].op = ceph_decode_32(p); 303 r->steps[j].arg1 = ceph_decode_32(p); 304 r->steps[j].arg2 = ceph_decode_32(p); 305 } 306 } 307 308 /* ignore trailing name maps. */ 309 for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) { 310 err = skip_name_map(p, end); 311 if (err < 0) 312 goto done; 313 } 314 315 /* tunables */ 316 ceph_decode_need(p, end, 3*sizeof(u32), done); 317 c->choose_local_tries = ceph_decode_32(p); 318 c->choose_local_fallback_tries = ceph_decode_32(p); 319 c->choose_total_tries = ceph_decode_32(p); 320 dout("crush decode tunable choose_local_tries = %d", 321 c->choose_local_tries); 322 dout("crush decode tunable choose_local_fallback_tries = %d", 323 c->choose_local_fallback_tries); 324 dout("crush decode tunable choose_total_tries = %d", 325 c->choose_total_tries); 326 327 ceph_decode_need(p, end, sizeof(u32), done); 328 c->chooseleaf_descend_once = ceph_decode_32(p); 329 dout("crush decode tunable chooseleaf_descend_once = %d", 330 c->chooseleaf_descend_once); 331 332 done: 333 dout("crush_decode success\n"); 334 return c; 335 336 badmem: 337 err = -ENOMEM; 338 bad: 339 dout("crush_decode fail %d\n", err); 340 crush_destroy(c); 341 return ERR_PTR(err); 342 } 343 344 /* 345 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid 346 * to a set of osds) 347 */ 348 static int pgid_cmp(struct ceph_pg l, struct ceph_pg r) 349 { 350 if (l.pool < r.pool) 351 return -1; 352 if (l.pool > r.pool) 353 return 1; 354 if (l.seed < r.seed) 355 return -1; 356 if (l.seed > r.seed) 357 return 1; 358 return 0; 359 } 360 361 static int __insert_pg_mapping(struct ceph_pg_mapping *new, 362 struct rb_root *root) 363 { 364 struct rb_node **p = &root->rb_node; 365 struct rb_node *parent = NULL; 366 struct ceph_pg_mapping *pg = NULL; 367 int c; 368 369 dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new); 370 while (*p) { 371 parent = *p; 372 pg = rb_entry(parent, struct ceph_pg_mapping, node); 373 c = pgid_cmp(new->pgid, pg->pgid); 374 if (c < 0) 375 p = &(*p)->rb_left; 376 else if (c > 0) 377 p = &(*p)->rb_right; 378 else 379 return -EEXIST; 380 } 381 382 rb_link_node(&new->node, parent, p); 383 rb_insert_color(&new->node, root); 384 return 0; 385 } 386 387 static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, 388 struct ceph_pg pgid) 389 { 390 struct rb_node *n = root->rb_node; 391 struct ceph_pg_mapping *pg; 392 int c; 393 394 while (n) { 395 pg = rb_entry(n, struct ceph_pg_mapping, node); 396 c = pgid_cmp(pgid, pg->pgid); 397 if (c < 0) { 398 n = n->rb_left; 399 } else if (c > 0) { 400 n = n->rb_right; 401 } else { 402 dout("__lookup_pg_mapping %lld.%x got %p\n", 403 pgid.pool, pgid.seed, pg); 404 return pg; 405 } 406 } 407 return NULL; 408 } 409 410 static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid) 411 { 412 struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid); 413 414 if (pg) { 415 dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed, 416 pg); 417 rb_erase(&pg->node, root); 418 kfree(pg); 419 return 0; 420 } 421 dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed); 422 return -ENOENT; 423 } 424 425 /* 426 * rbtree of pg pool info 427 */ 428 static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new) 429 { 430 struct rb_node **p = &root->rb_node; 431 struct rb_node *parent = NULL; 432 struct ceph_pg_pool_info *pi = NULL; 433 434 while (*p) { 435 parent = *p; 436 pi = rb_entry(parent, struct ceph_pg_pool_info, node); 437 if (new->id < pi->id) 438 p = &(*p)->rb_left; 439 else if (new->id > pi->id) 440 p = &(*p)->rb_right; 441 else 442 return -EEXIST; 443 } 444 445 rb_link_node(&new->node, parent, p); 446 rb_insert_color(&new->node, root); 447 return 0; 448 } 449 450 static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id) 451 { 452 struct ceph_pg_pool_info *pi; 453 struct rb_node *n = root->rb_node; 454 455 while (n) { 456 pi = rb_entry(n, struct ceph_pg_pool_info, node); 457 if (id < pi->id) 458 n = n->rb_left; 459 else if (id > pi->id) 460 n = n->rb_right; 461 else 462 return pi; 463 } 464 return NULL; 465 } 466 467 struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id) 468 { 469 return __lookup_pg_pool(&map->pg_pools, id); 470 } 471 472 const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id) 473 { 474 struct ceph_pg_pool_info *pi; 475 476 if (id == CEPH_NOPOOL) 477 return NULL; 478 479 if (WARN_ON_ONCE(id > (u64) INT_MAX)) 480 return NULL; 481 482 pi = __lookup_pg_pool(&map->pg_pools, (int) id); 483 484 return pi ? pi->name : NULL; 485 } 486 EXPORT_SYMBOL(ceph_pg_pool_name_by_id); 487 488 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) 489 { 490 struct rb_node *rbp; 491 492 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) { 493 struct ceph_pg_pool_info *pi = 494 rb_entry(rbp, struct ceph_pg_pool_info, node); 495 if (pi->name && strcmp(pi->name, name) == 0) 496 return pi->id; 497 } 498 return -ENOENT; 499 } 500 EXPORT_SYMBOL(ceph_pg_poolid_by_name); 501 502 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) 503 { 504 rb_erase(&pi->node, root); 505 kfree(pi->name); 506 kfree(pi); 507 } 508 509 static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) 510 { 511 u8 ev, cv; 512 unsigned len, num; 513 void *pool_end; 514 515 ceph_decode_need(p, end, 2 + 4, bad); 516 ev = ceph_decode_8(p); /* encoding version */ 517 cv = ceph_decode_8(p); /* compat version */ 518 if (ev < 5) { 519 pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv); 520 return -EINVAL; 521 } 522 if (cv > 9) { 523 pr_warning("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv); 524 return -EINVAL; 525 } 526 len = ceph_decode_32(p); 527 ceph_decode_need(p, end, len, bad); 528 pool_end = *p + len; 529 530 pi->type = ceph_decode_8(p); 531 pi->size = ceph_decode_8(p); 532 pi->crush_ruleset = ceph_decode_8(p); 533 pi->object_hash = ceph_decode_8(p); 534 535 pi->pg_num = ceph_decode_32(p); 536 pi->pgp_num = ceph_decode_32(p); 537 538 *p += 4 + 4; /* skip lpg* */ 539 *p += 4; /* skip last_change */ 540 *p += 8 + 4; /* skip snap_seq, snap_epoch */ 541 542 /* skip snaps */ 543 num = ceph_decode_32(p); 544 while (num--) { 545 *p += 8; /* snapid key */ 546 *p += 1 + 1; /* versions */ 547 len = ceph_decode_32(p); 548 *p += len; 549 } 550 551 /* skip removed_snaps */ 552 num = ceph_decode_32(p); 553 *p += num * (8 + 8); 554 555 *p += 8; /* skip auid */ 556 pi->flags = ceph_decode_64(p); 557 *p += 4; /* skip crash_replay_interval */ 558 559 if (ev >= 7) 560 *p += 1; /* skip min_size */ 561 562 if (ev >= 8) 563 *p += 8 + 8; /* skip quota_max_* */ 564 565 if (ev >= 9) { 566 /* skip tiers */ 567 num = ceph_decode_32(p); 568 *p += num * 8; 569 570 *p += 8; /* skip tier_of */ 571 *p += 1; /* skip cache_mode */ 572 573 pi->read_tier = ceph_decode_64(p); 574 pi->write_tier = ceph_decode_64(p); 575 } else { 576 pi->read_tier = -1; 577 pi->write_tier = -1; 578 } 579 580 /* ignore the rest */ 581 582 *p = pool_end; 583 calc_pg_masks(pi); 584 return 0; 585 586 bad: 587 return -EINVAL; 588 } 589 590 static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map) 591 { 592 struct ceph_pg_pool_info *pi; 593 u32 num, len; 594 u64 pool; 595 596 ceph_decode_32_safe(p, end, num, bad); 597 dout(" %d pool names\n", num); 598 while (num--) { 599 ceph_decode_64_safe(p, end, pool, bad); 600 ceph_decode_32_safe(p, end, len, bad); 601 dout(" pool %llu len %d\n", pool, len); 602 ceph_decode_need(p, end, len, bad); 603 pi = __lookup_pg_pool(&map->pg_pools, pool); 604 if (pi) { 605 char *name = kstrndup(*p, len, GFP_NOFS); 606 607 if (!name) 608 return -ENOMEM; 609 kfree(pi->name); 610 pi->name = name; 611 dout(" name is %s\n", pi->name); 612 } 613 *p += len; 614 } 615 return 0; 616 617 bad: 618 return -EINVAL; 619 } 620 621 /* 622 * osd map 623 */ 624 void ceph_osdmap_destroy(struct ceph_osdmap *map) 625 { 626 dout("osdmap_destroy %p\n", map); 627 if (map->crush) 628 crush_destroy(map->crush); 629 while (!RB_EMPTY_ROOT(&map->pg_temp)) { 630 struct ceph_pg_mapping *pg = 631 rb_entry(rb_first(&map->pg_temp), 632 struct ceph_pg_mapping, node); 633 rb_erase(&pg->node, &map->pg_temp); 634 kfree(pg); 635 } 636 while (!RB_EMPTY_ROOT(&map->pg_pools)) { 637 struct ceph_pg_pool_info *pi = 638 rb_entry(rb_first(&map->pg_pools), 639 struct ceph_pg_pool_info, node); 640 __remove_pg_pool(&map->pg_pools, pi); 641 } 642 kfree(map->osd_state); 643 kfree(map->osd_weight); 644 kfree(map->osd_addr); 645 kfree(map); 646 } 647 648 /* 649 * adjust max osd value. reallocate arrays. 650 */ 651 static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) 652 { 653 u8 *state; 654 struct ceph_entity_addr *addr; 655 u32 *weight; 656 657 state = kcalloc(max, sizeof(*state), GFP_NOFS); 658 addr = kcalloc(max, sizeof(*addr), GFP_NOFS); 659 weight = kcalloc(max, sizeof(*weight), GFP_NOFS); 660 if (state == NULL || addr == NULL || weight == NULL) { 661 kfree(state); 662 kfree(addr); 663 kfree(weight); 664 return -ENOMEM; 665 } 666 667 /* copy old? */ 668 if (map->osd_state) { 669 memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); 670 memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); 671 memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); 672 kfree(map->osd_state); 673 kfree(map->osd_addr); 674 kfree(map->osd_weight); 675 } 676 677 map->osd_state = state; 678 map->osd_weight = weight; 679 map->osd_addr = addr; 680 map->max_osd = max; 681 return 0; 682 } 683 684 /* 685 * decode a full map. 686 */ 687 struct ceph_osdmap *osdmap_decode(void **p, void *end) 688 { 689 struct ceph_osdmap *map; 690 u16 version; 691 u32 len, max, i; 692 int err = -EINVAL; 693 void *start = *p; 694 struct ceph_pg_pool_info *pi; 695 696 dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p)); 697 698 map = kzalloc(sizeof(*map), GFP_NOFS); 699 if (map == NULL) 700 return ERR_PTR(-ENOMEM); 701 map->pg_temp = RB_ROOT; 702 703 ceph_decode_16_safe(p, end, version, bad); 704 if (version > 6) { 705 pr_warning("got unknown v %d > 6 of osdmap\n", version); 706 goto bad; 707 } 708 if (version < 6) { 709 pr_warning("got old v %d < 6 of osdmap\n", version); 710 goto bad; 711 } 712 713 ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad); 714 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); 715 map->epoch = ceph_decode_32(p); 716 ceph_decode_copy(p, &map->created, sizeof(map->created)); 717 ceph_decode_copy(p, &map->modified, sizeof(map->modified)); 718 719 ceph_decode_32_safe(p, end, max, bad); 720 while (max--) { 721 ceph_decode_need(p, end, 8 + 2, bad); 722 err = -ENOMEM; 723 pi = kzalloc(sizeof(*pi), GFP_NOFS); 724 if (!pi) 725 goto bad; 726 pi->id = ceph_decode_64(p); 727 err = __decode_pool(p, end, pi); 728 if (err < 0) { 729 kfree(pi); 730 goto bad; 731 } 732 __insert_pg_pool(&map->pg_pools, pi); 733 } 734 735 err = __decode_pool_names(p, end, map); 736 if (err < 0) { 737 dout("fail to decode pool names"); 738 goto bad; 739 } 740 741 ceph_decode_32_safe(p, end, map->pool_max, bad); 742 743 ceph_decode_32_safe(p, end, map->flags, bad); 744 745 max = ceph_decode_32(p); 746 747 /* (re)alloc osd arrays */ 748 err = osdmap_set_max_osd(map, max); 749 if (err < 0) 750 goto bad; 751 dout("osdmap_decode max_osd = %d\n", map->max_osd); 752 753 /* osds */ 754 err = -EINVAL; 755 ceph_decode_need(p, end, 3*sizeof(u32) + 756 map->max_osd*(1 + sizeof(*map->osd_weight) + 757 sizeof(*map->osd_addr)), bad); 758 *p += 4; /* skip length field (should match max) */ 759 ceph_decode_copy(p, map->osd_state, map->max_osd); 760 761 *p += 4; /* skip length field (should match max) */ 762 for (i = 0; i < map->max_osd; i++) 763 map->osd_weight[i] = ceph_decode_32(p); 764 765 *p += 4; /* skip length field (should match max) */ 766 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); 767 for (i = 0; i < map->max_osd; i++) 768 ceph_decode_addr(&map->osd_addr[i]); 769 770 /* pg_temp */ 771 ceph_decode_32_safe(p, end, len, bad); 772 for (i = 0; i < len; i++) { 773 int n, j; 774 struct ceph_pg pgid; 775 struct ceph_pg_mapping *pg; 776 777 err = ceph_decode_pgid(p, end, &pgid); 778 if (err) 779 goto bad; 780 ceph_decode_need(p, end, sizeof(u32), bad); 781 n = ceph_decode_32(p); 782 err = -EINVAL; 783 if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) 784 goto bad; 785 ceph_decode_need(p, end, n * sizeof(u32), bad); 786 err = -ENOMEM; 787 pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS); 788 if (!pg) 789 goto bad; 790 pg->pgid = pgid; 791 pg->len = n; 792 for (j = 0; j < n; j++) 793 pg->osds[j] = ceph_decode_32(p); 794 795 err = __insert_pg_mapping(pg, &map->pg_temp); 796 if (err) 797 goto bad; 798 dout(" added pg_temp %lld.%x len %d\n", pgid.pool, pgid.seed, 799 len); 800 } 801 802 /* crush */ 803 ceph_decode_32_safe(p, end, len, bad); 804 dout("osdmap_decode crush len %d from off 0x%x\n", len, 805 (int)(*p - start)); 806 ceph_decode_need(p, end, len, bad); 807 map->crush = crush_decode(*p, end); 808 *p += len; 809 if (IS_ERR(map->crush)) { 810 err = PTR_ERR(map->crush); 811 map->crush = NULL; 812 goto bad; 813 } 814 815 /* ignore the rest of the map */ 816 *p = end; 817 818 dout("osdmap_decode done %p %p\n", *p, end); 819 return map; 820 821 bad: 822 dout("osdmap_decode fail err %d\n", err); 823 ceph_osdmap_destroy(map); 824 return ERR_PTR(err); 825 } 826 827 /* 828 * decode and apply an incremental map update. 829 */ 830 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, 831 struct ceph_osdmap *map, 832 struct ceph_messenger *msgr) 833 { 834 struct crush_map *newcrush = NULL; 835 struct ceph_fsid fsid; 836 u32 epoch = 0; 837 struct ceph_timespec modified; 838 s32 len; 839 u64 pool; 840 __s64 new_pool_max; 841 __s32 new_flags, max; 842 void *start = *p; 843 int err = -EINVAL; 844 u16 version; 845 846 ceph_decode_16_safe(p, end, version, bad); 847 if (version != 6) { 848 pr_warning("got unknown v %d != 6 of inc osdmap\n", version); 849 goto bad; 850 } 851 852 ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32), 853 bad); 854 ceph_decode_copy(p, &fsid, sizeof(fsid)); 855 epoch = ceph_decode_32(p); 856 BUG_ON(epoch != map->epoch+1); 857 ceph_decode_copy(p, &modified, sizeof(modified)); 858 new_pool_max = ceph_decode_64(p); 859 new_flags = ceph_decode_32(p); 860 861 /* full map? */ 862 ceph_decode_32_safe(p, end, len, bad); 863 if (len > 0) { 864 dout("apply_incremental full map len %d, %p to %p\n", 865 len, *p, end); 866 return osdmap_decode(p, min(*p+len, end)); 867 } 868 869 /* new crush? */ 870 ceph_decode_32_safe(p, end, len, bad); 871 if (len > 0) { 872 dout("apply_incremental new crush map len %d, %p to %p\n", 873 len, *p, end); 874 newcrush = crush_decode(*p, min(*p+len, end)); 875 if (IS_ERR(newcrush)) 876 return ERR_CAST(newcrush); 877 *p += len; 878 } 879 880 /* new flags? */ 881 if (new_flags >= 0) 882 map->flags = new_flags; 883 if (new_pool_max >= 0) 884 map->pool_max = new_pool_max; 885 886 ceph_decode_need(p, end, 5*sizeof(u32), bad); 887 888 /* new max? */ 889 max = ceph_decode_32(p); 890 if (max >= 0) { 891 err = osdmap_set_max_osd(map, max); 892 if (err < 0) 893 goto bad; 894 } 895 896 map->epoch++; 897 map->modified = modified; 898 if (newcrush) { 899 if (map->crush) 900 crush_destroy(map->crush); 901 map->crush = newcrush; 902 newcrush = NULL; 903 } 904 905 /* new_pool */ 906 ceph_decode_32_safe(p, end, len, bad); 907 while (len--) { 908 struct ceph_pg_pool_info *pi; 909 910 ceph_decode_64_safe(p, end, pool, bad); 911 pi = __lookup_pg_pool(&map->pg_pools, pool); 912 if (!pi) { 913 pi = kzalloc(sizeof(*pi), GFP_NOFS); 914 if (!pi) { 915 err = -ENOMEM; 916 goto bad; 917 } 918 pi->id = pool; 919 __insert_pg_pool(&map->pg_pools, pi); 920 } 921 err = __decode_pool(p, end, pi); 922 if (err < 0) 923 goto bad; 924 } 925 if (version >= 5) { 926 err = __decode_pool_names(p, end, map); 927 if (err < 0) 928 goto bad; 929 } 930 931 /* old_pool */ 932 ceph_decode_32_safe(p, end, len, bad); 933 while (len--) { 934 struct ceph_pg_pool_info *pi; 935 936 ceph_decode_64_safe(p, end, pool, bad); 937 pi = __lookup_pg_pool(&map->pg_pools, pool); 938 if (pi) 939 __remove_pg_pool(&map->pg_pools, pi); 940 } 941 942 /* new_up */ 943 err = -EINVAL; 944 ceph_decode_32_safe(p, end, len, bad); 945 while (len--) { 946 u32 osd; 947 struct ceph_entity_addr addr; 948 ceph_decode_32_safe(p, end, osd, bad); 949 ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad); 950 ceph_decode_addr(&addr); 951 pr_info("osd%d up\n", osd); 952 BUG_ON(osd >= map->max_osd); 953 map->osd_state[osd] |= CEPH_OSD_UP; 954 map->osd_addr[osd] = addr; 955 } 956 957 /* new_state */ 958 ceph_decode_32_safe(p, end, len, bad); 959 while (len--) { 960 u32 osd; 961 u8 xorstate; 962 ceph_decode_32_safe(p, end, osd, bad); 963 xorstate = **(u8 **)p; 964 (*p)++; /* clean flag */ 965 if (xorstate == 0) 966 xorstate = CEPH_OSD_UP; 967 if (xorstate & CEPH_OSD_UP) 968 pr_info("osd%d down\n", osd); 969 if (osd < map->max_osd) 970 map->osd_state[osd] ^= xorstate; 971 } 972 973 /* new_weight */ 974 ceph_decode_32_safe(p, end, len, bad); 975 while (len--) { 976 u32 osd, off; 977 ceph_decode_need(p, end, sizeof(u32)*2, bad); 978 osd = ceph_decode_32(p); 979 off = ceph_decode_32(p); 980 pr_info("osd%d weight 0x%x %s\n", osd, off, 981 off == CEPH_OSD_IN ? "(in)" : 982 (off == CEPH_OSD_OUT ? "(out)" : "")); 983 if (osd < map->max_osd) 984 map->osd_weight[osd] = off; 985 } 986 987 /* new_pg_temp */ 988 ceph_decode_32_safe(p, end, len, bad); 989 while (len--) { 990 struct ceph_pg_mapping *pg; 991 int j; 992 struct ceph_pg pgid; 993 u32 pglen; 994 995 err = ceph_decode_pgid(p, end, &pgid); 996 if (err) 997 goto bad; 998 ceph_decode_need(p, end, sizeof(u32), bad); 999 pglen = ceph_decode_32(p); 1000 if (pglen) { 1001 ceph_decode_need(p, end, pglen*sizeof(u32), bad); 1002 1003 /* removing existing (if any) */ 1004 (void) __remove_pg_mapping(&map->pg_temp, pgid); 1005 1006 /* insert */ 1007 err = -EINVAL; 1008 if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) 1009 goto bad; 1010 err = -ENOMEM; 1011 pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS); 1012 if (!pg) 1013 goto bad; 1014 pg->pgid = pgid; 1015 pg->len = pglen; 1016 for (j = 0; j < pglen; j++) 1017 pg->osds[j] = ceph_decode_32(p); 1018 err = __insert_pg_mapping(pg, &map->pg_temp); 1019 if (err) { 1020 kfree(pg); 1021 goto bad; 1022 } 1023 dout(" added pg_temp %lld.%x len %d\n", pgid.pool, 1024 pgid.seed, pglen); 1025 } else { 1026 /* remove */ 1027 __remove_pg_mapping(&map->pg_temp, pgid); 1028 } 1029 } 1030 1031 /* ignore the rest */ 1032 *p = end; 1033 return map; 1034 1035 bad: 1036 pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n", 1037 epoch, (int)(*p - start), *p, start, end); 1038 print_hex_dump(KERN_DEBUG, "osdmap: ", 1039 DUMP_PREFIX_OFFSET, 16, 1, 1040 start, end - start, true); 1041 if (newcrush) 1042 crush_destroy(newcrush); 1043 return ERR_PTR(err); 1044 } 1045 1046 1047 1048 1049 /* 1050 * calculate file layout from given offset, length. 1051 * fill in correct oid, logical length, and object extent 1052 * offset, length. 1053 * 1054 * for now, we write only a single su, until we can 1055 * pass a stride back to the caller. 1056 */ 1057 int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, 1058 u64 off, u64 len, 1059 u64 *ono, 1060 u64 *oxoff, u64 *oxlen) 1061 { 1062 u32 osize = le32_to_cpu(layout->fl_object_size); 1063 u32 su = le32_to_cpu(layout->fl_stripe_unit); 1064 u32 sc = le32_to_cpu(layout->fl_stripe_count); 1065 u32 bl, stripeno, stripepos, objsetno; 1066 u32 su_per_object; 1067 u64 t, su_offset; 1068 1069 dout("mapping %llu~%llu osize %u fl_su %u\n", off, len, 1070 osize, su); 1071 if (su == 0 || sc == 0) 1072 goto invalid; 1073 su_per_object = osize / su; 1074 if (su_per_object == 0) 1075 goto invalid; 1076 dout("osize %u / su %u = su_per_object %u\n", osize, su, 1077 su_per_object); 1078 1079 if ((su & ~PAGE_MASK) != 0) 1080 goto invalid; 1081 1082 /* bl = *off / su; */ 1083 t = off; 1084 do_div(t, su); 1085 bl = t; 1086 dout("off %llu / su %u = bl %u\n", off, su, bl); 1087 1088 stripeno = bl / sc; 1089 stripepos = bl % sc; 1090 objsetno = stripeno / su_per_object; 1091 1092 *ono = objsetno * sc + stripepos; 1093 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono); 1094 1095 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ 1096 t = off; 1097 su_offset = do_div(t, su); 1098 *oxoff = su_offset + (stripeno % su_per_object) * su; 1099 1100 /* 1101 * Calculate the length of the extent being written to the selected 1102 * object. This is the minimum of the full length requested (len) or 1103 * the remainder of the current stripe being written to. 1104 */ 1105 *oxlen = min_t(u64, len, su - su_offset); 1106 1107 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); 1108 return 0; 1109 1110 invalid: 1111 dout(" invalid layout\n"); 1112 *ono = 0; 1113 *oxoff = 0; 1114 *oxlen = 0; 1115 return -EINVAL; 1116 } 1117 EXPORT_SYMBOL(ceph_calc_file_object_mapping); 1118 1119 /* 1120 * Calculate mapping of a (oloc, oid) pair to a PG. Should only be 1121 * called with target's (oloc, oid), since tiering isn't taken into 1122 * account. 1123 */ 1124 int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap, 1125 struct ceph_object_locator *oloc, 1126 struct ceph_object_id *oid, 1127 struct ceph_pg *pg_out) 1128 { 1129 struct ceph_pg_pool_info *pi; 1130 1131 pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool); 1132 if (!pi) 1133 return -EIO; 1134 1135 pg_out->pool = oloc->pool; 1136 pg_out->seed = ceph_str_hash(pi->object_hash, oid->name, 1137 oid->name_len); 1138 1139 dout("%s '%.*s' pgid %llu.%x\n", __func__, oid->name_len, oid->name, 1140 pg_out->pool, pg_out->seed); 1141 return 0; 1142 } 1143 EXPORT_SYMBOL(ceph_oloc_oid_to_pg); 1144 1145 static int crush_do_rule_ary(const struct crush_map *map, int ruleno, int x, 1146 int *result, int result_max, 1147 const __u32 *weight, int weight_max) 1148 { 1149 int scratch[result_max * 3]; 1150 1151 return crush_do_rule(map, ruleno, x, result, result_max, 1152 weight, weight_max, scratch); 1153 } 1154 1155 /* 1156 * Calculate raw osd vector for the given pgid. Return pointer to osd 1157 * array, or NULL on failure. 1158 */ 1159 static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, 1160 int *osds, int *num) 1161 { 1162 struct ceph_pg_mapping *pg; 1163 struct ceph_pg_pool_info *pool; 1164 int ruleno; 1165 int r; 1166 u32 pps; 1167 1168 pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool); 1169 if (!pool) 1170 return NULL; 1171 1172 /* pg_temp? */ 1173 pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num, 1174 pool->pg_num_mask); 1175 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); 1176 if (pg) { 1177 *num = pg->len; 1178 return pg->osds; 1179 } 1180 1181 /* crush */ 1182 ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset, 1183 pool->type, pool->size); 1184 if (ruleno < 0) { 1185 pr_err("no crush rule pool %lld ruleset %d type %d size %d\n", 1186 pgid.pool, pool->crush_ruleset, pool->type, 1187 pool->size); 1188 return NULL; 1189 } 1190 1191 if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) { 1192 /* hash pool id and seed sothat pool PGs do not overlap */ 1193 pps = crush_hash32_2(CRUSH_HASH_RJENKINS1, 1194 ceph_stable_mod(pgid.seed, pool->pgp_num, 1195 pool->pgp_num_mask), 1196 pgid.pool); 1197 } else { 1198 /* 1199 * legacy ehavior: add ps and pool together. this is 1200 * not a great approach because the PGs from each pool 1201 * will overlap on top of each other: 0.5 == 1.4 == 1202 * 2.3 == ... 1203 */ 1204 pps = ceph_stable_mod(pgid.seed, pool->pgp_num, 1205 pool->pgp_num_mask) + 1206 (unsigned)pgid.pool; 1207 } 1208 r = crush_do_rule_ary(osdmap->crush, ruleno, pps, 1209 osds, min_t(int, pool->size, *num), 1210 osdmap->osd_weight, osdmap->max_osd); 1211 if (r < 0) { 1212 pr_err("error %d from crush rule: pool %lld ruleset %d type %d" 1213 " size %d\n", r, pgid.pool, pool->crush_ruleset, 1214 pool->type, pool->size); 1215 return NULL; 1216 } 1217 *num = r; 1218 return osds; 1219 } 1220 1221 /* 1222 * Return acting set for given pgid. 1223 */ 1224 int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, 1225 int *acting) 1226 { 1227 int rawosds[CEPH_PG_MAX_SIZE], *osds; 1228 int i, o, num = CEPH_PG_MAX_SIZE; 1229 1230 osds = calc_pg_raw(osdmap, pgid, rawosds, &num); 1231 if (!osds) 1232 return -1; 1233 1234 /* primary is first up osd */ 1235 o = 0; 1236 for (i = 0; i < num; i++) 1237 if (ceph_osd_is_up(osdmap, osds[i])) 1238 acting[o++] = osds[i]; 1239 return o; 1240 } 1241 1242 /* 1243 * Return primary osd for given pgid, or -1 if none. 1244 */ 1245 int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid) 1246 { 1247 int rawosds[CEPH_PG_MAX_SIZE], *osds; 1248 int i, num = CEPH_PG_MAX_SIZE; 1249 1250 osds = calc_pg_raw(osdmap, pgid, rawosds, &num); 1251 if (!osds) 1252 return -1; 1253 1254 /* primary is first up osd */ 1255 for (i = 0; i < num; i++) 1256 if (ceph_osd_is_up(osdmap, osds[i])) 1257 return osds[i]; 1258 return -1; 1259 } 1260 EXPORT_SYMBOL(ceph_calc_pg_primary); 1261