// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}

/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
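
/*
 * Unlike the legacy straw bucket above, a straw2 bucket carries no
 * precomputed straw values on the wire; only the per-item weights are
 * decoded, and the straw draws are derived from them at mapping time.
 */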
static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;
	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}

DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
		node);

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
		    rb_entry(rb_first(&c->choose_args),
			     struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}

static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
	ret = -EINVAL;
fail:
	kfree(a);
	return ERR_PTR(ret);
}
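
/*
 * choose_args layout, as consumed by decode_choose_arg{,s}() below:
 * a u32 count of arg maps, each keyed by a u64 index and followed by a
 * u32 bucket count; every bucket entry carries an optional weight_set
 * (one u32 weight vector per replica position) and an optional ids
 * array, which, if present, must match the bucket's item count.
 */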
/*
 * Assumes @arg is zero-initialized.
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;

			if (arg->ids_size &&
			    arg->ids_size != c->buckets[bucket_index]->size)
				goto e_inval;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}

static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}
		/* Every bucket has a permutation array.
*/ 338 c->working_size += c->buckets[b]->size * sizeof(__u32); 339 } 340 } 341 342 static struct crush_map *crush_decode(void *pbyval, void *end) 343 { 344 struct crush_map *c; 345 int err; 346 int i, j; 347 void **p = &pbyval; 348 void *start = pbyval; 349 u32 magic; 350 351 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p)); 352 353 c = kzalloc(sizeof(*c), GFP_NOFS); 354 if (c == NULL) 355 return ERR_PTR(-ENOMEM); 356 357 c->choose_args = RB_ROOT; 358 359 /* set tunables to default values */ 360 c->choose_local_tries = 2; 361 c->choose_local_fallback_tries = 5; 362 c->choose_total_tries = 19; 363 c->chooseleaf_descend_once = 0; 364 365 ceph_decode_need(p, end, 4*sizeof(u32), bad); 366 magic = ceph_decode_32(p); 367 if (magic != CRUSH_MAGIC) { 368 pr_err("crush_decode magic %x != current %x\n", 369 (unsigned int)magic, (unsigned int)CRUSH_MAGIC); 370 goto bad; 371 } 372 c->max_buckets = ceph_decode_32(p); 373 c->max_rules = ceph_decode_32(p); 374 c->max_devices = ceph_decode_32(p); 375 376 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS); 377 if (c->buckets == NULL) 378 goto badmem; 379 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS); 380 if (c->rules == NULL) 381 goto badmem; 382 383 /* buckets */ 384 for (i = 0; i < c->max_buckets; i++) { 385 int size = 0; 386 u32 alg; 387 struct crush_bucket *b; 388 389 ceph_decode_32_safe(p, end, alg, bad); 390 if (alg == 0) { 391 c->buckets[i] = NULL; 392 continue; 393 } 394 dout("crush_decode bucket %d off %x %p to %p\n", 395 i, (int)(*p-start), *p, end); 396 397 switch (alg) { 398 case CRUSH_BUCKET_UNIFORM: 399 size = sizeof(struct crush_bucket_uniform); 400 break; 401 case CRUSH_BUCKET_LIST: 402 size = sizeof(struct crush_bucket_list); 403 break; 404 case CRUSH_BUCKET_TREE: 405 size = sizeof(struct crush_bucket_tree); 406 break; 407 case CRUSH_BUCKET_STRAW: 408 size = sizeof(struct crush_bucket_straw); 409 break; 410 case CRUSH_BUCKET_STRAW2: 411 size = sizeof(struct crush_bucket_straw2); 412 break; 413 default: 414 goto bad; 415 } 416 BUG_ON(size == 0); 417 b = c->buckets[i] = kzalloc(size, GFP_NOFS); 418 if (b == NULL) 419 goto badmem; 420 421 ceph_decode_need(p, end, 4*sizeof(u32), bad); 422 b->id = ceph_decode_32(p); 423 b->type = ceph_decode_16(p); 424 b->alg = ceph_decode_8(p); 425 b->hash = ceph_decode_8(p); 426 b->weight = ceph_decode_32(p); 427 b->size = ceph_decode_32(p); 428 429 dout("crush_decode bucket size %d off %x %p to %p\n", 430 b->size, (int)(*p-start), *p, end); 431 432 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS); 433 if (b->items == NULL) 434 goto badmem; 435 436 ceph_decode_need(p, end, b->size*sizeof(u32), bad); 437 for (j = 0; j < b->size; j++) 438 b->items[j] = ceph_decode_32(p); 439 440 switch (b->alg) { 441 case CRUSH_BUCKET_UNIFORM: 442 err = crush_decode_uniform_bucket(p, end, 443 (struct crush_bucket_uniform *)b); 444 if (err < 0) 445 goto fail; 446 break; 447 case CRUSH_BUCKET_LIST: 448 err = crush_decode_list_bucket(p, end, 449 (struct crush_bucket_list *)b); 450 if (err < 0) 451 goto fail; 452 break; 453 case CRUSH_BUCKET_TREE: 454 err = crush_decode_tree_bucket(p, end, 455 (struct crush_bucket_tree *)b); 456 if (err < 0) 457 goto fail; 458 break; 459 case CRUSH_BUCKET_STRAW: 460 err = crush_decode_straw_bucket(p, end, 461 (struct crush_bucket_straw *)b); 462 if (err < 0) 463 goto fail; 464 break; 465 case CRUSH_BUCKET_STRAW2: 466 err = crush_decode_straw2_bucket(p, end, 467 (struct crush_bucket_straw2 *)b); 468 if (err < 0) 469 goto fail; 470 break; 471 } 472 
} 473 474 /* rules */ 475 dout("rule vec is %p\n", c->rules); 476 for (i = 0; i < c->max_rules; i++) { 477 u32 yes; 478 struct crush_rule *r; 479 480 ceph_decode_32_safe(p, end, yes, bad); 481 if (!yes) { 482 dout("crush_decode NO rule %d off %x %p to %p\n", 483 i, (int)(*p-start), *p, end); 484 c->rules[i] = NULL; 485 continue; 486 } 487 488 dout("crush_decode rule %d off %x %p to %p\n", 489 i, (int)(*p-start), *p, end); 490 491 /* len */ 492 ceph_decode_32_safe(p, end, yes, bad); 493 #if BITS_PER_LONG == 32 494 if (yes > (ULONG_MAX - sizeof(*r)) 495 / sizeof(struct crush_rule_step)) 496 goto bad; 497 #endif 498 r = kmalloc(struct_size(r, steps, yes), GFP_NOFS); 499 c->rules[i] = r; 500 if (r == NULL) 501 goto badmem; 502 dout(" rule %d is at %p\n", i, r); 503 r->len = yes; 504 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */ 505 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad); 506 for (j = 0; j < r->len; j++) { 507 r->steps[j].op = ceph_decode_32(p); 508 r->steps[j].arg1 = ceph_decode_32(p); 509 r->steps[j].arg2 = ceph_decode_32(p); 510 } 511 } 512 513 ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */ 514 ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */ 515 ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */ 516 517 /* tunables */ 518 ceph_decode_need(p, end, 3*sizeof(u32), done); 519 c->choose_local_tries = ceph_decode_32(p); 520 c->choose_local_fallback_tries = ceph_decode_32(p); 521 c->choose_total_tries = ceph_decode_32(p); 522 dout("crush decode tunable choose_local_tries = %d\n", 523 c->choose_local_tries); 524 dout("crush decode tunable choose_local_fallback_tries = %d\n", 525 c->choose_local_fallback_tries); 526 dout("crush decode tunable choose_total_tries = %d\n", 527 c->choose_total_tries); 528 529 ceph_decode_need(p, end, sizeof(u32), done); 530 c->chooseleaf_descend_once = ceph_decode_32(p); 531 dout("crush decode tunable chooseleaf_descend_once = %d\n", 532 c->chooseleaf_descend_once); 533 534 ceph_decode_need(p, end, sizeof(u8), done); 535 c->chooseleaf_vary_r = ceph_decode_8(p); 536 dout("crush decode tunable chooseleaf_vary_r = %d\n", 537 c->chooseleaf_vary_r); 538 539 /* skip straw_calc_version, allowed_bucket_algs */ 540 ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done); 541 *p += sizeof(u8) + sizeof(u32); 542 543 ceph_decode_need(p, end, sizeof(u8), done); 544 c->chooseleaf_stable = ceph_decode_8(p); 545 dout("crush decode tunable chooseleaf_stable = %d\n", 546 c->chooseleaf_stable); 547 548 if (*p != end) { 549 /* class_map */ 550 ceph_decode_skip_map(p, end, 32, 32, bad); 551 /* class_name */ 552 ceph_decode_skip_map(p, end, 32, string, bad); 553 /* class_bucket */ 554 ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad); 555 } 556 557 if (*p != end) { 558 err = decode_choose_args(p, end, c); 559 if (err) 560 goto fail; 561 } 562 563 done: 564 crush_finalize(c); 565 dout("crush_decode success\n"); 566 return c; 567 568 badmem: 569 err = -ENOMEM; 570 fail: 571 dout("crush_decode fail %d\n", err); 572 crush_destroy(c); 573 return ERR_PTR(err); 574 575 bad: 576 err = -EINVAL; 577 goto fail; 578 } 579 580 int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs) 581 { 582 if (lhs->pool < rhs->pool) 583 return -1; 584 if (lhs->pool > rhs->pool) 585 return 1; 586 if (lhs->seed < rhs->seed) 587 return -1; 588 if (lhs->seed > rhs->seed) 589 return 1; 590 591 return 0; 592 } 593 594 int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs) 595 { 596 int ret; 597 598 ret = 
ceph_pg_compare(&lhs->pgid, &rhs->pgid); 599 if (ret) 600 return ret; 601 602 if (lhs->shard < rhs->shard) 603 return -1; 604 if (lhs->shard > rhs->shard) 605 return 1; 606 607 return 0; 608 } 609 610 static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len) 611 { 612 struct ceph_pg_mapping *pg; 613 614 pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO); 615 if (!pg) 616 return NULL; 617 618 RB_CLEAR_NODE(&pg->node); 619 return pg; 620 } 621 622 static void free_pg_mapping(struct ceph_pg_mapping *pg) 623 { 624 WARN_ON(!RB_EMPTY_NODE(&pg->node)); 625 626 kfree(pg); 627 } 628 629 /* 630 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid 631 * to a set of osds) and primary_temp (explicit primary setting) 632 */ 633 DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare, 634 RB_BYPTR, const struct ceph_pg *, node) 635 636 /* 637 * rbtree of pg pool info 638 */ 639 static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new) 640 { 641 struct rb_node **p = &root->rb_node; 642 struct rb_node *parent = NULL; 643 struct ceph_pg_pool_info *pi = NULL; 644 645 while (*p) { 646 parent = *p; 647 pi = rb_entry(parent, struct ceph_pg_pool_info, node); 648 if (new->id < pi->id) 649 p = &(*p)->rb_left; 650 else if (new->id > pi->id) 651 p = &(*p)->rb_right; 652 else 653 return -EEXIST; 654 } 655 656 rb_link_node(&new->node, parent, p); 657 rb_insert_color(&new->node, root); 658 return 0; 659 } 660 661 static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id) 662 { 663 struct ceph_pg_pool_info *pi; 664 struct rb_node *n = root->rb_node; 665 666 while (n) { 667 pi = rb_entry(n, struct ceph_pg_pool_info, node); 668 if (id < pi->id) 669 n = n->rb_left; 670 else if (id > pi->id) 671 n = n->rb_right; 672 else 673 return pi; 674 } 675 return NULL; 676 } 677 678 struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id) 679 { 680 return __lookup_pg_pool(&map->pg_pools, id); 681 } 682 683 const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id) 684 { 685 struct ceph_pg_pool_info *pi; 686 687 if (id == CEPH_NOPOOL) 688 return NULL; 689 690 if (WARN_ON_ONCE(id > (u64) INT_MAX)) 691 return NULL; 692 693 pi = __lookup_pg_pool(&map->pg_pools, (int) id); 694 695 return pi ? 
pi->name : NULL; 696 } 697 EXPORT_SYMBOL(ceph_pg_pool_name_by_id); 698 699 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) 700 { 701 struct rb_node *rbp; 702 703 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) { 704 struct ceph_pg_pool_info *pi = 705 rb_entry(rbp, struct ceph_pg_pool_info, node); 706 if (pi->name && strcmp(pi->name, name) == 0) 707 return pi->id; 708 } 709 return -ENOENT; 710 } 711 EXPORT_SYMBOL(ceph_pg_poolid_by_name); 712 713 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) 714 { 715 rb_erase(&pi->node, root); 716 kfree(pi->name); 717 kfree(pi); 718 } 719 720 static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) 721 { 722 u8 ev, cv; 723 unsigned len, num; 724 void *pool_end; 725 726 ceph_decode_need(p, end, 2 + 4, bad); 727 ev = ceph_decode_8(p); /* encoding version */ 728 cv = ceph_decode_8(p); /* compat version */ 729 if (ev < 5) { 730 pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv); 731 return -EINVAL; 732 } 733 if (cv > 9) { 734 pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv); 735 return -EINVAL; 736 } 737 len = ceph_decode_32(p); 738 ceph_decode_need(p, end, len, bad); 739 pool_end = *p + len; 740 741 pi->type = ceph_decode_8(p); 742 pi->size = ceph_decode_8(p); 743 pi->crush_ruleset = ceph_decode_8(p); 744 pi->object_hash = ceph_decode_8(p); 745 746 pi->pg_num = ceph_decode_32(p); 747 pi->pgp_num = ceph_decode_32(p); 748 749 *p += 4 + 4; /* skip lpg* */ 750 *p += 4; /* skip last_change */ 751 *p += 8 + 4; /* skip snap_seq, snap_epoch */ 752 753 /* skip snaps */ 754 num = ceph_decode_32(p); 755 while (num--) { 756 *p += 8; /* snapid key */ 757 *p += 1 + 1; /* versions */ 758 len = ceph_decode_32(p); 759 *p += len; 760 } 761 762 /* skip removed_snaps */ 763 num = ceph_decode_32(p); 764 *p += num * (8 + 8); 765 766 *p += 8; /* skip auid */ 767 pi->flags = ceph_decode_64(p); 768 *p += 4; /* skip crash_replay_interval */ 769 770 if (ev >= 7) 771 pi->min_size = ceph_decode_8(p); 772 else 773 pi->min_size = pi->size - pi->size / 2; 774 775 if (ev >= 8) 776 *p += 8 + 8; /* skip quota_max_* */ 777 778 if (ev >= 9) { 779 /* skip tiers */ 780 num = ceph_decode_32(p); 781 *p += num * 8; 782 783 *p += 8; /* skip tier_of */ 784 *p += 1; /* skip cache_mode */ 785 786 pi->read_tier = ceph_decode_64(p); 787 pi->write_tier = ceph_decode_64(p); 788 } else { 789 pi->read_tier = -1; 790 pi->write_tier = -1; 791 } 792 793 if (ev >= 10) { 794 /* skip properties */ 795 num = ceph_decode_32(p); 796 while (num--) { 797 len = ceph_decode_32(p); 798 *p += len; /* key */ 799 len = ceph_decode_32(p); 800 *p += len; /* val */ 801 } 802 } 803 804 if (ev >= 11) { 805 /* skip hit_set_params */ 806 *p += 1 + 1; /* versions */ 807 len = ceph_decode_32(p); 808 *p += len; 809 810 *p += 4; /* skip hit_set_period */ 811 *p += 4; /* skip hit_set_count */ 812 } 813 814 if (ev >= 12) 815 *p += 4; /* skip stripe_width */ 816 817 if (ev >= 13) { 818 *p += 8; /* skip target_max_bytes */ 819 *p += 8; /* skip target_max_objects */ 820 *p += 4; /* skip cache_target_dirty_ratio_micro */ 821 *p += 4; /* skip cache_target_full_ratio_micro */ 822 *p += 4; /* skip cache_min_flush_age */ 823 *p += 4; /* skip cache_min_evict_age */ 824 } 825 826 if (ev >= 14) { 827 /* skip erasure_code_profile */ 828 len = ceph_decode_32(p); 829 *p += len; 830 } 831 832 /* 833 * last_force_op_resend_preluminous, will be overridden if the 834 * map was encoded with RESEND_ON_SPLIT 835 */ 836 if (ev >= 15) 837 
pi->last_force_request_resend = ceph_decode_32(p); 838 else 839 pi->last_force_request_resend = 0; 840 841 if (ev >= 16) 842 *p += 4; /* skip min_read_recency_for_promote */ 843 844 if (ev >= 17) 845 *p += 8; /* skip expected_num_objects */ 846 847 if (ev >= 19) 848 *p += 4; /* skip cache_target_dirty_high_ratio_micro */ 849 850 if (ev >= 20) 851 *p += 4; /* skip min_write_recency_for_promote */ 852 853 if (ev >= 21) 854 *p += 1; /* skip use_gmt_hitset */ 855 856 if (ev >= 22) 857 *p += 1; /* skip fast_read */ 858 859 if (ev >= 23) { 860 *p += 4; /* skip hit_set_grade_decay_rate */ 861 *p += 4; /* skip hit_set_search_last_n */ 862 } 863 864 if (ev >= 24) { 865 /* skip opts */ 866 *p += 1 + 1; /* versions */ 867 len = ceph_decode_32(p); 868 *p += len; 869 } 870 871 if (ev >= 25) 872 pi->last_force_request_resend = ceph_decode_32(p); 873 874 /* ignore the rest */ 875 876 *p = pool_end; 877 calc_pg_masks(pi); 878 return 0; 879 880 bad: 881 return -EINVAL; 882 } 883 884 static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map) 885 { 886 struct ceph_pg_pool_info *pi; 887 u32 num, len; 888 u64 pool; 889 890 ceph_decode_32_safe(p, end, num, bad); 891 dout(" %d pool names\n", num); 892 while (num--) { 893 ceph_decode_64_safe(p, end, pool, bad); 894 ceph_decode_32_safe(p, end, len, bad); 895 dout(" pool %llu len %d\n", pool, len); 896 ceph_decode_need(p, end, len, bad); 897 pi = __lookup_pg_pool(&map->pg_pools, pool); 898 if (pi) { 899 char *name = kstrndup(*p, len, GFP_NOFS); 900 901 if (!name) 902 return -ENOMEM; 903 kfree(pi->name); 904 pi->name = name; 905 dout(" name is %s\n", pi->name); 906 } 907 *p += len; 908 } 909 return 0; 910 911 bad: 912 return -EINVAL; 913 } 914 915 /* 916 * osd map 917 */ 918 struct ceph_osdmap *ceph_osdmap_alloc(void) 919 { 920 struct ceph_osdmap *map; 921 922 map = kzalloc(sizeof(*map), GFP_NOIO); 923 if (!map) 924 return NULL; 925 926 map->pg_pools = RB_ROOT; 927 map->pool_max = -1; 928 map->pg_temp = RB_ROOT; 929 map->primary_temp = RB_ROOT; 930 map->pg_upmap = RB_ROOT; 931 map->pg_upmap_items = RB_ROOT; 932 mutex_init(&map->crush_workspace_mutex); 933 934 return map; 935 } 936 937 void ceph_osdmap_destroy(struct ceph_osdmap *map) 938 { 939 dout("osdmap_destroy %p\n", map); 940 if (map->crush) 941 crush_destroy(map->crush); 942 while (!RB_EMPTY_ROOT(&map->pg_temp)) { 943 struct ceph_pg_mapping *pg = 944 rb_entry(rb_first(&map->pg_temp), 945 struct ceph_pg_mapping, node); 946 erase_pg_mapping(&map->pg_temp, pg); 947 free_pg_mapping(pg); 948 } 949 while (!RB_EMPTY_ROOT(&map->primary_temp)) { 950 struct ceph_pg_mapping *pg = 951 rb_entry(rb_first(&map->primary_temp), 952 struct ceph_pg_mapping, node); 953 erase_pg_mapping(&map->primary_temp, pg); 954 free_pg_mapping(pg); 955 } 956 while (!RB_EMPTY_ROOT(&map->pg_upmap)) { 957 struct ceph_pg_mapping *pg = 958 rb_entry(rb_first(&map->pg_upmap), 959 struct ceph_pg_mapping, node); 960 rb_erase(&pg->node, &map->pg_upmap); 961 kfree(pg); 962 } 963 while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) { 964 struct ceph_pg_mapping *pg = 965 rb_entry(rb_first(&map->pg_upmap_items), 966 struct ceph_pg_mapping, node); 967 rb_erase(&pg->node, &map->pg_upmap_items); 968 kfree(pg); 969 } 970 while (!RB_EMPTY_ROOT(&map->pg_pools)) { 971 struct ceph_pg_pool_info *pi = 972 rb_entry(rb_first(&map->pg_pools), 973 struct ceph_pg_pool_info, node); 974 __remove_pg_pool(&map->pg_pools, pi); 975 } 976 kfree(map->osd_state); 977 kfree(map->osd_weight); 978 kfree(map->osd_addr); 979 kfree(map->osd_primary_affinity); 980 
kfree(map->crush_workspace); 981 kfree(map); 982 } 983 984 /* 985 * Adjust max_osd value, (re)allocate arrays. 986 * 987 * The new elements are properly initialized. 988 */ 989 static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) 990 { 991 u32 *state; 992 u32 *weight; 993 struct ceph_entity_addr *addr; 994 int i; 995 996 state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS); 997 if (!state) 998 return -ENOMEM; 999 map->osd_state = state; 1000 1001 weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS); 1002 if (!weight) 1003 return -ENOMEM; 1004 map->osd_weight = weight; 1005 1006 addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS); 1007 if (!addr) 1008 return -ENOMEM; 1009 map->osd_addr = addr; 1010 1011 for (i = map->max_osd; i < max; i++) { 1012 map->osd_state[i] = 0; 1013 map->osd_weight[i] = CEPH_OSD_OUT; 1014 memset(map->osd_addr + i, 0, sizeof(*map->osd_addr)); 1015 } 1016 1017 if (map->osd_primary_affinity) { 1018 u32 *affinity; 1019 1020 affinity = krealloc(map->osd_primary_affinity, 1021 max*sizeof(*affinity), GFP_NOFS); 1022 if (!affinity) 1023 return -ENOMEM; 1024 map->osd_primary_affinity = affinity; 1025 1026 for (i = map->max_osd; i < max; i++) 1027 map->osd_primary_affinity[i] = 1028 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1029 } 1030 1031 map->max_osd = max; 1032 1033 return 0; 1034 } 1035 1036 static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush) 1037 { 1038 void *workspace; 1039 size_t work_size; 1040 1041 if (IS_ERR(crush)) 1042 return PTR_ERR(crush); 1043 1044 work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE); 1045 dout("%s work_size %zu bytes\n", __func__, work_size); 1046 workspace = kmalloc(work_size, GFP_NOIO); 1047 if (!workspace) { 1048 crush_destroy(crush); 1049 return -ENOMEM; 1050 } 1051 crush_init_workspace(crush, workspace); 1052 1053 if (map->crush) 1054 crush_destroy(map->crush); 1055 kfree(map->crush_workspace); 1056 map->crush = crush; 1057 map->crush_workspace = workspace; 1058 return 0; 1059 } 1060 1061 #define OSDMAP_WRAPPER_COMPAT_VER 7 1062 #define OSDMAP_CLIENT_DATA_COMPAT_VER 1 1063 1064 /* 1065 * Return 0 or error. On success, *v is set to 0 for old (v6) osdmaps, 1066 * to struct_v of the client_data section for new (v7 and above) 1067 * osdmaps. 
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}

static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
if (len == 0 && incremental) 1219 return NULL; /* new_pg_temp: [] to remove */ 1220 if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32)) 1221 return ERR_PTR(-EINVAL); 1222 1223 ceph_decode_need(p, end, len * sizeof(u32), e_inval); 1224 pg = alloc_pg_mapping(len * sizeof(u32)); 1225 if (!pg) 1226 return ERR_PTR(-ENOMEM); 1227 1228 pg->pg_temp.len = len; 1229 for (i = 0; i < len; i++) 1230 pg->pg_temp.osds[i] = ceph_decode_32(p); 1231 1232 return pg; 1233 1234 e_inval: 1235 return ERR_PTR(-EINVAL); 1236 } 1237 1238 static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map) 1239 { 1240 return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp, 1241 false); 1242 } 1243 1244 static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map) 1245 { 1246 return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp, 1247 true); 1248 } 1249 1250 static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end, 1251 bool incremental) 1252 { 1253 struct ceph_pg_mapping *pg; 1254 u32 osd; 1255 1256 ceph_decode_32_safe(p, end, osd, e_inval); 1257 if (osd == (u32)-1 && incremental) 1258 return NULL; /* new_primary_temp: -1 to remove */ 1259 1260 pg = alloc_pg_mapping(0); 1261 if (!pg) 1262 return ERR_PTR(-ENOMEM); 1263 1264 pg->primary_temp.osd = osd; 1265 return pg; 1266 1267 e_inval: 1268 return ERR_PTR(-EINVAL); 1269 } 1270 1271 static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map) 1272 { 1273 return decode_pg_mapping(p, end, &map->primary_temp, 1274 __decode_primary_temp, false); 1275 } 1276 1277 static int decode_new_primary_temp(void **p, void *end, 1278 struct ceph_osdmap *map) 1279 { 1280 return decode_pg_mapping(p, end, &map->primary_temp, 1281 __decode_primary_temp, true); 1282 } 1283 1284 u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd) 1285 { 1286 BUG_ON(osd >= map->max_osd); 1287 1288 if (!map->osd_primary_affinity) 1289 return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1290 1291 return map->osd_primary_affinity[osd]; 1292 } 1293 1294 static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff) 1295 { 1296 BUG_ON(osd >= map->max_osd); 1297 1298 if (!map->osd_primary_affinity) { 1299 int i; 1300 1301 map->osd_primary_affinity = kmalloc_array(map->max_osd, 1302 sizeof(u32), 1303 GFP_NOFS); 1304 if (!map->osd_primary_affinity) 1305 return -ENOMEM; 1306 1307 for (i = 0; i < map->max_osd; i++) 1308 map->osd_primary_affinity[i] = 1309 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1310 } 1311 1312 map->osd_primary_affinity[osd] = aff; 1313 1314 return 0; 1315 } 1316 1317 static int decode_primary_affinity(void **p, void *end, 1318 struct ceph_osdmap *map) 1319 { 1320 u32 len, i; 1321 1322 ceph_decode_32_safe(p, end, len, e_inval); 1323 if (len == 0) { 1324 kfree(map->osd_primary_affinity); 1325 map->osd_primary_affinity = NULL; 1326 return 0; 1327 } 1328 if (len != map->max_osd) 1329 goto e_inval; 1330 1331 ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval); 1332 1333 for (i = 0; i < map->max_osd; i++) { 1334 int ret; 1335 1336 ret = set_primary_affinity(map, i, ceph_decode_32(p)); 1337 if (ret) 1338 return ret; 1339 } 1340 1341 return 0; 1342 1343 e_inval: 1344 return -EINVAL; 1345 } 1346 1347 static int decode_new_primary_affinity(void **p, void *end, 1348 struct ceph_osdmap *map) 1349 { 1350 u32 n; 1351 1352 ceph_decode_32_safe(p, end, n, e_inval); 1353 while (n--) { 1354 u32 osd, aff; 1355 int ret; 1356 1357 ceph_decode_32_safe(p, end, osd, e_inval); 1358 ceph_decode_32_safe(p, end, aff, e_inval); 1359 1360 ret = 
set_primary_affinity(map, osd, aff); 1361 if (ret) 1362 return ret; 1363 1364 pr_info("osd%d primary-affinity 0x%x\n", osd, aff); 1365 } 1366 1367 return 0; 1368 1369 e_inval: 1370 return -EINVAL; 1371 } 1372 1373 static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end, 1374 bool __unused) 1375 { 1376 return __decode_pg_temp(p, end, false); 1377 } 1378 1379 static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map) 1380 { 1381 return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap, 1382 false); 1383 } 1384 1385 static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map) 1386 { 1387 return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap, 1388 true); 1389 } 1390 1391 static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map) 1392 { 1393 return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true); 1394 } 1395 1396 static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end, 1397 bool __unused) 1398 { 1399 struct ceph_pg_mapping *pg; 1400 u32 len, i; 1401 1402 ceph_decode_32_safe(p, end, len, e_inval); 1403 if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32))) 1404 return ERR_PTR(-EINVAL); 1405 1406 ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval); 1407 pg = alloc_pg_mapping(2 * len * sizeof(u32)); 1408 if (!pg) 1409 return ERR_PTR(-ENOMEM); 1410 1411 pg->pg_upmap_items.len = len; 1412 for (i = 0; i < len; i++) { 1413 pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p); 1414 pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p); 1415 } 1416 1417 return pg; 1418 1419 e_inval: 1420 return ERR_PTR(-EINVAL); 1421 } 1422 1423 static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map) 1424 { 1425 return decode_pg_mapping(p, end, &map->pg_upmap_items, 1426 __decode_pg_upmap_items, false); 1427 } 1428 1429 static int decode_new_pg_upmap_items(void **p, void *end, 1430 struct ceph_osdmap *map) 1431 { 1432 return decode_pg_mapping(p, end, &map->pg_upmap_items, 1433 __decode_pg_upmap_items, true); 1434 } 1435 1436 static int decode_old_pg_upmap_items(void **p, void *end, 1437 struct ceph_osdmap *map) 1438 { 1439 return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true); 1440 } 1441 1442 /* 1443 * decode a full map. 
1444 */ 1445 static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map) 1446 { 1447 u8 struct_v; 1448 u32 epoch = 0; 1449 void *start = *p; 1450 u32 max; 1451 u32 len, i; 1452 int err; 1453 1454 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); 1455 1456 err = get_osdmap_client_data_v(p, end, "full", &struct_v); 1457 if (err) 1458 goto bad; 1459 1460 /* fsid, epoch, created, modified */ 1461 ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) + 1462 sizeof(map->created) + sizeof(map->modified), e_inval); 1463 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); 1464 epoch = map->epoch = ceph_decode_32(p); 1465 ceph_decode_copy(p, &map->created, sizeof(map->created)); 1466 ceph_decode_copy(p, &map->modified, sizeof(map->modified)); 1467 1468 /* pools */ 1469 err = decode_pools(p, end, map); 1470 if (err) 1471 goto bad; 1472 1473 /* pool_name */ 1474 err = decode_pool_names(p, end, map); 1475 if (err) 1476 goto bad; 1477 1478 ceph_decode_32_safe(p, end, map->pool_max, e_inval); 1479 1480 ceph_decode_32_safe(p, end, map->flags, e_inval); 1481 1482 /* max_osd */ 1483 ceph_decode_32_safe(p, end, max, e_inval); 1484 1485 /* (re)alloc osd arrays */ 1486 err = osdmap_set_max_osd(map, max); 1487 if (err) 1488 goto bad; 1489 1490 /* osd_state, osd_weight, osd_addrs->client_addr */ 1491 ceph_decode_need(p, end, 3*sizeof(u32) + 1492 map->max_osd*((struct_v >= 5 ? sizeof(u32) : 1493 sizeof(u8)) + 1494 sizeof(*map->osd_weight) + 1495 sizeof(*map->osd_addr)), e_inval); 1496 1497 if (ceph_decode_32(p) != map->max_osd) 1498 goto e_inval; 1499 1500 if (struct_v >= 5) { 1501 for (i = 0; i < map->max_osd; i++) 1502 map->osd_state[i] = ceph_decode_32(p); 1503 } else { 1504 for (i = 0; i < map->max_osd; i++) 1505 map->osd_state[i] = ceph_decode_8(p); 1506 } 1507 1508 if (ceph_decode_32(p) != map->max_osd) 1509 goto e_inval; 1510 1511 for (i = 0; i < map->max_osd; i++) 1512 map->osd_weight[i] = ceph_decode_32(p); 1513 1514 if (ceph_decode_32(p) != map->max_osd) 1515 goto e_inval; 1516 1517 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); 1518 for (i = 0; i < map->max_osd; i++) 1519 ceph_decode_addr(&map->osd_addr[i]); 1520 1521 /* pg_temp */ 1522 err = decode_pg_temp(p, end, map); 1523 if (err) 1524 goto bad; 1525 1526 /* primary_temp */ 1527 if (struct_v >= 1) { 1528 err = decode_primary_temp(p, end, map); 1529 if (err) 1530 goto bad; 1531 } 1532 1533 /* primary_affinity */ 1534 if (struct_v >= 2) { 1535 err = decode_primary_affinity(p, end, map); 1536 if (err) 1537 goto bad; 1538 } else { 1539 WARN_ON(map->osd_primary_affinity); 1540 } 1541 1542 /* crush */ 1543 ceph_decode_32_safe(p, end, len, e_inval); 1544 err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end))); 1545 if (err) 1546 goto bad; 1547 1548 *p += len; 1549 if (struct_v >= 3) { 1550 /* erasure_code_profiles */ 1551 ceph_decode_skip_map_of_map(p, end, string, string, string, 1552 e_inval); 1553 } 1554 1555 if (struct_v >= 4) { 1556 err = decode_pg_upmap(p, end, map); 1557 if (err) 1558 goto bad; 1559 1560 err = decode_pg_upmap_items(p, end, map); 1561 if (err) 1562 goto bad; 1563 } else { 1564 WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap)); 1565 WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items)); 1566 } 1567 1568 /* ignore the rest */ 1569 *p = end; 1570 1571 dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); 1572 return 0; 1573 1574 e_inval: 1575 err = -EINVAL; 1576 bad: 1577 pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n", 1578 err, epoch, (int)(*p - start), 
*p, start, end); 1579 print_hex_dump(KERN_DEBUG, "osdmap: ", 1580 DUMP_PREFIX_OFFSET, 16, 1, 1581 start, end - start, true); 1582 return err; 1583 } 1584 1585 /* 1586 * Allocate and decode a full map. 1587 */ 1588 struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end) 1589 { 1590 struct ceph_osdmap *map; 1591 int ret; 1592 1593 map = ceph_osdmap_alloc(); 1594 if (!map) 1595 return ERR_PTR(-ENOMEM); 1596 1597 ret = osdmap_decode(p, end, map); 1598 if (ret) { 1599 ceph_osdmap_destroy(map); 1600 return ERR_PTR(ret); 1601 } 1602 1603 return map; 1604 } 1605 1606 /* 1607 * Encoding order is (new_up_client, new_state, new_weight). Need to 1608 * apply in the (new_weight, new_state, new_up_client) order, because 1609 * an incremental map may look like e.g. 1610 * 1611 * new_up_client: { osd=6, addr=... } # set osd_state and addr 1612 * new_state: { osd=6, xorstate=EXISTS } # clear osd_state 1613 */ 1614 static int decode_new_up_state_weight(void **p, void *end, u8 struct_v, 1615 struct ceph_osdmap *map) 1616 { 1617 void *new_up_client; 1618 void *new_state; 1619 void *new_weight_end; 1620 u32 len; 1621 1622 new_up_client = *p; 1623 ceph_decode_32_safe(p, end, len, e_inval); 1624 len *= sizeof(u32) + sizeof(struct ceph_entity_addr); 1625 ceph_decode_need(p, end, len, e_inval); 1626 *p += len; 1627 1628 new_state = *p; 1629 ceph_decode_32_safe(p, end, len, e_inval); 1630 len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8)); 1631 ceph_decode_need(p, end, len, e_inval); 1632 *p += len; 1633 1634 /* new_weight */ 1635 ceph_decode_32_safe(p, end, len, e_inval); 1636 while (len--) { 1637 s32 osd; 1638 u32 w; 1639 1640 ceph_decode_need(p, end, 2*sizeof(u32), e_inval); 1641 osd = ceph_decode_32(p); 1642 w = ceph_decode_32(p); 1643 BUG_ON(osd >= map->max_osd); 1644 pr_info("osd%d weight 0x%x %s\n", osd, w, 1645 w == CEPH_OSD_IN ? "(in)" : 1646 (w == CEPH_OSD_OUT ? "(out)" : "")); 1647 map->osd_weight[osd] = w; 1648 1649 /* 1650 * If we are marking in, set the EXISTS, and clear the 1651 * AUTOOUT and NEW bits. 
1652 */ 1653 if (w) { 1654 map->osd_state[osd] |= CEPH_OSD_EXISTS; 1655 map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT | 1656 CEPH_OSD_NEW); 1657 } 1658 } 1659 new_weight_end = *p; 1660 1661 /* new_state (up/down) */ 1662 *p = new_state; 1663 len = ceph_decode_32(p); 1664 while (len--) { 1665 s32 osd; 1666 u32 xorstate; 1667 int ret; 1668 1669 osd = ceph_decode_32(p); 1670 if (struct_v >= 5) 1671 xorstate = ceph_decode_32(p); 1672 else 1673 xorstate = ceph_decode_8(p); 1674 if (xorstate == 0) 1675 xorstate = CEPH_OSD_UP; 1676 BUG_ON(osd >= map->max_osd); 1677 if ((map->osd_state[osd] & CEPH_OSD_UP) && 1678 (xorstate & CEPH_OSD_UP)) 1679 pr_info("osd%d down\n", osd); 1680 if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && 1681 (xorstate & CEPH_OSD_EXISTS)) { 1682 pr_info("osd%d does not exist\n", osd); 1683 ret = set_primary_affinity(map, osd, 1684 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); 1685 if (ret) 1686 return ret; 1687 memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr)); 1688 map->osd_state[osd] = 0; 1689 } else { 1690 map->osd_state[osd] ^= xorstate; 1691 } 1692 } 1693 1694 /* new_up_client */ 1695 *p = new_up_client; 1696 len = ceph_decode_32(p); 1697 while (len--) { 1698 s32 osd; 1699 struct ceph_entity_addr addr; 1700 1701 osd = ceph_decode_32(p); 1702 ceph_decode_copy(p, &addr, sizeof(addr)); 1703 ceph_decode_addr(&addr); 1704 BUG_ON(osd >= map->max_osd); 1705 pr_info("osd%d up\n", osd); 1706 map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP; 1707 map->osd_addr[osd] = addr; 1708 } 1709 1710 *p = new_weight_end; 1711 return 0; 1712 1713 e_inval: 1714 return -EINVAL; 1715 } 1716 1717 /* 1718 * decode and apply an incremental map update. 1719 */ 1720 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, 1721 struct ceph_osdmap *map) 1722 { 1723 struct ceph_fsid fsid; 1724 u32 epoch = 0; 1725 struct ceph_timespec modified; 1726 s32 len; 1727 u64 pool; 1728 __s64 new_pool_max; 1729 __s32 new_flags, max; 1730 void *start = *p; 1731 int err; 1732 u8 struct_v; 1733 1734 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); 1735 1736 err = get_osdmap_client_data_v(p, end, "inc", &struct_v); 1737 if (err) 1738 goto bad; 1739 1740 /* fsid, epoch, modified, new_pool_max, new_flags */ 1741 ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) + 1742 sizeof(u64) + sizeof(u32), e_inval); 1743 ceph_decode_copy(p, &fsid, sizeof(fsid)); 1744 epoch = ceph_decode_32(p); 1745 BUG_ON(epoch != map->epoch+1); 1746 ceph_decode_copy(p, &modified, sizeof(modified)); 1747 new_pool_max = ceph_decode_64(p); 1748 new_flags = ceph_decode_32(p); 1749 1750 /* full map? */ 1751 ceph_decode_32_safe(p, end, len, e_inval); 1752 if (len > 0) { 1753 dout("apply_incremental full map len %d, %p to %p\n", 1754 len, *p, end); 1755 return ceph_osdmap_decode(p, min(*p+len, end)); 1756 } 1757 1758 /* new crush? */ 1759 ceph_decode_32_safe(p, end, len, e_inval); 1760 if (len > 0) { 1761 err = osdmap_set_crush(map, 1762 crush_decode(*p, min(*p + len, end))); 1763 if (err) 1764 goto bad; 1765 *p += len; 1766 } 1767 1768 /* new flags? */ 1769 if (new_flags >= 0) 1770 map->flags = new_flags; 1771 if (new_pool_max >= 0) 1772 map->pool_max = new_pool_max; 1773 1774 /* new max? 
*/ 1775 ceph_decode_32_safe(p, end, max, e_inval); 1776 if (max >= 0) { 1777 err = osdmap_set_max_osd(map, max); 1778 if (err) 1779 goto bad; 1780 } 1781 1782 map->epoch++; 1783 map->modified = modified; 1784 1785 /* new_pools */ 1786 err = decode_new_pools(p, end, map); 1787 if (err) 1788 goto bad; 1789 1790 /* new_pool_names */ 1791 err = decode_pool_names(p, end, map); 1792 if (err) 1793 goto bad; 1794 1795 /* old_pool */ 1796 ceph_decode_32_safe(p, end, len, e_inval); 1797 while (len--) { 1798 struct ceph_pg_pool_info *pi; 1799 1800 ceph_decode_64_safe(p, end, pool, e_inval); 1801 pi = __lookup_pg_pool(&map->pg_pools, pool); 1802 if (pi) 1803 __remove_pg_pool(&map->pg_pools, pi); 1804 } 1805 1806 /* new_up_client, new_state, new_weight */ 1807 err = decode_new_up_state_weight(p, end, struct_v, map); 1808 if (err) 1809 goto bad; 1810 1811 /* new_pg_temp */ 1812 err = decode_new_pg_temp(p, end, map); 1813 if (err) 1814 goto bad; 1815 1816 /* new_primary_temp */ 1817 if (struct_v >= 1) { 1818 err = decode_new_primary_temp(p, end, map); 1819 if (err) 1820 goto bad; 1821 } 1822 1823 /* new_primary_affinity */ 1824 if (struct_v >= 2) { 1825 err = decode_new_primary_affinity(p, end, map); 1826 if (err) 1827 goto bad; 1828 } 1829 1830 if (struct_v >= 3) { 1831 /* new_erasure_code_profiles */ 1832 ceph_decode_skip_map_of_map(p, end, string, string, string, 1833 e_inval); 1834 /* old_erasure_code_profiles */ 1835 ceph_decode_skip_set(p, end, string, e_inval); 1836 } 1837 1838 if (struct_v >= 4) { 1839 err = decode_new_pg_upmap(p, end, map); 1840 if (err) 1841 goto bad; 1842 1843 err = decode_old_pg_upmap(p, end, map); 1844 if (err) 1845 goto bad; 1846 1847 err = decode_new_pg_upmap_items(p, end, map); 1848 if (err) 1849 goto bad; 1850 1851 err = decode_old_pg_upmap_items(p, end, map); 1852 if (err) 1853 goto bad; 1854 } 1855 1856 /* ignore the rest */ 1857 *p = end; 1858 1859 dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); 1860 return map; 1861 1862 e_inval: 1863 err = -EINVAL; 1864 bad: 1865 pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n", 1866 err, epoch, (int)(*p - start), *p, start, end); 1867 print_hex_dump(KERN_DEBUG, "osdmap: ", 1868 DUMP_PREFIX_OFFSET, 16, 1, 1869 start, end - start, true); 1870 return ERR_PTR(err); 1871 } 1872 1873 void ceph_oloc_copy(struct ceph_object_locator *dest, 1874 const struct ceph_object_locator *src) 1875 { 1876 ceph_oloc_destroy(dest); 1877 1878 dest->pool = src->pool; 1879 if (src->pool_ns) 1880 dest->pool_ns = ceph_get_string(src->pool_ns); 1881 else 1882 dest->pool_ns = NULL; 1883 } 1884 EXPORT_SYMBOL(ceph_oloc_copy); 1885 1886 void ceph_oloc_destroy(struct ceph_object_locator *oloc) 1887 { 1888 ceph_put_string(oloc->pool_ns); 1889 } 1890 EXPORT_SYMBOL(ceph_oloc_destroy); 1891 1892 void ceph_oid_copy(struct ceph_object_id *dest, 1893 const struct ceph_object_id *src) 1894 { 1895 ceph_oid_destroy(dest); 1896 1897 if (src->name != src->inline_name) { 1898 /* very rare, see ceph_object_id definition */ 1899 dest->name = kmalloc(src->name_len + 1, 1900 GFP_NOIO | __GFP_NOFAIL); 1901 } else { 1902 dest->name = dest->inline_name; 1903 } 1904 memcpy(dest->name, src->name, src->name_len + 1); 1905 dest->name_len = src->name_len; 1906 } 1907 EXPORT_SYMBOL(ceph_oid_copy); 1908 1909 static __printf(2, 0) 1910 int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap) 1911 { 1912 int len; 1913 1914 WARN_ON(!ceph_oid_empty(oid)); 1915 1916 len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap); 1917 
if (len >= sizeof(oid->inline_name)) 1918 return len; 1919 1920 oid->name_len = len; 1921 return 0; 1922 } 1923 1924 /* 1925 * If oid doesn't fit into inline buffer, BUG. 1926 */ 1927 void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...) 1928 { 1929 va_list ap; 1930 1931 va_start(ap, fmt); 1932 BUG_ON(oid_printf_vargs(oid, fmt, ap)); 1933 va_end(ap); 1934 } 1935 EXPORT_SYMBOL(ceph_oid_printf); 1936 1937 static __printf(3, 0) 1938 int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp, 1939 const char *fmt, va_list ap) 1940 { 1941 va_list aq; 1942 int len; 1943 1944 va_copy(aq, ap); 1945 len = oid_printf_vargs(oid, fmt, aq); 1946 va_end(aq); 1947 1948 if (len) { 1949 char *external_name; 1950 1951 external_name = kmalloc(len + 1, gfp); 1952 if (!external_name) 1953 return -ENOMEM; 1954 1955 oid->name = external_name; 1956 WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len); 1957 oid->name_len = len; 1958 } 1959 1960 return 0; 1961 } 1962 1963 /* 1964 * If oid doesn't fit into inline buffer, allocate. 1965 */ 1966 int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, 1967 const char *fmt, ...) 1968 { 1969 va_list ap; 1970 int ret; 1971 1972 va_start(ap, fmt); 1973 ret = oid_aprintf_vargs(oid, gfp, fmt, ap); 1974 va_end(ap); 1975 1976 return ret; 1977 } 1978 EXPORT_SYMBOL(ceph_oid_aprintf); 1979 1980 void ceph_oid_destroy(struct ceph_object_id *oid) 1981 { 1982 if (oid->name != oid->inline_name) 1983 kfree(oid->name); 1984 } 1985 EXPORT_SYMBOL(ceph_oid_destroy); 1986 1987 /* 1988 * osds only 1989 */ 1990 static bool __osds_equal(const struct ceph_osds *lhs, 1991 const struct ceph_osds *rhs) 1992 { 1993 if (lhs->size == rhs->size && 1994 !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0]))) 1995 return true; 1996 1997 return false; 1998 } 1999 2000 /* 2001 * osds + primary 2002 */ 2003 static bool osds_equal(const struct ceph_osds *lhs, 2004 const struct ceph_osds *rhs) 2005 { 2006 if (__osds_equal(lhs, rhs) && 2007 lhs->primary == rhs->primary) 2008 return true; 2009 2010 return false; 2011 } 2012 2013 static bool osds_valid(const struct ceph_osds *set) 2014 { 2015 /* non-empty set */ 2016 if (set->size > 0 && set->primary >= 0) 2017 return true; 2018 2019 /* empty can_shift_osds set */ 2020 if (!set->size && set->primary == -1) 2021 return true; 2022 2023 /* empty !can_shift_osds set - all NONE */ 2024 if (set->size > 0 && set->primary == -1) { 2025 int i; 2026 2027 for (i = 0; i < set->size; i++) { 2028 if (set->osds[i] != CRUSH_ITEM_NONE) 2029 break; 2030 } 2031 if (i == set->size) 2032 return true; 2033 } 2034 2035 return false; 2036 } 2037 2038 void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src) 2039 { 2040 memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0])); 2041 dest->size = src->size; 2042 dest->primary = src->primary; 2043 } 2044 2045 bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num, 2046 u32 new_pg_num) 2047 { 2048 int old_bits = calc_bits_of(old_pg_num); 2049 int old_mask = (1 << old_bits) - 1; 2050 int n; 2051 2052 WARN_ON(pgid->seed >= old_pg_num); 2053 if (new_pg_num <= old_pg_num) 2054 return false; 2055 2056 for (n = 1; ; n++) { 2057 int next_bit = n << (old_bits - 1); 2058 u32 s = next_bit | pgid->seed; 2059 2060 if (s < old_pg_num || s == pgid->seed) 2061 continue; 2062 if (s >= new_pg_num) 2063 break; 2064 2065 s = ceph_stable_mod(s, old_pg_num, old_mask); 2066 if (s == pgid->seed) 2067 return true; 2068 } 2069 2070 return false; 2071 } 2072 2073 bool ceph_is_new_interval(const struct 
ceph_osds *old_acting, 2074 const struct ceph_osds *new_acting, 2075 const struct ceph_osds *old_up, 2076 const struct ceph_osds *new_up, 2077 int old_size, 2078 int new_size, 2079 int old_min_size, 2080 int new_min_size, 2081 u32 old_pg_num, 2082 u32 new_pg_num, 2083 bool old_sort_bitwise, 2084 bool new_sort_bitwise, 2085 bool old_recovery_deletes, 2086 bool new_recovery_deletes, 2087 const struct ceph_pg *pgid) 2088 { 2089 return !osds_equal(old_acting, new_acting) || 2090 !osds_equal(old_up, new_up) || 2091 old_size != new_size || 2092 old_min_size != new_min_size || 2093 ceph_pg_is_split(pgid, old_pg_num, new_pg_num) || 2094 old_sort_bitwise != new_sort_bitwise || 2095 old_recovery_deletes != new_recovery_deletes; 2096 } 2097 2098 static int calc_pg_rank(int osd, const struct ceph_osds *acting) 2099 { 2100 int i; 2101 2102 for (i = 0; i < acting->size; i++) { 2103 if (acting->osds[i] == osd) 2104 return i; 2105 } 2106 2107 return -1; 2108 } 2109 2110 static bool primary_changed(const struct ceph_osds *old_acting, 2111 const struct ceph_osds *new_acting) 2112 { 2113 if (!old_acting->size && !new_acting->size) 2114 return false; /* both still empty */ 2115 2116 if (!old_acting->size ^ !new_acting->size) 2117 return true; /* was empty, now not, or vice versa */ 2118 2119 if (old_acting->primary != new_acting->primary) 2120 return true; /* primary changed */ 2121 2122 if (calc_pg_rank(old_acting->primary, old_acting) != 2123 calc_pg_rank(new_acting->primary, new_acting)) 2124 return true; 2125 2126 return false; /* same primary (tho replicas may have changed) */ 2127 } 2128 2129 bool ceph_osds_changed(const struct ceph_osds *old_acting, 2130 const struct ceph_osds *new_acting, 2131 bool any_change) 2132 { 2133 if (primary_changed(old_acting, new_acting)) 2134 return true; 2135 2136 if (any_change && !__osds_equal(old_acting, new_acting)) 2137 return true; 2138 2139 return false; 2140 } 2141 2142 /* 2143 * Map an object into a PG. 2144 * 2145 * Should only be called with target_oid and target_oloc (as opposed to 2146 * base_oid and base_oloc), since tiering isn't taken into account. 
2147 */ 2148 void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi, 2149 const struct ceph_object_id *oid, 2150 const struct ceph_object_locator *oloc, 2151 struct ceph_pg *raw_pgid) 2152 { 2153 WARN_ON(pi->id != oloc->pool); 2154 2155 if (!oloc->pool_ns) { 2156 raw_pgid->pool = oloc->pool; 2157 raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, 2158 oid->name_len); 2159 dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name, 2160 raw_pgid->pool, raw_pgid->seed); 2161 } else { 2162 char stack_buf[256]; 2163 char *buf = stack_buf; 2164 int nsl = oloc->pool_ns->len; 2165 size_t total = nsl + 1 + oid->name_len; 2166 2167 if (total > sizeof(stack_buf)) 2168 buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL); 2169 memcpy(buf, oloc->pool_ns->str, nsl); 2170 buf[nsl] = '\037'; 2171 memcpy(buf + nsl + 1, oid->name, oid->name_len); 2172 raw_pgid->pool = oloc->pool; 2173 raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total); 2174 if (buf != stack_buf) 2175 kfree(buf); 2176 dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__, 2177 oid->name, nsl, oloc->pool_ns->str, 2178 raw_pgid->pool, raw_pgid->seed); 2179 } 2180 } 2181 2182 int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, 2183 const struct ceph_object_id *oid, 2184 const struct ceph_object_locator *oloc, 2185 struct ceph_pg *raw_pgid) 2186 { 2187 struct ceph_pg_pool_info *pi; 2188 2189 pi = ceph_pg_pool_by_id(osdmap, oloc->pool); 2190 if (!pi) 2191 return -ENOENT; 2192 2193 __ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid); 2194 return 0; 2195 } 2196 EXPORT_SYMBOL(ceph_object_locator_to_pg); 2197 2198 /* 2199 * Map a raw PG (full precision ps) into an actual PG. 2200 */ 2201 static void raw_pg_to_pg(struct ceph_pg_pool_info *pi, 2202 const struct ceph_pg *raw_pgid, 2203 struct ceph_pg *pgid) 2204 { 2205 pgid->pool = raw_pgid->pool; 2206 pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num, 2207 pi->pg_num_mask); 2208 } 2209 2210 /* 2211 * Map a raw PG (full precision ps) into a placement ps (placement 2212 * seed). Include pool id in that value so that different pools don't 2213 * use the same seeds. 2214 */ 2215 static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi, 2216 const struct ceph_pg *raw_pgid) 2217 { 2218 if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) { 2219 /* hash pool id and seed so that pool PGs do not overlap */ 2220 return crush_hash32_2(CRUSH_HASH_RJENKINS1, 2221 ceph_stable_mod(raw_pgid->seed, 2222 pi->pgp_num, 2223 pi->pgp_num_mask), 2224 raw_pgid->pool); 2225 } else { 2226 /* 2227 * legacy behavior: add ps and pool together. this is 2228 * not a great approach because the PGs from each pool 2229 * will overlap on top of each other: 0.5 == 1.4 == 2230 * 2.3 == ... 2231 */ 2232 return ceph_stable_mod(raw_pgid->seed, pi->pgp_num, 2233 pi->pgp_num_mask) + 2234 (unsigned)raw_pgid->pool; 2235 } 2236 } 2237 2238 /* 2239 * Magic value used for a "default" fallback choose_args, used if the 2240 * crush_choose_arg_map passed to do_crush() does not exist. If this 2241 * also doesn't exist, fall back to canonical weights. 
/*
 * Magic value used for a "default" fallback choose_args, used if the
 * crush_choose_arg_map passed to do_crush() does not exist.  If this
 * also doesn't exist, fall back to canonical weights.
 */
#define CEPH_DEFAULT_CHOOSE_ARGS	-1

static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max,
		    s64 choose_args_index)
{
	struct crush_choose_arg_map *arg_map;
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
					choose_args_index);
	if (!arg_map)
		arg_map = lookup_choose_arg_map(&map->crush->choose_args,
						CEPH_DEFAULT_CHOOSE_ARGS);

	mutex_lock(&map->crush_workspace_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_workspace,
			  arg_map ? arg_map->args : NULL);
	mutex_unlock(&map->crush_workspace_mutex);

	return r;
}

static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
				    struct ceph_pg_pool_info *pi,
				    struct ceph_osds *set)
{
	int i;

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
	} else {
		/* set dne devices to NONE */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
		}
	}
}

/*
 * Calculate raw set (CRUSH output) for given PG and filter out
 * nonexistent OSDs.  ->primary is undefined for a raw set.
 *
 * Placement seed (CRUSH input) is returned through @ppps.
 */
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   const struct ceph_pg *raw_pgid,
			   struct ceph_osds *raw,
			   u32 *ppps)
{
	u32 pps = raw_pg_to_pps(pi, raw_pgid);
	int ruleno;
	int len;

	ceph_osds_init(raw);
	if (ppps)
		*ppps = pps;

	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
				 pi->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size);
		return;
	}

	if (pi->size > ARRAY_SIZE(raw->osds)) {
		pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
				   pi->id, pi->crush_ruleset, pi->type,
				   pi->size, ARRAY_SIZE(raw->osds));
		return;
	}

	len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
		       osdmap->osd_weight, osdmap->max_osd, pi->id);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
		       pi->size);
		return;
	}

	raw->size = len;
	remove_nonexistent_osds(osdmap, pi, raw);
}
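/*
 * Worked example with made-up numbers: if CRUSH returns [1, 5, 3] for a
 * pool of size 3 and OSD 5 does not exist in the map, a replicated pool
 * (which can shift OSDs) ends up with a raw set of [1, 3] and size 2,
 * while an erasure-coded pool must keep positions and gets [1, NONE, 3].
 */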
/* apply pg_upmap[_items] mappings */
static void apply_upmap(struct ceph_osdmap *osdmap,
			const struct ceph_pg *pgid,
			struct ceph_osds *raw)
{
	struct ceph_pg_mapping *pg;
	int i, j;

	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
	if (pg) {
		/* make sure targets aren't marked out */
		for (i = 0; i < pg->pg_upmap.len; i++) {
			int osd = pg->pg_upmap.osds[i];

			if (osd != CRUSH_ITEM_NONE &&
			    osd < osdmap->max_osd &&
			    osdmap->osd_weight[osd] == 0) {
				/* reject/ignore explicit mapping */
				return;
			}
		}
		for (i = 0; i < pg->pg_upmap.len; i++)
			raw->osds[i] = pg->pg_upmap.osds[i];
		raw->size = pg->pg_upmap.len;
		/* check and apply pg_upmap_items, if any */
	}

	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
	if (pg) {
		/*
		 * Note: this approach does not allow a bidirectional swap,
		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
		 */
		for (i = 0; i < pg->pg_upmap_items.len; i++) {
			int from = pg->pg_upmap_items.from_to[i][0];
			int to = pg->pg_upmap_items.from_to[i][1];
			int pos = -1;
			bool exists = false;

			/* make sure replacement doesn't already appear */
			for (j = 0; j < raw->size; j++) {
				int osd = raw->osds[j];

				if (osd == to) {
					exists = true;
					break;
				}
				/* ignore mapping if target is marked out */
				if (osd == from && pos < 0 &&
				    !(to != CRUSH_ITEM_NONE &&
				      to < osdmap->max_osd &&
				      osdmap->osd_weight[to] == 0)) {
					pos = j;
				}
			}
			if (!exists && pos >= 0)
				raw->osds[pos] = to;
		}
	}
}

/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   struct ceph_osds *set)
{
	int i;

	/* ->primary is undefined for a raw set */
	BUG_ON(set->primary != -1);

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (ceph_osd_is_down(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
		if (set->size > 0)
			set->primary = set->osds[0];
	} else {
		/* set down/dne devices to NONE */
		for (i = set->size - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
			else
				set->primary = set->osds[i];
		}
	}
}
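/*
 * Worked example with made-up numbers: given a raw set of [1, 5, 3] where
 * OSD 5 exists but is down, a replicated pool yields an up set of [1, 3]
 * with primary 1 (osds[0]), while an erasure-coded pool yields
 * [1, NONE, 3] with primary 1 (the first slot holding an up OSD).
 */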
static void apply_primary_affinity(struct ceph_osdmap *osdmap,
				   struct ceph_pg_pool_info *pi,
				   u32 pps,
				   struct ceph_osds *up)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == up->size)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	up->primary = up->osds[pos];

	if (ceph_can_shift_osds(pi) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			up->osds[i] = up->osds[i - 1];
		up->osds[0] = up->primary;
	}
}

/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg_mapping *pg;
	int i;

	ceph_osds_init(temp);

	/* pg_temp? */
	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}
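/*
 * Worked example with made-up numbers, assuming the usual 16.16-style
 * affinity encoding where CEPH_OSD_MAX_PRIMARY_AFFINITY is 0x10000: an OSD
 * left at the default affinity is never rejected by apply_primary_affinity(),
 * while an OSD with affinity 0x8000 is skipped as primary for roughly half
 * of its PGs (whenever the 16-bit hash of (pps, osd) is >= 0x8000), in which
 * case the next candidate in the up set is considered instead.
 */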
/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       struct ceph_pg_pool_info *pi,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg pgid;
	u32 pps;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	apply_upmap(osdmap, &pgid, up);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, &pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}

bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
			      struct ceph_pg_pool_info *pi,
			      const struct ceph_pg *raw_pgid,
			      struct ceph_spg *spgid)
{
	struct ceph_pg pgid;
	struct ceph_osds up, acting;
	int i;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	if (ceph_can_shift_osds(pi)) {
		spgid->pgid = pgid; /* struct */
		spgid->shard = CEPH_SPG_NOSHARD;
		return true;
	}

	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
	for (i = 0; i < acting.size; i++) {
		if (acting.osds[i] == acting.primary) {
			spgid->pgid = pgid; /* struct */
			spgid->shard = i;
			return true;
		}
	}

	return false;
}

/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_osds up, acting;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi)
		return -1;

	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
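/*
 * Illustrative sketch only, compiled out and not part of the exported API
 * (the example_* name below is made up): a caller that already has
 * target_oid and target_oloc can combine the two exported helpers above to
 * find the acting primary for an object, getting -ENOENT if the pool is
 * unknown and -1 if the PG currently has no acting primary.
 */
#if 0
static int example_object_to_acting_primary(struct ceph_osdmap *osdmap,
					    const struct ceph_object_id *oid,
					    const struct ceph_object_locator *oloc)
{
	struct ceph_pg raw_pgid;
	int ret;

	ret = ceph_object_locator_to_pg(osdmap, oid, oloc, &raw_pgid);
	if (ret)
		return ret;

	return ceph_pg_to_acting_primary(osdmap, &raw_pgid);
}
#endif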