// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
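/*
 * Worked example (illustrative, not from the original source): for
 * pg_num = 12, calc_bits_of(11) == 4, so pg_num_mask == (1 << 4) - 1
 * == 15.  ceph_stable_mod() later uses this mask to fold a raw 32-bit
 * placement seed into the range [0, pg_num).
 */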
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;
	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}

DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
		node);

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
			rb_entry(rb_first(&c->choose_args),
				 struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}

static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
	ret = -EINVAL;
fail:
	kfree(a);
	return ERR_PTR(ret);
}
/*
 * Assumes @arg is zero-initialized.
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;

			if (arg->ids_size &&
			    arg->ids_size != c->buckets[bucket_index]->size)
				goto e_inval;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}
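/*
 * Rough sketch of the choose_args wire format handled above (derived
 * from the decode path, not an authoritative spec):
 *
 *   u32 num_choose_arg_maps
 *   for each map:
 *     u64 choose_args_index
 *     u32 num_buckets
 *     for each bucket:
 *       u32 bucket_index
 *       u32 weight_set_size, then that many u32-arrays of weights
 *       u32-array of ids
 *
 * Each u32-array is a u32 count followed by that many u32 values
 * (see decode_array_32_alloc()).
 */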
static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}
		/* Every bucket has a permutation array. */
		c->working_size += c->buckets[b]->size * sizeof(__u32);
	}
}

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	c->choose_args = RB_ROOT;

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				(struct crush_bucket_uniform *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
				(struct crush_bucket_list *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				(struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto fail;
			break;
		}
	}
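	/*
	 * Each rule below is encoded as (sketch inferred from the decode
	 * code that follows, not an authoritative spec): a u32 "present"
	 * flag, a u32 step count, 4 bytes of rule mask, and then one
	 * (op, arg1, arg2) u32 triple per step.
	 */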
	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
		c->rules[i] = r;
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);

	if (*p != end) {
		/* class_map */
		ceph_decode_skip_map(p, end, 32, 32, bad);
		/* class_name */
		ceph_decode_skip_map(p, end, 32, string, bad);
		/* class_bucket */
		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
	}

	if (*p != end) {
		err = decode_choose_args(p, end, c);
		if (err)
			goto fail;
	}

done:
	crush_finalize(c);
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
fail:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);

bad:
	err = -EINVAL;
	goto fail;
}

int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}
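/*
 * Illustrative note (not from the original source): this yields the
 * usual lexicographic order on (pool, seed), e.g. pg 1.2a sorts before
 * 1.2b, which sorts before 2.0.  ceph_spg_compare() below extends the
 * same ordering with the shard id.
 */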
int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
	int ret;

	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
	if (ret)
		return ret;

	if (lhs->shard < rhs->shard)
		return -1;
	if (lhs->shard > rhs->shard)
		return 1;

	return 0;
}

static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
	struct ceph_pg_mapping *pg;

	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
	if (!pg)
		return NULL;

	RB_CLEAR_NODE(&pg->node);
	return pg;
}

static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
	WARN_ON(!RB_EMPTY_NODE(&pg->node));

	kfree(pg);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
		 RB_BYPTR, const struct ceph_pg *, node)

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p);  /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;	/* snapid key */
		*p += 1 + 1;	/* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len; /* key */
			len = ceph_decode_32(p);
			*p += len; /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4; /* skip hit_set_period */
		*p += 4; /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4; /* skip stripe_width */

	if (ev >= 13) {
		*p += 8; /* skip target_max_bytes */
		*p += 8; /* skip target_max_objects */
		*p += 4; /* skip cache_target_dirty_ratio_micro */
		*p += 4; /* skip cache_target_full_ratio_micro */
		*p += 4; /* skip cache_min_flush_age */
		*p += 4; /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}
	/*
	 * last_force_op_resend_preluminous, will be overridden if the
	 * map was encoded with RESEND_ON_SPLIT
	 */
	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	if (ev >= 16)
		*p += 4; /* skip min_read_recency_for_promote */

	if (ev >= 17)
		*p += 8; /* skip expected_num_objects */

	if (ev >= 19)
		*p += 4; /* skip cache_target_dirty_high_ratio_micro */

	if (ev >= 20)
		*p += 4; /* skip min_write_recency_for_promote */

	if (ev >= 21)
		*p += 1; /* skip use_gmt_hitset */

	if (ev >= 22)
		*p += 1; /* skip fast_read */

	if (ev >= 23) {
		*p += 4; /* skip hit_set_grade_decay_rate */
		*p += 4; /* skip hit_set_search_last_n */
	}

	if (ev >= 24) {
		/* skip opts */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 25)
		pi->last_force_request_resend = ceph_decode_32(p);

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}

static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	map->pg_upmap = RB_ROOT;
	map->pg_upmap_items = RB_ROOT;
	mutex_init(&map->crush_workspace_mutex);

	return map;
}
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->pg_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->primary_temp),
				 struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->primary_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap_items),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap_items);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kvfree(map->osd_state);
	kvfree(map->osd_weight);
	kvfree(map->osd_addr);
	kvfree(map->osd_primary_affinity);
	kvfree(map->crush_workspace);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
{
	u32 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	u32 to_copy;
	int i;

	dout("%s old %u new %u\n", __func__, map->max_osd, max);
	if (max == map->max_osd)
		return 0;

	state = ceph_kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);
	weight = ceph_kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);
	addr = ceph_kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);
	if (!state || !weight || !addr) {
		kvfree(state);
		kvfree(weight);
		kvfree(addr);
		return -ENOMEM;
	}

	to_copy = min(map->max_osd, max);
	if (map->osd_state) {
		memcpy(state, map->osd_state, to_copy * sizeof(*state));
		memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
		memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
		kvfree(map->osd_state);
		kvfree(map->osd_weight);
		kvfree(map->osd_addr);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = ceph_kvmalloc(array_size(max, sizeof(*affinity)),
					 GFP_NOFS);
		if (!affinity)
			return -ENOMEM;

		memcpy(affinity, map->osd_primary_affinity,
		       to_copy * sizeof(*affinity));
		kvfree(map->osd_primary_affinity);

		map->osd_primary_affinity = affinity;
		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}
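/*
 * Note (added commentary, not in the original source): osdmap_set_crush()
 * below takes ownership of @crush.  If the workspace allocation fails,
 * the new crush map is destroyed; on success it replaces map->crush
 * together with a freshly sized workspace, and the previous crush map
 * and workspace are freed.
 */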
static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
	void *workspace;
	size_t work_size;

	if (IS_ERR(crush))
		return PTR_ERR(crush);

	work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE);
	dout("%s work_size %zu bytes\n", __func__, work_size);
	workspace = ceph_kvmalloc(work_size, GFP_NOIO);
	if (!workspace) {
		crush_destroy(crush);
		return -ENOMEM;
	}
	crush_init_workspace(crush, workspace);

	if (map->crush)
		crush_destroy(map->crush);
	kvfree(map->crush_workspace);
	map->crush = crush;
	map->crush_workspace = workspace;
	return 0;
}

#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}

static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}
static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0 && incremental)
		return NULL;	/* new_pg_temp: [] to remove */
	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_temp.len = len;
	for (i = 0; i < len; i++)
		pg->pg_temp.osds[i] = ceph_decode_32(p);

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 true);
}

static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
						      bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 osd;

	ceph_decode_32_safe(p, end, osd, e_inval);
	if (osd == (u32)-1 && incremental)
		return NULL;	/* new_primary_temp: -1 to remove */

	pg = alloc_pg_mapping(0);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->primary_temp.osd = osd;
	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = ceph_kvmalloc(
		    array_size(map->max_osd, sizeof(*map->osd_primary_affinity)),
		    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}

static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kvfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
						 bool __unused)
{
	return __decode_pg_temp(p, end, false);
}

static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 false);
}

static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 true);
}

static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}

static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
							bool __unused)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(2 * len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_upmap_items.len = len;
	for (i = 0; i < len; i++) {
		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
	}

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, false);
}

static int decode_new_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, true);
}

static int decode_old_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}
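/*
 * Added commentary (sketch derived from the decoders above, not an
 * authoritative spec): on the wire a pg_upmap entry is a pgid followed
 * by a u32 count and that many u32 osd ids (same layout as pg_temp),
 * while a pg_upmap_items entry is a pgid followed by a u32 count and
 * that many (from, to) u32 pairs.  Removal in an incremental map is
 * done via the old_pg_upmap[_items] lists, which carry only pgids
 * (fn == NULL above).
 */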
/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(struct_v >= 5 ? sizeof(u32) :
						       sizeof(u8)) +
			 sizeof(*map->osd_weight), e_inval);
	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	if (struct_v >= 5) {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_32(p);
	} else {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_8(p);
	}

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++) {
		err = ceph_decode_entity_addr(p, end, &map->osd_addr[i]);
		if (err)
			goto bad;
	}

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(map->osd_primary_affinity);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
	if (err)
		goto bad;

	*p += len;
	if (struct_v >= 3) {
		/* erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
	}

	if (struct_v >= 4) {
		err = decode_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
	}

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}

/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}

/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
1674 */ 1675 if (w) { 1676 map->osd_state[osd] |= CEPH_OSD_EXISTS; 1677 map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT | 1678 CEPH_OSD_NEW); 1679 } 1680 } 1681 new_weight_end = *p; 1682 1683 /* new_state (up/down) */ 1684 *p = new_state; 1685 len = ceph_decode_32(p); 1686 while (len--) { 1687 s32 osd; 1688 u32 xorstate; 1689 int ret; 1690 1691 osd = ceph_decode_32(p); 1692 if (struct_v >= 5) 1693 xorstate = ceph_decode_32(p); 1694 else 1695 xorstate = ceph_decode_8(p); 1696 if (xorstate == 0) 1697 xorstate = CEPH_OSD_UP; 1698 BUG_ON(osd >= map->max_osd); 1699 if ((map->osd_state[osd] & CEPH_OSD_UP) && 1700 (xorstate & CEPH_OSD_UP)) 1701 pr_info("osd%d down\n", osd); 1702 if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && 1703 (xorstate & CEPH_OSD_EXISTS)) { 1704 pr_info("osd%d does not exist\n", osd); 1705 ret = set_primary_affinity(map, osd, 1706 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); 1707 if (ret) 1708 return ret; 1709 memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr)); 1710 map->osd_state[osd] = 0; 1711 } else { 1712 map->osd_state[osd] ^= xorstate; 1713 } 1714 } 1715 1716 /* new_up_client */ 1717 *p = new_up_client; 1718 len = ceph_decode_32(p); 1719 while (len--) { 1720 s32 osd; 1721 struct ceph_entity_addr addr; 1722 1723 osd = ceph_decode_32(p); 1724 BUG_ON(osd >= map->max_osd); 1725 if (ceph_decode_entity_addr(p, end, &addr)) 1726 goto e_inval; 1727 pr_info("osd%d up\n", osd); 1728 map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP; 1729 map->osd_addr[osd] = addr; 1730 } 1731 1732 *p = new_weight_end; 1733 return 0; 1734 1735 e_inval: 1736 return -EINVAL; 1737 } 1738 1739 /* 1740 * decode and apply an incremental map update. 1741 */ 1742 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, 1743 struct ceph_osdmap *map) 1744 { 1745 struct ceph_fsid fsid; 1746 u32 epoch = 0; 1747 struct ceph_timespec modified; 1748 s32 len; 1749 u64 pool; 1750 __s64 new_pool_max; 1751 __s32 new_flags, max; 1752 void *start = *p; 1753 int err; 1754 u8 struct_v; 1755 1756 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); 1757 1758 err = get_osdmap_client_data_v(p, end, "inc", &struct_v); 1759 if (err) 1760 goto bad; 1761 1762 /* fsid, epoch, modified, new_pool_max, new_flags */ 1763 ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) + 1764 sizeof(u64) + sizeof(u32), e_inval); 1765 ceph_decode_copy(p, &fsid, sizeof(fsid)); 1766 epoch = ceph_decode_32(p); 1767 BUG_ON(epoch != map->epoch+1); 1768 ceph_decode_copy(p, &modified, sizeof(modified)); 1769 new_pool_max = ceph_decode_64(p); 1770 new_flags = ceph_decode_32(p); 1771 1772 /* full map? */ 1773 ceph_decode_32_safe(p, end, len, e_inval); 1774 if (len > 0) { 1775 dout("apply_incremental full map len %d, %p to %p\n", 1776 len, *p, end); 1777 return ceph_osdmap_decode(p, min(*p+len, end)); 1778 } 1779 1780 /* new crush? */ 1781 ceph_decode_32_safe(p, end, len, e_inval); 1782 if (len > 0) { 1783 err = osdmap_set_crush(map, 1784 crush_decode(*p, min(*p + len, end))); 1785 if (err) 1786 goto bad; 1787 *p += len; 1788 } 1789 1790 /* new flags? */ 1791 if (new_flags >= 0) 1792 map->flags = new_flags; 1793 if (new_pool_max >= 0) 1794 map->pool_max = new_pool_max; 1795 1796 /* new max? 
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map)
{
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		err = osdmap_set_crush(map,
				       crush_decode(*p, min(*p + len, end)));
		if (err)
			goto bad;
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up_client, new_state, new_weight */
	err = decode_new_up_state_weight(p, end, struct_v, map);
	if (err)
		goto bad;

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	if (struct_v >= 3) {
		/* new_erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
		/* old_erasure_code_profiles */
		ceph_decode_skip_set(p, end, string, e_inval);
	}

	if (struct_v >= 4) {
		err = decode_new_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_new_pg_upmap_items(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return ERR_PTR(err);
}
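/*
 * Rough caller-side sketch (added illustration, not from the original
 * source): osdmap_apply_incremental() either updates @map in place and
 * returns it, or, when the incremental carries an embedded full map,
 * returns a newly allocated map.  A caller therefore typically does
 * something like:
 *
 *	newmap = osdmap_apply_incremental(&p, end, oldmap);
 *	if (IS_ERR(newmap))
 *		return PTR_ERR(newmap);
 *	if (newmap != oldmap) {
 *		ceph_osdmap_destroy(oldmap);
 *		oldmap = newmap;
 *	}
 */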
void ceph_oloc_copy(struct ceph_object_locator *dest,
		    const struct ceph_object_locator *src)
{
	ceph_oloc_destroy(dest);

	dest->pool = src->pool;
	if (src->pool_ns)
		dest->pool_ns = ceph_get_string(src->pool_ns);
	else
		dest->pool_ns = NULL;
}
EXPORT_SYMBOL(ceph_oloc_copy);

void ceph_oloc_destroy(struct ceph_object_locator *oloc)
{
	ceph_put_string(oloc->pool_ns);
}
EXPORT_SYMBOL(ceph_oloc_destroy);

void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src)
{
	ceph_oid_destroy(dest);

	if (src->name != src->inline_name) {
		/* very rare, see ceph_object_id definition */
		dest->name = kmalloc(src->name_len + 1,
				     GFP_NOIO | __GFP_NOFAIL);
	} else {
		dest->name = dest->inline_name;
	}
	memcpy(dest->name, src->name, src->name_len + 1);
	dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);

static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
	int len;

	WARN_ON(!ceph_oid_empty(oid));

	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
	if (len >= sizeof(oid->inline_name))
		return len;

	oid->name_len = len;
	return 0;
}

/*
 * If oid doesn't fit into inline buffer, BUG.
 */
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	BUG_ON(oid_printf_vargs(oid, fmt, ap));
	va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);

static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
		      const char *fmt, va_list ap)
{
	va_list aq;
	int len;

	va_copy(aq, ap);
	len = oid_printf_vargs(oid, fmt, aq);
	va_end(aq);

	if (len) {
		char *external_name;

		external_name = kmalloc(len + 1, gfp);
		if (!external_name)
			return -ENOMEM;

		oid->name = external_name;
		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
		oid->name_len = len;
	}

	return 0;
}

/*
 * If oid doesn't fit into inline buffer, allocate.
 */
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);

void ceph_oid_destroy(struct ceph_object_id *oid)
{
	if (oid->name != oid->inline_name)
		kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);

/*
 * osds only
 */
static bool __osds_equal(const struct ceph_osds *lhs,
			 const struct ceph_osds *rhs)
{
	if (lhs->size == rhs->size &&
	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
		return true;

	return false;
}

/*
 * osds + primary
 */
static bool osds_equal(const struct ceph_osds *lhs,
		       const struct ceph_osds *rhs)
{
	if (__osds_equal(lhs, rhs) &&
	    lhs->primary == rhs->primary)
		return true;

	return false;
}

static bool osds_valid(const struct ceph_osds *set)
{
	/* non-empty set */
	if (set->size > 0 && set->primary >= 0)
		return true;

	/* empty can_shift_osds set */
	if (!set->size && set->primary == -1)
		return true;

	/* empty !can_shift_osds set - all NONE */
	if (set->size > 0 && set->primary == -1) {
		int i;

		for (i = 0; i < set->size; i++) {
			if (set->osds[i] != CRUSH_ITEM_NONE)
				break;
		}
		if (i == set->size)
			return true;
	}

	return false;
}

void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
{
	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
	dest->size = src->size;
	dest->primary = src->primary;
}

bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
		      u32 new_pg_num)
{
	int old_bits = calc_bits_of(old_pg_num);
	int old_mask = (1 << old_bits) - 1;
	int n;

	WARN_ON(pgid->seed >= old_pg_num);
	if (new_pg_num <= old_pg_num)
		return false;

	for (n = 1; ; n++) {
		int next_bit = n << (old_bits - 1);
		u32 s = next_bit | pgid->seed;

		if (s < old_pg_num || s == pgid->seed)
			continue;
		if (s >= new_pg_num)
			break;

		s = ceph_stable_mod(s, old_pg_num, old_mask);
		if (s == pgid->seed)
			return true;
	}

	return false;
}
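/*
 * Worked example (illustrative, not from the original source): for
 * pgid->seed == 1 with old_pg_num == 4 and new_pg_num == 8, the loop
 * above tries s == 5 (0b101); ceph_stable_mod(5, 4, 7) folds back to
 * 1, so the new PG x.5 is a child of x.1 and the function returns
 * true.
 */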
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  bool old_recovery_deletes,
			  bool new_recovery_deletes,
			  const struct ceph_pg *pgid)
{
	return !osds_equal(old_acting, new_acting) ||
	       !osds_equal(old_up, new_up) ||
	       old_size != new_size ||
	       old_min_size != new_min_size ||
	       ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
	       old_sort_bitwise != new_sort_bitwise ||
	       old_recovery_deletes != new_recovery_deletes;
}

static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
	int i;

	for (i = 0; i < acting->size; i++) {
		if (acting->osds[i] == osd)
			return i;
	}

	return -1;
}

static bool primary_changed(const struct ceph_osds *old_acting,
			    const struct ceph_osds *new_acting)
{
	if (!old_acting->size && !new_acting->size)
		return false; /* both still empty */

	if (!old_acting->size ^ !new_acting->size)
		return true; /* was empty, now not, or vice versa */

	if (old_acting->primary != new_acting->primary)
		return true; /* primary changed */

	if (calc_pg_rank(old_acting->primary, old_acting) !=
	    calc_pg_rank(new_acting->primary, new_acting))
		return true;

	return false; /* same primary (tho replicas may have changed) */
}

bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change)
{
	if (primary_changed(old_acting, new_acting))
		return true;

	if (any_change && !__osds_equal(old_acting, new_acting))
		return true;

	return false;
}
/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
				 const struct ceph_object_id *oid,
				 const struct ceph_object_locator *oloc,
				 struct ceph_pg *raw_pgid)
{
	WARN_ON(pi->id != oloc->pool);

	if (!oloc->pool_ns) {
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
					       oid->name_len);
		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
		     raw_pgid->pool, raw_pgid->seed);
	} else {
		char stack_buf[256];
		char *buf = stack_buf;
		int nsl = oloc->pool_ns->len;
		size_t total = nsl + 1 + oid->name_len;

		if (total > sizeof(stack_buf))
			buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL);
		memcpy(buf, oloc->pool_ns->str, nsl);
		buf[nsl] = '\037';
		memcpy(buf + nsl + 1, oid->name, oid->name_len);
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
		if (buf != stack_buf)
			kfree(buf);
		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
		     oid->name, nsl, oloc->pool_ns->str,
		     raw_pgid->pool, raw_pgid->seed);
	}
}

int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      const struct ceph_object_id *oid,
			      const struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
	if (!pi)
		return -ENOENT;

	__ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
	return 0;
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);

/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}

/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
				       pi->pgp_num_mask) +
		       (unsigned)raw_pgid->pool;
	}
}
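/*
 * Illustrative example (not from the original source): with pgp_num ==
 * 8 (pgp_num_mask == 7) and HASHPSPOOL set, a raw seed of 13 is first
 * folded to 13 & 7 == 5 and then mixed with the pool id via
 * crush_hash32_2(), so two pools with the same folded seed still get
 * different CRUSH inputs.
 */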
/*
 * Magic value used for a "default" fallback choose_args, used if the
 * crush_choose_arg_map passed to do_crush() does not exist.  If this
 * also doesn't exist, fall back to canonical weights.
 */
#define CEPH_DEFAULT_CHOOSE_ARGS	-1

static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max,
		    s64 choose_args_index)
{
	struct crush_choose_arg_map *arg_map;
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
					choose_args_index);
	if (!arg_map)
		arg_map = lookup_choose_arg_map(&map->crush->choose_args,
						CEPH_DEFAULT_CHOOSE_ARGS);

	mutex_lock(&map->crush_workspace_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_workspace,
			  arg_map ? arg_map->args : NULL);
	mutex_unlock(&map->crush_workspace_mutex);

	return r;
}

static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
				    struct ceph_pg_pool_info *pi,
				    struct ceph_osds *set)
{
	int i;

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
	} else {
		/* set dne devices to NONE */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
		}
	}
}

/*
 * Calculate raw set (CRUSH output) for given PG and filter out
 * nonexistent OSDs.  ->primary is undefined for a raw set.
 *
 * Placement seed (CRUSH input) is returned through @ppps.
 */
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   const struct ceph_pg *raw_pgid,
			   struct ceph_osds *raw,
			   u32 *ppps)
{
	u32 pps = raw_pg_to_pps(pi, raw_pgid);
	int ruleno;
	int len;

	ceph_osds_init(raw);
	if (ppps)
		*ppps = pps;

	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
				 pi->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size);
		return;
	}

	if (pi->size > ARRAY_SIZE(raw->osds)) {
		pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size,
		       ARRAY_SIZE(raw->osds));
		return;
	}

	len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
		       osdmap->osd_weight, osdmap->max_osd, pi->id);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
		       pi->size);
		return;
	}

	raw->size = len;
	remove_nonexistent_osds(osdmap, pi, raw);
}
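/*
 * Worked example (hypothetical values): if CRUSH returns
 * raw->osds = {4, 7, 2} and osd.7 no longer exists in the map,
 * remove_nonexistent_osds() leaves {4, 2} with size 2 for a pool whose
 * OSDs can be shifted (replicated), but {4, CRUSH_ITEM_NONE, 2} with
 * size 3 for an erasure-coded pool, where each position corresponds to
 * a distinct shard and must keep its slot.
 */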
/* apply pg_upmap[_items] mappings */
static void apply_upmap(struct ceph_osdmap *osdmap,
			const struct ceph_pg *pgid,
			struct ceph_osds *raw)
{
	struct ceph_pg_mapping *pg;
	int i, j;

	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
	if (pg) {
		/* make sure targets aren't marked out */
		for (i = 0; i < pg->pg_upmap.len; i++) {
			int osd = pg->pg_upmap.osds[i];

			if (osd != CRUSH_ITEM_NONE &&
			    osd < osdmap->max_osd &&
			    osdmap->osd_weight[osd] == 0) {
				/* reject/ignore explicit mapping */
				return;
			}
		}
		for (i = 0; i < pg->pg_upmap.len; i++)
			raw->osds[i] = pg->pg_upmap.osds[i];
		raw->size = pg->pg_upmap.len;
		/* check and apply pg_upmap_items, if any */
	}

	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
	if (pg) {
		/*
		 * Note: this approach does not allow a bidirectional swap,
		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
		 */
		for (i = 0; i < pg->pg_upmap_items.len; i++) {
			int from = pg->pg_upmap_items.from_to[i][0];
			int to = pg->pg_upmap_items.from_to[i][1];
			int pos = -1;
			bool exists = false;

			/* make sure replacement doesn't already appear */
			for (j = 0; j < raw->size; j++) {
				int osd = raw->osds[j];

				if (osd == to) {
					exists = true;
					break;
				}
				/* ignore mapping if target is marked out */
				if (osd == from && pos < 0 &&
				    !(to != CRUSH_ITEM_NONE &&
				      to < osdmap->max_osd &&
				      osdmap->osd_weight[to] == 0)) {
					pos = j;
				}
			}
			if (!exists && pos >= 0)
				raw->osds[pos] = to;
		}
	}
}

/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   struct ceph_osds *set)
{
	int i;

	/* ->primary is undefined for a raw set */
	BUG_ON(set->primary != -1);

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (ceph_osd_is_down(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
		if (set->size > 0)
			set->primary = set->osds[0];
	} else {
		/* set down/dne devices to NONE */
		for (i = set->size - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
			else
				set->primary = set->osds[i];
		}
	}
}
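/*
 * Rough numbers for apply_primary_affinity() below (hypothetical
 * values): affinities share the 16-bit scale of
 * CEPH_OSD_MAX_PRIMARY_AFFINITY, and the hash of (pps, osd) shifted
 * down to 16 bits is roughly uniform over that scale, so an OSD with
 * a primary affinity of 0x8000 (i.e. 0.5) is rejected as primary (and
 * only remembered as a fallback) for about half of its PGs, while an
 * OSD at the maximum affinity is never rejected.
 */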
static void apply_primary_affinity(struct ceph_osdmap *osdmap,
				   struct ceph_pg_pool_info *pi,
				   u32 pps,
				   struct ceph_osds *up)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == up->size)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	up->primary = up->osds[pos];

	if (ceph_can_shift_osds(pi) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			up->osds[i] = up->osds[i - 1];
		up->osds[0] = up->primary;
	}
}

/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg_mapping *pg;
	int i;

	ceph_osds_init(temp);

	/* pg_temp? */
	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}
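/*
 * Worked example (hypothetical values): if CRUSH maps a PG to
 * up = {5, 0} but a pg_temp entry of {2, 0} exists (say, while osd.5
 * is still backfilling), ceph_pg_to_up_acting_osds() below returns
 * up = {5, 0} with primary 5 and acting = {2, 0} with primary 2:
 * client I/O is directed at the acting set, while the up set is what
 * gets recorded for interval-change detection.
 */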
/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       struct ceph_pg_pool_info *pi,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg pgid;
	u32 pps;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	apply_upmap(osdmap, &pgid, up);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, &pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}

bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
			      struct ceph_pg_pool_info *pi,
			      const struct ceph_pg *raw_pgid,
			      struct ceph_spg *spgid)
{
	struct ceph_pg pgid;
	struct ceph_osds up, acting;
	int i;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	if (ceph_can_shift_osds(pi)) {
		spgid->pgid = pgid; /* struct */
		spgid->shard = CEPH_SPG_NOSHARD;
		return true;
	}

	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
	for (i = 0; i < acting.size; i++) {
		if (acting.osds[i] == acting.primary) {
			spgid->pgid = pgid; /* struct */
			spgid->shard = i;
			return true;
		}
	}

	return false;
}

/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_osds up, acting;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi)
		return -1;

	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
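/*
 * Minimal usage sketch (hypothetical caller; a real user such as the
 * OSD client also handles osdmap locking, epochs and resends, which
 * are omitted here): map an object to its raw PG, then ask for the
 * acting primary.
 *
 *	struct ceph_pg raw_pgid;
 *	int primary;
 *
 *	// oid/oloc are assumed to already describe the target object
 *	// and its pool/namespace
 *	if (ceph_object_locator_to_pg(osdmap, &oid, &oloc, &raw_pgid))
 *		return;		// -ENOENT: pool does not exist
 *
 *	primary = ceph_pg_to_acting_primary(osdmap, &raw_pgid);
 *	if (primary < 0)
 *		return;		// no acting primary for this PG
 *	// send the request to 'primary'
 */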