1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/ceph/ceph_debug.h> 4 5 #include <linux/module.h> 6 #include <linux/slab.h> 7 8 #include <linux/ceph/libceph.h> 9 #include <linux/ceph/osdmap.h> 10 #include <linux/ceph/decode.h> 11 #include <linux/crush/hash.h> 12 #include <linux/crush/mapper.h> 13 14 char *ceph_osdmap_state_str(char *str, int len, u32 state) 15 { 16 if (!len) 17 return str; 18 19 if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP)) 20 snprintf(str, len, "exists, up"); 21 else if (state & CEPH_OSD_EXISTS) 22 snprintf(str, len, "exists"); 23 else if (state & CEPH_OSD_UP) 24 snprintf(str, len, "up"); 25 else 26 snprintf(str, len, "doesn't exist"); 27 28 return str; 29 } 30 31 /* maps */ 32 33 static int calc_bits_of(unsigned int t) 34 { 35 int b = 0; 36 while (t) { 37 t = t >> 1; 38 b++; 39 } 40 return b; 41 } 42 43 /* 44 * the foo_mask is the smallest value 2^n-1 that is >= foo. 45 */ 46 static void calc_pg_masks(struct ceph_pg_pool_info *pi) 47 { 48 pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1; 49 pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1; 50 } 51 52 /* 53 * decode crush map 54 */ 55 static int crush_decode_uniform_bucket(void **p, void *end, 56 struct crush_bucket_uniform *b) 57 { 58 dout("crush_decode_uniform_bucket %p to %p\n", *p, end); 59 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); 60 b->item_weight = ceph_decode_32(p); 61 return 0; 62 bad: 63 return -EINVAL; 64 } 65 66 static int crush_decode_list_bucket(void **p, void *end, 67 struct crush_bucket_list *b) 68 { 69 int j; 70 dout("crush_decode_list_bucket %p to %p\n", *p, end); 71 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 72 if (b->item_weights == NULL) 73 return -ENOMEM; 74 b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 75 if (b->sum_weights == NULL) 76 return -ENOMEM; 77 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 78 for (j = 0; j < b->h.size; j++) { 79 b->item_weights[j] = ceph_decode_32(p); 80 b->sum_weights[j] = ceph_decode_32(p); 81 } 82 return 0; 83 bad: 84 return -EINVAL; 85 } 86 87 static int crush_decode_tree_bucket(void **p, void *end, 88 struct crush_bucket_tree *b) 89 { 90 int j; 91 dout("crush_decode_tree_bucket %p to %p\n", *p, end); 92 ceph_decode_8_safe(p, end, b->num_nodes, bad); 93 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS); 94 if (b->node_weights == NULL) 95 return -ENOMEM; 96 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad); 97 for (j = 0; j < b->num_nodes; j++) 98 b->node_weights[j] = ceph_decode_32(p); 99 return 0; 100 bad: 101 return -EINVAL; 102 } 103 104 static int crush_decode_straw_bucket(void **p, void *end, 105 struct crush_bucket_straw *b) 106 { 107 int j; 108 dout("crush_decode_straw_bucket %p to %p\n", *p, end); 109 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 110 if (b->item_weights == NULL) 111 return -ENOMEM; 112 b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 113 if (b->straws == NULL) 114 return -ENOMEM; 115 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 116 for (j = 0; j < b->h.size; j++) { 117 b->item_weights[j] = ceph_decode_32(p); 118 b->straws[j] = ceph_decode_32(p); 119 } 120 return 0; 121 bad: 122 return -EINVAL; 123 } 124 125 static int crush_decode_straw2_bucket(void **p, void *end, 126 struct crush_bucket_straw2 *b) 127 { 128 int j; 129 dout("crush_decode_straw2_bucket %p to %p\n", *p, end); 130 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 131 if (b->item_weights == NULL) 132 return -ENOMEM; 133 ceph_decode_need(p, end, b->h.size * sizeof(u32), bad); 134 for (j = 0; j < b->h.size; j++) 135 b->item_weights[j] = ceph_decode_32(p); 136 return 0; 137 bad: 138 return -EINVAL; 139 } 140 141 static struct crush_choose_arg_map *alloc_choose_arg_map(void) 142 { 143 struct crush_choose_arg_map *arg_map; 144 145 arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO); 146 if (!arg_map) 147 return NULL; 148 149 RB_CLEAR_NODE(&arg_map->node); 150 return arg_map; 151 } 152 153 static void free_choose_arg_map(struct crush_choose_arg_map *arg_map) 154 { 155 if (arg_map) { 156 int i, j; 157 158 WARN_ON(!RB_EMPTY_NODE(&arg_map->node)); 159 160 for (i = 0; i < arg_map->size; i++) { 161 struct crush_choose_arg *arg = &arg_map->args[i]; 162 163 for (j = 0; j < arg->weight_set_size; j++) 164 kfree(arg->weight_set[j].weights); 165 kfree(arg->weight_set); 166 kfree(arg->ids); 167 } 168 kfree(arg_map->args); 169 kfree(arg_map); 170 } 171 } 172 173 DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index, 174 node); 175 176 void clear_choose_args(struct crush_map *c) 177 { 178 while (!RB_EMPTY_ROOT(&c->choose_args)) { 179 struct crush_choose_arg_map *arg_map = 180 rb_entry(rb_first(&c->choose_args), 181 struct crush_choose_arg_map, node); 182 183 erase_choose_arg_map(&c->choose_args, arg_map); 184 free_choose_arg_map(arg_map); 185 } 186 } 187 188 static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen) 189 { 190 u32 *a = NULL; 191 u32 len; 192 int ret; 193 194 ceph_decode_32_safe(p, end, len, e_inval); 195 if (len) { 196 u32 i; 197 198 a = kmalloc_array(len, sizeof(u32), GFP_NOIO); 199 if (!a) { 200 ret = -ENOMEM; 201 goto fail; 202 } 203 204 ceph_decode_need(p, end, len * sizeof(u32), e_inval); 205 for (i = 0; i < len; i++) 206 a[i] = ceph_decode_32(p); 207 } 208 209 *plen = len; 210 return a; 211 212 e_inval: 213 ret = -EINVAL; 214 fail: 215 kfree(a); 216 return ERR_PTR(ret); 217 } 218 219 /* 220 * Assumes @arg is zero-initialized. 221 */ 222 static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg) 223 { 224 int ret; 225 226 ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval); 227 if (arg->weight_set_size) { 228 u32 i; 229 230 arg->weight_set = kmalloc_array(arg->weight_set_size, 231 sizeof(*arg->weight_set), 232 GFP_NOIO); 233 if (!arg->weight_set) 234 return -ENOMEM; 235 236 for (i = 0; i < arg->weight_set_size; i++) { 237 struct crush_weight_set *w = &arg->weight_set[i]; 238 239 w->weights = decode_array_32_alloc(p, end, &w->size); 240 if (IS_ERR(w->weights)) { 241 ret = PTR_ERR(w->weights); 242 w->weights = NULL; 243 return ret; 244 } 245 } 246 } 247 248 arg->ids = decode_array_32_alloc(p, end, &arg->ids_size); 249 if (IS_ERR(arg->ids)) { 250 ret = PTR_ERR(arg->ids); 251 arg->ids = NULL; 252 return ret; 253 } 254 255 return 0; 256 257 e_inval: 258 return -EINVAL; 259 } 260 261 static int decode_choose_args(void **p, void *end, struct crush_map *c) 262 { 263 struct crush_choose_arg_map *arg_map = NULL; 264 u32 num_choose_arg_maps, num_buckets; 265 int ret; 266 267 ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval); 268 while (num_choose_arg_maps--) { 269 arg_map = alloc_choose_arg_map(); 270 if (!arg_map) { 271 ret = -ENOMEM; 272 goto fail; 273 } 274 275 ceph_decode_64_safe(p, end, arg_map->choose_args_index, 276 e_inval); 277 arg_map->size = c->max_buckets; 278 arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args), 279 GFP_NOIO); 280 if (!arg_map->args) { 281 ret = -ENOMEM; 282 goto fail; 283 } 284 285 ceph_decode_32_safe(p, end, num_buckets, e_inval); 286 while (num_buckets--) { 287 struct crush_choose_arg *arg; 288 u32 bucket_index; 289 290 ceph_decode_32_safe(p, end, bucket_index, e_inval); 291 if (bucket_index >= arg_map->size) 292 goto e_inval; 293 294 arg = &arg_map->args[bucket_index]; 295 ret = decode_choose_arg(p, end, arg); 296 if (ret) 297 goto fail; 298 299 if (arg->ids_size && 300 arg->ids_size != c->buckets[bucket_index]->size) 301 goto e_inval; 302 } 303 304 insert_choose_arg_map(&c->choose_args, arg_map); 305 } 306 307 return 0; 308 309 e_inval: 310 ret = -EINVAL; 311 fail: 312 free_choose_arg_map(arg_map); 313 return ret; 314 } 315 316 static void crush_finalize(struct crush_map *c) 317 { 318 __s32 b; 319 320 /* Space for the array of pointers to per-bucket workspace */ 321 c->working_size = sizeof(struct crush_work) + 322 c->max_buckets * sizeof(struct crush_work_bucket *); 323 324 for (b = 0; b < c->max_buckets; b++) { 325 if (!c->buckets[b]) 326 continue; 327 328 switch (c->buckets[b]->alg) { 329 default: 330 /* 331 * The base case, permutation variables and 332 * the pointer to the permutation array. 333 */ 334 c->working_size += sizeof(struct crush_work_bucket); 335 break; 336 } 337 /* Every bucket has a permutation array. */ 338 c->working_size += c->buckets[b]->size * sizeof(__u32); 339 } 340 } 341 342 static struct crush_map *crush_decode(void *pbyval, void *end) 343 { 344 struct crush_map *c; 345 int err; 346 int i, j; 347 void **p = &pbyval; 348 void *start = pbyval; 349 u32 magic; 350 351 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p)); 352 353 c = kzalloc(sizeof(*c), GFP_NOFS); 354 if (c == NULL) 355 return ERR_PTR(-ENOMEM); 356 357 c->choose_args = RB_ROOT; 358 359 /* set tunables to default values */ 360 c->choose_local_tries = 2; 361 c->choose_local_fallback_tries = 5; 362 c->choose_total_tries = 19; 363 c->chooseleaf_descend_once = 0; 364 365 ceph_decode_need(p, end, 4*sizeof(u32), bad); 366 magic = ceph_decode_32(p); 367 if (magic != CRUSH_MAGIC) { 368 pr_err("crush_decode magic %x != current %x\n", 369 (unsigned int)magic, (unsigned int)CRUSH_MAGIC); 370 goto bad; 371 } 372 c->max_buckets = ceph_decode_32(p); 373 c->max_rules = ceph_decode_32(p); 374 c->max_devices = ceph_decode_32(p); 375 376 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS); 377 if (c->buckets == NULL) 378 goto badmem; 379 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS); 380 if (c->rules == NULL) 381 goto badmem; 382 383 /* buckets */ 384 for (i = 0; i < c->max_buckets; i++) { 385 int size = 0; 386 u32 alg; 387 struct crush_bucket *b; 388 389 ceph_decode_32_safe(p, end, alg, bad); 390 if (alg == 0) { 391 c->buckets[i] = NULL; 392 continue; 393 } 394 dout("crush_decode bucket %d off %x %p to %p\n", 395 i, (int)(*p-start), *p, end); 396 397 switch (alg) { 398 case CRUSH_BUCKET_UNIFORM: 399 size = sizeof(struct crush_bucket_uniform); 400 break; 401 case CRUSH_BUCKET_LIST: 402 size = sizeof(struct crush_bucket_list); 403 break; 404 case CRUSH_BUCKET_TREE: 405 size = sizeof(struct crush_bucket_tree); 406 break; 407 case CRUSH_BUCKET_STRAW: 408 size = sizeof(struct crush_bucket_straw); 409 break; 410 case CRUSH_BUCKET_STRAW2: 411 size = sizeof(struct crush_bucket_straw2); 412 break; 413 default: 414 goto bad; 415 } 416 BUG_ON(size == 0); 417 b = c->buckets[i] = kzalloc(size, GFP_NOFS); 418 if (b == NULL) 419 goto badmem; 420 421 ceph_decode_need(p, end, 4*sizeof(u32), bad); 422 b->id = ceph_decode_32(p); 423 b->type = ceph_decode_16(p); 424 b->alg = ceph_decode_8(p); 425 b->hash = ceph_decode_8(p); 426 b->weight = ceph_decode_32(p); 427 b->size = ceph_decode_32(p); 428 429 dout("crush_decode bucket size %d off %x %p to %p\n", 430 b->size, (int)(*p-start), *p, end); 431 432 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS); 433 if (b->items == NULL) 434 goto badmem; 435 436 ceph_decode_need(p, end, b->size*sizeof(u32), bad); 437 for (j = 0; j < b->size; j++) 438 b->items[j] = ceph_decode_32(p); 439 440 switch (b->alg) { 441 case CRUSH_BUCKET_UNIFORM: 442 err = crush_decode_uniform_bucket(p, end, 443 (struct crush_bucket_uniform *)b); 444 if (err < 0) 445 goto fail; 446 break; 447 case CRUSH_BUCKET_LIST: 448 err = crush_decode_list_bucket(p, end, 449 (struct crush_bucket_list *)b); 450 if (err < 0) 451 goto fail; 452 break; 453 case CRUSH_BUCKET_TREE: 454 err = crush_decode_tree_bucket(p, end, 455 (struct crush_bucket_tree *)b); 456 if (err < 0) 457 goto fail; 458 break; 459 case CRUSH_BUCKET_STRAW: 460 err = crush_decode_straw_bucket(p, end, 461 (struct crush_bucket_straw *)b); 462 if (err < 0) 463 goto fail; 464 break; 465 case CRUSH_BUCKET_STRAW2: 466 err = crush_decode_straw2_bucket(p, end, 467 (struct crush_bucket_straw2 *)b); 468 if (err < 0) 469 goto fail; 470 break; 471 } 472 } 473 474 /* rules */ 475 dout("rule vec is %p\n", c->rules); 476 for (i = 0; i < c->max_rules; i++) { 477 u32 yes; 478 struct crush_rule *r; 479 480 ceph_decode_32_safe(p, end, yes, bad); 481 if (!yes) { 482 dout("crush_decode NO rule %d off %x %p to %p\n", 483 i, (int)(*p-start), *p, end); 484 c->rules[i] = NULL; 485 continue; 486 } 487 488 dout("crush_decode rule %d off %x %p to %p\n", 489 i, (int)(*p-start), *p, end); 490 491 /* len */ 492 ceph_decode_32_safe(p, end, yes, bad); 493 #if BITS_PER_LONG == 32 494 if (yes > (ULONG_MAX - sizeof(*r)) 495 / sizeof(struct crush_rule_step)) 496 goto bad; 497 #endif 498 r = kmalloc(struct_size(r, steps, yes), GFP_NOFS); 499 c->rules[i] = r; 500 if (r == NULL) 501 goto badmem; 502 dout(" rule %d is at %p\n", i, r); 503 r->len = yes; 504 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */ 505 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad); 506 for (j = 0; j < r->len; j++) { 507 r->steps[j].op = ceph_decode_32(p); 508 r->steps[j].arg1 = ceph_decode_32(p); 509 r->steps[j].arg2 = ceph_decode_32(p); 510 } 511 } 512 513 ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */ 514 ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */ 515 ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */ 516 517 /* tunables */ 518 ceph_decode_need(p, end, 3*sizeof(u32), done); 519 c->choose_local_tries = ceph_decode_32(p); 520 c->choose_local_fallback_tries = ceph_decode_32(p); 521 c->choose_total_tries = ceph_decode_32(p); 522 dout("crush decode tunable choose_local_tries = %d\n", 523 c->choose_local_tries); 524 dout("crush decode tunable choose_local_fallback_tries = %d\n", 525 c->choose_local_fallback_tries); 526 dout("crush decode tunable choose_total_tries = %d\n", 527 c->choose_total_tries); 528 529 ceph_decode_need(p, end, sizeof(u32), done); 530 c->chooseleaf_descend_once = ceph_decode_32(p); 531 dout("crush decode tunable chooseleaf_descend_once = %d\n", 532 c->chooseleaf_descend_once); 533 534 ceph_decode_need(p, end, sizeof(u8), done); 535 c->chooseleaf_vary_r = ceph_decode_8(p); 536 dout("crush decode tunable chooseleaf_vary_r = %d\n", 537 c->chooseleaf_vary_r); 538 539 /* skip straw_calc_version, allowed_bucket_algs */ 540 ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done); 541 *p += sizeof(u8) + sizeof(u32); 542 543 ceph_decode_need(p, end, sizeof(u8), done); 544 c->chooseleaf_stable = ceph_decode_8(p); 545 dout("crush decode tunable chooseleaf_stable = %d\n", 546 c->chooseleaf_stable); 547 548 if (*p != end) { 549 /* class_map */ 550 ceph_decode_skip_map(p, end, 32, 32, bad); 551 /* class_name */ 552 ceph_decode_skip_map(p, end, 32, string, bad); 553 /* class_bucket */ 554 ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad); 555 } 556 557 if (*p != end) { 558 err = decode_choose_args(p, end, c); 559 if (err) 560 goto fail; 561 } 562 563 done: 564 crush_finalize(c); 565 dout("crush_decode success\n"); 566 return c; 567 568 badmem: 569 err = -ENOMEM; 570 fail: 571 dout("crush_decode fail %d\n", err); 572 crush_destroy(c); 573 return ERR_PTR(err); 574 575 bad: 576 err = -EINVAL; 577 goto fail; 578 } 579 580 int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs) 581 { 582 if (lhs->pool < rhs->pool) 583 return -1; 584 if (lhs->pool > rhs->pool) 585 return 1; 586 if (lhs->seed < rhs->seed) 587 return -1; 588 if (lhs->seed > rhs->seed) 589 return 1; 590 591 return 0; 592 } 593 594 int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs) 595 { 596 int ret; 597 598 ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid); 599 if (ret) 600 return ret; 601 602 if (lhs->shard < rhs->shard) 603 return -1; 604 if (lhs->shard > rhs->shard) 605 return 1; 606 607 return 0; 608 } 609 610 static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len) 611 { 612 struct ceph_pg_mapping *pg; 613 614 pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO); 615 if (!pg) 616 return NULL; 617 618 RB_CLEAR_NODE(&pg->node); 619 return pg; 620 } 621 622 static void free_pg_mapping(struct ceph_pg_mapping *pg) 623 { 624 WARN_ON(!RB_EMPTY_NODE(&pg->node)); 625 626 kfree(pg); 627 } 628 629 /* 630 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid 631 * to a set of osds) and primary_temp (explicit primary setting) 632 */ 633 DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare, 634 RB_BYPTR, const struct ceph_pg *, node) 635 636 /* 637 * rbtree of pg pool info 638 */ 639 static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new) 640 { 641 struct rb_node **p = &root->rb_node; 642 struct rb_node *parent = NULL; 643 struct ceph_pg_pool_info *pi = NULL; 644 645 while (*p) { 646 parent = *p; 647 pi = rb_entry(parent, struct ceph_pg_pool_info, node); 648 if (new->id < pi->id) 649 p = &(*p)->rb_left; 650 else if (new->id > pi->id) 651 p = &(*p)->rb_right; 652 else 653 return -EEXIST; 654 } 655 656 rb_link_node(&new->node, parent, p); 657 rb_insert_color(&new->node, root); 658 return 0; 659 } 660 661 static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id) 662 { 663 struct ceph_pg_pool_info *pi; 664 struct rb_node *n = root->rb_node; 665 666 while (n) { 667 pi = rb_entry(n, struct ceph_pg_pool_info, node); 668 if (id < pi->id) 669 n = n->rb_left; 670 else if (id > pi->id) 671 n = n->rb_right; 672 else 673 return pi; 674 } 675 return NULL; 676 } 677 678 struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id) 679 { 680 return __lookup_pg_pool(&map->pg_pools, id); 681 } 682 683 const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id) 684 { 685 struct ceph_pg_pool_info *pi; 686 687 if (id == CEPH_NOPOOL) 688 return NULL; 689 690 if (WARN_ON_ONCE(id > (u64) INT_MAX)) 691 return NULL; 692 693 pi = __lookup_pg_pool(&map->pg_pools, (int) id); 694 695 return pi ? pi->name : NULL; 696 } 697 EXPORT_SYMBOL(ceph_pg_pool_name_by_id); 698 699 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) 700 { 701 struct rb_node *rbp; 702 703 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) { 704 struct ceph_pg_pool_info *pi = 705 rb_entry(rbp, struct ceph_pg_pool_info, node); 706 if (pi->name && strcmp(pi->name, name) == 0) 707 return pi->id; 708 } 709 return -ENOENT; 710 } 711 EXPORT_SYMBOL(ceph_pg_poolid_by_name); 712 713 u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id) 714 { 715 struct ceph_pg_pool_info *pi; 716 717 pi = __lookup_pg_pool(&map->pg_pools, id); 718 return pi ? pi->flags : 0; 719 } 720 EXPORT_SYMBOL(ceph_pg_pool_flags); 721 722 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) 723 { 724 rb_erase(&pi->node, root); 725 kfree(pi->name); 726 kfree(pi); 727 } 728 729 static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) 730 { 731 u8 ev, cv; 732 unsigned len, num; 733 void *pool_end; 734 735 ceph_decode_need(p, end, 2 + 4, bad); 736 ev = ceph_decode_8(p); /* encoding version */ 737 cv = ceph_decode_8(p); /* compat version */ 738 if (ev < 5) { 739 pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv); 740 return -EINVAL; 741 } 742 if (cv > 9) { 743 pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv); 744 return -EINVAL; 745 } 746 len = ceph_decode_32(p); 747 ceph_decode_need(p, end, len, bad); 748 pool_end = *p + len; 749 750 pi->type = ceph_decode_8(p); 751 pi->size = ceph_decode_8(p); 752 pi->crush_ruleset = ceph_decode_8(p); 753 pi->object_hash = ceph_decode_8(p); 754 755 pi->pg_num = ceph_decode_32(p); 756 pi->pgp_num = ceph_decode_32(p); 757 758 *p += 4 + 4; /* skip lpg* */ 759 *p += 4; /* skip last_change */ 760 *p += 8 + 4; /* skip snap_seq, snap_epoch */ 761 762 /* skip snaps */ 763 num = ceph_decode_32(p); 764 while (num--) { 765 *p += 8; /* snapid key */ 766 *p += 1 + 1; /* versions */ 767 len = ceph_decode_32(p); 768 *p += len; 769 } 770 771 /* skip removed_snaps */ 772 num = ceph_decode_32(p); 773 *p += num * (8 + 8); 774 775 *p += 8; /* skip auid */ 776 pi->flags = ceph_decode_64(p); 777 *p += 4; /* skip crash_replay_interval */ 778 779 if (ev >= 7) 780 pi->min_size = ceph_decode_8(p); 781 else 782 pi->min_size = pi->size - pi->size / 2; 783 784 if (ev >= 8) 785 *p += 8 + 8; /* skip quota_max_* */ 786 787 if (ev >= 9) { 788 /* skip tiers */ 789 num = ceph_decode_32(p); 790 *p += num * 8; 791 792 *p += 8; /* skip tier_of */ 793 *p += 1; /* skip cache_mode */ 794 795 pi->read_tier = ceph_decode_64(p); 796 pi->write_tier = ceph_decode_64(p); 797 } else { 798 pi->read_tier = -1; 799 pi->write_tier = -1; 800 } 801 802 if (ev >= 10) { 803 /* skip properties */ 804 num = ceph_decode_32(p); 805 while (num--) { 806 len = ceph_decode_32(p); 807 *p += len; /* key */ 808 len = ceph_decode_32(p); 809 *p += len; /* val */ 810 } 811 } 812 813 if (ev >= 11) { 814 /* skip hit_set_params */ 815 *p += 1 + 1; /* versions */ 816 len = ceph_decode_32(p); 817 *p += len; 818 819 *p += 4; /* skip hit_set_period */ 820 *p += 4; /* skip hit_set_count */ 821 } 822 823 if (ev >= 12) 824 *p += 4; /* skip stripe_width */ 825 826 if (ev >= 13) { 827 *p += 8; /* skip target_max_bytes */ 828 *p += 8; /* skip target_max_objects */ 829 *p += 4; /* skip cache_target_dirty_ratio_micro */ 830 *p += 4; /* skip cache_target_full_ratio_micro */ 831 *p += 4; /* skip cache_min_flush_age */ 832 *p += 4; /* skip cache_min_evict_age */ 833 } 834 835 if (ev >= 14) { 836 /* skip erasure_code_profile */ 837 len = ceph_decode_32(p); 838 *p += len; 839 } 840 841 /* 842 * last_force_op_resend_preluminous, will be overridden if the 843 * map was encoded with RESEND_ON_SPLIT 844 */ 845 if (ev >= 15) 846 pi->last_force_request_resend = ceph_decode_32(p); 847 else 848 pi->last_force_request_resend = 0; 849 850 if (ev >= 16) 851 *p += 4; /* skip min_read_recency_for_promote */ 852 853 if (ev >= 17) 854 *p += 8; /* skip expected_num_objects */ 855 856 if (ev >= 19) 857 *p += 4; /* skip cache_target_dirty_high_ratio_micro */ 858 859 if (ev >= 20) 860 *p += 4; /* skip min_write_recency_for_promote */ 861 862 if (ev >= 21) 863 *p += 1; /* skip use_gmt_hitset */ 864 865 if (ev >= 22) 866 *p += 1; /* skip fast_read */ 867 868 if (ev >= 23) { 869 *p += 4; /* skip hit_set_grade_decay_rate */ 870 *p += 4; /* skip hit_set_search_last_n */ 871 } 872 873 if (ev >= 24) { 874 /* skip opts */ 875 *p += 1 + 1; /* versions */ 876 len = ceph_decode_32(p); 877 *p += len; 878 } 879 880 if (ev >= 25) 881 pi->last_force_request_resend = ceph_decode_32(p); 882 883 /* ignore the rest */ 884 885 *p = pool_end; 886 calc_pg_masks(pi); 887 return 0; 888 889 bad: 890 return -EINVAL; 891 } 892 893 static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map) 894 { 895 struct ceph_pg_pool_info *pi; 896 u32 num, len; 897 u64 pool; 898 899 ceph_decode_32_safe(p, end, num, bad); 900 dout(" %d pool names\n", num); 901 while (num--) { 902 ceph_decode_64_safe(p, end, pool, bad); 903 ceph_decode_32_safe(p, end, len, bad); 904 dout(" pool %llu len %d\n", pool, len); 905 ceph_decode_need(p, end, len, bad); 906 pi = __lookup_pg_pool(&map->pg_pools, pool); 907 if (pi) { 908 char *name = kstrndup(*p, len, GFP_NOFS); 909 910 if (!name) 911 return -ENOMEM; 912 kfree(pi->name); 913 pi->name = name; 914 dout(" name is %s\n", pi->name); 915 } 916 *p += len; 917 } 918 return 0; 919 920 bad: 921 return -EINVAL; 922 } 923 924 /* 925 * osd map 926 */ 927 struct ceph_osdmap *ceph_osdmap_alloc(void) 928 { 929 struct ceph_osdmap *map; 930 931 map = kzalloc(sizeof(*map), GFP_NOIO); 932 if (!map) 933 return NULL; 934 935 map->pg_pools = RB_ROOT; 936 map->pool_max = -1; 937 map->pg_temp = RB_ROOT; 938 map->primary_temp = RB_ROOT; 939 map->pg_upmap = RB_ROOT; 940 map->pg_upmap_items = RB_ROOT; 941 mutex_init(&map->crush_workspace_mutex); 942 943 return map; 944 } 945 946 void ceph_osdmap_destroy(struct ceph_osdmap *map) 947 { 948 dout("osdmap_destroy %p\n", map); 949 if (map->crush) 950 crush_destroy(map->crush); 951 while (!RB_EMPTY_ROOT(&map->pg_temp)) { 952 struct ceph_pg_mapping *pg = 953 rb_entry(rb_first(&map->pg_temp), 954 struct ceph_pg_mapping, node); 955 erase_pg_mapping(&map->pg_temp, pg); 956 free_pg_mapping(pg); 957 } 958 while (!RB_EMPTY_ROOT(&map->primary_temp)) { 959 struct ceph_pg_mapping *pg = 960 rb_entry(rb_first(&map->primary_temp), 961 struct ceph_pg_mapping, node); 962 erase_pg_mapping(&map->primary_temp, pg); 963 free_pg_mapping(pg); 964 } 965 while (!RB_EMPTY_ROOT(&map->pg_upmap)) { 966 struct ceph_pg_mapping *pg = 967 rb_entry(rb_first(&map->pg_upmap), 968 struct ceph_pg_mapping, node); 969 rb_erase(&pg->node, &map->pg_upmap); 970 kfree(pg); 971 } 972 while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) { 973 struct ceph_pg_mapping *pg = 974 rb_entry(rb_first(&map->pg_upmap_items), 975 struct ceph_pg_mapping, node); 976 rb_erase(&pg->node, &map->pg_upmap_items); 977 kfree(pg); 978 } 979 while (!RB_EMPTY_ROOT(&map->pg_pools)) { 980 struct ceph_pg_pool_info *pi = 981 rb_entry(rb_first(&map->pg_pools), 982 struct ceph_pg_pool_info, node); 983 __remove_pg_pool(&map->pg_pools, pi); 984 } 985 kvfree(map->osd_state); 986 kvfree(map->osd_weight); 987 kvfree(map->osd_addr); 988 kvfree(map->osd_primary_affinity); 989 kvfree(map->crush_workspace); 990 kfree(map); 991 } 992 993 /* 994 * Adjust max_osd value, (re)allocate arrays. 995 * 996 * The new elements are properly initialized. 997 */ 998 static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max) 999 { 1000 u32 *state; 1001 u32 *weight; 1002 struct ceph_entity_addr *addr; 1003 u32 to_copy; 1004 int i; 1005 1006 dout("%s old %u new %u\n", __func__, map->max_osd, max); 1007 if (max == map->max_osd) 1008 return 0; 1009 1010 state = ceph_kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS); 1011 weight = ceph_kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS); 1012 addr = ceph_kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS); 1013 if (!state || !weight || !addr) { 1014 kvfree(state); 1015 kvfree(weight); 1016 kvfree(addr); 1017 return -ENOMEM; 1018 } 1019 1020 to_copy = min(map->max_osd, max); 1021 if (map->osd_state) { 1022 memcpy(state, map->osd_state, to_copy * sizeof(*state)); 1023 memcpy(weight, map->osd_weight, to_copy * sizeof(*weight)); 1024 memcpy(addr, map->osd_addr, to_copy * sizeof(*addr)); 1025 kvfree(map->osd_state); 1026 kvfree(map->osd_weight); 1027 kvfree(map->osd_addr); 1028 } 1029 1030 map->osd_state = state; 1031 map->osd_weight = weight; 1032 map->osd_addr = addr; 1033 for (i = map->max_osd; i < max; i++) { 1034 map->osd_state[i] = 0; 1035 map->osd_weight[i] = CEPH_OSD_OUT; 1036 memset(map->osd_addr + i, 0, sizeof(*map->osd_addr)); 1037 } 1038 1039 if (map->osd_primary_affinity) { 1040 u32 *affinity; 1041 1042 affinity = ceph_kvmalloc(array_size(max, sizeof(*affinity)), 1043 GFP_NOFS); 1044 if (!affinity) 1045 return -ENOMEM; 1046 1047 memcpy(affinity, map->osd_primary_affinity, 1048 to_copy * sizeof(*affinity)); 1049 kvfree(map->osd_primary_affinity); 1050 1051 map->osd_primary_affinity = affinity; 1052 for (i = map->max_osd; i < max; i++) 1053 map->osd_primary_affinity[i] = 1054 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1055 } 1056 1057 map->max_osd = max; 1058 1059 return 0; 1060 } 1061 1062 static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush) 1063 { 1064 void *workspace; 1065 size_t work_size; 1066 1067 if (IS_ERR(crush)) 1068 return PTR_ERR(crush); 1069 1070 work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE); 1071 dout("%s work_size %zu bytes\n", __func__, work_size); 1072 workspace = ceph_kvmalloc(work_size, GFP_NOIO); 1073 if (!workspace) { 1074 crush_destroy(crush); 1075 return -ENOMEM; 1076 } 1077 crush_init_workspace(crush, workspace); 1078 1079 if (map->crush) 1080 crush_destroy(map->crush); 1081 kvfree(map->crush_workspace); 1082 map->crush = crush; 1083 map->crush_workspace = workspace; 1084 return 0; 1085 } 1086 1087 #define OSDMAP_WRAPPER_COMPAT_VER 7 1088 #define OSDMAP_CLIENT_DATA_COMPAT_VER 1 1089 1090 /* 1091 * Return 0 or error. On success, *v is set to 0 for old (v6) osdmaps, 1092 * to struct_v of the client_data section for new (v7 and above) 1093 * osdmaps. 1094 */ 1095 static int get_osdmap_client_data_v(void **p, void *end, 1096 const char *prefix, u8 *v) 1097 { 1098 u8 struct_v; 1099 1100 ceph_decode_8_safe(p, end, struct_v, e_inval); 1101 if (struct_v >= 7) { 1102 u8 struct_compat; 1103 1104 ceph_decode_8_safe(p, end, struct_compat, e_inval); 1105 if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) { 1106 pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n", 1107 struct_v, struct_compat, 1108 OSDMAP_WRAPPER_COMPAT_VER, prefix); 1109 return -EINVAL; 1110 } 1111 *p += 4; /* ignore wrapper struct_len */ 1112 1113 ceph_decode_8_safe(p, end, struct_v, e_inval); 1114 ceph_decode_8_safe(p, end, struct_compat, e_inval); 1115 if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) { 1116 pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n", 1117 struct_v, struct_compat, 1118 OSDMAP_CLIENT_DATA_COMPAT_VER, prefix); 1119 return -EINVAL; 1120 } 1121 *p += 4; /* ignore client data struct_len */ 1122 } else { 1123 u16 version; 1124 1125 *p -= 1; 1126 ceph_decode_16_safe(p, end, version, e_inval); 1127 if (version < 6) { 1128 pr_warn("got v %d < 6 of %s ceph_osdmap\n", 1129 version, prefix); 1130 return -EINVAL; 1131 } 1132 1133 /* old osdmap enconding */ 1134 struct_v = 0; 1135 } 1136 1137 *v = struct_v; 1138 return 0; 1139 1140 e_inval: 1141 return -EINVAL; 1142 } 1143 1144 static int __decode_pools(void **p, void *end, struct ceph_osdmap *map, 1145 bool incremental) 1146 { 1147 u32 n; 1148 1149 ceph_decode_32_safe(p, end, n, e_inval); 1150 while (n--) { 1151 struct ceph_pg_pool_info *pi; 1152 u64 pool; 1153 int ret; 1154 1155 ceph_decode_64_safe(p, end, pool, e_inval); 1156 1157 pi = __lookup_pg_pool(&map->pg_pools, pool); 1158 if (!incremental || !pi) { 1159 pi = kzalloc(sizeof(*pi), GFP_NOFS); 1160 if (!pi) 1161 return -ENOMEM; 1162 1163 pi->id = pool; 1164 1165 ret = __insert_pg_pool(&map->pg_pools, pi); 1166 if (ret) { 1167 kfree(pi); 1168 return ret; 1169 } 1170 } 1171 1172 ret = decode_pool(p, end, pi); 1173 if (ret) 1174 return ret; 1175 } 1176 1177 return 0; 1178 1179 e_inval: 1180 return -EINVAL; 1181 } 1182 1183 static int decode_pools(void **p, void *end, struct ceph_osdmap *map) 1184 { 1185 return __decode_pools(p, end, map, false); 1186 } 1187 1188 static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map) 1189 { 1190 return __decode_pools(p, end, map, true); 1191 } 1192 1193 typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool); 1194 1195 static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root, 1196 decode_mapping_fn_t fn, bool incremental) 1197 { 1198 u32 n; 1199 1200 WARN_ON(!incremental && !fn); 1201 1202 ceph_decode_32_safe(p, end, n, e_inval); 1203 while (n--) { 1204 struct ceph_pg_mapping *pg; 1205 struct ceph_pg pgid; 1206 int ret; 1207 1208 ret = ceph_decode_pgid(p, end, &pgid); 1209 if (ret) 1210 return ret; 1211 1212 pg = lookup_pg_mapping(mapping_root, &pgid); 1213 if (pg) { 1214 WARN_ON(!incremental); 1215 erase_pg_mapping(mapping_root, pg); 1216 free_pg_mapping(pg); 1217 } 1218 1219 if (fn) { 1220 pg = fn(p, end, incremental); 1221 if (IS_ERR(pg)) 1222 return PTR_ERR(pg); 1223 1224 if (pg) { 1225 pg->pgid = pgid; /* struct */ 1226 insert_pg_mapping(mapping_root, pg); 1227 } 1228 } 1229 } 1230 1231 return 0; 1232 1233 e_inval: 1234 return -EINVAL; 1235 } 1236 1237 static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end, 1238 bool incremental) 1239 { 1240 struct ceph_pg_mapping *pg; 1241 u32 len, i; 1242 1243 ceph_decode_32_safe(p, end, len, e_inval); 1244 if (len == 0 && incremental) 1245 return NULL; /* new_pg_temp: [] to remove */ 1246 if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32)) 1247 return ERR_PTR(-EINVAL); 1248 1249 ceph_decode_need(p, end, len * sizeof(u32), e_inval); 1250 pg = alloc_pg_mapping(len * sizeof(u32)); 1251 if (!pg) 1252 return ERR_PTR(-ENOMEM); 1253 1254 pg->pg_temp.len = len; 1255 for (i = 0; i < len; i++) 1256 pg->pg_temp.osds[i] = ceph_decode_32(p); 1257 1258 return pg; 1259 1260 e_inval: 1261 return ERR_PTR(-EINVAL); 1262 } 1263 1264 static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map) 1265 { 1266 return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp, 1267 false); 1268 } 1269 1270 static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map) 1271 { 1272 return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp, 1273 true); 1274 } 1275 1276 static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end, 1277 bool incremental) 1278 { 1279 struct ceph_pg_mapping *pg; 1280 u32 osd; 1281 1282 ceph_decode_32_safe(p, end, osd, e_inval); 1283 if (osd == (u32)-1 && incremental) 1284 return NULL; /* new_primary_temp: -1 to remove */ 1285 1286 pg = alloc_pg_mapping(0); 1287 if (!pg) 1288 return ERR_PTR(-ENOMEM); 1289 1290 pg->primary_temp.osd = osd; 1291 return pg; 1292 1293 e_inval: 1294 return ERR_PTR(-EINVAL); 1295 } 1296 1297 static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map) 1298 { 1299 return decode_pg_mapping(p, end, &map->primary_temp, 1300 __decode_primary_temp, false); 1301 } 1302 1303 static int decode_new_primary_temp(void **p, void *end, 1304 struct ceph_osdmap *map) 1305 { 1306 return decode_pg_mapping(p, end, &map->primary_temp, 1307 __decode_primary_temp, true); 1308 } 1309 1310 u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd) 1311 { 1312 BUG_ON(osd >= map->max_osd); 1313 1314 if (!map->osd_primary_affinity) 1315 return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1316 1317 return map->osd_primary_affinity[osd]; 1318 } 1319 1320 static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff) 1321 { 1322 BUG_ON(osd >= map->max_osd); 1323 1324 if (!map->osd_primary_affinity) { 1325 int i; 1326 1327 map->osd_primary_affinity = ceph_kvmalloc( 1328 array_size(map->max_osd, sizeof(*map->osd_primary_affinity)), 1329 GFP_NOFS); 1330 if (!map->osd_primary_affinity) 1331 return -ENOMEM; 1332 1333 for (i = 0; i < map->max_osd; i++) 1334 map->osd_primary_affinity[i] = 1335 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; 1336 } 1337 1338 map->osd_primary_affinity[osd] = aff; 1339 1340 return 0; 1341 } 1342 1343 static int decode_primary_affinity(void **p, void *end, 1344 struct ceph_osdmap *map) 1345 { 1346 u32 len, i; 1347 1348 ceph_decode_32_safe(p, end, len, e_inval); 1349 if (len == 0) { 1350 kvfree(map->osd_primary_affinity); 1351 map->osd_primary_affinity = NULL; 1352 return 0; 1353 } 1354 if (len != map->max_osd) 1355 goto e_inval; 1356 1357 ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval); 1358 1359 for (i = 0; i < map->max_osd; i++) { 1360 int ret; 1361 1362 ret = set_primary_affinity(map, i, ceph_decode_32(p)); 1363 if (ret) 1364 return ret; 1365 } 1366 1367 return 0; 1368 1369 e_inval: 1370 return -EINVAL; 1371 } 1372 1373 static int decode_new_primary_affinity(void **p, void *end, 1374 struct ceph_osdmap *map) 1375 { 1376 u32 n; 1377 1378 ceph_decode_32_safe(p, end, n, e_inval); 1379 while (n--) { 1380 u32 osd, aff; 1381 int ret; 1382 1383 ceph_decode_32_safe(p, end, osd, e_inval); 1384 ceph_decode_32_safe(p, end, aff, e_inval); 1385 1386 ret = set_primary_affinity(map, osd, aff); 1387 if (ret) 1388 return ret; 1389 1390 pr_info("osd%d primary-affinity 0x%x\n", osd, aff); 1391 } 1392 1393 return 0; 1394 1395 e_inval: 1396 return -EINVAL; 1397 } 1398 1399 static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end, 1400 bool __unused) 1401 { 1402 return __decode_pg_temp(p, end, false); 1403 } 1404 1405 static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map) 1406 { 1407 return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap, 1408 false); 1409 } 1410 1411 static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map) 1412 { 1413 return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap, 1414 true); 1415 } 1416 1417 static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map) 1418 { 1419 return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true); 1420 } 1421 1422 static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end, 1423 bool __unused) 1424 { 1425 struct ceph_pg_mapping *pg; 1426 u32 len, i; 1427 1428 ceph_decode_32_safe(p, end, len, e_inval); 1429 if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32))) 1430 return ERR_PTR(-EINVAL); 1431 1432 ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval); 1433 pg = alloc_pg_mapping(2 * len * sizeof(u32)); 1434 if (!pg) 1435 return ERR_PTR(-ENOMEM); 1436 1437 pg->pg_upmap_items.len = len; 1438 for (i = 0; i < len; i++) { 1439 pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p); 1440 pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p); 1441 } 1442 1443 return pg; 1444 1445 e_inval: 1446 return ERR_PTR(-EINVAL); 1447 } 1448 1449 static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map) 1450 { 1451 return decode_pg_mapping(p, end, &map->pg_upmap_items, 1452 __decode_pg_upmap_items, false); 1453 } 1454 1455 static int decode_new_pg_upmap_items(void **p, void *end, 1456 struct ceph_osdmap *map) 1457 { 1458 return decode_pg_mapping(p, end, &map->pg_upmap_items, 1459 __decode_pg_upmap_items, true); 1460 } 1461 1462 static int decode_old_pg_upmap_items(void **p, void *end, 1463 struct ceph_osdmap *map) 1464 { 1465 return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true); 1466 } 1467 1468 /* 1469 * decode a full map. 1470 */ 1471 static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map) 1472 { 1473 u8 struct_v; 1474 u32 epoch = 0; 1475 void *start = *p; 1476 u32 max; 1477 u32 len, i; 1478 int err; 1479 1480 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); 1481 1482 err = get_osdmap_client_data_v(p, end, "full", &struct_v); 1483 if (err) 1484 goto bad; 1485 1486 /* fsid, epoch, created, modified */ 1487 ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) + 1488 sizeof(map->created) + sizeof(map->modified), e_inval); 1489 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); 1490 epoch = map->epoch = ceph_decode_32(p); 1491 ceph_decode_copy(p, &map->created, sizeof(map->created)); 1492 ceph_decode_copy(p, &map->modified, sizeof(map->modified)); 1493 1494 /* pools */ 1495 err = decode_pools(p, end, map); 1496 if (err) 1497 goto bad; 1498 1499 /* pool_name */ 1500 err = decode_pool_names(p, end, map); 1501 if (err) 1502 goto bad; 1503 1504 ceph_decode_32_safe(p, end, map->pool_max, e_inval); 1505 1506 ceph_decode_32_safe(p, end, map->flags, e_inval); 1507 1508 /* max_osd */ 1509 ceph_decode_32_safe(p, end, max, e_inval); 1510 1511 /* (re)alloc osd arrays */ 1512 err = osdmap_set_max_osd(map, max); 1513 if (err) 1514 goto bad; 1515 1516 /* osd_state, osd_weight, osd_addrs->client_addr */ 1517 ceph_decode_need(p, end, 3*sizeof(u32) + 1518 map->max_osd*(struct_v >= 5 ? sizeof(u32) : 1519 sizeof(u8)) + 1520 sizeof(*map->osd_weight), e_inval); 1521 if (ceph_decode_32(p) != map->max_osd) 1522 goto e_inval; 1523 1524 if (struct_v >= 5) { 1525 for (i = 0; i < map->max_osd; i++) 1526 map->osd_state[i] = ceph_decode_32(p); 1527 } else { 1528 for (i = 0; i < map->max_osd; i++) 1529 map->osd_state[i] = ceph_decode_8(p); 1530 } 1531 1532 if (ceph_decode_32(p) != map->max_osd) 1533 goto e_inval; 1534 1535 for (i = 0; i < map->max_osd; i++) 1536 map->osd_weight[i] = ceph_decode_32(p); 1537 1538 if (ceph_decode_32(p) != map->max_osd) 1539 goto e_inval; 1540 1541 for (i = 0; i < map->max_osd; i++) { 1542 err = ceph_decode_entity_addr(p, end, &map->osd_addr[i]); 1543 if (err) 1544 goto bad; 1545 } 1546 1547 /* pg_temp */ 1548 err = decode_pg_temp(p, end, map); 1549 if (err) 1550 goto bad; 1551 1552 /* primary_temp */ 1553 if (struct_v >= 1) { 1554 err = decode_primary_temp(p, end, map); 1555 if (err) 1556 goto bad; 1557 } 1558 1559 /* primary_affinity */ 1560 if (struct_v >= 2) { 1561 err = decode_primary_affinity(p, end, map); 1562 if (err) 1563 goto bad; 1564 } else { 1565 WARN_ON(map->osd_primary_affinity); 1566 } 1567 1568 /* crush */ 1569 ceph_decode_32_safe(p, end, len, e_inval); 1570 err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end))); 1571 if (err) 1572 goto bad; 1573 1574 *p += len; 1575 if (struct_v >= 3) { 1576 /* erasure_code_profiles */ 1577 ceph_decode_skip_map_of_map(p, end, string, string, string, 1578 e_inval); 1579 } 1580 1581 if (struct_v >= 4) { 1582 err = decode_pg_upmap(p, end, map); 1583 if (err) 1584 goto bad; 1585 1586 err = decode_pg_upmap_items(p, end, map); 1587 if (err) 1588 goto bad; 1589 } else { 1590 WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap)); 1591 WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items)); 1592 } 1593 1594 /* ignore the rest */ 1595 *p = end; 1596 1597 dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); 1598 return 0; 1599 1600 e_inval: 1601 err = -EINVAL; 1602 bad: 1603 pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n", 1604 err, epoch, (int)(*p - start), *p, start, end); 1605 print_hex_dump(KERN_DEBUG, "osdmap: ", 1606 DUMP_PREFIX_OFFSET, 16, 1, 1607 start, end - start, true); 1608 return err; 1609 } 1610 1611 /* 1612 * Allocate and decode a full map. 1613 */ 1614 struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end) 1615 { 1616 struct ceph_osdmap *map; 1617 int ret; 1618 1619 map = ceph_osdmap_alloc(); 1620 if (!map) 1621 return ERR_PTR(-ENOMEM); 1622 1623 ret = osdmap_decode(p, end, map); 1624 if (ret) { 1625 ceph_osdmap_destroy(map); 1626 return ERR_PTR(ret); 1627 } 1628 1629 return map; 1630 } 1631 1632 /* 1633 * Encoding order is (new_up_client, new_state, new_weight). Need to 1634 * apply in the (new_weight, new_state, new_up_client) order, because 1635 * an incremental map may look like e.g. 1636 * 1637 * new_up_client: { osd=6, addr=... } # set osd_state and addr 1638 * new_state: { osd=6, xorstate=EXISTS } # clear osd_state 1639 */ 1640 static int decode_new_up_state_weight(void **p, void *end, u8 struct_v, 1641 struct ceph_osdmap *map) 1642 { 1643 void *new_up_client; 1644 void *new_state; 1645 void *new_weight_end; 1646 u32 len; 1647 int i; 1648 1649 new_up_client = *p; 1650 ceph_decode_32_safe(p, end, len, e_inval); 1651 for (i = 0; i < len; ++i) { 1652 struct ceph_entity_addr addr; 1653 1654 ceph_decode_skip_32(p, end, e_inval); 1655 if (ceph_decode_entity_addr(p, end, &addr)) 1656 goto e_inval; 1657 } 1658 1659 new_state = *p; 1660 ceph_decode_32_safe(p, end, len, e_inval); 1661 len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8)); 1662 ceph_decode_need(p, end, len, e_inval); 1663 *p += len; 1664 1665 /* new_weight */ 1666 ceph_decode_32_safe(p, end, len, e_inval); 1667 while (len--) { 1668 s32 osd; 1669 u32 w; 1670 1671 ceph_decode_need(p, end, 2*sizeof(u32), e_inval); 1672 osd = ceph_decode_32(p); 1673 w = ceph_decode_32(p); 1674 BUG_ON(osd >= map->max_osd); 1675 pr_info("osd%d weight 0x%x %s\n", osd, w, 1676 w == CEPH_OSD_IN ? "(in)" : 1677 (w == CEPH_OSD_OUT ? "(out)" : "")); 1678 map->osd_weight[osd] = w; 1679 1680 /* 1681 * If we are marking in, set the EXISTS, and clear the 1682 * AUTOOUT and NEW bits. 1683 */ 1684 if (w) { 1685 map->osd_state[osd] |= CEPH_OSD_EXISTS; 1686 map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT | 1687 CEPH_OSD_NEW); 1688 } 1689 } 1690 new_weight_end = *p; 1691 1692 /* new_state (up/down) */ 1693 *p = new_state; 1694 len = ceph_decode_32(p); 1695 while (len--) { 1696 s32 osd; 1697 u32 xorstate; 1698 int ret; 1699 1700 osd = ceph_decode_32(p); 1701 if (struct_v >= 5) 1702 xorstate = ceph_decode_32(p); 1703 else 1704 xorstate = ceph_decode_8(p); 1705 if (xorstate == 0) 1706 xorstate = CEPH_OSD_UP; 1707 BUG_ON(osd >= map->max_osd); 1708 if ((map->osd_state[osd] & CEPH_OSD_UP) && 1709 (xorstate & CEPH_OSD_UP)) 1710 pr_info("osd%d down\n", osd); 1711 if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && 1712 (xorstate & CEPH_OSD_EXISTS)) { 1713 pr_info("osd%d does not exist\n", osd); 1714 ret = set_primary_affinity(map, osd, 1715 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); 1716 if (ret) 1717 return ret; 1718 memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr)); 1719 map->osd_state[osd] = 0; 1720 } else { 1721 map->osd_state[osd] ^= xorstate; 1722 } 1723 } 1724 1725 /* new_up_client */ 1726 *p = new_up_client; 1727 len = ceph_decode_32(p); 1728 while (len--) { 1729 s32 osd; 1730 struct ceph_entity_addr addr; 1731 1732 osd = ceph_decode_32(p); 1733 BUG_ON(osd >= map->max_osd); 1734 if (ceph_decode_entity_addr(p, end, &addr)) 1735 goto e_inval; 1736 pr_info("osd%d up\n", osd); 1737 map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP; 1738 map->osd_addr[osd] = addr; 1739 } 1740 1741 *p = new_weight_end; 1742 return 0; 1743 1744 e_inval: 1745 return -EINVAL; 1746 } 1747 1748 /* 1749 * decode and apply an incremental map update. 1750 */ 1751 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, 1752 struct ceph_osdmap *map) 1753 { 1754 struct ceph_fsid fsid; 1755 u32 epoch = 0; 1756 struct ceph_timespec modified; 1757 s32 len; 1758 u64 pool; 1759 __s64 new_pool_max; 1760 __s32 new_flags, max; 1761 void *start = *p; 1762 int err; 1763 u8 struct_v; 1764 1765 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); 1766 1767 err = get_osdmap_client_data_v(p, end, "inc", &struct_v); 1768 if (err) 1769 goto bad; 1770 1771 /* fsid, epoch, modified, new_pool_max, new_flags */ 1772 ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) + 1773 sizeof(u64) + sizeof(u32), e_inval); 1774 ceph_decode_copy(p, &fsid, sizeof(fsid)); 1775 epoch = ceph_decode_32(p); 1776 BUG_ON(epoch != map->epoch+1); 1777 ceph_decode_copy(p, &modified, sizeof(modified)); 1778 new_pool_max = ceph_decode_64(p); 1779 new_flags = ceph_decode_32(p); 1780 1781 /* full map? */ 1782 ceph_decode_32_safe(p, end, len, e_inval); 1783 if (len > 0) { 1784 dout("apply_incremental full map len %d, %p to %p\n", 1785 len, *p, end); 1786 return ceph_osdmap_decode(p, min(*p+len, end)); 1787 } 1788 1789 /* new crush? */ 1790 ceph_decode_32_safe(p, end, len, e_inval); 1791 if (len > 0) { 1792 err = osdmap_set_crush(map, 1793 crush_decode(*p, min(*p + len, end))); 1794 if (err) 1795 goto bad; 1796 *p += len; 1797 } 1798 1799 /* new flags? */ 1800 if (new_flags >= 0) 1801 map->flags = new_flags; 1802 if (new_pool_max >= 0) 1803 map->pool_max = new_pool_max; 1804 1805 /* new max? */ 1806 ceph_decode_32_safe(p, end, max, e_inval); 1807 if (max >= 0) { 1808 err = osdmap_set_max_osd(map, max); 1809 if (err) 1810 goto bad; 1811 } 1812 1813 map->epoch++; 1814 map->modified = modified; 1815 1816 /* new_pools */ 1817 err = decode_new_pools(p, end, map); 1818 if (err) 1819 goto bad; 1820 1821 /* new_pool_names */ 1822 err = decode_pool_names(p, end, map); 1823 if (err) 1824 goto bad; 1825 1826 /* old_pool */ 1827 ceph_decode_32_safe(p, end, len, e_inval); 1828 while (len--) { 1829 struct ceph_pg_pool_info *pi; 1830 1831 ceph_decode_64_safe(p, end, pool, e_inval); 1832 pi = __lookup_pg_pool(&map->pg_pools, pool); 1833 if (pi) 1834 __remove_pg_pool(&map->pg_pools, pi); 1835 } 1836 1837 /* new_up_client, new_state, new_weight */ 1838 err = decode_new_up_state_weight(p, end, struct_v, map); 1839 if (err) 1840 goto bad; 1841 1842 /* new_pg_temp */ 1843 err = decode_new_pg_temp(p, end, map); 1844 if (err) 1845 goto bad; 1846 1847 /* new_primary_temp */ 1848 if (struct_v >= 1) { 1849 err = decode_new_primary_temp(p, end, map); 1850 if (err) 1851 goto bad; 1852 } 1853 1854 /* new_primary_affinity */ 1855 if (struct_v >= 2) { 1856 err = decode_new_primary_affinity(p, end, map); 1857 if (err) 1858 goto bad; 1859 } 1860 1861 if (struct_v >= 3) { 1862 /* new_erasure_code_profiles */ 1863 ceph_decode_skip_map_of_map(p, end, string, string, string, 1864 e_inval); 1865 /* old_erasure_code_profiles */ 1866 ceph_decode_skip_set(p, end, string, e_inval); 1867 } 1868 1869 if (struct_v >= 4) { 1870 err = decode_new_pg_upmap(p, end, map); 1871 if (err) 1872 goto bad; 1873 1874 err = decode_old_pg_upmap(p, end, map); 1875 if (err) 1876 goto bad; 1877 1878 err = decode_new_pg_upmap_items(p, end, map); 1879 if (err) 1880 goto bad; 1881 1882 err = decode_old_pg_upmap_items(p, end, map); 1883 if (err) 1884 goto bad; 1885 } 1886 1887 /* ignore the rest */ 1888 *p = end; 1889 1890 dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); 1891 return map; 1892 1893 e_inval: 1894 err = -EINVAL; 1895 bad: 1896 pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n", 1897 err, epoch, (int)(*p - start), *p, start, end); 1898 print_hex_dump(KERN_DEBUG, "osdmap: ", 1899 DUMP_PREFIX_OFFSET, 16, 1, 1900 start, end - start, true); 1901 return ERR_PTR(err); 1902 } 1903 1904 void ceph_oloc_copy(struct ceph_object_locator *dest, 1905 const struct ceph_object_locator *src) 1906 { 1907 ceph_oloc_destroy(dest); 1908 1909 dest->pool = src->pool; 1910 if (src->pool_ns) 1911 dest->pool_ns = ceph_get_string(src->pool_ns); 1912 else 1913 dest->pool_ns = NULL; 1914 } 1915 EXPORT_SYMBOL(ceph_oloc_copy); 1916 1917 void ceph_oloc_destroy(struct ceph_object_locator *oloc) 1918 { 1919 ceph_put_string(oloc->pool_ns); 1920 } 1921 EXPORT_SYMBOL(ceph_oloc_destroy); 1922 1923 void ceph_oid_copy(struct ceph_object_id *dest, 1924 const struct ceph_object_id *src) 1925 { 1926 ceph_oid_destroy(dest); 1927 1928 if (src->name != src->inline_name) { 1929 /* very rare, see ceph_object_id definition */ 1930 dest->name = kmalloc(src->name_len + 1, 1931 GFP_NOIO | __GFP_NOFAIL); 1932 } else { 1933 dest->name = dest->inline_name; 1934 } 1935 memcpy(dest->name, src->name, src->name_len + 1); 1936 dest->name_len = src->name_len; 1937 } 1938 EXPORT_SYMBOL(ceph_oid_copy); 1939 1940 static __printf(2, 0) 1941 int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap) 1942 { 1943 int len; 1944 1945 WARN_ON(!ceph_oid_empty(oid)); 1946 1947 len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap); 1948 if (len >= sizeof(oid->inline_name)) 1949 return len; 1950 1951 oid->name_len = len; 1952 return 0; 1953 } 1954 1955 /* 1956 * If oid doesn't fit into inline buffer, BUG. 1957 */ 1958 void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...) 1959 { 1960 va_list ap; 1961 1962 va_start(ap, fmt); 1963 BUG_ON(oid_printf_vargs(oid, fmt, ap)); 1964 va_end(ap); 1965 } 1966 EXPORT_SYMBOL(ceph_oid_printf); 1967 1968 static __printf(3, 0) 1969 int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp, 1970 const char *fmt, va_list ap) 1971 { 1972 va_list aq; 1973 int len; 1974 1975 va_copy(aq, ap); 1976 len = oid_printf_vargs(oid, fmt, aq); 1977 va_end(aq); 1978 1979 if (len) { 1980 char *external_name; 1981 1982 external_name = kmalloc(len + 1, gfp); 1983 if (!external_name) 1984 return -ENOMEM; 1985 1986 oid->name = external_name; 1987 WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len); 1988 oid->name_len = len; 1989 } 1990 1991 return 0; 1992 } 1993 1994 /* 1995 * If oid doesn't fit into inline buffer, allocate. 1996 */ 1997 int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, 1998 const char *fmt, ...) 1999 { 2000 va_list ap; 2001 int ret; 2002 2003 va_start(ap, fmt); 2004 ret = oid_aprintf_vargs(oid, gfp, fmt, ap); 2005 va_end(ap); 2006 2007 return ret; 2008 } 2009 EXPORT_SYMBOL(ceph_oid_aprintf); 2010 2011 void ceph_oid_destroy(struct ceph_object_id *oid) 2012 { 2013 if (oid->name != oid->inline_name) 2014 kfree(oid->name); 2015 } 2016 EXPORT_SYMBOL(ceph_oid_destroy); 2017 2018 /* 2019 * osds only 2020 */ 2021 static bool __osds_equal(const struct ceph_osds *lhs, 2022 const struct ceph_osds *rhs) 2023 { 2024 if (lhs->size == rhs->size && 2025 !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0]))) 2026 return true; 2027 2028 return false; 2029 } 2030 2031 /* 2032 * osds + primary 2033 */ 2034 static bool osds_equal(const struct ceph_osds *lhs, 2035 const struct ceph_osds *rhs) 2036 { 2037 if (__osds_equal(lhs, rhs) && 2038 lhs->primary == rhs->primary) 2039 return true; 2040 2041 return false; 2042 } 2043 2044 static bool osds_valid(const struct ceph_osds *set) 2045 { 2046 /* non-empty set */ 2047 if (set->size > 0 && set->primary >= 0) 2048 return true; 2049 2050 /* empty can_shift_osds set */ 2051 if (!set->size && set->primary == -1) 2052 return true; 2053 2054 /* empty !can_shift_osds set - all NONE */ 2055 if (set->size > 0 && set->primary == -1) { 2056 int i; 2057 2058 for (i = 0; i < set->size; i++) { 2059 if (set->osds[i] != CRUSH_ITEM_NONE) 2060 break; 2061 } 2062 if (i == set->size) 2063 return true; 2064 } 2065 2066 return false; 2067 } 2068 2069 void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src) 2070 { 2071 memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0])); 2072 dest->size = src->size; 2073 dest->primary = src->primary; 2074 } 2075 2076 bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num, 2077 u32 new_pg_num) 2078 { 2079 int old_bits = calc_bits_of(old_pg_num); 2080 int old_mask = (1 << old_bits) - 1; 2081 int n; 2082 2083 WARN_ON(pgid->seed >= old_pg_num); 2084 if (new_pg_num <= old_pg_num) 2085 return false; 2086 2087 for (n = 1; ; n++) { 2088 int next_bit = n << (old_bits - 1); 2089 u32 s = next_bit | pgid->seed; 2090 2091 if (s < old_pg_num || s == pgid->seed) 2092 continue; 2093 if (s >= new_pg_num) 2094 break; 2095 2096 s = ceph_stable_mod(s, old_pg_num, old_mask); 2097 if (s == pgid->seed) 2098 return true; 2099 } 2100 2101 return false; 2102 } 2103 2104 bool ceph_is_new_interval(const struct ceph_osds *old_acting, 2105 const struct ceph_osds *new_acting, 2106 const struct ceph_osds *old_up, 2107 const struct ceph_osds *new_up, 2108 int old_size, 2109 int new_size, 2110 int old_min_size, 2111 int new_min_size, 2112 u32 old_pg_num, 2113 u32 new_pg_num, 2114 bool old_sort_bitwise, 2115 bool new_sort_bitwise, 2116 bool old_recovery_deletes, 2117 bool new_recovery_deletes, 2118 const struct ceph_pg *pgid) 2119 { 2120 return !osds_equal(old_acting, new_acting) || 2121 !osds_equal(old_up, new_up) || 2122 old_size != new_size || 2123 old_min_size != new_min_size || 2124 ceph_pg_is_split(pgid, old_pg_num, new_pg_num) || 2125 old_sort_bitwise != new_sort_bitwise || 2126 old_recovery_deletes != new_recovery_deletes; 2127 } 2128 2129 static int calc_pg_rank(int osd, const struct ceph_osds *acting) 2130 { 2131 int i; 2132 2133 for (i = 0; i < acting->size; i++) { 2134 if (acting->osds[i] == osd) 2135 return i; 2136 } 2137 2138 return -1; 2139 } 2140 2141 static bool primary_changed(const struct ceph_osds *old_acting, 2142 const struct ceph_osds *new_acting) 2143 { 2144 if (!old_acting->size && !new_acting->size) 2145 return false; /* both still empty */ 2146 2147 if (!old_acting->size ^ !new_acting->size) 2148 return true; /* was empty, now not, or vice versa */ 2149 2150 if (old_acting->primary != new_acting->primary) 2151 return true; /* primary changed */ 2152 2153 if (calc_pg_rank(old_acting->primary, old_acting) != 2154 calc_pg_rank(new_acting->primary, new_acting)) 2155 return true; 2156 2157 return false; /* same primary (tho replicas may have changed) */ 2158 } 2159 2160 bool ceph_osds_changed(const struct ceph_osds *old_acting, 2161 const struct ceph_osds *new_acting, 2162 bool any_change) 2163 { 2164 if (primary_changed(old_acting, new_acting)) 2165 return true; 2166 2167 if (any_change && !__osds_equal(old_acting, new_acting)) 2168 return true; 2169 2170 return false; 2171 } 2172 2173 /* 2174 * Map an object into a PG. 2175 * 2176 * Should only be called with target_oid and target_oloc (as opposed to 2177 * base_oid and base_oloc), since tiering isn't taken into account. 2178 */ 2179 void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi, 2180 const struct ceph_object_id *oid, 2181 const struct ceph_object_locator *oloc, 2182 struct ceph_pg *raw_pgid) 2183 { 2184 WARN_ON(pi->id != oloc->pool); 2185 2186 if (!oloc->pool_ns) { 2187 raw_pgid->pool = oloc->pool; 2188 raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, 2189 oid->name_len); 2190 dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name, 2191 raw_pgid->pool, raw_pgid->seed); 2192 } else { 2193 char stack_buf[256]; 2194 char *buf = stack_buf; 2195 int nsl = oloc->pool_ns->len; 2196 size_t total = nsl + 1 + oid->name_len; 2197 2198 if (total > sizeof(stack_buf)) 2199 buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL); 2200 memcpy(buf, oloc->pool_ns->str, nsl); 2201 buf[nsl] = '\037'; 2202 memcpy(buf + nsl + 1, oid->name, oid->name_len); 2203 raw_pgid->pool = oloc->pool; 2204 raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total); 2205 if (buf != stack_buf) 2206 kfree(buf); 2207 dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__, 2208 oid->name, nsl, oloc->pool_ns->str, 2209 raw_pgid->pool, raw_pgid->seed); 2210 } 2211 } 2212 2213 int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, 2214 const struct ceph_object_id *oid, 2215 const struct ceph_object_locator *oloc, 2216 struct ceph_pg *raw_pgid) 2217 { 2218 struct ceph_pg_pool_info *pi; 2219 2220 pi = ceph_pg_pool_by_id(osdmap, oloc->pool); 2221 if (!pi) 2222 return -ENOENT; 2223 2224 __ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid); 2225 return 0; 2226 } 2227 EXPORT_SYMBOL(ceph_object_locator_to_pg); 2228 2229 /* 2230 * Map a raw PG (full precision ps) into an actual PG. 2231 */ 2232 static void raw_pg_to_pg(struct ceph_pg_pool_info *pi, 2233 const struct ceph_pg *raw_pgid, 2234 struct ceph_pg *pgid) 2235 { 2236 pgid->pool = raw_pgid->pool; 2237 pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num, 2238 pi->pg_num_mask); 2239 } 2240 2241 /* 2242 * Map a raw PG (full precision ps) into a placement ps (placement 2243 * seed). Include pool id in that value so that different pools don't 2244 * use the same seeds. 2245 */ 2246 static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi, 2247 const struct ceph_pg *raw_pgid) 2248 { 2249 if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) { 2250 /* hash pool id and seed so that pool PGs do not overlap */ 2251 return crush_hash32_2(CRUSH_HASH_RJENKINS1, 2252 ceph_stable_mod(raw_pgid->seed, 2253 pi->pgp_num, 2254 pi->pgp_num_mask), 2255 raw_pgid->pool); 2256 } else { 2257 /* 2258 * legacy behavior: add ps and pool together. this is 2259 * not a great approach because the PGs from each pool 2260 * will overlap on top of each other: 0.5 == 1.4 == 2261 * 2.3 == ... 2262 */ 2263 return ceph_stable_mod(raw_pgid->seed, pi->pgp_num, 2264 pi->pgp_num_mask) + 2265 (unsigned)raw_pgid->pool; 2266 } 2267 } 2268 2269 /* 2270 * Magic value used for a "default" fallback choose_args, used if the 2271 * crush_choose_arg_map passed to do_crush() does not exist. If this 2272 * also doesn't exist, fall back to canonical weights. 2273 */ 2274 #define CEPH_DEFAULT_CHOOSE_ARGS -1 2275 2276 static int do_crush(struct ceph_osdmap *map, int ruleno, int x, 2277 int *result, int result_max, 2278 const __u32 *weight, int weight_max, 2279 s64 choose_args_index) 2280 { 2281 struct crush_choose_arg_map *arg_map; 2282 int r; 2283 2284 BUG_ON(result_max > CEPH_PG_MAX_SIZE); 2285 2286 arg_map = lookup_choose_arg_map(&map->crush->choose_args, 2287 choose_args_index); 2288 if (!arg_map) 2289 arg_map = lookup_choose_arg_map(&map->crush->choose_args, 2290 CEPH_DEFAULT_CHOOSE_ARGS); 2291 2292 mutex_lock(&map->crush_workspace_mutex); 2293 r = crush_do_rule(map->crush, ruleno, x, result, result_max, 2294 weight, weight_max, map->crush_workspace, 2295 arg_map ? arg_map->args : NULL); 2296 mutex_unlock(&map->crush_workspace_mutex); 2297 2298 return r; 2299 } 2300 2301 static void remove_nonexistent_osds(struct ceph_osdmap *osdmap, 2302 struct ceph_pg_pool_info *pi, 2303 struct ceph_osds *set) 2304 { 2305 int i; 2306 2307 if (ceph_can_shift_osds(pi)) { 2308 int removed = 0; 2309 2310 /* shift left */ 2311 for (i = 0; i < set->size; i++) { 2312 if (!ceph_osd_exists(osdmap, set->osds[i])) { 2313 removed++; 2314 continue; 2315 } 2316 if (removed) 2317 set->osds[i - removed] = set->osds[i]; 2318 } 2319 set->size -= removed; 2320 } else { 2321 /* set dne devices to NONE */ 2322 for (i = 0; i < set->size; i++) { 2323 if (!ceph_osd_exists(osdmap, set->osds[i])) 2324 set->osds[i] = CRUSH_ITEM_NONE; 2325 } 2326 } 2327 } 2328 2329 /* 2330 * Calculate raw set (CRUSH output) for given PG and filter out 2331 * nonexistent OSDs. ->primary is undefined for a raw set. 2332 * 2333 * Placement seed (CRUSH input) is returned through @ppps. 2334 */ 2335 static void pg_to_raw_osds(struct ceph_osdmap *osdmap, 2336 struct ceph_pg_pool_info *pi, 2337 const struct ceph_pg *raw_pgid, 2338 struct ceph_osds *raw, 2339 u32 *ppps) 2340 { 2341 u32 pps = raw_pg_to_pps(pi, raw_pgid); 2342 int ruleno; 2343 int len; 2344 2345 ceph_osds_init(raw); 2346 if (ppps) 2347 *ppps = pps; 2348 2349 ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type, 2350 pi->size); 2351 if (ruleno < 0) { 2352 pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n", 2353 pi->id, pi->crush_ruleset, pi->type, pi->size); 2354 return; 2355 } 2356 2357 if (pi->size > ARRAY_SIZE(raw->osds)) { 2358 pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n", 2359 pi->id, pi->crush_ruleset, pi->type, pi->size, 2360 ARRAY_SIZE(raw->osds)); 2361 return; 2362 } 2363 2364 len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size, 2365 osdmap->osd_weight, osdmap->max_osd, pi->id); 2366 if (len < 0) { 2367 pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n", 2368 len, ruleno, pi->id, pi->crush_ruleset, pi->type, 2369 pi->size); 2370 return; 2371 } 2372 2373 raw->size = len; 2374 remove_nonexistent_osds(osdmap, pi, raw); 2375 } 2376 2377 /* apply pg_upmap[_items] mappings */ 2378 static void apply_upmap(struct ceph_osdmap *osdmap, 2379 const struct ceph_pg *pgid, 2380 struct ceph_osds *raw) 2381 { 2382 struct ceph_pg_mapping *pg; 2383 int i, j; 2384 2385 pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid); 2386 if (pg) { 2387 /* make sure targets aren't marked out */ 2388 for (i = 0; i < pg->pg_upmap.len; i++) { 2389 int osd = pg->pg_upmap.osds[i]; 2390 2391 if (osd != CRUSH_ITEM_NONE && 2392 osd < osdmap->max_osd && 2393 osdmap->osd_weight[osd] == 0) { 2394 /* reject/ignore explicit mapping */ 2395 return; 2396 } 2397 } 2398 for (i = 0; i < pg->pg_upmap.len; i++) 2399 raw->osds[i] = pg->pg_upmap.osds[i]; 2400 raw->size = pg->pg_upmap.len; 2401 /* check and apply pg_upmap_items, if any */ 2402 } 2403 2404 pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid); 2405 if (pg) { 2406 /* 2407 * Note: this approach does not allow a bidirectional swap, 2408 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1]. 2409 */ 2410 for (i = 0; i < pg->pg_upmap_items.len; i++) { 2411 int from = pg->pg_upmap_items.from_to[i][0]; 2412 int to = pg->pg_upmap_items.from_to[i][1]; 2413 int pos = -1; 2414 bool exists = false; 2415 2416 /* make sure replacement doesn't already appear */ 2417 for (j = 0; j < raw->size; j++) { 2418 int osd = raw->osds[j]; 2419 2420 if (osd == to) { 2421 exists = true; 2422 break; 2423 } 2424 /* ignore mapping if target is marked out */ 2425 if (osd == from && pos < 0 && 2426 !(to != CRUSH_ITEM_NONE && 2427 to < osdmap->max_osd && 2428 osdmap->osd_weight[to] == 0)) { 2429 pos = j; 2430 } 2431 } 2432 if (!exists && pos >= 0) 2433 raw->osds[pos] = to; 2434 } 2435 } 2436 } 2437 2438 /* 2439 * Given raw set, calculate up set and up primary. By definition of an 2440 * up set, the result won't contain nonexistent or down OSDs. 2441 * 2442 * This is done in-place - on return @set is the up set. If it's 2443 * empty, ->primary will remain undefined. 2444 */ 2445 static void raw_to_up_osds(struct ceph_osdmap *osdmap, 2446 struct ceph_pg_pool_info *pi, 2447 struct ceph_osds *set) 2448 { 2449 int i; 2450 2451 /* ->primary is undefined for a raw set */ 2452 BUG_ON(set->primary != -1); 2453 2454 if (ceph_can_shift_osds(pi)) { 2455 int removed = 0; 2456 2457 /* shift left */ 2458 for (i = 0; i < set->size; i++) { 2459 if (ceph_osd_is_down(osdmap, set->osds[i])) { 2460 removed++; 2461 continue; 2462 } 2463 if (removed) 2464 set->osds[i - removed] = set->osds[i]; 2465 } 2466 set->size -= removed; 2467 if (set->size > 0) 2468 set->primary = set->osds[0]; 2469 } else { 2470 /* set down/dne devices to NONE */ 2471 for (i = set->size - 1; i >= 0; i--) { 2472 if (ceph_osd_is_down(osdmap, set->osds[i])) 2473 set->osds[i] = CRUSH_ITEM_NONE; 2474 else 2475 set->primary = set->osds[i]; 2476 } 2477 } 2478 } 2479 2480 static void apply_primary_affinity(struct ceph_osdmap *osdmap, 2481 struct ceph_pg_pool_info *pi, 2482 u32 pps, 2483 struct ceph_osds *up) 2484 { 2485 int i; 2486 int pos = -1; 2487 2488 /* 2489 * Do we have any non-default primary_affinity values for these 2490 * osds? 2491 */ 2492 if (!osdmap->osd_primary_affinity) 2493 return; 2494 2495 for (i = 0; i < up->size; i++) { 2496 int osd = up->osds[i]; 2497 2498 if (osd != CRUSH_ITEM_NONE && 2499 osdmap->osd_primary_affinity[osd] != 2500 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) { 2501 break; 2502 } 2503 } 2504 if (i == up->size) 2505 return; 2506 2507 /* 2508 * Pick the primary. Feed both the seed (for the pg) and the 2509 * osd into the hash/rng so that a proportional fraction of an 2510 * osd's pgs get rejected as primary. 2511 */ 2512 for (i = 0; i < up->size; i++) { 2513 int osd = up->osds[i]; 2514 u32 aff; 2515 2516 if (osd == CRUSH_ITEM_NONE) 2517 continue; 2518 2519 aff = osdmap->osd_primary_affinity[osd]; 2520 if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY && 2521 (crush_hash32_2(CRUSH_HASH_RJENKINS1, 2522 pps, osd) >> 16) >= aff) { 2523 /* 2524 * We chose not to use this primary. Note it 2525 * anyway as a fallback in case we don't pick 2526 * anyone else, but keep looking. 2527 */ 2528 if (pos < 0) 2529 pos = i; 2530 } else { 2531 pos = i; 2532 break; 2533 } 2534 } 2535 if (pos < 0) 2536 return; 2537 2538 up->primary = up->osds[pos]; 2539 2540 if (ceph_can_shift_osds(pi) && pos > 0) { 2541 /* move the new primary to the front */ 2542 for (i = pos; i > 0; i--) 2543 up->osds[i] = up->osds[i - 1]; 2544 up->osds[0] = up->primary; 2545 } 2546 } 2547 2548 /* 2549 * Get pg_temp and primary_temp mappings for given PG. 2550 * 2551 * Note that a PG may have none, only pg_temp, only primary_temp or 2552 * both pg_temp and primary_temp mappings. This means @temp isn't 2553 * always a valid OSD set on return: in the "only primary_temp" case, 2554 * @temp will have its ->primary >= 0 but ->size == 0. 2555 */ 2556 static void get_temp_osds(struct ceph_osdmap *osdmap, 2557 struct ceph_pg_pool_info *pi, 2558 const struct ceph_pg *pgid, 2559 struct ceph_osds *temp) 2560 { 2561 struct ceph_pg_mapping *pg; 2562 int i; 2563 2564 ceph_osds_init(temp); 2565 2566 /* pg_temp? */ 2567 pg = lookup_pg_mapping(&osdmap->pg_temp, pgid); 2568 if (pg) { 2569 for (i = 0; i < pg->pg_temp.len; i++) { 2570 if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) { 2571 if (ceph_can_shift_osds(pi)) 2572 continue; 2573 2574 temp->osds[temp->size++] = CRUSH_ITEM_NONE; 2575 } else { 2576 temp->osds[temp->size++] = pg->pg_temp.osds[i]; 2577 } 2578 } 2579 2580 /* apply pg_temp's primary */ 2581 for (i = 0; i < temp->size; i++) { 2582 if (temp->osds[i] != CRUSH_ITEM_NONE) { 2583 temp->primary = temp->osds[i]; 2584 break; 2585 } 2586 } 2587 } 2588 2589 /* primary_temp? */ 2590 pg = lookup_pg_mapping(&osdmap->primary_temp, pgid); 2591 if (pg) 2592 temp->primary = pg->primary_temp.osd; 2593 } 2594 2595 /* 2596 * Map a PG to its acting set as well as its up set. 2597 * 2598 * Acting set is used for data mapping purposes, while up set can be 2599 * recorded for detecting interval changes and deciding whether to 2600 * resend a request. 2601 */ 2602 void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap, 2603 struct ceph_pg_pool_info *pi, 2604 const struct ceph_pg *raw_pgid, 2605 struct ceph_osds *up, 2606 struct ceph_osds *acting) 2607 { 2608 struct ceph_pg pgid; 2609 u32 pps; 2610 2611 WARN_ON(pi->id != raw_pgid->pool); 2612 raw_pg_to_pg(pi, raw_pgid, &pgid); 2613 2614 pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps); 2615 apply_upmap(osdmap, &pgid, up); 2616 raw_to_up_osds(osdmap, pi, up); 2617 apply_primary_affinity(osdmap, pi, pps, up); 2618 get_temp_osds(osdmap, pi, &pgid, acting); 2619 if (!acting->size) { 2620 memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0])); 2621 acting->size = up->size; 2622 if (acting->primary == -1) 2623 acting->primary = up->primary; 2624 } 2625 WARN_ON(!osds_valid(up) || !osds_valid(acting)); 2626 } 2627 2628 bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap, 2629 struct ceph_pg_pool_info *pi, 2630 const struct ceph_pg *raw_pgid, 2631 struct ceph_spg *spgid) 2632 { 2633 struct ceph_pg pgid; 2634 struct ceph_osds up, acting; 2635 int i; 2636 2637 WARN_ON(pi->id != raw_pgid->pool); 2638 raw_pg_to_pg(pi, raw_pgid, &pgid); 2639 2640 if (ceph_can_shift_osds(pi)) { 2641 spgid->pgid = pgid; /* struct */ 2642 spgid->shard = CEPH_SPG_NOSHARD; 2643 return true; 2644 } 2645 2646 ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting); 2647 for (i = 0; i < acting.size; i++) { 2648 if (acting.osds[i] == acting.primary) { 2649 spgid->pgid = pgid; /* struct */ 2650 spgid->shard = i; 2651 return true; 2652 } 2653 } 2654 2655 return false; 2656 } 2657 2658 /* 2659 * Return acting primary for given PG, or -1 if none. 2660 */ 2661 int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, 2662 const struct ceph_pg *raw_pgid) 2663 { 2664 struct ceph_pg_pool_info *pi; 2665 struct ceph_osds up, acting; 2666 2667 pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool); 2668 if (!pi) 2669 return -1; 2670 2671 ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting); 2672 return acting.primary; 2673 } 2674 EXPORT_SYMBOL(ceph_pg_to_acting_primary); 2675