#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}

/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;
	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
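	/* unlike straw, straw2 encodes only per-item weights, no straws */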
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}

DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
		node);

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
			rb_entry(rb_first(&c->choose_args),
				 struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}

static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
	ret = -EINVAL;
fail:
	kfree(a);
	return ERR_PTR(ret);
}

/*
 * Assumes @arg is zero-initialized.
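 *
 * On-wire layout, as consumed below: a u32 weight_set_size, then that
 * many u32 arrays (each decoded via decode_array_32_alloc()), then one
 * u32 array of ids.  On error, partially decoded state is freed by the
 * caller through free_choose_arg_map().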
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;

			if (arg->ids_size &&
			    arg->ids_size != c->buckets[bucket_index]->size)
				goto e_inval;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}

static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}
		/* Every bucket has a permutation array.
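		 * It is sized below at one __u32 slot per bucket item.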
		 */
		c->working_size += c->buckets[b]->size * sizeof(__u32);
	}
}

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	c->choose_args = RB_ROOT;

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				(struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto fail;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);

	if (*p != end) {
		/* class_map */
		ceph_decode_skip_map(p, end, 32, 32, bad);
		/* class_name */
		ceph_decode_skip_map(p, end, 32, string, bad);
		/* class_bucket */
		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
	}

	if (*p != end) {
		err = decode_choose_args(p, end, c);
		if (err)
			goto fail;
	}

done:
	crush_finalize(c);
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
fail:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);

bad:
	err = -EINVAL;
	goto fail;
}

int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}

int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
	int ret;

	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
	if (ret)
		return ret;

	if (lhs->shard < rhs->shard)
		return -1;
	if (lhs->shard > rhs->shard)
		return 1;

	return 0;
}

static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
	struct ceph_pg_mapping *pg;

	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
	if (!pg)
		return NULL;

	RB_CLEAR_NODE(&pg->node);
	return pg;
}

static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
	WARN_ON(!RB_EMPTY_NODE(&pg->node));

	kfree(pg);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
		 RB_BYPTR, const struct ceph_pg *, node)

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p);  /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;      /* snapid key */
		*p += 1 + 1;  /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len; /* key */
			len = ceph_decode_32(p);
			*p += len; /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4; /* skip hit_set_period */
		*p += 4; /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4; /* skip stripe_width */

	if (ev >= 13) {
		*p += 8; /* skip target_max_bytes */
		*p += 8; /* skip target_max_objects */
		*p += 4; /* skip cache_target_dirty_ratio_micro */
		*p += 4; /* skip cache_target_full_ratio_micro */
		*p += 4; /* skip cache_min_flush_age */
		*p += 4; /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}

	/*
	 * last_force_op_resend_preluminous, will be overridden if the
	 * map was encoded with RESEND_ON_SPLIT
	 */
	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	if (ev >= 16)
		*p += 4; /* skip min_read_recency_for_promote */

	if (ev >= 17)
		*p += 8; /* skip expected_num_objects */

	if (ev >= 19)
		*p += 4; /* skip cache_target_dirty_high_ratio_micro */

	if (ev >= 20)
		*p += 4; /* skip min_write_recency_for_promote */

	if (ev >= 21)
		*p += 1; /* skip use_gmt_hitset */

	if (ev >= 22)
		*p += 1; /* skip fast_read */

	if (ev >= 23) {
		*p += 4; /* skip hit_set_grade_decay_rate */
		*p += 4; /* skip hit_set_search_last_n */
	}

	if (ev >= 24) {
		/* skip opts */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 25)
		pi->last_force_request_resend = ceph_decode_32(p);

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}

static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout("  pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout("  name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	map->pg_upmap = RB_ROOT;
	map->pg_upmap_items = RB_ROOT;
	mutex_init(&map->crush_workspace_mutex);

	return map;
}

void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->pg_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->primary_temp),
				 struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->primary_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap_items),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap_items);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map->osd_primary_affinity);
	kfree(map->crush_workspace);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u32 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	int i;

	state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
	if (!state)
		return -ENOMEM;
	map->osd_state = state;

	weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
	if (!weight)
		return -ENOMEM;
	map->osd_weight = weight;

	addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
	if (!addr)
		return -ENOMEM;
	map->osd_addr = addr;

	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = krealloc(map->osd_primary_affinity,
				    max*sizeof(*affinity), GFP_NOFS);
		if (!affinity)
			return -ENOMEM;
		map->osd_primary_affinity = affinity;

		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}

static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
	void *workspace;
	size_t work_size;

	if (IS_ERR(crush))
		return PTR_ERR(crush);

	work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE);
	dout("%s work_size %zu bytes\n", __func__, work_size);
	workspace = kmalloc(work_size, GFP_NOIO);
	if (!workspace) {
		crush_destroy(crush);
		return -ENOMEM;
	}
	crush_init_workspace(crush, workspace);

	if (map->crush)
		crush_destroy(map->crush);
	kfree(map->crush_workspace);
	map->crush = crush;
	map->crush_workspace = workspace;
	return 0;
}

#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
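 *
 * Sketch of the two layouts distinguished here, per the decoding
 * below: new-style maps carry u8 struct_v, u8 struct_compat and u32
 * struct_len for both the wrapper and the client_data section, while
 * old-style maps begin with a single le16 version.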
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}

static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0 && incremental)
		return NULL;	/* new_pg_temp: [] to remove */
	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_temp.len = len;
	for (i = 0; i < len; i++)
		pg->pg_temp.osds[i] = ceph_decode_32(p);

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 true);
}

static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
						     bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 osd;

	ceph_decode_32_safe(p, end, osd, e_inval);
	if (osd == (u32)-1 && incremental)
		return NULL;	/* new_primary_temp: -1 to remove */

	pg = alloc_pg_mapping(0);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->primary_temp.osd = osd;
	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
						    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}

static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
						 bool __unused)
{
	return __decode_pg_temp(p, end, false);
}

static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 false);
}

static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 true);
}

static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}

static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
						       bool __unused)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(2 * len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_upmap_items.len = len;
	for (i = 0; i < len; i++) {
		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
	}

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, false);
}

static int decode_new_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, true);
}

static int decode_old_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}

/*
 * decode a full map.
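 *
 * Decode order below: client data version, fsid, epoch, created and
 * modified stamps, pools and pool names, pool_max, flags, max_osd and
 * the per-OSD state/weight/addr arrays, pg_temp, primary_temp,
 * primary affinity, the crush map and, for v4+, pg_upmap[_items].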
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*((struct_v >= 5 ? sizeof(u32) :
						        sizeof(u8)) +
				       sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), e_inval);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	if (struct_v >= 5) {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_32(p);
	} else {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_8(p);
	}

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(map->osd_primary_affinity);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
	if (err)
		goto bad;

	*p += len;
	if (struct_v >= 3) {
		/* erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
	}

	if (struct_v >= 4) {
		err = decode_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
	}

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}

/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}

/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
				      struct ceph_osdmap *map)
{
	void *new_up_client;
	void *new_state;
	void *new_weight_end;
	u32 len;

	new_up_client = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	new_state = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		s32 osd;
		u32 w;

		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
		osd = ceph_decode_32(p);
		w = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d weight 0x%x %s\n", osd, w,
			w == CEPH_OSD_IN ? "(in)" :
			(w == CEPH_OSD_OUT ? "(out)" : ""));
		map->osd_weight[osd] = w;

		/*
		 * If we are marking in, set the EXISTS, and clear the
		 * AUTOOUT and NEW bits.
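		 * (A nonzero weight implies the OSD exists; AUTOOUT
		 * and NEW are transient flags that no longer apply
		 * once it has been weighted back in.)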
		 */
		if (w) {
			map->osd_state[osd] |= CEPH_OSD_EXISTS;
			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
						 CEPH_OSD_NEW);
		}
	}
	new_weight_end = *p;

	/* new_state (up/down) */
	*p = new_state;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		u32 xorstate;
		int ret;

		osd = ceph_decode_32(p);
		if (struct_v >= 5)
			xorstate = ceph_decode_32(p);
		else
			xorstate = ceph_decode_8(p);
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		BUG_ON(osd >= map->max_osd);
		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
		    (xorstate & CEPH_OSD_UP))
			pr_info("osd%d down\n", osd);
		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
		    (xorstate & CEPH_OSD_EXISTS)) {
			pr_info("osd%d does not exist\n", osd);
			ret = set_primary_affinity(map, osd,
				   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
			if (ret)
				return ret;
			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
			map->osd_state[osd] = 0;
		} else {
			map->osd_state[osd] ^= xorstate;
		}
	}

	/* new_up_client */
	*p = new_up_client;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		struct ceph_entity_addr addr;

		osd = ceph_decode_32(p);
		ceph_decode_copy(p, &addr, sizeof(addr));
		ceph_decode_addr(&addr);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d up\n", osd);
		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	*p = new_weight_end;
	return 0;

e_inval:
	return -EINVAL;
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map)
{
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		err = osdmap_set_crush(map,
				       crush_decode(*p, min(*p + len, end)));
		if (err)
			goto bad;
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max?
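	 * (a negative value leaves max_osd unchanged)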
	 */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up_client, new_state, new_weight */
	err = decode_new_up_state_weight(p, end, struct_v, map);
	if (err)
		goto bad;

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	if (struct_v >= 3) {
		/* new_erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
		/* old_erasure_code_profiles */
		ceph_decode_skip_set(p, end, string, e_inval);
	}

	if (struct_v >= 4) {
		err = decode_new_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_new_pg_upmap_items(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return ERR_PTR(err);
}

void ceph_oloc_copy(struct ceph_object_locator *dest,
		    const struct ceph_object_locator *src)
{
	ceph_oloc_destroy(dest);

	dest->pool = src->pool;
	if (src->pool_ns)
		dest->pool_ns = ceph_get_string(src->pool_ns);
	else
		dest->pool_ns = NULL;
}
EXPORT_SYMBOL(ceph_oloc_copy);

void ceph_oloc_destroy(struct ceph_object_locator *oloc)
{
	ceph_put_string(oloc->pool_ns);
}
EXPORT_SYMBOL(ceph_oloc_destroy);

void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src)
{
	ceph_oid_destroy(dest);

	if (src->name != src->inline_name) {
		/* very rare, see ceph_object_id definition */
		dest->name = kmalloc(src->name_len + 1,
				     GFP_NOIO | __GFP_NOFAIL);
	} else {
		dest->name = dest->inline_name;
	}
	memcpy(dest->name, src->name, src->name_len + 1);
	dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);

static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
	int len;

	WARN_ON(!ceph_oid_empty(oid));

	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
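	/*
	 * vsnprintf() returns the length the formatted name would have
	 * had, so a value >= the inline buffer size means the name did
	 * not fit and the caller must allocate an external buffer (see
	 * oid_aprintf_vargs() below).
	 */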
	if (len >= sizeof(oid->inline_name))
		return len;

	oid->name_len = len;
	return 0;
}

/*
 * If oid doesn't fit into inline buffer, BUG.
 */
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	BUG_ON(oid_printf_vargs(oid, fmt, ap));
	va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);

static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
		      const char *fmt, va_list ap)
{
	va_list aq;
	int len;

	va_copy(aq, ap);
	len = oid_printf_vargs(oid, fmt, aq);
	va_end(aq);

	if (len) {
		char *external_name;

		external_name = kmalloc(len + 1, gfp);
		if (!external_name)
			return -ENOMEM;

		oid->name = external_name;
		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
		oid->name_len = len;
	}

	return 0;
}

/*
 * If oid doesn't fit into inline buffer, allocate.
 */
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);

void ceph_oid_destroy(struct ceph_object_id *oid)
{
	if (oid->name != oid->inline_name)
		kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);

/*
 * osds only
 */
static bool __osds_equal(const struct ceph_osds *lhs,
			 const struct ceph_osds *rhs)
{
	if (lhs->size == rhs->size &&
	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
		return true;

	return false;
}

/*
 * osds + primary
 */
static bool osds_equal(const struct ceph_osds *lhs,
		       const struct ceph_osds *rhs)
{
	if (__osds_equal(lhs, rhs) &&
	    lhs->primary == rhs->primary)
		return true;

	return false;
}

static bool osds_valid(const struct ceph_osds *set)
{
	/* non-empty set */
	if (set->size > 0 && set->primary >= 0)
		return true;

	/* empty can_shift_osds set */
	if (!set->size && set->primary == -1)
		return true;

	/* empty !can_shift_osds set - all NONE */
	if (set->size > 0 && set->primary == -1) {
		int i;

		for (i = 0; i < set->size; i++) {
			if (set->osds[i] != CRUSH_ITEM_NONE)
				break;
		}
		if (i == set->size)
			return true;
	}

	return false;
}

void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
{
	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
	dest->size = src->size;
	dest->primary = src->primary;
}

bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
		      u32 new_pg_num)
{
	int old_bits = calc_bits_of(old_pg_num);
	int old_mask = (1 << old_bits) - 1;
	int n;

	WARN_ON(pgid->seed >= old_pg_num);
	if (new_pg_num <= old_pg_num)
		return false;

	for (n = 1; ; n++) {
		int next_bit = n << (old_bits - 1);
		u32 s = next_bit | pgid->seed;

		if (s < old_pg_num || s == pgid->seed)
			continue;
		if (s >= new_pg_num)
			break;

		s = ceph_stable_mod(s, old_pg_num, old_mask);
		if (s == pgid->seed)
			return true;
	}

	return false;
}

bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  bool old_recovery_deletes,
			  bool new_recovery_deletes,
			  const struct ceph_pg *pgid)
{
	return !osds_equal(old_acting, new_acting) ||
	       !osds_equal(old_up, new_up) ||
	       old_size != new_size ||
	       old_min_size != new_min_size ||
	       ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
	       old_sort_bitwise != new_sort_bitwise ||
	       old_recovery_deletes != new_recovery_deletes;
}

static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
	int i;

	for (i = 0; i < acting->size; i++) {
		if (acting->osds[i] == osd)
			return i;
	}

	return -1;
}

static bool primary_changed(const struct ceph_osds *old_acting,
			    const struct ceph_osds *new_acting)
{
	if (!old_acting->size && !new_acting->size)
		return false;	/* both still empty */

	if (!old_acting->size ^ !new_acting->size)
		return true;	/* was empty, now not, or vice versa */

	if (old_acting->primary != new_acting->primary)
		return true;	/* primary changed */

	if (calc_pg_rank(old_acting->primary, old_acting) !=
	    calc_pg_rank(new_acting->primary, new_acting))
		return true;

	return false;	/* same primary (tho replicas may have changed) */
}

bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change)
{
	if (primary_changed(old_acting, new_acting))
		return true;

	if (any_change && !__osds_equal(old_acting, new_acting))
		return true;

	return false;
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 len,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = layout->object_size;
	u32 su = layout->stripe_unit;
	u32 sc = layout->stripe_count;
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu  osize %u fl_su %u\n", off, len,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object.  This is the minimum of the full length requested (len) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, len, su - su_offset);

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);

/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
				const struct ceph_object_id *oid,
				const struct ceph_object_locator *oloc,
				struct ceph_pg *raw_pgid)
{
	WARN_ON(pi->id != oloc->pool);

	if (!oloc->pool_ns) {
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
					       oid->name_len);
		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
		     raw_pgid->pool, raw_pgid->seed);
	} else {
		char stack_buf[256];
		char *buf = stack_buf;
		int nsl = oloc->pool_ns->len;
		size_t total = nsl + 1 + oid->name_len;

		if (total > sizeof(stack_buf)) {
			buf = kmalloc(total, GFP_NOIO);
			if (!buf)
				return -ENOMEM;
		}
		memcpy(buf, oloc->pool_ns->str, nsl);
		buf[nsl] = '\037';
		memcpy(buf + nsl + 1, oid->name, oid->name_len);
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
		if (buf != stack_buf)
			kfree(buf);
		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
		     oid->name, nsl, oloc->pool_ns->str,
		     raw_pgid->pool, raw_pgid->seed);
	}
	return 0;
}

int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      const struct ceph_object_id *oid,
			      const struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
	if (!pi)
		return -ENOENT;

	return __ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);

/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}

/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
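		 * (the sums collide: ps 5 + pool 0 == ps 4 + pool 1 ==
		 * ps 3 + pool 2, and so on)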
2304 */ 2305 return ceph_stable_mod(raw_pgid->seed, pi->pgp_num, 2306 pi->pgp_num_mask) + 2307 (unsigned)raw_pgid->pool; 2308 } 2309 } 2310 2311 /* 2312 * Magic value used for a "default" fallback choose_args, used if the 2313 * crush_choose_arg_map passed to do_crush() does not exist. If this 2314 * also doesn't exist, fall back to canonical weights. 2315 */ 2316 #define CEPH_DEFAULT_CHOOSE_ARGS -1 2317 2318 static int do_crush(struct ceph_osdmap *map, int ruleno, int x, 2319 int *result, int result_max, 2320 const __u32 *weight, int weight_max, 2321 s64 choose_args_index) 2322 { 2323 struct crush_choose_arg_map *arg_map; 2324 int r; 2325 2326 BUG_ON(result_max > CEPH_PG_MAX_SIZE); 2327 2328 arg_map = lookup_choose_arg_map(&map->crush->choose_args, 2329 choose_args_index); 2330 if (!arg_map) 2331 arg_map = lookup_choose_arg_map(&map->crush->choose_args, 2332 CEPH_DEFAULT_CHOOSE_ARGS); 2333 2334 mutex_lock(&map->crush_workspace_mutex); 2335 r = crush_do_rule(map->crush, ruleno, x, result, result_max, 2336 weight, weight_max, map->crush_workspace, 2337 arg_map ? arg_map->args : NULL); 2338 mutex_unlock(&map->crush_workspace_mutex); 2339 2340 return r; 2341 } 2342 2343 static void remove_nonexistent_osds(struct ceph_osdmap *osdmap, 2344 struct ceph_pg_pool_info *pi, 2345 struct ceph_osds *set) 2346 { 2347 int i; 2348 2349 if (ceph_can_shift_osds(pi)) { 2350 int removed = 0; 2351 2352 /* shift left */ 2353 for (i = 0; i < set->size; i++) { 2354 if (!ceph_osd_exists(osdmap, set->osds[i])) { 2355 removed++; 2356 continue; 2357 } 2358 if (removed) 2359 set->osds[i - removed] = set->osds[i]; 2360 } 2361 set->size -= removed; 2362 } else { 2363 /* set dne devices to NONE */ 2364 for (i = 0; i < set->size; i++) { 2365 if (!ceph_osd_exists(osdmap, set->osds[i])) 2366 set->osds[i] = CRUSH_ITEM_NONE; 2367 } 2368 } 2369 } 2370 2371 /* 2372 * Calculate raw set (CRUSH output) for given PG and filter out 2373 * nonexistent OSDs. ->primary is undefined for a raw set. 2374 * 2375 * Placement seed (CRUSH input) is returned through @ppps. 
2376 */ 2377 static void pg_to_raw_osds(struct ceph_osdmap *osdmap, 2378 struct ceph_pg_pool_info *pi, 2379 const struct ceph_pg *raw_pgid, 2380 struct ceph_osds *raw, 2381 u32 *ppps) 2382 { 2383 u32 pps = raw_pg_to_pps(pi, raw_pgid); 2384 int ruleno; 2385 int len; 2386 2387 ceph_osds_init(raw); 2388 if (ppps) 2389 *ppps = pps; 2390 2391 ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type, 2392 pi->size); 2393 if (ruleno < 0) { 2394 pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n", 2395 pi->id, pi->crush_ruleset, pi->type, pi->size); 2396 return; 2397 } 2398 2399 if (pi->size > ARRAY_SIZE(raw->osds)) { 2400 pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n", 2401 pi->id, pi->crush_ruleset, pi->type, pi->size, 2402 ARRAY_SIZE(raw->osds)); 2403 return; 2404 } 2405 2406 len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size, 2407 osdmap->osd_weight, osdmap->max_osd, pi->id); 2408 if (len < 0) { 2409 pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n", 2410 len, ruleno, pi->id, pi->crush_ruleset, pi->type, 2411 pi->size); 2412 return; 2413 } 2414 2415 raw->size = len; 2416 remove_nonexistent_osds(osdmap, pi, raw); 2417 } 2418 2419 /* apply pg_upmap[_items] mappings */ 2420 static void apply_upmap(struct ceph_osdmap *osdmap, 2421 const struct ceph_pg *pgid, 2422 struct ceph_osds *raw) 2423 { 2424 struct ceph_pg_mapping *pg; 2425 int i, j; 2426 2427 pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid); 2428 if (pg) { 2429 /* make sure targets aren't marked out */ 2430 for (i = 0; i < pg->pg_upmap.len; i++) { 2431 int osd = pg->pg_upmap.osds[i]; 2432 2433 if (osd != CRUSH_ITEM_NONE && 2434 osd < osdmap->max_osd && 2435 osdmap->osd_weight[osd] == 0) { 2436 /* reject/ignore explicit mapping */ 2437 return; 2438 } 2439 } 2440 for (i = 0; i < pg->pg_upmap.len; i++) 2441 raw->osds[i] = pg->pg_upmap.osds[i]; 2442 raw->size = pg->pg_upmap.len; 2443 /* check and apply pg_upmap_items, if any */ 2444 } 2445 2446 pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid); 2447 if (pg) { 2448 /* 2449 * Note: this approach does not allow a bidirectional swap, 2450 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1]. 2451 */ 2452 for (i = 0; i < pg->pg_upmap_items.len; i++) { 2453 int from = pg->pg_upmap_items.from_to[i][0]; 2454 int to = pg->pg_upmap_items.from_to[i][1]; 2455 int pos = -1; 2456 bool exists = false; 2457 2458 /* make sure replacement doesn't already appear */ 2459 for (j = 0; j < raw->size; j++) { 2460 int osd = raw->osds[j]; 2461 2462 if (osd == to) { 2463 exists = true; 2464 break; 2465 } 2466 /* ignore mapping if target is marked out */ 2467 if (osd == from && pos < 0 && 2468 !(to != CRUSH_ITEM_NONE && 2469 to < osdmap->max_osd && 2470 osdmap->osd_weight[to] == 0)) { 2471 pos = j; 2472 } 2473 } 2474 if (!exists && pos >= 0) 2475 raw->osds[pos] = to; 2476 } 2477 } 2478 } 2479 2480 /* 2481 * Given raw set, calculate up set and up primary. By definition of an 2482 * up set, the result won't contain nonexistent or down OSDs. 2483 * 2484 * This is done in-place - on return @set is the up set. If it's 2485 * empty, ->primary will remain undefined. 
2486 */ 2487 static void raw_to_up_osds(struct ceph_osdmap *osdmap, 2488 struct ceph_pg_pool_info *pi, 2489 struct ceph_osds *set) 2490 { 2491 int i; 2492 2493 /* ->primary is undefined for a raw set */ 2494 BUG_ON(set->primary != -1); 2495 2496 if (ceph_can_shift_osds(pi)) { 2497 int removed = 0; 2498 2499 /* shift left */ 2500 for (i = 0; i < set->size; i++) { 2501 if (ceph_osd_is_down(osdmap, set->osds[i])) { 2502 removed++; 2503 continue; 2504 } 2505 if (removed) 2506 set->osds[i - removed] = set->osds[i]; 2507 } 2508 set->size -= removed; 2509 if (set->size > 0) 2510 set->primary = set->osds[0]; 2511 } else { 2512 /* set down/dne devices to NONE */ 2513 for (i = set->size - 1; i >= 0; i--) { 2514 if (ceph_osd_is_down(osdmap, set->osds[i])) 2515 set->osds[i] = CRUSH_ITEM_NONE; 2516 else 2517 set->primary = set->osds[i]; 2518 } 2519 } 2520 } 2521 2522 static void apply_primary_affinity(struct ceph_osdmap *osdmap, 2523 struct ceph_pg_pool_info *pi, 2524 u32 pps, 2525 struct ceph_osds *up) 2526 { 2527 int i; 2528 int pos = -1; 2529 2530 /* 2531 * Do we have any non-default primary_affinity values for these 2532 * osds? 2533 */ 2534 if (!osdmap->osd_primary_affinity) 2535 return; 2536 2537 for (i = 0; i < up->size; i++) { 2538 int osd = up->osds[i]; 2539 2540 if (osd != CRUSH_ITEM_NONE && 2541 osdmap->osd_primary_affinity[osd] != 2542 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) { 2543 break; 2544 } 2545 } 2546 if (i == up->size) 2547 return; 2548 2549 /* 2550 * Pick the primary. Feed both the seed (for the pg) and the 2551 * osd into the hash/rng so that a proportional fraction of an 2552 * osd's pgs get rejected as primary. 2553 */ 2554 for (i = 0; i < up->size; i++) { 2555 int osd = up->osds[i]; 2556 u32 aff; 2557 2558 if (osd == CRUSH_ITEM_NONE) 2559 continue; 2560 2561 aff = osdmap->osd_primary_affinity[osd]; 2562 if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY && 2563 (crush_hash32_2(CRUSH_HASH_RJENKINS1, 2564 pps, osd) >> 16) >= aff) { 2565 /* 2566 * We chose not to use this primary. Note it 2567 * anyway as a fallback in case we don't pick 2568 * anyone else, but keep looking. 2569 */ 2570 if (pos < 0) 2571 pos = i; 2572 } else { 2573 pos = i; 2574 break; 2575 } 2576 } 2577 if (pos < 0) 2578 return; 2579 2580 up->primary = up->osds[pos]; 2581 2582 if (ceph_can_shift_osds(pi) && pos > 0) { 2583 /* move the new primary to the front */ 2584 for (i = pos; i > 0; i--) 2585 up->osds[i] = up->osds[i - 1]; 2586 up->osds[0] = up->primary; 2587 } 2588 } 2589 2590 /* 2591 * Get pg_temp and primary_temp mappings for given PG. 2592 * 2593 * Note that a PG may have none, only pg_temp, only primary_temp or 2594 * both pg_temp and primary_temp mappings. This means @temp isn't 2595 * always a valid OSD set on return: in the "only primary_temp" case, 2596 * @temp will have its ->primary >= 0 but ->size == 0. 2597 */ 2598 static void get_temp_osds(struct ceph_osdmap *osdmap, 2599 struct ceph_pg_pool_info *pi, 2600 const struct ceph_pg *pgid, 2601 struct ceph_osds *temp) 2602 { 2603 struct ceph_pg_mapping *pg; 2604 int i; 2605 2606 ceph_osds_init(temp); 2607 2608 /* pg_temp? 
/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings. This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg_mapping *pg;
	int i;

	ceph_osds_init(temp);

	/* pg_temp? */
	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}

/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       struct ceph_pg_pool_info *pi,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg pgid;
	u32 pps;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	apply_upmap(osdmap, &pgid, up);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, &pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}

bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
			      struct ceph_pg_pool_info *pi,
			      const struct ceph_pg *raw_pgid,
			      struct ceph_spg *spgid)
{
	struct ceph_pg pgid;
	struct ceph_osds up, acting;
	int i;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	if (ceph_can_shift_osds(pi)) {
		spgid->pgid = pgid; /* struct */
		spgid->shard = CEPH_SPG_NOSHARD;
		return true;
	}

	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
	for (i = 0; i < acting.size; i++) {
		if (acting.osds[i] == acting.primary) {
			spgid->pgid = pgid; /* struct */
			spgid->shard = i;
			return true;
		}
	}

	return false;
}

/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_osds up, acting;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi)
		return -1;

	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
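/*
 * Putting the exported helpers together (an illustrative sketch, not
 * code from this file; "osdmap", "oid" and "oloc" stand for state the
 * caller, e.g. the osd_client request path, already holds):
 *
 *	struct ceph_pg raw_pgid;
 *	int primary;
 *
 *	if (ceph_object_locator_to_pg(osdmap, &oid, &oloc, &raw_pgid))
 *		return;		(pool doesn't exist)
 *	primary = ceph_pg_to_acting_primary(osdmap, &raw_pgid);
 *
 * "primary" is then the osd id to send the request to, or -1 if the
 * PG currently has no acting primary.
 */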