#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
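/*
 * Illustrative example (added commentary): for pg_num = 12,
 * calc_bits_of(11) = 4, so pg_num_mask = (1 << 4) - 1 = 15 -- the
 * smallest 2^n-1 mask covering all 12 PGs.  ceph_stable_mod() later
 * uses this mask to fold a raw placement seed into the valid PG range.
 */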
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;
	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
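/*
 * Added commentary: unlike CRUSH_BUCKET_STRAW, the straw2 encoding
 * above carries only item_weights -- there is no precomputed straws[]
 * array, since straw2 derives its scaled draw lengths from the raw
 * weights at mapping time.
 */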
static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}

DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
		node);

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
		    rb_entry(rb_first(&c->choose_args),
			     struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}

static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
	ret = -EINVAL;
fail:
	kfree(a);
	return ERR_PTR(ret);
}

/*
 * Assumes @arg is zero-initialized.
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}

static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}
		/* Every bucket has a permutation array. */
		c->working_size += c->buckets[b]->size * sizeof(__u32);
	}
}
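/*
 * Illustrative sizing (added commentary): for a map with
 * max_buckets = 3 where every bucket exists and holds 4 items,
 * crush_finalize() computes
 *
 *   working_size = sizeof(struct crush_work)
 *                + 3 * sizeof(struct crush_work_bucket *)
 *                + 3 * (sizeof(struct crush_work_bucket)
 *                       + 4 * sizeof(__u32));
 *
 * i.e. one header, one pointer slot per bucket, and per-bucket state
 * plus a 4-entry permutation array each.
 */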
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	c->choose_args = RB_ROOT;

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				 (struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				  (struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		err = -EINVAL;
		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}
	ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);

	if (*p != end) {
		/* class_map */
		ceph_decode_skip_map(p, end, 32, 32, bad);
		/* class_name */
		ceph_decode_skip_map(p, end, 32, string, bad);
		/* class_bucket */
		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
	}

	if (*p != end) {
		err = decode_choose_args(p, end, c);
		if (err)
			goto bad;
	}

done:
	crush_finalize(c);
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}
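/*
 * Added commentary: each tunables read above uses
 * ceph_decode_need(..., done) rather than bad -- running out of bytes
 * there is not an error, it simply means the map was encoded by an
 * older cluster that predates that tunable, so decoding stops early
 * with the defaults set at the top of crush_decode() left in place.
 */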
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}

int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
	int ret;

	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
	if (ret)
		return ret;

	if (lhs->shard < rhs->shard)
		return -1;
	if (lhs->shard > rhs->shard)
		return 1;

	return 0;
}

static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
	struct ceph_pg_mapping *pg;

	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
	if (!pg)
		return NULL;

	RB_CLEAR_NODE(&pg->node);
	return pg;
}

static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
	WARN_ON(!RB_EMPTY_NODE(&pg->node));

	kfree(pg);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
		 RB_BYPTR, const struct ceph_pg *, node)

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}
static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p);  /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;     /* snapid key */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len; /* key */
			len = ceph_decode_32(p);
			*p += len; /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4; /* skip hit_set_period */
		*p += 4; /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4; /* skip stripe_width */

	if (ev >= 13) {
		*p += 8; /* skip target_max_bytes */
		*p += 8; /* skip target_max_objects */
		*p += 4; /* skip cache_target_dirty_ratio_micro */
		*p += 4; /* skip cache_target_full_ratio_micro */
		*p += 4; /* skip cache_min_flush_age */
		*p += 4; /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}

	/*
	 * last_force_op_resend_preluminous, will be overridden if the
	 * map was encoded with RESEND_ON_SPLIT
	 */
	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	if (ev >= 16)
		*p += 4; /* skip min_read_recency_for_promote */

	if (ev >= 17)
		*p += 8; /* skip expected_num_objects */

	if (ev >= 19)
		*p += 4; /* skip cache_target_dirty_high_ratio_micro */

	if (ev >= 20)
		*p += 4; /* skip min_write_recency_for_promote */

	if (ev >= 21)
		*p += 1; /* skip use_gmt_hitset */

	if (ev >= 22)
		*p += 1; /* skip fast_read */

	if (ev >= 23) {
		*p += 4; /* skip hit_set_grade_decay_rate */
		*p += 4; /* skip hit_set_search_last_n */
	}

	if (ev >= 24) {
		/* skip opts */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 25)
		pi->last_force_request_resend = ceph_decode_32(p);

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}
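/*
 * Added commentary: pre-v7 pool encodings carry no min_size, so
 * decode_pool() falls back to size - size / 2, i.e. a majority -- for
 * a replicated pool with size = 3 that yields min_size = 2.
 */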
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	map->pg_upmap = RB_ROOT;
	map->pg_upmap_items = RB_ROOT;
	mutex_init(&map->crush_workspace_mutex);

	return map;
}

void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->pg_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->primary_temp),
				 struct ceph_pg_mapping, node);
		erase_pg_mapping(&map->primary_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap_items),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_upmap_items);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map->osd_primary_affinity);
	kfree(map->crush_workspace);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u32 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	int i;

	state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
	if (!state)
		return -ENOMEM;
	map->osd_state = state;

	weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
	if (!weight)
		return -ENOMEM;
	map->osd_weight = weight;

	addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
	if (!addr)
		return -ENOMEM;
	map->osd_addr = addr;

	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = krealloc(map->osd_primary_affinity,
				    max*sizeof(*affinity), GFP_NOFS);
		if (!affinity)
			return -ENOMEM;
		map->osd_primary_affinity = affinity;

		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}

static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
	void *workspace;
	size_t work_size;

	if (IS_ERR(crush))
		return PTR_ERR(crush);

	work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE);
	dout("%s work_size %zu bytes\n", __func__, work_size);
	workspace = kmalloc(work_size, GFP_NOIO);
	if (!workspace) {
		crush_destroy(crush);
		return -ENOMEM;
	}
	crush_init_workspace(crush, workspace);

	if (map->crush)
		crush_destroy(map->crush);
	kfree(map->crush_workspace);
	map->crush = crush;
	map->crush_workspace = workspace;
	return 0;
}
#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}
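/*
 * Added commentary: ceph encodes integers little-endian, so a legacy
 * map that starts with u16 version 6 begins with bytes 06 00 -- the
 * first byte read as a u8 above is 6 (< 7), and the code backs up one
 * byte and re-reads the full u16.  New-style maps start with a u8
 * struct_v >= 7, followed by the wrapper and client_data
 * (version, compat, length) headers checked above.
 */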
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}
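/*
 * Added commentary: decode_pg_mapping() serves both decode paths.  For
 * a full map the callers below pass incremental = false and a decode
 * fn, so every entry is freshly inserted; for an incremental map any
 * existing entry is erased first, and fn may return NULL (or be NULL
 * for the removal-only "old_*" sections) to drop the mapping instead
 * of replacing it.
 */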
static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0 && incremental)
		return NULL;	/* new_pg_temp: [] to remove */
	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_temp.len = len;
	for (i = 0; i < len; i++)
		pg->pg_temp.osds[i] = ceph_decode_32(p);

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 true);
}

static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
						     bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 osd;

	ceph_decode_32_safe(p, end, osd, e_inval);
	if (osd == (u32)-1 && incremental)
		return NULL;	/* new_primary_temp: -1 to remove */

	pg = alloc_pg_mapping(0);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->primary_temp.osd = osd;
	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
						    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}
static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
						 bool __unused)
{
	return __decode_pg_temp(p, end, false);
}

static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 false);
}

static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 true);
}

static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}

static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
							bool __unused)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
	pg = kzalloc(sizeof(*pg) + 2 * len * sizeof(u32), GFP_NOIO);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_upmap_items.len = len;
	for (i = 0; i < len; i++) {
		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
	}

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, false);
}

static int decode_new_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, true);
}

static int decode_old_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}
/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*((struct_v >= 5 ? sizeof(u32) :
						        sizeof(u8)) +
				       sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), e_inval);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	if (struct_v >= 5) {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_32(p);
	} else {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_8(p);
	}

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(map->osd_primary_affinity);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
	if (err)
		goto bad;

	*p += len;
	if (struct_v >= 3) {
		/* erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    bad);
	}

	if (struct_v >= 4) {
		err = decode_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
	}

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}

/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}
/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
				      struct ceph_osdmap *map)
{
	void *new_up_client;
	void *new_state;
	void *new_weight_end;
	u32 len;

	new_up_client = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	new_state = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		s32 osd;
		u32 w;

		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
		osd = ceph_decode_32(p);
		w = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d weight 0x%x %s\n", osd, w,
			w == CEPH_OSD_IN ? "(in)" :
			(w == CEPH_OSD_OUT ? "(out)" : ""));
		map->osd_weight[osd] = w;

		/*
		 * If we are marking in, set the EXISTS, and clear the
		 * AUTOOUT and NEW bits.
		 */
		if (w) {
			map->osd_state[osd] |= CEPH_OSD_EXISTS;
			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
						 CEPH_OSD_NEW);
		}
	}
	new_weight_end = *p;

	/* new_state (up/down) */
	*p = new_state;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		u32 xorstate;
		int ret;

		osd = ceph_decode_32(p);
		if (struct_v >= 5)
			xorstate = ceph_decode_32(p);
		else
			xorstate = ceph_decode_8(p);
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		BUG_ON(osd >= map->max_osd);
		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
		    (xorstate & CEPH_OSD_UP))
			pr_info("osd%d down\n", osd);
		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
		    (xorstate & CEPH_OSD_EXISTS)) {
			pr_info("osd%d does not exist\n", osd);
			ret = set_primary_affinity(map, osd,
				   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
			if (ret)
				return ret;
			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
			map->osd_state[osd] = 0;
		} else {
			map->osd_state[osd] ^= xorstate;
		}
	}

	/* new_up_client */
	*p = new_up_client;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		struct ceph_entity_addr addr;

		osd = ceph_decode_32(p);
		ceph_decode_copy(p, &addr, sizeof(addr));
		ceph_decode_addr(&addr);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d up\n", osd);
		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	*p = new_weight_end;
	return 0;

e_inval:
	return -EINVAL;
}
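/*
 * Added commentary: in the new_state pass above, xorstate == 0 is
 * treated as CEPH_OSD_UP, so an entry with no explicit state bits
 * simply toggles up/down.  An xorstate carrying CEPH_OSD_EXISTS for an
 * existing OSD does more than flip bits: the OSD is wiped entirely --
 * state cleared, address zeroed, primary affinity reset to default.
 */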
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map)
{
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		err = osdmap_set_crush(map,
				       crush_decode(*p, min(*p + len, end)));
		if (err)
			goto bad;
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up_client, new_state, new_weight */
	err = decode_new_up_state_weight(p, end, struct_v, map);
	if (err)
		goto bad;

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	if (struct_v >= 3) {
		/* new_erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    bad);
		/* old_erasure_code_profiles */
		ceph_decode_skip_set(p, end, string, bad);
	}

	if (struct_v >= 4) {
		err = decode_new_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_new_pg_upmap_items(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return ERR_PTR(err);
}

void ceph_oloc_copy(struct ceph_object_locator *dest,
		    const struct ceph_object_locator *src)
{
	ceph_oloc_destroy(dest);

	dest->pool = src->pool;
	if (src->pool_ns)
		dest->pool_ns = ceph_get_string(src->pool_ns);
	else
		dest->pool_ns = NULL;
}
EXPORT_SYMBOL(ceph_oloc_copy);

void ceph_oloc_destroy(struct ceph_object_locator *oloc)
{
	ceph_put_string(oloc->pool_ns);
}
EXPORT_SYMBOL(ceph_oloc_destroy);

void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src)
{
	ceph_oid_destroy(dest);

	if (src->name != src->inline_name) {
		/* very rare, see ceph_object_id definition */
		dest->name = kmalloc(src->name_len + 1,
				     GFP_NOIO | __GFP_NOFAIL);
	} else {
		dest->name = dest->inline_name;
	}
	memcpy(dest->name, src->name, src->name_len + 1);
	dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);
static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
	int len;

	WARN_ON(!ceph_oid_empty(oid));

	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
	if (len >= sizeof(oid->inline_name))
		return len;

	oid->name_len = len;
	return 0;
}

/*
 * If oid doesn't fit into inline buffer, BUG.
 */
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	BUG_ON(oid_printf_vargs(oid, fmt, ap));
	va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);

static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
		      const char *fmt, va_list ap)
{
	va_list aq;
	int len;

	va_copy(aq, ap);
	len = oid_printf_vargs(oid, fmt, aq);
	va_end(aq);

	if (len) {
		char *external_name;

		external_name = kmalloc(len + 1, gfp);
		if (!external_name)
			return -ENOMEM;

		oid->name = external_name;
		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
		oid->name_len = len;
	}

	return 0;
}

/*
 * If oid doesn't fit into inline buffer, allocate.
 */
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);

void ceph_oid_destroy(struct ceph_object_id *oid)
{
	if (oid->name != oid->inline_name)
		kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);

/*
 * osds only
 */
static bool __osds_equal(const struct ceph_osds *lhs,
			 const struct ceph_osds *rhs)
{
	if (lhs->size == rhs->size &&
	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
		return true;

	return false;
}

/*
 * osds + primary
 */
static bool osds_equal(const struct ceph_osds *lhs,
		       const struct ceph_osds *rhs)
{
	if (__osds_equal(lhs, rhs) &&
	    lhs->primary == rhs->primary)
		return true;

	return false;
}

static bool osds_valid(const struct ceph_osds *set)
{
	/* non-empty set */
	if (set->size > 0 && set->primary >= 0)
		return true;

	/* empty can_shift_osds set */
	if (!set->size && set->primary == -1)
		return true;

	/* empty !can_shift_osds set - all NONE */
	if (set->size > 0 && set->primary == -1) {
		int i;

		for (i = 0; i < set->size; i++) {
			if (set->osds[i] != CRUSH_ITEM_NONE)
				break;
		}
		if (i == set->size)
			return true;
	}

	return false;
}

void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
{
	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
	dest->size = src->size;
	dest->primary = src->primary;
}

bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
		      u32 new_pg_num)
{
	int old_bits = calc_bits_of(old_pg_num);
	int old_mask = (1 << old_bits) - 1;
	int n;

	WARN_ON(pgid->seed >= old_pg_num);
	if (new_pg_num <= old_pg_num)
		return false;

	for (n = 1; ; n++) {
		int next_bit = n << (old_bits - 1);
		u32 s = next_bit | pgid->seed;

		if (s < old_pg_num || s == pgid->seed)
			continue;
		if (s >= new_pg_num)
			break;

		s = ceph_stable_mod(s, old_pg_num, old_mask);
		if (s == pgid->seed)
			return true;
	}

	return false;
}
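/*
 * Illustrative example (added commentary): growing a pool from
 * old_pg_num = 4 to new_pg_num = 8, PG seed 1 is split.  old_bits =
 * calc_bits_of(4) = 3 and old_mask = 7; for n = 1 the candidate child
 * is s = (1 << 2) | 1 = 5, which is < 8 and stable_mods back to seed 1,
 * so ceph_pg_is_split() returns true -- PG x.1 gains child x.5.
 */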
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  const struct ceph_pg *pgid)
{
	return !osds_equal(old_acting, new_acting) ||
	       !osds_equal(old_up, new_up) ||
	       old_size != new_size ||
	       old_min_size != new_min_size ||
	       ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
	       old_sort_bitwise != new_sort_bitwise;
}

static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
	int i;

	for (i = 0; i < acting->size; i++) {
		if (acting->osds[i] == osd)
			return i;
	}

	return -1;
}

static bool primary_changed(const struct ceph_osds *old_acting,
			    const struct ceph_osds *new_acting)
{
	if (!old_acting->size && !new_acting->size)
		return false;	/* both still empty */

	if (!old_acting->size ^ !new_acting->size)
		return true;	/* was empty, now not, or vice versa */

	if (old_acting->primary != new_acting->primary)
		return true;	/* primary changed */

	if (calc_pg_rank(old_acting->primary, old_acting) !=
	    calc_pg_rank(new_acting->primary, new_acting))
		return true;

	return false;	/* same primary (tho replicas may have changed) */
}

bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change)
{
	if (primary_changed(old_acting, new_acting))
		return true;

	if (any_change && !__osds_equal(old_acting, new_acting))
		return true;

	return false;
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 len,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = layout->object_size;
	u32 su = layout->stripe_unit;
	u32 sc = layout->stripe_count;
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object.  This is the minimum of the full length requested (len) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, len, su - su_offset);

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
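/*
 * Illustrative mapping (added commentary, values chosen for the
 * example): with stripe_unit = 64K, stripe_count = 2 and
 * object_size = 256K (so su_per_object = 4), a write at off = 320K
 * (the 6th stripe unit, bl = 5) gives stripeno = 2, stripepos = 1,
 * objsetno = 0, hence *ono = 0 * 2 + 1 = 1,
 * *oxoff = 0 + (2 % 4) * 64K = 128K, and *oxlen is capped at the 64K
 * remaining in that stripe unit.
 */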
/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
				const struct ceph_object_id *oid,
				const struct ceph_object_locator *oloc,
				struct ceph_pg *raw_pgid)
{
	WARN_ON(pi->id != oloc->pool);

	if (!oloc->pool_ns) {
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
					       oid->name_len);
		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
		     raw_pgid->pool, raw_pgid->seed);
	} else {
		char stack_buf[256];
		char *buf = stack_buf;
		int nsl = oloc->pool_ns->len;
		size_t total = nsl + 1 + oid->name_len;

		if (total > sizeof(stack_buf)) {
			buf = kmalloc(total, GFP_NOIO);
			if (!buf)
				return -ENOMEM;
		}
		memcpy(buf, oloc->pool_ns->str, nsl);
		buf[nsl] = '\037';
		memcpy(buf + nsl + 1, oid->name, oid->name_len);
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
		if (buf != stack_buf)
			kfree(buf);
		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
		     oid->name, nsl, oloc->pool_ns->str,
		     raw_pgid->pool, raw_pgid->seed);
	}
	return 0;
}
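/*
 * Added commentary: when a pool namespace is set, the hash input is the
 * namespace and object name joined by a 0x1f ('\037', unit separator)
 * byte -- e.g. namespace "ns1" and object "obj" hash the 7-byte key
 * "ns1\037obj", keeping namespaced names from colliding with plain
 * ones.
 */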
/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
				const struct ceph_object_id *oid,
				const struct ceph_object_locator *oloc,
				struct ceph_pg *raw_pgid)
{
	WARN_ON(pi->id != oloc->pool);

	if (!oloc->pool_ns) {
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
					       oid->name_len);
		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
		     raw_pgid->pool, raw_pgid->seed);
	} else {
		char stack_buf[256];
		char *buf = stack_buf;
		int nsl = oloc->pool_ns->len;
		size_t total = nsl + 1 + oid->name_len;

		if (total > sizeof(stack_buf)) {
			buf = kmalloc(total, GFP_NOIO);
			if (!buf)
				return -ENOMEM;
		}
		memcpy(buf, oloc->pool_ns->str, nsl);
		buf[nsl] = '\037';
		memcpy(buf + nsl + 1, oid->name, oid->name_len);
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
		if (buf != stack_buf)
			kfree(buf);
		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
		     oid->name, nsl, oloc->pool_ns->str,
		     raw_pgid->pool, raw_pgid->seed);
	}
	return 0;
}

int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      const struct ceph_object_id *oid,
			      const struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
	if (!pi)
		return -ENOENT;

	return __ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);

/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}

/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  this is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
				       pi->pgp_num_mask) +
		       (unsigned)raw_pgid->pool;
	}
}
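/*
 * Illustration (hypothetical values): ceph_stable_mod(x, b, bmask)
 * evaluates to x & bmask if that is < b, else x & (bmask >> 1).  With
 * pg_num = 12, calc_pg_masks() yields pg_num_mask = 15, so
 *
 *	seed 9:  9 & 15 = 9,   9 < 12   -> 9
 *	seed 13: 13 & 15 = 13, 13 >= 12 -> 13 & 7 = 5
 *
 * In the legacy (non-HASHPSPOOL) branch above, pool 0 seed 5 and
 * pool 1 seed 4 then both produce pps 5 -- the "0.5 == 1.4" overlap
 * the comment warns about.
 */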
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max,
		    u64 choose_args_index)
{
	struct crush_choose_arg_map *arg_map;
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
					choose_args_index);

	mutex_lock(&map->crush_workspace_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_workspace,
			  arg_map ? arg_map->args : NULL);
	mutex_unlock(&map->crush_workspace_mutex);

	return r;
}

static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
				    struct ceph_pg_pool_info *pi,
				    struct ceph_osds *set)
{
	int i;

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
	} else {
		/* set dne devices to NONE */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
		}
	}
}

/*
 * Calculate raw set (CRUSH output) for given PG and filter out
 * nonexistent OSDs.  ->primary is undefined for a raw set.
 *
 * Placement seed (CRUSH input) is returned through @ppps.
 */
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   const struct ceph_pg *raw_pgid,
			   struct ceph_osds *raw,
			   u32 *ppps)
{
	u32 pps = raw_pg_to_pps(pi, raw_pgid);
	int ruleno;
	int len;

	ceph_osds_init(raw);
	if (ppps)
		*ppps = pps;

	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
				 pi->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size);
		return;
	}

	if (pi->size > ARRAY_SIZE(raw->osds)) {
		pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size,
		       ARRAY_SIZE(raw->osds));
		return;
	}

	len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
		       osdmap->osd_weight, osdmap->max_osd, pi->id);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
		       pi->size);
		return;
	}

	raw->size = len;
	remove_nonexistent_osds(osdmap, pi, raw);
}

/* apply pg_upmap[_items] mappings */
static void apply_upmap(struct ceph_osdmap *osdmap,
			const struct ceph_pg *pgid,
			struct ceph_osds *raw)
{
	struct ceph_pg_mapping *pg;
	int i, j;

	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
	if (pg) {
		/* make sure targets aren't marked out */
		for (i = 0; i < pg->pg_upmap.len; i++) {
			int osd = pg->pg_upmap.osds[i];

			if (osd != CRUSH_ITEM_NONE &&
			    osd < osdmap->max_osd &&
			    osdmap->osd_weight[osd] == 0) {
				/* reject/ignore explicit mapping */
				return;
			}
		}
		for (i = 0; i < pg->pg_upmap.len; i++)
			raw->osds[i] = pg->pg_upmap.osds[i];
		raw->size = pg->pg_upmap.len;
		return;
	}

	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
	if (pg) {
		/*
		 * Note: this approach does not allow a bidirectional swap,
		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
		 */
		for (i = 0; i < pg->pg_upmap_items.len; i++) {
			int from = pg->pg_upmap_items.from_to[i][0];
			int to = pg->pg_upmap_items.from_to[i][1];
			int pos = -1;
			bool exists = false;

			/* make sure replacement doesn't already appear */
			for (j = 0; j < raw->size; j++) {
				int osd = raw->osds[j];

				if (osd == to) {
					exists = true;
					break;
				}
				/* ignore mapping if target is marked out */
				if (osd == from && pos < 0 &&
				    !(to != CRUSH_ITEM_NONE &&
				      to < osdmap->max_osd &&
				      osdmap->osd_weight[to] == 0)) {
					pos = j;
				}
			}
			if (!exists && pos >= 0) {
				raw->osds[pos] = to;
				return;
			}
		}
	}
}
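/*
 * Illustration (hypothetical values): with a raw set [0, 1, 2] and a
 * pg_upmap_items entry [[1, 4]], the scan finds osd 1 at pos 1, sees
 * that osd 4 is neither already present nor marked out, and rewrites
 * the set to [0, 4, 2].  With an entry [[1, 2]] instead, osd 2 is
 * already in the set (exists = true), so the mapping is skipped.
 */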
/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   struct ceph_osds *set)
{
	int i;

	/* ->primary is undefined for a raw set */
	BUG_ON(set->primary != -1);

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (ceph_osd_is_down(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
		if (set->size > 0)
			set->primary = set->osds[0];
	} else {
		/* set down/dne devices to NONE */
		for (i = set->size - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
			else
				set->primary = set->osds[i];
		}
	}
}

static void apply_primary_affinity(struct ceph_osdmap *osdmap,
				   struct ceph_pg_pool_info *pi,
				   u32 pps,
				   struct ceph_osds *up)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == up->size)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	up->primary = up->osds[pos];

	if (ceph_can_shift_osds(pi) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			up->osds[i] = up->osds[i - 1];
		up->osds[0] = up->primary;
	}
}
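/*
 * Illustration (assumes the usual affinity range, where
 * CEPH_OSD_MAX_PRIMARY_AFFINITY is also the default): the hash above
 * is shifted down to a value in [0, 0xffff], so an osd with affinity
 * 0x8000 is rejected as primary for roughly half of its PGs and only
 * noted as a fallback; an osd at full affinity is never rejected, and
 * one at 0 is rejected for every PG (it can still become primary if
 * all other up osds were rejected too).
 */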
/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg_mapping *pg;
	int i;

	ceph_osds_init(temp);

	/* pg_temp? */
	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}

/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       struct ceph_pg_pool_info *pi,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg pgid;
	u32 pps;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	apply_upmap(osdmap, &pgid, up);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, &pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}

bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
			      struct ceph_pg_pool_info *pi,
			      const struct ceph_pg *raw_pgid,
			      struct ceph_spg *spgid)
{
	struct ceph_pg pgid;
	struct ceph_osds up, acting;
	int i;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	if (ceph_can_shift_osds(pi)) {
		spgid->pgid = pgid; /* struct */
		spgid->shard = CEPH_SPG_NOSHARD;
		return true;
	}

	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
	for (i = 0; i < acting.size; i++) {
		if (acting.osds[i] == acting.primary) {
			spgid->pgid = pgid; /* struct */
			spgid->shard = i;
			return true;
		}
	}

	return false;
}

/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_osds up, acting;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi)
		return -1;

	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
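/*
 * Hypothetical usage sketch (example_acting_primary() is not part of
 * this file): resolve the acting primary for an object by chaining the
 * exported helpers above.
 */
static int example_acting_primary(struct ceph_osdmap *osdmap,
				  const struct ceph_object_id *oid,
				  const struct ceph_object_locator *oloc)
{
	struct ceph_pg raw_pgid;
	int ret;

	/* object name + locator -> raw (full precision) pgid */
	ret = ceph_object_locator_to_pg(osdmap, oid, oloc, &raw_pgid);
	if (ret)
		return ret;

	/*
	 * Internally this walks pg_to_raw_osds() -> apply_upmap() ->
	 * raw_to_up_osds() -> apply_primary_affinity() and overlays
	 * get_temp_osds() before reporting ->primary.
	 */
	return ceph_pg_to_acting_primary(osdmap, &raw_pgid);
}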