1 2 #include <linux/ceph/ceph_debug.h> 3 4 #include <linux/module.h> 5 #include <linux/slab.h> 6 #include <asm/div64.h> 7 8 #include <linux/ceph/libceph.h> 9 #include <linux/ceph/osdmap.h> 10 #include <linux/ceph/decode.h> 11 #include <linux/crush/hash.h> 12 #include <linux/crush/mapper.h> 13 14 char *ceph_osdmap_state_str(char *str, int len, int state) 15 { 16 int flag = 0; 17 18 if (!len) 19 goto done; 20 21 *str = '\0'; 22 if (state) { 23 if (state & CEPH_OSD_EXISTS) { 24 snprintf(str, len, "exists"); 25 flag = 1; 26 } 27 if (state & CEPH_OSD_UP) { 28 snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""), 29 "up"); 30 flag = 1; 31 } 32 } else { 33 snprintf(str, len, "doesn't exist"); 34 } 35 done: 36 return str; 37 } 38 39 /* maps */ 40 41 static int calc_bits_of(unsigned t) 42 { 43 int b = 0; 44 while (t) { 45 t = t >> 1; 46 b++; 47 } 48 return b; 49 } 50 51 /* 52 * the foo_mask is the smallest value 2^n-1 that is >= foo. 53 */ 54 static void calc_pg_masks(struct ceph_pg_pool_info *pi) 55 { 56 pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1; 57 pi->pgp_num_mask = 58 (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1; 59 pi->lpg_num_mask = 60 (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1; 61 pi->lpgp_num_mask = 62 (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1; 63 } 64 65 /* 66 * decode crush map 67 */ 68 static int crush_decode_uniform_bucket(void **p, void *end, 69 struct crush_bucket_uniform *b) 70 { 71 dout("crush_decode_uniform_bucket %p to %p\n", *p, end); 72 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); 73 b->item_weight = ceph_decode_32(p); 74 return 0; 75 bad: 76 return -EINVAL; 77 } 78 79 static int crush_decode_list_bucket(void **p, void *end, 80 struct crush_bucket_list *b) 81 { 82 int j; 83 dout("crush_decode_list_bucket %p to %p\n", *p, end); 84 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 85 if (b->item_weights == NULL) 86 return -ENOMEM; 87 b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 88 if (b->sum_weights == NULL) 89 return -ENOMEM; 90 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 91 for (j = 0; j < b->h.size; j++) { 92 b->item_weights[j] = ceph_decode_32(p); 93 b->sum_weights[j] = ceph_decode_32(p); 94 } 95 return 0; 96 bad: 97 return -EINVAL; 98 } 99 100 static int crush_decode_tree_bucket(void **p, void *end, 101 struct crush_bucket_tree *b) 102 { 103 int j; 104 dout("crush_decode_tree_bucket %p to %p\n", *p, end); 105 ceph_decode_32_safe(p, end, b->num_nodes, bad); 106 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS); 107 if (b->node_weights == NULL) 108 return -ENOMEM; 109 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad); 110 for (j = 0; j < b->num_nodes; j++) 111 b->node_weights[j] = ceph_decode_32(p); 112 return 0; 113 bad: 114 return -EINVAL; 115 } 116 117 static int crush_decode_straw_bucket(void **p, void *end, 118 struct crush_bucket_straw *b) 119 { 120 int j; 121 dout("crush_decode_straw_bucket %p to %p\n", *p, end); 122 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 123 if (b->item_weights == NULL) 124 return -ENOMEM; 125 b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); 126 if (b->straws == NULL) 127 return -ENOMEM; 128 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); 129 for (j = 0; j < b->h.size; j++) { 130 b->item_weights[j] = ceph_decode_32(p); 131 b->straws[j] = ceph_decode_32(p); 132 } 133 return 0; 134 bad: 135 return -EINVAL; 136 } 137 138 static struct crush_map *crush_decode(void *pbyval, void *end) 139 { 140 struct crush_map *c; 141 int err = -EINVAL; 142 int i, j; 143 void **p = &pbyval; 144 void *start = pbyval; 145 u32 magic; 146 147 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p)); 148 149 c = kzalloc(sizeof(*c), GFP_NOFS); 150 if (c == NULL) 151 return ERR_PTR(-ENOMEM); 152 153 ceph_decode_need(p, end, 4*sizeof(u32), bad); 154 magic = ceph_decode_32(p); 155 if (magic != CRUSH_MAGIC) { 156 pr_err("crush_decode magic %x != current %x\n", 157 (unsigned)magic, (unsigned)CRUSH_MAGIC); 158 goto bad; 159 } 160 c->max_buckets = ceph_decode_32(p); 161 c->max_rules = ceph_decode_32(p); 162 c->max_devices = ceph_decode_32(p); 163 164 c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS); 165 if (c->device_parents == NULL) 166 goto badmem; 167 c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS); 168 if (c->bucket_parents == NULL) 169 goto badmem; 170 171 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS); 172 if (c->buckets == NULL) 173 goto badmem; 174 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS); 175 if (c->rules == NULL) 176 goto badmem; 177 178 /* buckets */ 179 for (i = 0; i < c->max_buckets; i++) { 180 int size = 0; 181 u32 alg; 182 struct crush_bucket *b; 183 184 ceph_decode_32_safe(p, end, alg, bad); 185 if (alg == 0) { 186 c->buckets[i] = NULL; 187 continue; 188 } 189 dout("crush_decode bucket %d off %x %p to %p\n", 190 i, (int)(*p-start), *p, end); 191 192 switch (alg) { 193 case CRUSH_BUCKET_UNIFORM: 194 size = sizeof(struct crush_bucket_uniform); 195 break; 196 case CRUSH_BUCKET_LIST: 197 size = sizeof(struct crush_bucket_list); 198 break; 199 case CRUSH_BUCKET_TREE: 200 size = sizeof(struct crush_bucket_tree); 201 break; 202 case CRUSH_BUCKET_STRAW: 203 size = sizeof(struct crush_bucket_straw); 204 break; 205 default: 206 err = -EINVAL; 207 goto bad; 208 } 209 BUG_ON(size == 0); 210 b = c->buckets[i] = kzalloc(size, GFP_NOFS); 211 if (b == NULL) 212 goto badmem; 213 214 ceph_decode_need(p, end, 4*sizeof(u32), bad); 215 b->id = ceph_decode_32(p); 216 b->type = ceph_decode_16(p); 217 b->alg = ceph_decode_8(p); 218 b->hash = ceph_decode_8(p); 219 b->weight = ceph_decode_32(p); 220 b->size = ceph_decode_32(p); 221 222 dout("crush_decode bucket size %d off %x %p to %p\n", 223 b->size, (int)(*p-start), *p, end); 224 225 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS); 226 if (b->items == NULL) 227 goto badmem; 228 b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS); 229 if (b->perm == NULL) 230 goto badmem; 231 b->perm_n = 0; 232 233 ceph_decode_need(p, end, b->size*sizeof(u32), bad); 234 for (j = 0; j < b->size; j++) 235 b->items[j] = ceph_decode_32(p); 236 237 switch (b->alg) { 238 case CRUSH_BUCKET_UNIFORM: 239 err = crush_decode_uniform_bucket(p, end, 240 (struct crush_bucket_uniform *)b); 241 if (err < 0) 242 goto bad; 243 break; 244 case CRUSH_BUCKET_LIST: 245 err = crush_decode_list_bucket(p, end, 246 (struct crush_bucket_list *)b); 247 if (err < 0) 248 goto bad; 249 break; 250 case CRUSH_BUCKET_TREE: 251 err = crush_decode_tree_bucket(p, end, 252 (struct crush_bucket_tree *)b); 253 if (err < 0) 254 goto bad; 255 break; 256 case CRUSH_BUCKET_STRAW: 257 err = crush_decode_straw_bucket(p, end, 258 (struct crush_bucket_straw *)b); 259 if (err < 0) 260 goto bad; 261 break; 262 } 263 } 264 265 /* rules */ 266 dout("rule vec is %p\n", c->rules); 267 for (i = 0; i < c->max_rules; i++) { 268 u32 yes; 269 struct crush_rule *r; 270 271 ceph_decode_32_safe(p, end, yes, bad); 272 if (!yes) { 273 dout("crush_decode NO rule %d off %x %p to %p\n", 274 i, (int)(*p-start), *p, end); 275 c->rules[i] = NULL; 276 continue; 277 } 278 279 dout("crush_decode rule %d off %x %p to %p\n", 280 i, (int)(*p-start), *p, end); 281 282 /* len */ 283 ceph_decode_32_safe(p, end, yes, bad); 284 #if BITS_PER_LONG == 32 285 err = -EINVAL; 286 if (yes > ULONG_MAX / sizeof(struct crush_rule_step)) 287 goto bad; 288 #endif 289 r = c->rules[i] = kmalloc(sizeof(*r) + 290 yes*sizeof(struct crush_rule_step), 291 GFP_NOFS); 292 if (r == NULL) 293 goto badmem; 294 dout(" rule %d is at %p\n", i, r); 295 r->len = yes; 296 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */ 297 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad); 298 for (j = 0; j < r->len; j++) { 299 r->steps[j].op = ceph_decode_32(p); 300 r->steps[j].arg1 = ceph_decode_32(p); 301 r->steps[j].arg2 = ceph_decode_32(p); 302 } 303 } 304 305 /* ignore trailing name maps. */ 306 307 dout("crush_decode success\n"); 308 return c; 309 310 badmem: 311 err = -ENOMEM; 312 bad: 313 dout("crush_decode fail %d\n", err); 314 crush_destroy(c); 315 return ERR_PTR(err); 316 } 317 318 /* 319 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid 320 * to a set of osds) 321 */ 322 static int pgid_cmp(struct ceph_pg l, struct ceph_pg r) 323 { 324 u64 a = *(u64 *)&l; 325 u64 b = *(u64 *)&r; 326 327 if (a < b) 328 return -1; 329 if (a > b) 330 return 1; 331 return 0; 332 } 333 334 static int __insert_pg_mapping(struct ceph_pg_mapping *new, 335 struct rb_root *root) 336 { 337 struct rb_node **p = &root->rb_node; 338 struct rb_node *parent = NULL; 339 struct ceph_pg_mapping *pg = NULL; 340 int c; 341 342 while (*p) { 343 parent = *p; 344 pg = rb_entry(parent, struct ceph_pg_mapping, node); 345 c = pgid_cmp(new->pgid, pg->pgid); 346 if (c < 0) 347 p = &(*p)->rb_left; 348 else if (c > 0) 349 p = &(*p)->rb_right; 350 else 351 return -EEXIST; 352 } 353 354 rb_link_node(&new->node, parent, p); 355 rb_insert_color(&new->node, root); 356 return 0; 357 } 358 359 static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, 360 struct ceph_pg pgid) 361 { 362 struct rb_node *n = root->rb_node; 363 struct ceph_pg_mapping *pg; 364 int c; 365 366 while (n) { 367 pg = rb_entry(n, struct ceph_pg_mapping, node); 368 c = pgid_cmp(pgid, pg->pgid); 369 if (c < 0) 370 n = n->rb_left; 371 else if (c > 0) 372 n = n->rb_right; 373 else 374 return pg; 375 } 376 return NULL; 377 } 378 379 /* 380 * rbtree of pg pool info 381 */ 382 static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new) 383 { 384 struct rb_node **p = &root->rb_node; 385 struct rb_node *parent = NULL; 386 struct ceph_pg_pool_info *pi = NULL; 387 388 while (*p) { 389 parent = *p; 390 pi = rb_entry(parent, struct ceph_pg_pool_info, node); 391 if (new->id < pi->id) 392 p = &(*p)->rb_left; 393 else if (new->id > pi->id) 394 p = &(*p)->rb_right; 395 else 396 return -EEXIST; 397 } 398 399 rb_link_node(&new->node, parent, p); 400 rb_insert_color(&new->node, root); 401 return 0; 402 } 403 404 static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id) 405 { 406 struct ceph_pg_pool_info *pi; 407 struct rb_node *n = root->rb_node; 408 409 while (n) { 410 pi = rb_entry(n, struct ceph_pg_pool_info, node); 411 if (id < pi->id) 412 n = n->rb_left; 413 else if (id > pi->id) 414 n = n->rb_right; 415 else 416 return pi; 417 } 418 return NULL; 419 } 420 421 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) 422 { 423 struct rb_node *rbp; 424 425 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) { 426 struct ceph_pg_pool_info *pi = 427 rb_entry(rbp, struct ceph_pg_pool_info, node); 428 if (pi->name && strcmp(pi->name, name) == 0) 429 return pi->id; 430 } 431 return -ENOENT; 432 } 433 EXPORT_SYMBOL(ceph_pg_poolid_by_name); 434 435 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) 436 { 437 rb_erase(&pi->node, root); 438 kfree(pi->name); 439 kfree(pi); 440 } 441 442 static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) 443 { 444 unsigned n, m; 445 446 ceph_decode_copy(p, &pi->v, sizeof(pi->v)); 447 calc_pg_masks(pi); 448 449 /* num_snaps * snap_info_t */ 450 n = le32_to_cpu(pi->v.num_snaps); 451 while (n--) { 452 ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) + 453 sizeof(struct ceph_timespec), bad); 454 *p += sizeof(u64) + /* key */ 455 1 + sizeof(u64) + /* u8, snapid */ 456 sizeof(struct ceph_timespec); 457 m = ceph_decode_32(p); /* snap name */ 458 *p += m; 459 } 460 461 *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; 462 return 0; 463 464 bad: 465 return -EINVAL; 466 } 467 468 static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map) 469 { 470 struct ceph_pg_pool_info *pi; 471 u32 num, len, pool; 472 473 ceph_decode_32_safe(p, end, num, bad); 474 dout(" %d pool names\n", num); 475 while (num--) { 476 ceph_decode_32_safe(p, end, pool, bad); 477 ceph_decode_32_safe(p, end, len, bad); 478 dout(" pool %d len %d\n", pool, len); 479 pi = __lookup_pg_pool(&map->pg_pools, pool); 480 if (pi) { 481 kfree(pi->name); 482 pi->name = kmalloc(len + 1, GFP_NOFS); 483 if (pi->name) { 484 memcpy(pi->name, *p, len); 485 pi->name[len] = '\0'; 486 dout(" name is %s\n", pi->name); 487 } 488 } 489 *p += len; 490 } 491 return 0; 492 493 bad: 494 return -EINVAL; 495 } 496 497 /* 498 * osd map 499 */ 500 void ceph_osdmap_destroy(struct ceph_osdmap *map) 501 { 502 dout("osdmap_destroy %p\n", map); 503 if (map->crush) 504 crush_destroy(map->crush); 505 while (!RB_EMPTY_ROOT(&map->pg_temp)) { 506 struct ceph_pg_mapping *pg = 507 rb_entry(rb_first(&map->pg_temp), 508 struct ceph_pg_mapping, node); 509 rb_erase(&pg->node, &map->pg_temp); 510 kfree(pg); 511 } 512 while (!RB_EMPTY_ROOT(&map->pg_pools)) { 513 struct ceph_pg_pool_info *pi = 514 rb_entry(rb_first(&map->pg_pools), 515 struct ceph_pg_pool_info, node); 516 __remove_pg_pool(&map->pg_pools, pi); 517 } 518 kfree(map->osd_state); 519 kfree(map->osd_weight); 520 kfree(map->osd_addr); 521 kfree(map); 522 } 523 524 /* 525 * adjust max osd value. reallocate arrays. 526 */ 527 static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) 528 { 529 u8 *state; 530 struct ceph_entity_addr *addr; 531 u32 *weight; 532 533 state = kcalloc(max, sizeof(*state), GFP_NOFS); 534 addr = kcalloc(max, sizeof(*addr), GFP_NOFS); 535 weight = kcalloc(max, sizeof(*weight), GFP_NOFS); 536 if (state == NULL || addr == NULL || weight == NULL) { 537 kfree(state); 538 kfree(addr); 539 kfree(weight); 540 return -ENOMEM; 541 } 542 543 /* copy old? */ 544 if (map->osd_state) { 545 memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); 546 memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); 547 memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); 548 kfree(map->osd_state); 549 kfree(map->osd_addr); 550 kfree(map->osd_weight); 551 } 552 553 map->osd_state = state; 554 map->osd_weight = weight; 555 map->osd_addr = addr; 556 map->max_osd = max; 557 return 0; 558 } 559 560 /* 561 * decode a full map. 562 */ 563 struct ceph_osdmap *osdmap_decode(void **p, void *end) 564 { 565 struct ceph_osdmap *map; 566 u16 version; 567 u32 len, max, i; 568 u8 ev; 569 int err = -EINVAL; 570 void *start = *p; 571 struct ceph_pg_pool_info *pi; 572 573 dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p)); 574 575 map = kzalloc(sizeof(*map), GFP_NOFS); 576 if (map == NULL) 577 return ERR_PTR(-ENOMEM); 578 map->pg_temp = RB_ROOT; 579 580 ceph_decode_16_safe(p, end, version, bad); 581 if (version > CEPH_OSDMAP_VERSION) { 582 pr_warning("got unknown v %d > %d of osdmap\n", version, 583 CEPH_OSDMAP_VERSION); 584 goto bad; 585 } 586 587 ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad); 588 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); 589 map->epoch = ceph_decode_32(p); 590 ceph_decode_copy(p, &map->created, sizeof(map->created)); 591 ceph_decode_copy(p, &map->modified, sizeof(map->modified)); 592 593 ceph_decode_32_safe(p, end, max, bad); 594 while (max--) { 595 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); 596 pi = kzalloc(sizeof(*pi), GFP_NOFS); 597 if (!pi) 598 goto bad; 599 pi->id = ceph_decode_32(p); 600 ev = ceph_decode_8(p); /* encoding version */ 601 if (ev > CEPH_PG_POOL_VERSION) { 602 pr_warning("got unknown v %d > %d of ceph_pg_pool\n", 603 ev, CEPH_PG_POOL_VERSION); 604 kfree(pi); 605 goto bad; 606 } 607 err = __decode_pool(p, end, pi); 608 if (err < 0) { 609 kfree(pi); 610 goto bad; 611 } 612 __insert_pg_pool(&map->pg_pools, pi); 613 } 614 615 if (version >= 5 && __decode_pool_names(p, end, map) < 0) 616 goto bad; 617 618 ceph_decode_32_safe(p, end, map->pool_max, bad); 619 620 ceph_decode_32_safe(p, end, map->flags, bad); 621 622 max = ceph_decode_32(p); 623 624 /* (re)alloc osd arrays */ 625 err = osdmap_set_max_osd(map, max); 626 if (err < 0) 627 goto bad; 628 dout("osdmap_decode max_osd = %d\n", map->max_osd); 629 630 /* osds */ 631 err = -EINVAL; 632 ceph_decode_need(p, end, 3*sizeof(u32) + 633 map->max_osd*(1 + sizeof(*map->osd_weight) + 634 sizeof(*map->osd_addr)), bad); 635 *p += 4; /* skip length field (should match max) */ 636 ceph_decode_copy(p, map->osd_state, map->max_osd); 637 638 *p += 4; /* skip length field (should match max) */ 639 for (i = 0; i < map->max_osd; i++) 640 map->osd_weight[i] = ceph_decode_32(p); 641 642 *p += 4; /* skip length field (should match max) */ 643 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); 644 for (i = 0; i < map->max_osd; i++) 645 ceph_decode_addr(&map->osd_addr[i]); 646 647 /* pg_temp */ 648 ceph_decode_32_safe(p, end, len, bad); 649 for (i = 0; i < len; i++) { 650 int n, j; 651 struct ceph_pg pgid; 652 struct ceph_pg_mapping *pg; 653 654 ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); 655 ceph_decode_copy(p, &pgid, sizeof(pgid)); 656 n = ceph_decode_32(p); 657 ceph_decode_need(p, end, n * sizeof(u32), bad); 658 err = -ENOMEM; 659 pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS); 660 if (!pg) 661 goto bad; 662 pg->pgid = pgid; 663 pg->len = n; 664 for (j = 0; j < n; j++) 665 pg->osds[j] = ceph_decode_32(p); 666 667 err = __insert_pg_mapping(pg, &map->pg_temp); 668 if (err) 669 goto bad; 670 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len); 671 } 672 673 /* crush */ 674 ceph_decode_32_safe(p, end, len, bad); 675 dout("osdmap_decode crush len %d from off 0x%x\n", len, 676 (int)(*p - start)); 677 ceph_decode_need(p, end, len, bad); 678 map->crush = crush_decode(*p, end); 679 *p += len; 680 if (IS_ERR(map->crush)) { 681 err = PTR_ERR(map->crush); 682 map->crush = NULL; 683 goto bad; 684 } 685 686 /* ignore the rest of the map */ 687 *p = end; 688 689 dout("osdmap_decode done %p %p\n", *p, end); 690 return map; 691 692 bad: 693 dout("osdmap_decode fail\n"); 694 ceph_osdmap_destroy(map); 695 return ERR_PTR(err); 696 } 697 698 /* 699 * decode and apply an incremental map update. 700 */ 701 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, 702 struct ceph_osdmap *map, 703 struct ceph_messenger *msgr) 704 { 705 struct crush_map *newcrush = NULL; 706 struct ceph_fsid fsid; 707 u32 epoch = 0; 708 struct ceph_timespec modified; 709 u32 len, pool; 710 __s32 new_pool_max, new_flags, max; 711 void *start = *p; 712 int err = -EINVAL; 713 u16 version; 714 struct rb_node *rbp; 715 716 ceph_decode_16_safe(p, end, version, bad); 717 if (version > CEPH_OSDMAP_INC_VERSION) { 718 pr_warning("got unknown v %d > %d of inc osdmap\n", version, 719 CEPH_OSDMAP_INC_VERSION); 720 goto bad; 721 } 722 723 ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32), 724 bad); 725 ceph_decode_copy(p, &fsid, sizeof(fsid)); 726 epoch = ceph_decode_32(p); 727 BUG_ON(epoch != map->epoch+1); 728 ceph_decode_copy(p, &modified, sizeof(modified)); 729 new_pool_max = ceph_decode_32(p); 730 new_flags = ceph_decode_32(p); 731 732 /* full map? */ 733 ceph_decode_32_safe(p, end, len, bad); 734 if (len > 0) { 735 dout("apply_incremental full map len %d, %p to %p\n", 736 len, *p, end); 737 return osdmap_decode(p, min(*p+len, end)); 738 } 739 740 /* new crush? */ 741 ceph_decode_32_safe(p, end, len, bad); 742 if (len > 0) { 743 dout("apply_incremental new crush map len %d, %p to %p\n", 744 len, *p, end); 745 newcrush = crush_decode(*p, min(*p+len, end)); 746 if (IS_ERR(newcrush)) 747 return ERR_CAST(newcrush); 748 *p += len; 749 } 750 751 /* new flags? */ 752 if (new_flags >= 0) 753 map->flags = new_flags; 754 if (new_pool_max >= 0) 755 map->pool_max = new_pool_max; 756 757 ceph_decode_need(p, end, 5*sizeof(u32), bad); 758 759 /* new max? */ 760 max = ceph_decode_32(p); 761 if (max >= 0) { 762 err = osdmap_set_max_osd(map, max); 763 if (err < 0) 764 goto bad; 765 } 766 767 map->epoch++; 768 map->modified = map->modified; 769 if (newcrush) { 770 if (map->crush) 771 crush_destroy(map->crush); 772 map->crush = newcrush; 773 newcrush = NULL; 774 } 775 776 /* new_pool */ 777 ceph_decode_32_safe(p, end, len, bad); 778 while (len--) { 779 __u8 ev; 780 struct ceph_pg_pool_info *pi; 781 782 ceph_decode_32_safe(p, end, pool, bad); 783 ceph_decode_need(p, end, 1 + sizeof(pi->v), bad); 784 ev = ceph_decode_8(p); /* encoding version */ 785 if (ev > CEPH_PG_POOL_VERSION) { 786 pr_warning("got unknown v %d > %d of ceph_pg_pool\n", 787 ev, CEPH_PG_POOL_VERSION); 788 goto bad; 789 } 790 pi = __lookup_pg_pool(&map->pg_pools, pool); 791 if (!pi) { 792 pi = kzalloc(sizeof(*pi), GFP_NOFS); 793 if (!pi) { 794 err = -ENOMEM; 795 goto bad; 796 } 797 pi->id = pool; 798 __insert_pg_pool(&map->pg_pools, pi); 799 } 800 err = __decode_pool(p, end, pi); 801 if (err < 0) 802 goto bad; 803 } 804 if (version >= 5 && __decode_pool_names(p, end, map) < 0) 805 goto bad; 806 807 /* old_pool */ 808 ceph_decode_32_safe(p, end, len, bad); 809 while (len--) { 810 struct ceph_pg_pool_info *pi; 811 812 ceph_decode_32_safe(p, end, pool, bad); 813 pi = __lookup_pg_pool(&map->pg_pools, pool); 814 if (pi) 815 __remove_pg_pool(&map->pg_pools, pi); 816 } 817 818 /* new_up */ 819 err = -EINVAL; 820 ceph_decode_32_safe(p, end, len, bad); 821 while (len--) { 822 u32 osd; 823 struct ceph_entity_addr addr; 824 ceph_decode_32_safe(p, end, osd, bad); 825 ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad); 826 ceph_decode_addr(&addr); 827 pr_info("osd%d up\n", osd); 828 BUG_ON(osd >= map->max_osd); 829 map->osd_state[osd] |= CEPH_OSD_UP; 830 map->osd_addr[osd] = addr; 831 } 832 833 /* new_down */ 834 ceph_decode_32_safe(p, end, len, bad); 835 while (len--) { 836 u32 osd; 837 ceph_decode_32_safe(p, end, osd, bad); 838 (*p)++; /* clean flag */ 839 pr_info("osd%d down\n", osd); 840 if (osd < map->max_osd) 841 map->osd_state[osd] &= ~CEPH_OSD_UP; 842 } 843 844 /* new_weight */ 845 ceph_decode_32_safe(p, end, len, bad); 846 while (len--) { 847 u32 osd, off; 848 ceph_decode_need(p, end, sizeof(u32)*2, bad); 849 osd = ceph_decode_32(p); 850 off = ceph_decode_32(p); 851 pr_info("osd%d weight 0x%x %s\n", osd, off, 852 off == CEPH_OSD_IN ? "(in)" : 853 (off == CEPH_OSD_OUT ? "(out)" : "")); 854 if (osd < map->max_osd) 855 map->osd_weight[osd] = off; 856 } 857 858 /* new_pg_temp */ 859 rbp = rb_first(&map->pg_temp); 860 ceph_decode_32_safe(p, end, len, bad); 861 while (len--) { 862 struct ceph_pg_mapping *pg; 863 int j; 864 struct ceph_pg pgid; 865 u32 pglen; 866 ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); 867 ceph_decode_copy(p, &pgid, sizeof(pgid)); 868 pglen = ceph_decode_32(p); 869 870 /* remove any? */ 871 while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping, 872 node)->pgid, pgid) <= 0) { 873 struct ceph_pg_mapping *cur = 874 rb_entry(rbp, struct ceph_pg_mapping, node); 875 876 rbp = rb_next(rbp); 877 dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); 878 rb_erase(&cur->node, &map->pg_temp); 879 kfree(cur); 880 } 881 882 if (pglen) { 883 /* insert */ 884 ceph_decode_need(p, end, pglen*sizeof(u32), bad); 885 pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS); 886 if (!pg) { 887 err = -ENOMEM; 888 goto bad; 889 } 890 pg->pgid = pgid; 891 pg->len = pglen; 892 for (j = 0; j < pglen; j++) 893 pg->osds[j] = ceph_decode_32(p); 894 err = __insert_pg_mapping(pg, &map->pg_temp); 895 if (err) { 896 kfree(pg); 897 goto bad; 898 } 899 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, 900 pglen); 901 } 902 } 903 while (rbp) { 904 struct ceph_pg_mapping *cur = 905 rb_entry(rbp, struct ceph_pg_mapping, node); 906 907 rbp = rb_next(rbp); 908 dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); 909 rb_erase(&cur->node, &map->pg_temp); 910 kfree(cur); 911 } 912 913 /* ignore the rest */ 914 *p = end; 915 return map; 916 917 bad: 918 pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n", 919 epoch, (int)(*p - start), *p, start, end); 920 print_hex_dump(KERN_DEBUG, "osdmap: ", 921 DUMP_PREFIX_OFFSET, 16, 1, 922 start, end - start, true); 923 if (newcrush) 924 crush_destroy(newcrush); 925 return ERR_PTR(err); 926 } 927 928 929 930 931 /* 932 * calculate file layout from given offset, length. 933 * fill in correct oid, logical length, and object extent 934 * offset, length. 935 * 936 * for now, we write only a single su, until we can 937 * pass a stride back to the caller. 938 */ 939 void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, 940 u64 off, u64 *plen, 941 u64 *ono, 942 u64 *oxoff, u64 *oxlen) 943 { 944 u32 osize = le32_to_cpu(layout->fl_object_size); 945 u32 su = le32_to_cpu(layout->fl_stripe_unit); 946 u32 sc = le32_to_cpu(layout->fl_stripe_count); 947 u32 bl, stripeno, stripepos, objsetno; 948 u32 su_per_object; 949 u64 t, su_offset; 950 951 dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen, 952 osize, su); 953 su_per_object = osize / su; 954 dout("osize %u / su %u = su_per_object %u\n", osize, su, 955 su_per_object); 956 957 BUG_ON((su & ~PAGE_MASK) != 0); 958 /* bl = *off / su; */ 959 t = off; 960 do_div(t, su); 961 bl = t; 962 dout("off %llu / su %u = bl %u\n", off, su, bl); 963 964 stripeno = bl / sc; 965 stripepos = bl % sc; 966 objsetno = stripeno / su_per_object; 967 968 *ono = objsetno * sc + stripepos; 969 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono); 970 971 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ 972 t = off; 973 su_offset = do_div(t, su); 974 *oxoff = su_offset + (stripeno % su_per_object) * su; 975 976 /* 977 * Calculate the length of the extent being written to the selected 978 * object. This is the minimum of the full length requested (plen) or 979 * the remainder of the current stripe being written to. 980 */ 981 *oxlen = min_t(u64, *plen, su - su_offset); 982 *plen = *oxlen; 983 984 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); 985 } 986 EXPORT_SYMBOL(ceph_calc_file_object_mapping); 987 988 /* 989 * calculate an object layout (i.e. pgid) from an oid, 990 * file_layout, and osdmap 991 */ 992 int ceph_calc_object_layout(struct ceph_object_layout *ol, 993 const char *oid, 994 struct ceph_file_layout *fl, 995 struct ceph_osdmap *osdmap) 996 { 997 unsigned num, num_mask; 998 struct ceph_pg pgid; 999 s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred); 1000 int poolid = le32_to_cpu(fl->fl_pg_pool); 1001 struct ceph_pg_pool_info *pool; 1002 unsigned ps; 1003 1004 BUG_ON(!osdmap); 1005 1006 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); 1007 if (!pool) 1008 return -EIO; 1009 ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid)); 1010 if (preferred >= 0) { 1011 ps += preferred; 1012 num = le32_to_cpu(pool->v.lpg_num); 1013 num_mask = pool->lpg_num_mask; 1014 } else { 1015 num = le32_to_cpu(pool->v.pg_num); 1016 num_mask = pool->pg_num_mask; 1017 } 1018 1019 pgid.ps = cpu_to_le16(ps); 1020 pgid.preferred = cpu_to_le16(preferred); 1021 pgid.pool = fl->fl_pg_pool; 1022 if (preferred >= 0) 1023 dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps, 1024 (int)preferred); 1025 else 1026 dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps); 1027 1028 ol->ol_pgid = pgid; 1029 ol->ol_stripe_unit = fl->fl_object_stripe_unit; 1030 return 0; 1031 } 1032 EXPORT_SYMBOL(ceph_calc_object_layout); 1033 1034 /* 1035 * Calculate raw osd vector for the given pgid. Return pointer to osd 1036 * array, or NULL on failure. 1037 */ 1038 static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, 1039 int *osds, int *num) 1040 { 1041 struct ceph_pg_mapping *pg; 1042 struct ceph_pg_pool_info *pool; 1043 int ruleno; 1044 unsigned poolid, ps, pps; 1045 int preferred; 1046 1047 /* pg_temp? */ 1048 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); 1049 if (pg) { 1050 *num = pg->len; 1051 return pg->osds; 1052 } 1053 1054 /* crush */ 1055 poolid = le32_to_cpu(pgid.pool); 1056 ps = le16_to_cpu(pgid.ps); 1057 preferred = (s16)le16_to_cpu(pgid.preferred); 1058 1059 /* don't forcefeed bad device ids to crush */ 1060 if (preferred >= osdmap->max_osd || 1061 preferred >= osdmap->crush->max_devices) 1062 preferred = -1; 1063 1064 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); 1065 if (!pool) 1066 return NULL; 1067 ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, 1068 pool->v.type, pool->v.size); 1069 if (ruleno < 0) { 1070 pr_err("no crush rule pool %d ruleset %d type %d size %d\n", 1071 poolid, pool->v.crush_ruleset, pool->v.type, 1072 pool->v.size); 1073 return NULL; 1074 } 1075 1076 if (preferred >= 0) 1077 pps = ceph_stable_mod(ps, 1078 le32_to_cpu(pool->v.lpgp_num), 1079 pool->lpgp_num_mask); 1080 else 1081 pps = ceph_stable_mod(ps, 1082 le32_to_cpu(pool->v.pgp_num), 1083 pool->pgp_num_mask); 1084 pps += poolid; 1085 *num = crush_do_rule(osdmap->crush, ruleno, pps, osds, 1086 min_t(int, pool->v.size, *num), 1087 preferred, osdmap->osd_weight); 1088 return osds; 1089 } 1090 1091 /* 1092 * Return acting set for given pgid. 1093 */ 1094 int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, 1095 int *acting) 1096 { 1097 int rawosds[CEPH_PG_MAX_SIZE], *osds; 1098 int i, o, num = CEPH_PG_MAX_SIZE; 1099 1100 osds = calc_pg_raw(osdmap, pgid, rawosds, &num); 1101 if (!osds) 1102 return -1; 1103 1104 /* primary is first up osd */ 1105 o = 0; 1106 for (i = 0; i < num; i++) 1107 if (ceph_osd_is_up(osdmap, osds[i])) 1108 acting[o++] = osds[i]; 1109 return o; 1110 } 1111 1112 /* 1113 * Return primary osd for given pgid, or -1 if none. 1114 */ 1115 int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid) 1116 { 1117 int rawosds[CEPH_PG_MAX_SIZE], *osds; 1118 int i, num = CEPH_PG_MAX_SIZE; 1119 1120 osds = calc_pg_raw(osdmap, pgid, rawosds, &num); 1121 if (!osds) 1122 return -1; 1123 1124 /* primary is first up osd */ 1125 for (i = 0; i < num; i++) 1126 if (ceph_osd_is_up(osdmap, osds[i])) 1127 return osds[i]; 1128 return -1; 1129 } 1130 EXPORT_SYMBOL(ceph_calc_pg_primary); 1131