/* net/ceph/osdmap.c */

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, int state)
{
	if (!len)
		return str;

	/*
	 * Passing the destination buffer as a source argument to
	 * snprintf() is undefined behavior, so build the string with
	 * explicit branches instead of appending in place.
	 */
	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

/* maps */

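/*
 * calc_bits_of(t) is the number of bits needed to represent t, e.g.
 * calc_bits_of(0) = 0, calc_bits_of(1) = 1, calc_bits_of(11) = 4.
 */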
static int calc_bits_of(unsigned t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is 2^n-1, where 2^n is the smallest power of two >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
	pi->pgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
	pi->lpg_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
	pi->lpgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}
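
/*
 * Worked example: pg_num = 12 gives calc_bits_of(11) = 4, so
 * pg_num_mask = 0xf; pg_num = 8 gives calc_bits_of(7) = 3, so
 * pg_num_mask = 0x7.  ceph_stable_mod() uses these masks when folding
 * a placement hash onto pg_num (see calc_pg_raw() below).
 */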

/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
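
/*
 * Note on the bucket decoders above: arrays allocated here are not
 * freed on the error paths.  That is intentional; the bucket has
 * already been linked into c->buckets[] by crush_decode(), whose
 * bad/badmem labels call crush_destroy() to tear down any partially
 * built bucket.
 */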

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned)magic, (unsigned)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
	if (c->device_parents == NULL)
		goto badmem;
	c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
	if (c->bucket_parents == NULL)
		goto badmem;

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > ULONG_MAX / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */

	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}
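
/*
 * Wire layout consumed by crush_decode(), as implied by the code above:
 *
 *	u32 magic (CRUSH_MAGIC)
 *	u32 max_buckets, max_rules, max_devices
 *	max_buckets x bucket (u32 alg, 0 for an empty slot, then the
 *	    common header and the per-alg payload)
 *	max_rules x rule (u32 present flag, then len, mask and steps)
 *	trailing name maps (ignored)
 */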

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	u64 a = *(u64 *)&l;
	u64 b = *(u64 *)&r;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}
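
/*
 * pgid_cmp() compares the raw 8-byte ceph_pg encoding as a u64.  The
 * resulting order is arbitrary (and endian-dependent), but it is a
 * consistent total order, which is all the rbtree needs.
 */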

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}
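
/*
 * __insert_pg_mapping() above follows the standard rb_link_node()/
 * rb_insert_color() insertion idiom (see Documentation/rbtree.txt);
 * a duplicate pgid yields -EEXIST rather than a replacement.
 */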
358 
359 static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
360 						   struct ceph_pg pgid)
361 {
362 	struct rb_node *n = root->rb_node;
363 	struct ceph_pg_mapping *pg;
364 	int c;
365 
366 	while (n) {
367 		pg = rb_entry(n, struct ceph_pg_mapping, node);
368 		c = pgid_cmp(pgid, pg->pgid);
369 		if (c < 0)
370 			n = n->rb_left;
371 		else if (c > 0)
372 			n = n->rb_right;
373 		else
374 			return pg;
375 	}
376 	return NULL;
377 }
378 
379 /*
380  * rbtree of pg pool info
381  */
382 static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
383 {
384 	struct rb_node **p = &root->rb_node;
385 	struct rb_node *parent = NULL;
386 	struct ceph_pg_pool_info *pi = NULL;
387 
388 	while (*p) {
389 		parent = *p;
390 		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
391 		if (new->id < pi->id)
392 			p = &(*p)->rb_left;
393 		else if (new->id > pi->id)
394 			p = &(*p)->rb_right;
395 		else
396 			return -EEXIST;
397 	}
398 
399 	rb_link_node(&new->node, parent, p);
400 	rb_insert_color(&new->node, root);
401 	return 0;
402 }
403 
404 static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
405 {
406 	struct ceph_pg_pool_info *pi;
407 	struct rb_node *n = root->rb_node;
408 
409 	while (n) {
410 		pi = rb_entry(n, struct ceph_pg_pool_info, node);
411 		if (id < pi->id)
412 			n = n->rb_left;
413 		else if (id > pi->id)
414 			n = n->rb_right;
415 		else
416 			return pi;
417 	}
418 	return NULL;
419 }
420 
421 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
422 {
423 	struct rb_node *rbp;
424 
425 	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
426 		struct ceph_pg_pool_info *pi =
427 			rb_entry(rbp, struct ceph_pg_pool_info, node);
428 		if (pi->name && strcmp(pi->name, name) == 0)
429 			return pi->id;
430 	}
431 	return -ENOENT;
432 }
433 EXPORT_SYMBOL(ceph_pg_poolid_by_name);
434 
435 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
436 {
437 	rb_erase(&pi->node, root);
438 	kfree(pi->name);
439 	kfree(pi);
440 }
441 
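/*
 * Decode a ceph_pg_pool_info.  The fixed-size struct is followed by
 * per-snapshot records and removed-snap intervals, which the kernel
 * does not use and therefore skips; as the decode below implies, each
 * snap record is a u64 key, a u8 plus a u64 snapid, a ceph_timespec
 * and a length-prefixed name, followed by
 * num_removed_snap_intervals pairs of u64.
 */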
static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	unsigned n, m;

	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
	calc_pg_masks(pi);

	/* num_snaps * snap_info_t */
	n = le32_to_cpu(pi->v.num_snaps);
	while (n--) {
		ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
				 sizeof(struct ceph_timespec), bad);
		*p += sizeof(u64) +       /* key */
			1 + sizeof(u64) + /* u8, snapid */
			sizeof(struct ceph_timespec);
		m = ceph_decode_32(p);    /* snap name */
		*p += m;
	}

	*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
	return 0;

bad:
	return -EINVAL;
}

static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len, pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout("  pool %d len %d\n", pool, len);
		/* make sure len bytes are really there before copying */
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			kfree(pi->name);
			pi->name = kmalloc(len + 1, GFP_NOFS);
			if (pi->name) {
				memcpy(pi->name, *p, len);
				pi->name[len] = '\0';
				dout("  name is %s\n", pi->name);
			}
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}

/*
 * adjust max osd value.  reallocate arrays.
 *
 * all three new arrays are allocated before the old ones are freed, so
 * the map is left intact if any allocation fails.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;
	int to_copy;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old?  (don't overrun the new arrays if max shrank) */
	if (map->osd_state) {
		to_copy = min_t(int, map->max_osd, max);
		memcpy(state, map->osd_state, to_copy*sizeof(*state));
		memcpy(addr, map->osd_addr, to_copy*sizeof(*addr));
		memcpy(weight, map->osd_weight, to_copy*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}

/*
 * decode a full map.
 */
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	u8 ev;
	int err = -EINVAL;
	void *start = *p;
	struct ceph_pg_pool_info *pi;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_VERSION) {
		pr_warning("got unknown v %d > %d of osdmap\n", version,
			   CEPH_OSDMAP_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
		pi = kzalloc(sizeof(*pi), GFP_NOFS);
		if (!pi) {
			err = -ENOMEM;
			goto bad;
		}
		pi->id = ceph_decode_32(p);
		ev = ceph_decode_8(p); /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			kfree(pi);
			goto bad;
		}
		err = __decode_pool(p, end, pi);
		if (err < 0)
			goto bad;
		__insert_pg_pool(&map->pg_pools, pi);
	}

	/* reset err: __decode_pool() may have left it at 0 */
	err = -EINVAL;
	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, bad);

	ceph_decode_32_safe(p, end, map->flags, bad);

	ceph_decode_32_safe(p, end, max, bad);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err < 0)
		goto bad;
	dout("osdmap_decode max_osd = %d\n", map->max_osd);

	/* osds */
	err = -EINVAL;
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), bad);
	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_state, map->max_osd);

	*p += 4; /* skip length field (should match max) */
	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	for (i = 0; i < len; i++) {
		int n, j;
		struct ceph_pg pgid;
		struct ceph_pg_mapping *pg;

		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		n = ceph_decode_32(p);
		ceph_decode_need(p, end, n * sizeof(u32), bad);
		err = -ENOMEM;
		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
		if (!pg)
			goto bad;
		pg->pgid = pgid;
		pg->len = n;
		for (j = 0; j < n; j++)
			pg->osds[j] = ceph_decode_32(p);

		err = __insert_pg_mapping(pg, &map->pg_temp);
		if (err)
			goto bad;
		dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, n);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, bad);
	dout("osdmap_decode crush len %d from off 0x%x\n", len,
	     (int)(*p - start));
	ceph_decode_need(p, end, len, bad);
	map->crush = crush_decode(*p, end);
	*p += len;
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}

	/* ignore the rest of the map */
	*p = end;

	dout("osdmap_decode done %p %p\n", *p, end);
	return map;

bad:
	dout("osdmap_decode fail\n");
	ceph_osdmap_destroy(map);
	return ERR_PTR(err);
}
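
/*
 * A full map is normally decoded when the monitor sends one, and
 * osdmap_apply_incremental() below also falls back to this function
 * when an incremental update carries an embedded full map.
 */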

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	u32 len, pool;
	__s32 new_pool_max, new_flags, max;
	void *start = *p;
	int err = -EINVAL;
	u16 version;
	struct rb_node *rbp;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_INC_VERSION) {
		pr_warning("got unknown v %d > %d of inc osdmap\n", version,
			   CEPH_OSDMAP_INC_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
			 bad);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_32(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_CAST(newcrush);
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	max = ceph_decode_32(p);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		__u8 ev;
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
		ev = ceph_decode_8(p);  /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			goto bad;
		}
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi) {
				err = -ENOMEM;
				goto bad;
			}
			pi->id = pool;
			__insert_pg_pool(&map->pg_pools, pi);
		}
		err = __decode_pool(p, end, pi);
		if (err < 0)
			goto bad;
	}
	/* reset err: __decode_pool() may have left it at 0 */
	err = -EINVAL;
	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	err = -EINVAL;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_down */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		ceph_decode_32_safe(p, end, osd, bad);
		(*p)++;  /* clean flag */
		pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] &= ~CEPH_OSD_UP;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
		     off == CEPH_OSD_IN ? "(in)" :
		     (off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	rbp = rb_first(&map->pg_temp);
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		struct ceph_pg pgid;
		u32 pglen;
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		pglen = ceph_decode_32(p);

		/* remove any? */
		while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
						node)->pgid, pgid) <= 0) {
			struct ceph_pg_mapping *cur =
				rb_entry(rbp, struct ceph_pg_mapping, node);

			rbp = rb_next(rbp);
			dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
			rb_erase(&cur->node, &map->pg_temp);
			kfree(cur);
		}

		if (pglen) {
			/* insert */
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg) {
				err = -ENOMEM;
				goto bad;
			}
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				pg->osds[j] = ceph_decode_32(p);
			err = __insert_pg_mapping(pg, &map->pg_temp);
			if (err) {
				kfree(pg);
				goto bad;
			}
			dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
			     pglen);
		}
	}
	while (rbp) {
		struct ceph_pg_mapping *cur =
			rb_entry(rbp, struct ceph_pg_mapping, node);

		rbp = rb_next(rbp);
		dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
		rb_erase(&cur->node, &map->pg_temp);
		kfree(cur);
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}
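
/*
 * On failure the corrupt update is hex-dumped above and an ERR_PTR is
 * returned; the caller can then fall back to requesting a full osdmap
 * from the monitor.
 */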

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				   u64 off, u64 *plen,
				   u64 *ono,
				   u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu  osize %u fl_su %u\n", off, *plen,
	     osize, su);
	su_per_object = osize / su;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	BUG_ON((su & ~PAGE_MASK) != 0);
	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object. This is the minimum of the full length requested (plen) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, *plen, su - su_offset);
	*plen = *oxlen;

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
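
/*
 * Worked example: with object_size = 4M, stripe_unit = 1M and
 * stripe_count = 2, a write at off = 3.5M maps as follows:
 * bl = 3, stripeno = 1, stripepos = 1, objsetno = 0, so ono = 1;
 * su_offset = 0.5M, giving oxoff = 0.5M + (1 % 4) * 1M = 1.5M, and a
 * 1M request is clipped to oxlen = 0.5M (the remainder of the stripe
 * unit).  Per the comment above, callers are expected to resubmit the
 * unclipped tail.
 */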

/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
int ceph_calc_object_layout(struct ceph_object_layout *ol,
			    const char *oid,
			    struct ceph_file_layout *fl,
			    struct ceph_osdmap *osdmap)
{
	unsigned num, num_mask;
	struct ceph_pg pgid;
	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
	int poolid = le32_to_cpu(fl->fl_pg_pool);
	struct ceph_pg_pool_info *pool;
	unsigned ps;

	BUG_ON(!osdmap);

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return -EIO;
	ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
	if (preferred >= 0) {
		ps += preferred;
		num = le32_to_cpu(pool->v.lpg_num);
		num_mask = pool->lpg_num_mask;
	} else {
		num = le32_to_cpu(pool->v.pg_num);
		num_mask = pool->pg_num_mask;
	}

	pgid.ps = cpu_to_le16(ps);
	pgid.preferred = cpu_to_le16(preferred);
	pgid.pool = fl->fl_pg_pool;
	if (preferred >= 0)
		dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
		     (int)preferred);
	else
		dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

	ol->ol_pgid = pgid;
	ol->ol_stripe_unit = fl->fl_object_stripe_unit;
	return 0;
}
EXPORT_SYMBOL(ceph_calc_object_layout);
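
/*
 * E.g. an object in pool 0 whose name hashes to 0xb0b2 gets pgid
 * "0.b0b2" here; calc_pg_raw() below then folds ps onto the pool's
 * pg_num with ceph_stable_mod() before running CRUSH.
 */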

/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *num)
{
	struct ceph_pg_mapping *pg;
	struct ceph_pg_pool_info *pool;
	int ruleno;
	unsigned poolid, ps, pps;
	int preferred;

	/* pg_temp? */
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		*num = pg->len;
		return pg->osds;
	}

	/* crush */
	poolid = le32_to_cpu(pgid.pool);
	ps = le16_to_cpu(pgid.ps);
	preferred = (s16)le16_to_cpu(pgid.preferred);

	/* don't forcefeed bad device ids to crush */
	if (preferred >= osdmap->max_osd ||
	    preferred >= osdmap->crush->max_devices)
		preferred = -1;

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return NULL;
	ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
				 pool->v.type, pool->v.size);
	if (ruleno < 0) {
		pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
		       poolid, pool->v.crush_ruleset, pool->v.type,
		       pool->v.size);
		return NULL;
	}

	if (preferred >= 0)
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.lpgp_num),
				      pool->lpgp_num_mask);
	else
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.pgp_num),
				      pool->pgp_num_mask);
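	/*
	 * Offset the placement seed by the pool id so that pools with
	 * identical pg counts do not all land on the same osds.
	 */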
	pps += poolid;
	*num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
			     min_t(int, pool->v.size, *num),
			     preferred, osdmap->osd_weight);
	return osds;
}

/*
 * Return acting set for given pgid.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *acting)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, o, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	o = 0;
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			acting[o++] = osds[i];
	return o;
}

/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			return osds[i];
	return -1;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);
1129