/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map.h"
#include "dm-space-map-common.h"
#include "dm-space-map-metadata.h"

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map metadata"

/*----------------------------------------------------------------*/

/*
 * An edge triggered threshold.
 */
struct threshold {
	bool threshold_set;
	bool value_set;
	dm_block_t threshold;
	dm_block_t current_value;
	dm_sm_threshold_fn fn;
	void *context;
};

static void threshold_init(struct threshold *t)
{
	t->threshold_set = false;
	t->value_set = false;
}

static void set_threshold(struct threshold *t, dm_block_t value,
			  dm_sm_threshold_fn fn, void *context)
{
	t->threshold_set = true;
	t->threshold = value;
	t->fn = fn;
	t->context = context;
}

static bool below_threshold(struct threshold *t, dm_block_t value)
{
	return t->threshold_set && value <= t->threshold;
}

static bool threshold_already_triggered(struct threshold *t)
{
	return t->value_set && below_threshold(t, t->current_value);
}

static void check_threshold(struct threshold *t, dm_block_t value)
{
	if (below_threshold(t, value) &&
	    !threshold_already_triggered(t))
		t->fn(t->context);

	t->value_set = true;
	t->current_value = value;
}
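
/*
 * A sketch of the edge triggered behaviour (illustration only, with an
 * assumed threshold of 50):
 *
 *	check_threshold(t, 100);	// above: no callback
 *	check_threshold(t, 40);		// crossed below: t->fn(t->context) fires
 *	check_threshold(t, 30);		// still below: no further callback
 *	check_threshold(t, 200);	// back above: the trigger re-arms
 *	check_threshold(t, 45);		// crossed below again: t->fn fires
 */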

/*----------------------------------------------------------------*/

/*
 * Space map interface.
 *
 * The low level disk format is written using the standard btree and
 * transaction manager.  This means that performing disk operations may
 * cause us to recurse into the space map in order to allocate new blocks.
 * For this reason we have a pool of pre-allocated blocks large enough to
 * service any metadata_ll_disk operation.
 */

/*
 * FIXME: we should calculate this based on the size of the device.
 * Only the metadata space map needs this functionality.
 */
#define MAX_RECURSIVE_ALLOCATIONS 1024

enum block_op_type {
	BOP_INC,
	BOP_DEC
};

struct block_op {
	enum block_op_type type;
	dm_block_t block;
};

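/*
 * ll holds the space map state for the current transaction and old_ll
 * the copy taken at the last commit.  Allocations are made against
 * old_ll, so blocks freed within the current transaction cannot be
 * reused until that transaction commits.  begin is the cursor for the
 * free block search, and uncommitted[] queues ref count ops issued
 * while we're recursing into ourselves.
 */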
struct sm_metadata {
	struct dm_space_map sm;

	struct ll_disk ll;
	struct ll_disk old_ll;

	dm_block_t begin;

	unsigned recursion_count;
	unsigned allocated_this_transaction;
	unsigned nr_uncommitted;
	struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];

	struct threshold threshold;
};

static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
	struct block_op *op;

	if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
		DMERR("too many recursive allocations");
		return -ENOMEM;
	}

	op = smm->uncommitted + smm->nr_uncommitted++;
	op->type = type;
	op->block = b;

	return 0;
}

static int commit_bop(struct sm_metadata *smm, struct block_op *op)
{
	int r = 0;
	enum allocation_event ev;

	switch (op->type) {
	case BOP_INC:
		r = sm_ll_inc(&smm->ll, op->block, &ev);
		break;

	case BOP_DEC:
		r = sm_ll_dec(&smm->ll, op->block, &ev);
		break;
	}

	return r;
}

static void in(struct sm_metadata *smm)
{
	smm->recursion_count++;
}

static int out(struct sm_metadata *smm)
{
	int r = 0;

	/*
	 * If we're not recursing then very bad things are happening.
	 */
	if (!smm->recursion_count) {
		DMERR("lost track of recursion depth");
		return -ENOMEM;
	}

	if (smm->recursion_count == 1 && smm->nr_uncommitted) {
		while (smm->nr_uncommitted && !r) {
			smm->nr_uncommitted--;
			r = commit_bop(smm, smm->uncommitted +
				       smm->nr_uncommitted);
		}
	}

	smm->recursion_count--;

	return r;
}

/*
 * When using the out() function above, we often want to combine an error
 * code for the operation run in the recursive context with that from
 * out().
 */
static int combine_errors(int r1, int r2)
{
	return r1 ? r1 : r2;
}
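
/*
 * The pattern used by the public methods below: bracket an operation
 * that may recurse into the space map with in()/out(), then merge the
 * two error codes.
 *
 *	in(smm);
 *	r = sm_ll_inc(&smm->ll, b, &ev);
 *	r2 = out(smm);
 *	return combine_errors(r, r2);
 */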

static int recursing(struct sm_metadata *smm)
{
	return smm->recursion_count;
}

static void sm_metadata_destroy(struct dm_space_map *sm)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	kfree(smm);
}

static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks;

	return 0;
}

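/*
 * The free count is computed against old_ll, the snapshot taken at the
 * last commit, so blocks freed during the current transaction are
 * neither reported as free nor reused until the commit lands.
 */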
static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated -
		 smm->allocated_this_transaction;

	return 0;
}

static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
				 uint32_t *result)
{
	int r, i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	unsigned adjustment = 0;

	/*
	 * We may have some uncommitted adjustments to add.  This list
	 * should always be really short.
	 */
	for (i = 0; i < smm->nr_uncommitted; i++) {
		struct block_op *op = smm->uncommitted + i;

		if (op->block != b)
			continue;

		switch (op->type) {
		case BOP_INC:
			adjustment++;
			break;

		case BOP_DEC:
			adjustment--;
			break;
		}
	}

	r = sm_ll_lookup(&smm->ll, b, result);
	if (r)
		return r;

	*result += adjustment;

	return 0;
}

static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
					      dm_block_t b, int *result)
{
	int r, i, adjustment = 0;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	uint32_t rc;

	/*
	 * We may have some uncommitted adjustments to add.  This list
	 * should always be really short.
	 */
	for (i = 0; i < smm->nr_uncommitted; i++) {
		struct block_op *op = smm->uncommitted + i;

		if (op->block != b)
			continue;

		switch (op->type) {
		case BOP_INC:
			adjustment++;
			break;

		case BOP_DEC:
			adjustment--;
			break;
		}
	}

	if (adjustment > 1) {
		*result = 1;
		return 0;
	}

	r = sm_ll_lookup_bitmap(&smm->ll, b, &rc);
	if (r)
		return r;

	if (rc == 3)
		/*
		 * A bitmap entry of 3 means the true count has overflowed
		 * into the ref count tree, so it is at least 3.  We err on
		 * the side of caution and always return true.
		 */
		*result = 1;
	else
		*result = rc + adjustment > 1;

	return 0;
}

static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
				 uint32_t count)
{
	int r, r2;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (smm->recursion_count) {
		DMERR("cannot recurse set_count()");
		return -EINVAL;
	}

	in(smm);
	r = sm_ll_insert(&smm->ll, b, count, &ev);
	r2 = out(smm);

	return combine_errors(r, r2);
}

static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (recursing(smm))
		r = add_bop(smm, BOP_INC, b);
	else {
		in(smm);
		r = sm_ll_inc(&smm->ll, b, &ev);
		r2 = out(smm);
	}

	return combine_errors(r, r2);
}

static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (recursing(smm))
		r = add_bop(smm, BOP_DEC, b);
	else {
		in(smm);
		r = sm_ll_dec(&smm->ll, b, &ev);
		r2 = out(smm);
	}

	return combine_errors(r, r2);
}

static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
	if (r)
		return r;

	smm->begin = *b + 1;

	if (recursing(smm))
		r = add_bop(smm, BOP_INC, *b);
	else {
		in(smm);
		r = sm_ll_inc(&smm->ll, *b, &ev);
		r2 = out(smm);
	}

	if (!r)
		smm->allocated_this_transaction++;

	return combine_errors(r, r2);
}

static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	dm_block_t count;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	int r = sm_metadata_new_block_(sm, b);
	if (r) {
		DMERR("unable to allocate new metadata block");
		return r;
	}

	r = sm_metadata_get_nr_free(sm, &count);
	if (r) {
		DMERR("couldn't get free block count");
		return r;
	}

	check_threshold(&smm->threshold, count);

	return r;
}

static int sm_metadata_commit(struct dm_space_map *sm)
{
	int r;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	r = sm_ll_commit(&smm->ll);
	if (r)
		return r;

	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
	smm->begin = 0;
	smm->allocated_this_transaction = 0;

	return 0;
}

static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
						   dm_block_t threshold,
						   dm_sm_threshold_fn fn,
						   void *context)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	set_threshold(&smm->threshold, threshold, fn, context);

	return 0;
}

static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
{
	*result = sizeof(struct disk_sm_root);

	return 0;
}

static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	struct disk_sm_root root_le;

	root_le.nr_blocks = cpu_to_le64(smm->ll.nr_blocks);
	root_le.nr_allocated = cpu_to_le64(smm->ll.nr_allocated);
	root_le.bitmap_root = cpu_to_le64(smm->ll.bitmap_root);
	root_le.ref_count_root = cpu_to_le64(smm->ll.ref_count_root);

	if (max < sizeof(root_le))
		return -ENOSPC;

	memcpy(where_le, &root_le, sizeof(root_le));

	return 0;
}
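
/*
 * A hedged sketch of how a caller might persist the root in its
 * superblock (the buffer is illustrative, not part of this API):
 *
 *	size_t len;
 *	u8 buf[128];
 *
 *	r = sm->root_size(sm, &len);
 *	if (!r && len <= sizeof(buf))
 *		r = sm->copy_root(sm, buf, len);
 */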

static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);

static struct dm_space_map ops = {
	.destroy = sm_metadata_destroy,
	.extend = sm_metadata_extend,
	.get_nr_blocks = sm_metadata_get_nr_blocks,
	.get_nr_free = sm_metadata_get_nr_free,
	.get_count = sm_metadata_get_count,
	.count_is_more_than_one = sm_metadata_count_is_more_than_one,
	.set_count = sm_metadata_set_count,
	.inc_block = sm_metadata_inc_block,
	.dec_block = sm_metadata_dec_block,
	.new_block = sm_metadata_new_block,
	.commit = sm_metadata_commit,
	.root_size = sm_metadata_root_size,
	.copy_root = sm_metadata_copy_root,
	.register_threshold_callback = sm_metadata_register_threshold_callback
};

/*----------------------------------------------------------------*/

/*
 * When a new space map is created, one that manages its own space, we
 * use this tiny bootstrap allocator.
 */
static void sm_bootstrap_destroy(struct dm_space_map *sm)
{
}

static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	DMERR("bootstrap doesn't support extend");

	return -EINVAL;
}

static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks;

	return 0;
}

static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks - smm->begin;

	return 0;
}

static int sm_bootstrap_get_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t *result)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*result = (b < smm->begin) ? 1 : 0;

	return 0;
}

static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
					       dm_block_t b, int *result)
{
	*result = 0;

	return 0;
}

static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t count)
{
	DMERR("bootstrap doesn't support set_count");

	return -EINVAL;
}

static int sm_bootstrap_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	/*
	 * We know the entire device is unused.
	 */
	if (smm->begin == smm->ll.nr_blocks)
		return -ENOSPC;

	*b = smm->begin++;

	return 0;
}

static int sm_bootstrap_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	return add_bop(smm, BOP_INC, b);
}

static int sm_bootstrap_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	return add_bop(smm, BOP_DEC, b);
}

static int sm_bootstrap_commit(struct dm_space_map *sm)
{
	return 0;
}

static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
{
	DMERR("bootstrap doesn't support root_size");

	return -EINVAL;
}

static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
				  size_t max)
{
	DMERR("bootstrap doesn't support copy_root");

	return -EINVAL;
}

static struct dm_space_map bootstrap_ops = {
	.destroy = sm_bootstrap_destroy,
	.extend = sm_bootstrap_extend,
	.get_nr_blocks = sm_bootstrap_get_nr_blocks,
	.get_nr_free = sm_bootstrap_get_nr_free,
	.get_count = sm_bootstrap_get_count,
	.count_is_more_than_one = sm_bootstrap_count_is_more_than_one,
	.set_count = sm_bootstrap_set_count,
	.inc_block = sm_bootstrap_inc_block,
	.dec_block = sm_bootstrap_dec_block,
	.new_block = sm_bootstrap_new_block,
	.commit = sm_bootstrap_commit,
	.root_size = sm_bootstrap_root_size,
	.copy_root = sm_bootstrap_copy_root,
	.register_threshold_callback = NULL
};
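
/*
 * While bootstrap_ops is installed the structures that hold the ref
 * counts are the very thing being built, so inc/dec simply queue block
 * ops with add_bop(); the queued ops are replayed against the real
 * structures later, once the normal ops table is back in place.
 */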

/*----------------------------------------------------------------*/

static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	int r, i;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	dm_block_t old_len = smm->ll.nr_blocks;

	/*
	 * Flick into a mode where all blocks get allocated in the new area.
	 */
	smm->begin = old_len;
	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));

	/*
	 * Extend.
	 */
	r = sm_ll_extend(&smm->ll, extra_blocks);

	/*
	 * Switch back to normal behaviour.
	 */
	memcpy(&smm->sm, &ops, sizeof(smm->sm));
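	/*
	 * While bootstrap_ops was installed, sm_ll_extend() took any
	 * blocks it needed sequentially from smm->begin, so the loop
	 * below sets the ref counts for exactly the blocks that were
	 * consumed (old_len up to smm->begin).
	 */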
	for (i = old_len; !r && i < smm->begin; i++)
		r = sm_ll_inc(&smm->ll, i, &ev);

	return r;
}

/*----------------------------------------------------------------*/

struct dm_space_map *dm_sm_metadata_init(void)
{
	struct sm_metadata *smm;

	smm = kmalloc(sizeof(*smm), GFP_KERNEL);
	if (!smm)
		return ERR_PTR(-ENOMEM);

	memcpy(&smm->sm, &ops, sizeof(smm->sm));

	return &smm->sm;
}
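
/*
 * A hedged sketch of the expected lifecycle (error handling elided;
 * tm, nr_blocks and superblock come from the caller):
 *
 *	struct dm_space_map *sm = dm_sm_metadata_init();
 *
 *	r = dm_sm_metadata_create(sm, tm, nr_blocks, superblock);
 *	...
 *	dm_sm_destroy(sm);
 */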

int dm_sm_metadata_create(struct dm_space_map *sm,
			  struct dm_transaction_manager *tm,
			  dm_block_t nr_blocks,
			  dm_block_t superblock)
{
	int r;
	dm_block_t i;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	smm->begin = superblock + 1;
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	smm->nr_uncommitted = 0;
	threshold_init(&smm->threshold);

	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));

	r = sm_ll_new_metadata(&smm->ll, tm);
	if (r)
		return r;

	r = sm_ll_extend(&smm->ll, nr_blocks);
	if (r)
		return r;

	memcpy(&smm->sm, &ops, sizeof(smm->sm));

	/*
	 * Now we need to update the newly created data structures with the
	 * allocated blocks that they were built from.
	 */
	for (i = superblock; !r && i < smm->begin; i++)
		r = sm_ll_inc(&smm->ll, i, &ev);

	if (r)
		return r;

	return sm_metadata_commit(sm);
}

int dm_sm_metadata_open(struct dm_space_map *sm,
			struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	r = sm_ll_open_metadata(&smm->ll, tm, root_le, len);
	if (r)
		return r;

	smm->begin = 0;
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	smm->nr_uncommitted = 0;
	threshold_init(&smm->threshold);

	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
	return 0;
}