// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: try to use extents tree (instead of array)
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/nls.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/* runs_tree is a contiguous memory block. Try to avoid big sizes. */
#define NTFS3_RUN_MAX_BYTES 0x10000

struct ntfs_run {
	CLST vcn; /* Virtual cluster number. */
	CLST len; /* Length in clusters. */
	CLST lcn; /* Logical cluster number. */
};

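/*
 * Example layout (illustrative): a file whose first 16 clusters sit at
 * lcn 100, followed by a 16-cluster sparse hole and 8 more clusters at
 * lcn 500, is described by three entries:
 *
 *	{ .vcn =  0, .len = 16, .lcn = 100 }
 *	{ .vcn = 16, .len = 16, .lcn = SPARSE_LCN }
 *	{ .vcn = 32, .len =  8, .lcn = 500 }
 */
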
/*
 * run_lookup - Lookup the index of a MCB entry that is first <= vcn.
 *
 * On success it returns true and sets the @index parameter to the
 * index of the entry that was found.
 * If the entry is missing from the list, @index is set to the
 * insertion position for the entry in question.
 */
bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
	size_t min_idx, max_idx, mid_idx;
	struct ntfs_run *r;

	if (!run->count) {
		*index = 0;
		return false;
	}

	min_idx = 0;
	max_idx = run->count - 1;

	/* Check boundary cases first, since they cover the most frequent requests. */
	r = run->runs;
	if (vcn < r->vcn) {
		*index = 0;
		return false;
	}

	if (vcn < r->vcn + r->len) {
		*index = 0;
		return true;
	}

	r += max_idx;
	if (vcn >= r->vcn + r->len) {
		*index = run->count;
		return false;
	}

	if (vcn >= r->vcn) {
		*index = max_idx;
		return true;
	}

	do {
		mid_idx = min_idx + ((max_idx - min_idx) >> 1);
		r = run->runs + mid_idx;

		if (vcn < r->vcn) {
			max_idx = mid_idx - 1;
			if (!mid_idx)
				break;
		} else if (vcn >= r->vcn + r->len) {
			min_idx = mid_idx + 1;
		} else {
			*index = mid_idx;
			return true;
		}
	} while (min_idx <= max_idx);

	*index = max_idx + 1;
	return false;
}
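
/*
 * Usage sketch for run_lookup() (illustrative only):
 *
 *	size_t idx;
 *	bool hit = run_lookup(run, vcn, &idx);
 *
 * If 'hit' is true, run->runs[idx] is the entry containing 'vcn';
 * otherwise 'idx' is the insertion position for a new entry.
 */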

/*
 * run_consolidate - Consolidate runs starting from a given one.
 */
static void run_consolidate(struct runs_tree *run, size_t index)
{
	size_t i;
	struct ntfs_run *r = run->runs + index;

	while (index + 1 < run->count) {
		/*
		 * Merge the current run with the next one
		 * if the start of the next run lies inside the one being tested.
		 */
		struct ntfs_run *n = r + 1;
		CLST end = r->vcn + r->len;
		CLST dl;

		/* Stop if runs are not aligned one to another. */
		if (n->vcn > end)
			break;

		dl = end - n->vcn;

		/*
		 * If the range at index overlaps with the next one
		 * then either adjust its start position
		 * or (if it matches completely) just remove it from the list.
		 */
		if (dl > 0) {
			if (n->len <= dl)
				goto remove_next_range;

			n->len -= dl;
			n->vcn += dl;
			if (n->lcn != SPARSE_LCN)
				n->lcn += dl;
			dl = 0;
		}

		/*
		 * Stop if sparse mode does not match
		 * both current and next runs.
		 */
		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
			index += 1;
			r = n;
			continue;
		}

		/*
		 * Check if the volume block of the next run's lcn
		 * does not follow the last volume block
		 * of the current run.
		 */
		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
			break;

		/*
		 * Next and current are siblings.
		 * Eat/join.
		 */
		r->len += n->len - dl;

remove_next_range:
		i = run->count - (index + 1);
		if (i > 1)
			memmove(n, n + 1, sizeof(*n) * (i - 1));

		run->count -= 1;
	}
}
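
/*
 * Worked example (illustrative): with runs
 *	{ vcn 0, len 4, lcn 10 } and { vcn 4, len 4, lcn 14 },
 * the second run starts exactly where the first one ends and its lcn
 * continues the first one's, so run_consolidate() joins them into
 *	{ vcn 0, len 8, lcn 10 }
 * and drops the second entry.
 */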

/*
 * run_is_mapped_full
 *
 * Return: True if the range [svcn, evcn] is fully mapped.
 */
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
{
	size_t i;
	const struct ntfs_run *r, *end;
	CLST next_vcn;

	if (!run_lookup(run, svcn, &i))
		return false;

	end = run->runs + run->count;
	r = run->runs + i;

	for (;;) {
		next_vcn = r->vcn + r->len;
		if (next_vcn > evcn)
			return true;

		if (++r >= end)
			return false;

		if (r->vcn != next_vcn)
			return false;
	}
}
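
/*
 * Example (illustrative): with entries { vcn 0, len 4 } and
 * { vcn 8, len 4 }, run_is_mapped_full(run, 0, 10) returns false
 * because vcns 4-7 have no entry, while run_is_mapped_full(run, 0, 3)
 * returns true.
 */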

bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
		      CLST *len, size_t *index)
{
	size_t idx;
	CLST gap;
	struct ntfs_run *r;

	/* Fail immediately if the run was not touched yet. */
	if (!run->runs)
		return false;

	if (!run_lookup(run, vcn, &idx))
		return false;

	r = run->runs + idx;

	if (vcn >= r->vcn + r->len)
		return false;

	gap = vcn - r->vcn;
	if (r->len <= gap)
		return false;

	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);

	if (len)
		*len = r->len - gap;
	if (index)
		*index = idx;

	return true;
}
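
/*
 * Usage sketch (illustrative): translating a vcn to its lcn.
 *
 *	CLST lcn, len;
 *
 *	if (run_lookup_entry(run, vcn, &lcn, &len, NULL)) {
 *		// 'lcn' maps 'vcn' (or is SPARSE_LCN) and the mapping
 *		// stays contiguous for the next 'len' clusters
 *	}
 */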

/*
 * run_truncate_head - Decommit the range before vcn.
 */
void run_truncate_head(struct runs_tree *run, CLST vcn)
{
	size_t index;
	struct ntfs_run *r;

	if (run_lookup(run, vcn, &index)) {
		r = run->runs + index;

		if (vcn > r->vcn) {
			CLST dlen = vcn - r->vcn;

			r->vcn = vcn;
			r->len -= dlen;
			if (r->lcn != SPARSE_LCN)
				r->lcn += dlen;
		}

		if (!index)
			return;
	}
	r = run->runs;
	memmove(r, r + index, sizeof(*r) * (run->count - index));

	run->count -= index;

	if (!run->count) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate - Decommit the range after vcn.
 */
void run_truncate(struct runs_tree *run, CLST vcn)
{
	size_t index;

	/*
	 * If we hit a range then it has to be truncated.
	 * If the range to be truncated becomes empty,
	 * it is removed entirely.
	 */
	if (run_lookup(run, vcn, &index)) {
		struct ntfs_run *r = run->runs + index;

		r->len = vcn - r->vcn;

		if (r->len > 0)
			index += 1;
	}

	/*
	 * At this point 'index' is set to the first position that
	 * should be thrown away (including index itself).
	 * Simple one - just set the limit.
	 */
	run->count = index;

	/* Do not reallocate array 'runs'. Only free if possible. */
	if (!index) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate_around - Trim head and tail if necessary.
 */
void run_truncate_around(struct runs_tree *run, CLST vcn)
{
	run_truncate_head(run, vcn);

	if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
}
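
/*
 * Note (illustrative): run_truncate_around() keeps the in-memory window
 * bounded. After dropping everything before 'vcn', if the array still
 * holds half of NTFS3_RUN_MAX_BYTES worth of entries or more, the upper
 * half of the remaining runs is dropped as well; evicted ranges can be
 * re-read later from the on-disk mapping pairs.
 */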

/*
 * run_add_entry
 *
 * Sets the location to a known state.
 * The run to be added may overlap with an existing location.
 *
 * Return: false if out of memory.
 */
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft)
{
	size_t used, index;
	struct ntfs_run *r;
	bool inrange;
	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
	bool should_add_tail = false;

	/*
	 * Lookup the insertion point.
	 *
	 * Execute bsearch for the entry containing
	 * the start position in question.
	 */
	inrange = run_lookup(run, vcn, &index);

	/*
	 * A shortcut here would be the case where the range
	 * was not found but the one being added
	 * continues the previous run.
	 * In this case the existing range can be used
	 * directly as the start point.
	 */
	if (!inrange && index > 0) {
		struct ntfs_run *t = run->runs + index - 1;

		if (t->vcn + t->len == vcn &&
		    (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
		    (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
			inrange = true;
			index -= 1;
		}
	}

	/*
	 * At this point 'index' either points to the range
	 * containing the start position or to the insertion position
	 * for a new range.
	 * So first check whether the range being probed is already here.
	 */
	if (!inrange) {
requires_new_range:
		/*
		 * Range was not found.
		 * Insert at position 'index'.
		 */
		used = run->count * sizeof(struct ntfs_run);

		/*
		 * Check allocated space.
		 * If it is not enough to hold one more entry,
		 * the array is reallocated.
		 */
		if (run->allocated < used + sizeof(struct ntfs_run)) {
			size_t bytes;
			struct ntfs_run *new_ptr;

			/* Use power of 2 for 'bytes'. */
			if (!used) {
				bytes = 64;
			} else if (used <= 16 * PAGE_SIZE) {
				if (is_power_of_2(run->allocated))
					bytes = run->allocated << 1;
				else
					bytes = (size_t)1
						<< (2 + blksize_bits(used));
			} else {
				bytes = run->allocated + (16 * PAGE_SIZE);
			}

			WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);

			new_ptr = kvmalloc(bytes, GFP_KERNEL);

			if (!new_ptr)
				return false;

			r = new_ptr + index;
			memcpy(new_ptr, run->runs,
			       index * sizeof(struct ntfs_run));
			memcpy(r + 1, run->runs + index,
			       sizeof(struct ntfs_run) * (run->count - index));

			kvfree(run->runs);
			run->runs = new_ptr;
			run->allocated = bytes;

		} else {
			size_t i = run->count - index;

			r = run->runs + index;

			/* memmove appears to be a bottleneck here... */
			if (i > 0)
				memmove(r + 1, r, sizeof(struct ntfs_run) * i);
		}

		r->vcn = vcn;
		r->lcn = lcn;
		r->len = len;
		run->count += 1;
	} else {
		r = run->runs + index;

		/*
		 * If one of the ranges was not allocated then we
		 * have to split the location we just matched and
		 * insert the current one.
		 * In the common case this requires the tail to be
		 * reinserted by a recursive call.
		 */
		if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
		    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
			CLST to_eat = vcn - r->vcn;
			CLST Tovcn = to_eat + len;

			should_add_tail = Tovcn < r->len;

			if (should_add_tail) {
				tail_lcn = r->lcn == SPARSE_LCN
						   ? SPARSE_LCN
						   : (r->lcn + Tovcn);
				tail_vcn = r->vcn + Tovcn;
				tail_len = r->len - Tovcn;
			}

			if (to_eat > 0) {
				r->len = to_eat;
				inrange = false;
				index += 1;
				goto requires_new_range;
			}

			/* lcn should match the one we are going to add. */
			r->lcn = lcn;
		}

		/*
		 * If the existing range fits then we are done.
		 * Otherwise extend the found one and fall back
		 * to the range join code below.
		 */
		if (r->vcn + r->len < vcn + len)
			r->len += len - ((r->vcn + r->len) - vcn);
	}

	/*
	 * And normalize it starting from the insertion point.
	 * It is possible that no insertion is needed if the
	 * start point lies within the range of the entry
	 * that 'index' points to.
	 */
	if (inrange && index > 0)
		index -= 1;
	run_consolidate(run, index);
	run_consolidate(run, index + 1);

	/*
	 * A special case:
	 * we have to add the extra range as a tail.
	 */
	if (should_add_tail &&
	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
		return false;

	return true;
}
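
/*
 * Example (illustrative): adding { vcn 4, lcn 200, len 4 } to a tree
 * that already maps { vcn 0, lcn 100, len 12 } splits the old run into
 *	{ vcn 0, lcn 100, len 4 }  - untouched head
 *	{ vcn 4, lcn 200, len 4 }  - the new mapping
 *	{ vcn 8, lcn 108, len 4 }  - the tail, re-added recursively
 */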

/* run_collapse_range
 *
 * Helper for attr_collapse_range(),
 * which is a helper for fallocate(collapse_range).
 */
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index, eat;
	struct ntfs_run *r, *e, *eat_start, *eat_end;
	CLST end;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return true; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;
	end = vcn + len;

	if (vcn > r->vcn) {
		if (r->vcn + r->len <= end) {
			/* Collapse tail of run. */
			r->len = vcn - r->vcn;
		} else if (r->lcn == SPARSE_LCN) {
			/* Collapse a middle part of a sparse run. */
			r->len -= len;
		} else {
			/* Collapse a middle part of a normal run, split. */
			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
				return false;
			return run_collapse_range(run, vcn, len);
		}

		r += 1;
	}

	eat_start = r;
	eat_end = r;

	for (; r < e; r++) {
		CLST d;

		if (r->vcn >= end) {
			r->vcn -= len;
			continue;
		}

		if (r->vcn + r->len <= end) {
			/* Eat this run. */
			eat_end = r + 1;
			continue;
		}

		d = end - r->vcn;
		if (r->lcn != SPARSE_LCN)
			r->lcn += d;
		r->len -= d;
		r->vcn -= len - d;
	}

	eat = eat_end - eat_start;
	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
	run->count -= eat;

	return true;
}
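
/*
 * Example (illustrative): with a single run { vcn 0, len 16, lcn 100 },
 * run_collapse_range(run, 4, 4) removes vcns 4-7: the run is first
 * split with a sparse placeholder, the placeholder is eaten and the
 * tail shifted down, leaving { vcn 0, len 4, lcn 100 } and
 * { vcn 4, len 8, lcn 108 }.
 */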

/*
 * run_get_entry - Return the index-th mapped region.
 */
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
		   CLST *lcn, CLST *len)
{
	const struct ntfs_run *r;

	if (index >= run->count)
		return false;

	r = run->runs + index;

	if (!r->len)
		return false;

	if (vcn)
		*vcn = r->vcn;
	if (lcn)
		*lcn = r->lcn;
	if (len)
		*len = r->len;
	return true;
}
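
/*
 * Usage sketch (illustrative): walking every run in the tree.
 *
 *	CLST vcn, lcn, len;
 *	size_t i;
 *
 *	for (i = 0; run_get_entry(run, i, &vcn, &lcn, &len); i++)
 *		; // process one mapped region [vcn, vcn + len)
 */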

/*
 * run_packed_size - Calculate the size of packed int64.
 */
#ifdef __BIG_ENDIAN
static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n + sizeof(n) - 1;

	if (n >= 0) {
		if (p[-7] || p[-6] || p[-5] || p[-4])
			p -= 4;
		if (p[-3] || p[-2])
			p -= 2;
		if (p[-1])
			p -= 1;
		if (p[0] & 0x80)
			p -= 1;
	} else {
		if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
		    p[-4] != 0xff)
			p -= 4;
		if (p[-3] != 0xff || p[-2] != 0xff)
			p -= 2;
		if (p[-1] != 0xff)
			p -= 1;
		if (!(p[0] & 0x80))
			p -= 1;
	}
	return (const u8 *)&n + sizeof(n) - p;
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		run_buf[7] = p[0];
		fallthrough;
	case 7:
		run_buf[6] = p[1];
		fallthrough;
	case 6:
		run_buf[5] = p[2];
		fallthrough;
	case 5:
		run_buf[4] = p[3];
		fallthrough;
	case 4:
		run_buf[3] = p[4];
		fallthrough;
	case 3:
		run_buf[2] = p[5];
		fallthrough;
	case 2:
		run_buf[1] = p[6];
		fallthrough;
	case 1:
		run_buf[0] = p[7];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		p[0] = run_buf[7];
		fallthrough;
	case 7:
		p[1] = run_buf[6];
		fallthrough;
	case 6:
		p[2] = run_buf[5];
		fallthrough;
	case 5:
		p[3] = run_buf[4];
		fallthrough;
	case 4:
		p[4] = run_buf[3];
		fallthrough;
	case 3:
		p[5] = run_buf[2];
		fallthrough;
	case 2:
		p[6] = run_buf[1];
		fallthrough;
	case 1:
		p[7] = run_buf[0];
	}
	return v;
}

#else

static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n;

	if (n >= 0) {
		if (p[7] || p[6] || p[5] || p[4])
			p += 4;
		if (p[3] || p[2])
			p += 2;
		if (p[1])
			p += 1;
		if (p[0] & 0x80)
			p += 1;
	} else {
		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
		    p[4] != 0xff)
			p += 4;
		if (p[3] != 0xff || p[2] != 0xff)
			p += 2;
		if (p[1] != 0xff)
			p += 1;
		if (!(p[0] & 0x80))
			p += 1;
	}

	return 1 + p - (const u8 *)&n;
}
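
/*
 * Worked example (illustrative): run_packed_size(0x54) is 1 since the
 * value fits in one byte with the sign bit clear; run_packed_size(0x80)
 * is 2 because a lone 0x80 byte would read back as negative, so an
 * extra zero sign byte is needed; run_packed_size(-2) is 1, as 0xFE
 * already carries the sign bit.
 */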

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	/* memcpy(run_buf, &v, size); Is it faster? */
	switch (size) {
	case 8:
		run_buf[7] = p[7];
		fallthrough;
	case 7:
		run_buf[6] = p[6];
		fallthrough;
	case 6:
		run_buf[5] = p[5];
		fallthrough;
	case 5:
		run_buf[4] = p[4];
		fallthrough;
	case 4:
		run_buf[3] = p[3];
		fallthrough;
	case 3:
		run_buf[2] = p[2];
		fallthrough;
	case 2:
		run_buf[1] = p[1];
		fallthrough;
	case 1:
		run_buf[0] = p[0];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	/* memcpy(&v, run_buf, size); Is it faster? */
	switch (size) {
	case 8:
		p[7] = run_buf[7];
		fallthrough;
	case 7:
		p[6] = run_buf[6];
		fallthrough;
	case 6:
		p[5] = run_buf[5];
		fallthrough;
	case 5:
		p[4] = run_buf[4];
		fallthrough;
	case 4:
		p[3] = run_buf[3];
		fallthrough;
	case 3:
		p[2] = run_buf[2];
		fallthrough;
	case 2:
		p[1] = run_buf[1];
		fallthrough;
	case 1:
		p[0] = run_buf[0];
	}
	return v;
}
#endif

/*
 * run_pack - Pack runs into buffer.
 *
 * packed_vcns - How many vcns we have packed.
 * packed_size - How many bytes of run_buf we have used.
 */
int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
	     u32 run_buf_size, CLST *packed_vcns)
{
	CLST next_vcn, vcn, lcn;
	CLST prev_lcn = 0;
	CLST evcn1 = svcn + len;
	int packed_size = 0;
	size_t i;
	bool ok;
	s64 dlcn;
	int offset_size, size_size, tmp;

	next_vcn = vcn = svcn;

	*packed_vcns = 0;

	if (!len)
		goto out;

	ok = run_lookup_entry(run, vcn, &lcn, &len, &i);

	if (!ok)
		goto error;

	if (next_vcn != vcn)
		goto error;

	for (;;) {
		next_vcn = vcn + len;
		if (next_vcn > evcn1)
			len = evcn1 - vcn;

		/* How many bytes are required to pack len. */
		size_size = run_packed_size(len);

		/* offset_size - How many bytes are packed for dlcn. */
		if (lcn == SPARSE_LCN) {
			offset_size = 0;
			dlcn = 0;
		} else {
			/* NOTE: lcn can be less than prev_lcn! */
			dlcn = (s64)lcn - prev_lcn;
			offset_size = run_packed_size(dlcn);
			prev_lcn = lcn;
		}

		tmp = run_buf_size - packed_size - 2 - offset_size;
		if (tmp <= 0)
			goto out;

		/* Can we store this entire run? */
		if (tmp < size_size)
			goto out;

		if (run_buf) {
			/* Pack run header. */
			run_buf[0] = ((u8)(size_size | (offset_size << 4)));
			run_buf += 1;

			/* Pack the length of run. */
			run_pack_s64(run_buf, size_size, len);

			run_buf += size_size;
			/* Pack the offset from previous LCN. */
			run_pack_s64(run_buf, offset_size, dlcn);
			run_buf += offset_size;
		}

		packed_size += 1 + offset_size + size_size;
		*packed_vcns += len;

		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
			goto out;

		ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
		if (!ok)
			goto error;

		if (next_vcn != vcn)
			goto error;
	}

out:
	/* Store last zero. */
	if (run_buf)
		run_buf[0] = 0;

	return packed_size + 1;

error:
	return -EOPNOTSUPP;
}
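
/*
 * Worked example (illustrative): a single run of 0x18 clusters at
 * lcn 0x5634 packs into five bytes:
 *	21 18 34 56 00
 * header 0x21 = size_size 1 | (offset_size 2 << 4), then len 0x18,
 * then dlcn 0x5634 little endian, then the terminating zero byte.
 */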

/*
 * run_unpack - Unpack packed runs from @run_buf.
 *
 * Return: Error if negative, or real used bytes.
 */
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
	       u32 run_buf_size)
{
	u64 prev_lcn, vcn64, lcn, next_vcn;
	const u8 *run_last, *run_0;
	bool is_mft = ino == MFT_REC_MFT;

	/* Check for empty. */
	if (evcn + 1 == svcn)
		return 0;

	if (evcn < svcn)
		return -EINVAL;

	run_0 = run_buf;
	run_last = run_buf + run_buf_size;
	prev_lcn = 0;
	vcn64 = svcn;

	/* Read all runs in the chain. */
	while (run_buf < run_last) {
		/* size_size - How many bytes are packed for len. */
		u8 size_size = *run_buf & 0xF;
		/* offset_size - How many bytes are packed for dlcn. */
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (!size_size)
			break;

		/*
		 * Unpack runs.
		 * NOTE: Runs are stored in little endian order.
		 * "len" is an unsigned value, "dlcn" is signed.
		 * A large positive number requires 5 bytes to store,
		 * e.g.: 05 FF 7E FF FF 00 00 00
		 */
		if (size_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		/* Skip size_size. */
		run_buf += size_size;

		if (!len)
			return -EINVAL;

		if (!offset_size)
			lcn = SPARSE_LCN64;
		else if (offset_size <= 8) {
			s64 dlcn;

			/* Initial value of dlcn is -1 or 0. */
			dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
			/* Skip offset_size. */
			run_buf += offset_size;

			if (!dlcn)
				return -EINVAL;
			lcn = prev_lcn + dlcn;
			prev_lcn = lcn;
		} else
			return -EINVAL;

		next_vcn = vcn64 + len;
		/* Check boundary. */
		if (next_vcn > evcn + 1)
			return -EINVAL;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
			ntfs_err(
				sbi->sb,
				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
				vcn64, lcn, len);
			return -EOPNOTSUPP;
		}
#endif
		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
			/* LCN range is out of volume. */
			return -EINVAL;
		}

		if (!run)
			; /* Called from check_attr(fslog.c) to check run. */
		else if (run == RUN_DEALLOCATE) {
			/*
			 * Called from ni_delete_all to free clusters
			 * without storing them in run.
			 */
			if (lcn != SPARSE_LCN64)
				mark_as_free_ex(sbi, lcn, len, true);
		} else if (vcn64 >= vcn) {
			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
				return -ENOMEM;
		} else if (next_vcn > vcn) {
			u64 dlen = vcn - vcn64;

			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
					   is_mft))
				return -ENOMEM;
		}

		vcn64 = next_vcn;
	}

	if (vcn64 != evcn + 1) {
		/* Not expected length of unpacked runs. */
		return -EINVAL;
	}

	return run_buf - run_0;
}
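
/*
 * Worked example (illustrative), decoding the buffer from the
 * run_pack() example above: header 0x21 gives size_size 1 and
 * offset_size 2, so len = 0x18 and dlcn = 0x5634; with prev_lcn 0
 * this produces one run { vcn svcn, lcn 0x5634, len 0x18 }, and the
 * following zero byte ends the chain.
 */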

#ifdef NTFS3_CHECK_FREE_CLST
/*
 * run_unpack_ex - Unpack packed runs from "run_buf".
 *
 * Checks the unpacked runs against the in-use bitmap.
 *
 * Return: Error if negative, or real used bytes.
 */
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
		  u32 run_buf_size)
{
	int ret, err;
	CLST next_vcn, lcn, len;
	size_t index;
	bool ok;
	struct wnd_bitmap *wnd;

	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
	if (ret <= 0)
		return ret;

	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
		return ret;

	if (ino == MFT_REC_BADCLUST)
		return ret;

	next_vcn = vcn = svcn;
	wnd = &sbi->used.bitmap;

	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
	     next_vcn <= evcn;
	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
		if (!ok || next_vcn != vcn)
			return -EINVAL;

		next_vcn = vcn + len;

		if (lcn == SPARSE_LCN)
			continue;

		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
			continue;

		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
		/* Check for free blocks. */
		ok = wnd_is_used(wnd, lcn, len);
		up_read(&wnd->rw_lock);
		if (ok)
			continue;

		/* Looks like volume is corrupted. */
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		if (down_write_trylock(&wnd->rw_lock)) {
			/* Mark all zero bits as used in range [lcn, lcn+len). */
			CLST i, lcn_f = 0, len_f = 0;

			err = 0;
			for (i = 0; i < len; i++) {
				if (wnd_is_free(wnd, lcn + i, 1)) {
					if (!len_f)
						lcn_f = lcn + i;
					len_f += 1;
				} else if (len_f) {
					err = wnd_set_used(wnd, lcn_f, len_f);
					len_f = 0;
					if (err)
						break;
				}
			}

			if (len_f)
				err = wnd_set_used(wnd, lcn_f, len_f);

			up_write(&wnd->rw_lock);
			if (err)
				return err;
		}
	}

	return ret;
}
#endif

/*
 * run_get_highest_vcn
 *
 * Return the highest vcn from a mapping pairs array.
 * It is used while replaying the log file.
 */
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
{
	u64 vcn64 = vcn;
	u8 size_size;

	while ((size_size = *run_buf & 0xF)) {
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (size_size > 8 || offset_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		if (!len)
			return -EINVAL;

		run_buf += size_size + offset_size;
		vcn64 += len;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (vcn64 > 0x100000000ull)
			return -EINVAL;
#endif
	}

	*highest_vcn = vcn64 - 1;
	return 0;
}
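
/*
 * Example (illustrative): for the packed buffer 21 18 34 56 00 and
 * vcn 0, the single run contributes len 0x18, so *highest_vcn is set
 * to 0x17.
 */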