Lines Matching full:run
34 static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index) in run_lookup() argument
39 if (!run->count) { in run_lookup()
45 max_idx = run->count - 1; in run_lookup()
48 r = run->runs; in run_lookup()
61 *index = run->count; in run_lookup()
72 r = run->runs + mid_idx; in run_lookup()
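The fragments above come from run_lookup(), which binary-searches the runs array (kept sorted by vcn) and reports run->count when the target vcn lies beyond the last mapped cluster. Below is a minimal standalone sketch of that lookup; the sketch_ names, the simplified CLST typedef, the field layout and the exact miss-index convention are illustrative assumptions, not the driver's code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long long CLST;                 /* stand-in cluster type */

struct ntfs_run { CLST vcn, len, lcn; };         /* one extent of the file */
struct runs_tree { struct ntfs_run *runs; size_t count; };

/* Return true and the index of the run containing vcn, or false and the
 * position where such a run would be inserted. */
static bool sketch_run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
        size_t lo = 0, hi = run->count;

        while (lo < hi) {
                size_t mid = lo + (hi - lo) / 2;
                const struct ntfs_run *r = run->runs + mid;

                if (vcn < r->vcn)
                        hi = mid;
                else if (vcn >= r->vcn + r->len)
                        lo = mid + 1;
                else {
                        *index = mid;
                        return true;
                }
        }
        *index = lo;                             /* miss: insertion point */
        return false;
}

int main(void)
{
        struct ntfs_run a[] = { { 0, 4, 100 }, { 8, 2, 200 } }; /* hole at 4..7 */
        struct runs_tree rt = { a, 2 };
        size_t i;

        printf("vcn 9: %d idx %zu\n", sketch_run_lookup(&rt, 9, &i), i); /* hit  */
        printf("vcn 5: %d idx %zu\n", sketch_run_lookup(&rt, 5, &i), i); /* miss */
        return 0;
}

The driver's version works on inclusive indices (max_idx, mid_idx) rather than a half-open range, but the search itself is the same shape.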
93 static void run_consolidate(struct runs_tree *run, size_t index) in run_consolidate() argument
96 struct ntfs_run *r = run->runs + index; in run_consolidate()
98 while (index + 1 < run->count) { in run_consolidate()
100 * Merge the current run with the next one in run_consolidate()
101 * if the start of the next run lies inside the one being tested. in run_consolidate()
141 * of the next run's lcn does not match in run_consolidate()
142 * the last volume block of the current run. in run_consolidate()
154 i = run->count - (index + 1); in run_consolidate()
158 run->count -= 1; in run_consolidate()
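run_consolidate() walks forward from index, folding following runs into the current one and then closing the gap in the array, which is what the run->count - (index + 1) and run->count -= 1 lines above do. Below is a standalone sketch of that shape, under the simplifying assumption that two runs merge only when they are contiguous in both vcn and lcn (the real function also handles overlapping and sparse cases); the types and sketch_ names are stand-ins.

#include <stddef.h>
#include <string.h>

typedef unsigned long long CLST;

struct ntfs_run { CLST vcn, len, lcn; };
struct runs_tree { struct ntfs_run *runs; size_t count; };

static void sketch_run_consolidate(struct runs_tree *run, size_t index)
{
        struct ntfs_run *r = run->runs + index;

        while (index + 1 < run->count) {
                struct ntfs_run *n = r + 1;

                /* Merge only when the next run starts right after this one
                 * and its clusters continue this run's clusters. */
                if (n->vcn != r->vcn + r->len || n->lcn != r->lcn + r->len)
                        break;

                r->len += n->len;

                /* Drop the swallowed entry: shift the remaining tail down. */
                memmove(n, n + 1, sizeof(*n) * (run->count - (index + 2)));
                run->count -= 1;
        }
}

int main(void)
{
        struct ntfs_run a[] = { { 0, 4, 100 }, { 4, 2, 104 }, { 10, 1, 300 } };
        struct runs_tree rt = { a, 3 };

        sketch_run_consolidate(&rt, 0);   /* first two runs merge into {0,6,100} */
        return rt.count == 2 ? 0 : 1;
}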
167 bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn) in run_is_mapped_full() argument
173 if (!run_lookup(run, svcn, &i)) in run_is_mapped_full()
176 end = run->runs + run->count; in run_is_mapped_full()
177 r = run->runs + i; in run_is_mapped_full()
192 bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn, in run_lookup_entry() argument
200 if (!run->runs) in run_lookup_entry()
203 if (!run_lookup(run, vcn, &idx)) in run_lookup_entry()
206 r = run->runs + idx; in run_lookup_entry()
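run_lookup_entry() first locates the containing run via run_lookup() and then translates the requested vcn by its offset inside that run. The snippet below is a tiny sketch of just the translation step; the SPARSE_LCN value and the output parameters are assumptions made for illustration.

#include <stdbool.h>

typedef unsigned long long CLST;

#define SPARSE_LCN ((CLST)-1)            /* assumed marker for a hole */

struct ntfs_run { CLST vcn, len, lcn; };

/* Given the run r that contains vcn, report the matching lcn and how many
 * clusters remain in the run starting at vcn. */
static bool sketch_translate(const struct ntfs_run *r, CLST vcn, CLST *lcn, CLST *len)
{
        CLST off = vcn - r->vcn;

        if (off >= r->len)
                return false;            /* vcn not inside this run */

        *lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : r->lcn + off;
        *len = r->len - off;
        return true;
}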
228 void run_truncate_head(struct runs_tree *run, CLST vcn) in run_truncate_head() argument
233 if (run_lookup(run, vcn, &index)) { in run_truncate_head()
234 r = run->runs + index; in run_truncate_head()
248 r = run->runs; in run_truncate_head()
249 memmove(r, r + index, sizeof(*r) * (run->count - index)); in run_truncate_head()
251 run->count -= index; in run_truncate_head()
253 if (!run->count) { in run_truncate_head()
254 kvfree(run->runs); in run_truncate_head()
255 run->runs = NULL; in run_truncate_head()
256 run->allocated = 0; in run_truncate_head()
263 void run_truncate(struct runs_tree *run, CLST vcn) in run_truncate() argument
273 if (run_lookup(run, vcn, &index)) { in run_truncate()
274 struct ntfs_run *r = run->runs + index; in run_truncate()
287 run->count = index; in run_truncate()
291 kvfree(run->runs); in run_truncate()
292 run->runs = NULL; in run_truncate()
293 run->allocated = 0; in run_truncate()
300 void run_truncate_around(struct runs_tree *run, CLST vcn) in run_truncate_around() argument
302 run_truncate_head(run, vcn); in run_truncate_around()
304 if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2) in run_truncate_around()
305 run_truncate(run, (run->runs + (run->count >> 1))->vcn); in run_truncate_around()
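The three truncate helpers above trim the array in place: run_truncate_head() memmove()s away everything mapped below vcn, run_truncate() cuts the count at vcn and frees the array once it becomes empty, and run_truncate_around() combines them, additionally dropping everything past the middle entry's vcn once the table holds at least half of NTFS3_RUN_MAX_BYTES worth of entries. Below is a compact sketch of the head and tail trimming only; the clipping of a run that straddles the boundary is an assumption about intent, sparse runs get no special handling, and no allocator is involved.

#include <stddef.h>
#include <string.h>

typedef unsigned long long CLST;

struct ntfs_run { CLST vcn, len, lcn; };
struct runs_tree { struct ntfs_run *runs; size_t count; };

/* Drop everything mapped below vcn, clipping a run that straddles it. */
static void sketch_truncate_head(struct runs_tree *run, CLST vcn)
{
        size_t index = 0;

        while (index < run->count &&
               run->runs[index].vcn + run->runs[index].len <= vcn)
                index++;                          /* runs entirely below vcn */

        if (index < run->count && run->runs[index].vcn < vcn) {
                struct ntfs_run *r = run->runs + index;  /* straddler: clip */
                CLST dvcn = vcn - r->vcn;

                r->vcn = vcn;
                r->lcn += dvcn;
                r->len -= dvcn;
        }

        memmove(run->runs, run->runs + index,
                sizeof(struct ntfs_run) * (run->count - index));
        run->count -= index;
}

/* Drop everything mapped at or above vcn, clipping a straddling run. */
static void sketch_truncate_tail(struct runs_tree *run, CLST vcn)
{
        size_t index = 0;

        while (index < run->count && run->runs[index].vcn < vcn)
                index++;

        if (index && run->runs[index - 1].vcn + run->runs[index - 1].len > vcn)
                run->runs[index - 1].len = vcn - run->runs[index - 1].vcn;

        run->count = index;
}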
312 * The run to be added may overlap with an existing location.
316 bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len, in run_add_entry() argument
331 inrange = run_lookup(run, vcn, &index); in run_add_entry()
336 * continues the previous run. in run_add_entry()
341 struct ntfs_run *t = run->runs + index - 1; in run_add_entry()
363 used = run->count * sizeof(struct ntfs_run); in run_add_entry()
370 if (run->allocated < used + sizeof(struct ntfs_run)) { in run_add_entry()
378 if (is_power_of_2(run->allocated)) in run_add_entry()
379 bytes = run->allocated << 1; in run_add_entry()
384 bytes = run->allocated + (16 * PAGE_SIZE); in run_add_entry()
395 memcpy(new_ptr, run->runs, in run_add_entry()
397 memcpy(r + 1, run->runs + index, in run_add_entry()
398 sizeof(struct ntfs_run) * (run->count - index)); in run_add_entry()
400 kvfree(run->runs); in run_add_entry()
401 run->runs = new_ptr; in run_add_entry()
402 run->allocated = bytes; in run_add_entry()
405 size_t i = run->count - index; in run_add_entry()
407 r = run->runs + index; in run_add_entry()
417 run->count += 1; in run_add_entry()
419 r = run->runs + index; in run_add_entry()
470 run_consolidate(run, index); in run_add_entry()
471 run_consolidate(run, index + 1); in run_add_entry()
478 !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft)) in run_add_entry()
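The allocation arithmetic visible in run_add_entry() grows the backing array before opening a slot at index: a power-of-two allocation is doubled, anything else grows by 16 pages, and the entries before and after the insertion point are copied around a one-entry gap. The sketch below reproduces that policy in userspace, with malloc/free standing in for kvmalloc/kvfree, an assumed 4 KiB page size, and the real function's overlap and merge handling left out.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned long long CLST;

#define SKETCH_PAGE_SIZE 4096u           /* assumed PAGE_SIZE */

struct ntfs_run { CLST vcn, len, lcn; };
struct runs_tree { struct ntfs_run *runs; size_t count, allocated; };

static bool is_pow2(size_t x) { return x && !(x & (x - 1)); }

/* Open one empty slot at index, growing the array if needed.  The caller
 * fills in the new run afterwards. */
static bool sketch_make_room(struct runs_tree *run, size_t index)
{
        size_t used = run->count * sizeof(struct ntfs_run);

        if (run->allocated < used + sizeof(struct ntfs_run)) {
                /* Grow: double a power-of-two size, otherwise add 16 pages. */
                size_t bytes = is_pow2(run->allocated) ?
                                run->allocated << 1 :
                                run->allocated + 16 * SKETCH_PAGE_SIZE;
                struct ntfs_run *new_ptr = malloc(bytes);

                if (!new_ptr)
                        return false;

                if (run->runs) {
                        /* Copy the entries before and after the gap. */
                        memcpy(new_ptr, run->runs,
                               index * sizeof(struct ntfs_run));
                        memcpy(new_ptr + index + 1, run->runs + index,
                               (run->count - index) * sizeof(struct ntfs_run));
                        free(run->runs);
                }
                run->runs = new_ptr;
                run->allocated = bytes;
        } else {
                /* Enough space already: shift the tail up by one entry. */
                memmove(run->runs + index + 1, run->runs + index,
                        (run->count - index) * sizeof(struct ntfs_run));
        }

        run->count += 1;
        return true;
}

Doubling keeps reallocation cost amortised while the table is small; the fixed 16-page step caps over-allocation once it is already large.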
489 bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len) in run_collapse_range() argument
495 if (WARN_ON(!run_lookup(run, vcn, &index))) in run_collapse_range()
498 e = run->runs + run->count; in run_collapse_range()
499 r = run->runs + index; in run_collapse_range()
504 /* Collapse tail of run. */ in run_collapse_range()
507 /* Collapse a middle part of a sparse run. */ in run_collapse_range()
510 /* Collapse a middle part of a normal run: split it. */ in run_collapse_range()
511 if (!run_add_entry(run, vcn, SPARSE_LCN, len, false)) in run_collapse_range()
513 return run_collapse_range(run, vcn, len); in run_collapse_range()
531 /* Eat this run. */ in run_collapse_range()
545 run->count -= eat; in run_collapse_range()
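run_collapse_range() removes the clusters in [vcn, vcn + len) from the mapping: runs fully inside the range are eaten, later runs have their vcn shifted down, and a normal run straddling the range is first split by inserting a SPARSE_LCN run and recursing, as the lines above show. The sketch below is deliberately simplified and assumes the collapsed range falls on run boundaries, so no run needs splitting or clipping.

#include <stddef.h>

typedef unsigned long long CLST;

struct ntfs_run { CLST vcn, len, lcn; };
struct runs_tree { struct ntfs_run *runs; size_t count; };

/* Remove clusters [vcn, vcn + len) assuming the range starts and ends on
 * run boundaries: covered runs are dropped, later runs slide down by len. */
static void sketch_collapse_aligned(struct runs_tree *run, CLST vcn, CLST len)
{
        size_t src, dst = 0;

        for (src = 0; src < run->count; src++) {
                struct ntfs_run r = run->runs[src];

                if (r.vcn >= vcn && r.vcn + r.len <= vcn + len)
                        continue;                 /* fully inside: eat it */
                if (r.vcn >= vcn + len)
                        r.vcn -= len;             /* after the range: shift down */

                run->runs[dst++] = r;
        }
        run->count = dst;
}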
555 bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len) in run_insert_range() argument
560 if (WARN_ON(!run_lookup(run, vcn, &index))) in run_insert_range()
563 e = run->runs + run->count; in run_insert_range()
564 r = run->runs + index; in run_insert_range()
572 r = run->runs + index; in run_insert_range()
582 if (!run_add_entry(run, vcn + len, lcn2, len2, false)) in run_insert_range()
586 if (!run_add_entry(run, vcn, SPARSE_LCN, len, false)) in run_insert_range()
595 bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn, in run_get_entry() argument
600 if (index >= run->count) in run_get_entry()
603 r = run->runs + index; in run_get_entry()
817 int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf, in run_pack() argument
835 if (!run_lookup(run, svcn, &i)) in run_pack()
838 r_end = run->runs + run->count; in run_pack()
839 r = run->runs + i; in run_pack()
848 r = run->runs + i; in run_pack()
877 /* Can we store this entire run? */ in run_pack()
882 /* Pack run header. */ in run_pack()
886 /* Pack the length of run. */ in run_pack()
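run_pack() serialises runs into the on-disk NTFS mapping-pairs encoding that the comments above refer to: a header byte whose low nibble gives the byte count of the length field and whose high nibble the byte count of a signed lcn delta (relative to the previous run's lcn, and zero for sparse runs), followed by those little-endian fields; a zero header byte terminates the list. The encoder below is a rough standalone illustration of that layout under those assumptions, not the driver's implementation.

#include <stdint.h>
#include <stddef.h>

/* Smallest number of bytes that hold v as a signed little-endian field. */
static int sketch_signed_size(int64_t v)
{
        int n;

        for (n = 1; n < 8; n++) {
                int64_t hi = ((int64_t)1 << (8 * n - 1)) - 1;
                int64_t lo = -hi - 1;

                if (v >= lo && v <= hi)
                        return n;
        }
        return 8;
}

static void sketch_put_le(uint8_t *out, int64_t v, int bytes)
{
        uint64_t u = (uint64_t)v;
        int i;

        for (i = 0; i < bytes; i++)
                out[i] = (uint8_t)(u >> (8 * i));
}

/* Encode one mapping pair: len clusters plus the lcn delta from the previous
 * run; sparse != 0 means no lcn field is written.  Returns bytes emitted. */
static size_t sketch_pack_one(uint8_t *out, int64_t len, int64_t dlcn, int sparse)
{
        int size_len = sketch_signed_size(len);
        int size_lcn = sparse ? 0 : sketch_signed_size(dlcn);

        out[0] = (uint8_t)(size_len | (size_lcn << 4));     /* header byte */
        sketch_put_le(out + 1, len, size_len);              /* run length  */
        sketch_put_le(out + 1 + size_len, dlcn, size_lcn);  /* lcn delta   */

        return 1 + size_len + size_lcn;
}

run_unpack(), further down, reverses this by accumulating the deltas back into absolute lcns.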
920 int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, in run_unpack() argument
1000 "Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n" in run_unpack()
1011 if (!run) in run_unpack()
1012 ; /* Called from check_attr(fslog.c) to check run. */ in run_unpack()
1013 else if (run == RUN_DEALLOCATE) { in run_unpack()
1016 * without storing them in the run. in run_unpack()
1021 if (!run_add_entry(run, vcn64, lcn, len, is_mft)) in run_unpack()
1026 if (!run_add_entry(run, vcn, lcn + dlen, len - dlen, in run_unpack()
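run_unpack() is the inverse walk over that encoding: read a header byte, then the run length, then an optional signed lcn delta that accumulates onto the previous lcn (sparse runs carry no delta), and hand each resulting triple to run_add_entry(). The decoder below is a standalone sketch of that walk over the same assumed layout as the packing sketch, printing the runs instead of inserting them.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Read a little-endian signed field of `bytes` bytes (0 bytes -> 0). */
static int64_t sketch_get_le_signed(const uint8_t *p, int bytes)
{
        uint64_t u = 0;
        int i;

        for (i = 0; i < bytes; i++)
                u |= (uint64_t)p[i] << (8 * i);

        /* Sign-extend from the top bit of the highest byte read. */
        if (bytes && bytes < 8 && (p[bytes - 1] & 0x80))
                u |= ~(uint64_t)0 << (8 * bytes);

        return (int64_t)u;
}

/* Walk a packed run list: header byte, length field, optional lcn delta. */
static void sketch_unpack(const uint8_t *buf, size_t size)
{
        int64_t vcn = 0, lcn = 0;
        size_t pos = 0;

        while (pos < size && buf[pos]) {
                int size_len = buf[pos] & 0xf;
                int size_lcn = buf[pos] >> 4;
                int64_t len, dlcn;

                if (size_len > 8 || size_lcn > 8 ||
                    pos + 1 + size_len + size_lcn > size)
                        break;                    /* malformed or truncated */

                len  = sketch_get_le_signed(buf + pos + 1, size_len);
                dlcn = sketch_get_le_signed(buf + pos + 1 + size_len, size_lcn);
                lcn += dlcn;

                if (size_lcn)
                        printf("vcn %lld len %lld lcn %lld\n",
                               (long long)vcn, (long long)len, (long long)lcn);
                else
                        printf("vcn %lld len %lld (sparse)\n",
                               (long long)vcn, (long long)len);

                vcn += len;
                pos += 1 + size_len + size_lcn;
        }
}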
1050 int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, in run_unpack_ex() argument
1060 ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size); in run_unpack_ex()
1064 if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE) in run_unpack_ex()
1073 for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index); in run_unpack_ex()
1075 ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) { in run_unpack_ex()
1112 /* Restore zone. Lock mft run. */ in run_unpack_ex()
1169 * Make a copy of run
1171 int run_clone(const struct runs_tree *run, struct runs_tree *new_run) in run_clone() argument
1173 size_t bytes = run->count * sizeof(struct ntfs_run); in run_clone()
1186 memcpy(new_run->runs, run->runs, bytes); in run_clone()
1187 new_run->count = run->count; in run_clone()
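run_clone() sizes the copy from run->count and memcpy()s the run array into the destination tree, reusing the destination buffer when it is already large enough. Below is a userspace sketch of the same copy, with malloc standing in for the kernel allocator, always reallocating, and a bool return instead of an errno value.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned long long CLST;

struct ntfs_run { CLST vcn, len, lcn; };
struct runs_tree { struct ntfs_run *runs; size_t count, allocated; };

static bool sketch_run_clone(const struct runs_tree *run, struct runs_tree *new_run)
{
        size_t bytes = run->count * sizeof(struct ntfs_run);
        struct ntfs_run *p = NULL;

        if (bytes) {
                p = malloc(bytes);
                if (!p)
                        return false;
                memcpy(p, run->runs, bytes);
        }

        free(new_run->runs);              /* drop any previous copy */
        new_run->runs = p;
        new_run->allocated = bytes;
        new_run->count = run->count;
        return true;
}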