// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: try to use extents tree (instead of array)
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/log2.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/* runs_tree is contiguous memory. Try to avoid a big size. */
#define NTFS3_RUN_MAX_BYTES 0x10000
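/*
 * A rough sizing sketch (editor's note; figures assume a 32-bit CLST):
 * sizeof(struct ntfs_run) below is then 12 bytes, so this cap corresponds
 * to roughly 0x10000 / 12 ~= 5461 entries. The value is used as a soft
 * limit in run_truncate_around() and in a WARN_ON() in run_add_entry().
 */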

struct ntfs_run {
	CLST vcn; /* Virtual cluster number. */
	CLST len; /* Length in clusters. */
	CLST lcn; /* Logical cluster number. */
};
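/*
 * Editor's illustration: the extent { .vcn = 16, .len = 8, .lcn = 100 }
 * says that virtual clusters [16, 24) of the attribute map to volume
 * clusters [100, 108). A hole (sparse region) is encoded with
 * .lcn == SPARSE_LCN instead of a real volume cluster.
 */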

/*
 * run_lookup - Lookup the index of the MCB entry containing vcn.
 *
 * On success, returns true and sets @index to the index of the entry found.
 * If the entry is missing from the list, *index is set to the insertion
 * position for the entry in question.
 */
static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
	size_t min_idx, max_idx, mid_idx;
	struct ntfs_run *r;

	if (!run->count) {
		*index = 0;
		return false;
	}

	min_idx = 0;
	max_idx = run->count - 1;

	/*
	 * Check the boundary cases first,
	 * since they cover the most frequent requests.
	 */
	r = run->runs;
	if (vcn < r->vcn) {
		*index = 0;
		return false;
	}

	if (vcn < r->vcn + r->len) {
		*index = 0;
		return true;
	}

	r += max_idx;
	if (vcn >= r->vcn + r->len) {
		*index = run->count;
		return false;
	}

	if (vcn >= r->vcn) {
		*index = max_idx;
		return true;
	}

	do {
		mid_idx = min_idx + ((max_idx - min_idx) >> 1);
		r = run->runs + mid_idx;

		if (vcn < r->vcn) {
			max_idx = mid_idx - 1;
			if (!mid_idx)
				break;
		} else if (vcn >= r->vcn + r->len) {
			min_idx = mid_idx + 1;
		} else {
			*index = mid_idx;
			return true;
		}
	} while (min_idx <= max_idx);

	*index = max_idx + 1;
	return false;
}
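/*
 * Editor's example of the lookup contract: with runs
 *   { vcn=0, len=4 }, { vcn=10, len=2 }
 * run_lookup(run, 2, &i)  returns true,  i == 0 (inside the first run);
 * run_lookup(run, 5, &i)  returns false, i == 1 (insertion point in the gap);
 * run_lookup(run, 20, &i) returns false, i == 2 (past the last run).
 */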

/*
 * run_consolidate - Consolidate runs starting from a given one.
 */
static void run_consolidate(struct runs_tree *run, size_t index)
{
	size_t i;
	struct ntfs_run *r = run->runs + index;

	while (index + 1 < run->count) {
		/*
		 * Merge the current run with the next one
		 * if the start of the next run lies inside the one
		 * being tested.
		 */
		struct ntfs_run *n = r + 1;
		CLST end = r->vcn + r->len;
		CLST dl;

		/* Stop if the runs are not adjacent to one another. */
		if (n->vcn > end)
			break;

		dl = end - n->vcn;

		/*
		 * If the range at index overlaps with the next one
		 * then either adjust its start position
		 * or (if it matches completely) just remove it from the list.
		 */
		if (dl > 0) {
			if (n->len <= dl)
				goto remove_next_range;

			n->len -= dl;
			n->vcn += dl;
			if (n->lcn != SPARSE_LCN)
				n->lcn += dl;
			dl = 0;
		}

		/*
		 * Stop if sparse mode does not match
		 * for both the current and the next run.
		 */
		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
			index += 1;
			r = n;
			continue;
		}

		/*
		 * Stop if the first volume block of the next run does not
		 * continue from the last volume block of the current run.
		 */
		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
			break;

		/*
		 * Next and current are siblings.
		 * Eat/join.
		 */
		r->len += n->len - dl;

remove_next_range:
		i = run->count - (index + 1);
		if (i > 1)
			memmove(n, n + 1, sizeof(*n) * (i - 1));

		run->count -= 1;
	}
}
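/*
 * Editor's example: consolidating { vcn=0, len=4, lcn=50 } followed by
 * { vcn=4, len=4, lcn=54 } yields the single run { vcn=0, len=8, lcn=50 },
 * because the next run starts exactly at the current end and its lcn
 * continues the current mapping. Two adjacent sparse runs merge the same
 * way; a sparse run next to a mapped one is left alone.
 */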

/*
 * run_is_mapped_full
 *
 * Return: True if the range [svcn, evcn] is fully mapped.
 */
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
{
	size_t i;
	const struct ntfs_run *r, *end;
	CLST next_vcn;

	if (!run_lookup(run, svcn, &i))
		return false;

	end = run->runs + run->count;
	r = run->runs + i;

	for (;;) {
		next_vcn = r->vcn + r->len;
		if (next_vcn > evcn)
			return true;

		if (++r >= end)
			return false;

		if (r->vcn != next_vcn)
			return false;
	}
}
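/*
 * Editor's example: with runs { vcn=0, len=4 } and { vcn=10, len=2 },
 * run_is_mapped_full(run, 0, 3) is true, while run_is_mapped_full(run, 0, 5)
 * is false: the walk reaches the second run and sees r->vcn (10) != the
 * expected next_vcn (4), i.e. a hole.
 */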

bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
		      CLST *len, size_t *index)
{
	size_t idx;
	CLST gap;
	struct ntfs_run *r;

	/* Fail immediately if the run was not touched yet. */
	if (!run->runs)
		return false;

	if (!run_lookup(run, vcn, &idx))
		return false;

	r = run->runs + idx;

	if (vcn >= r->vcn + r->len)
		return false;

	gap = vcn - r->vcn;
	if (r->len <= gap)
		return false;

	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);

	if (len)
		*len = r->len - gap;
	if (index)
		*index = idx;

	return true;
}
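/*
 * Editor's example: for the run { vcn=16, len=8, lcn=100 },
 * run_lookup_entry(run, 19, &lcn, &len, &idx) fills lcn = 103 (100 plus
 * the gap of 3 clusters into the run) and len = 5 (clusters remaining up
 * to the end of the run). For a sparse run, lcn stays SPARSE_LCN.
 */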

/*
 * run_truncate_head - Decommit the range before vcn.
 */
void run_truncate_head(struct runs_tree *run, CLST vcn)
{
	size_t index;
	struct ntfs_run *r;

	if (run_lookup(run, vcn, &index)) {
		r = run->runs + index;

		if (vcn > r->vcn) {
			CLST dlen = vcn - r->vcn;

			r->vcn = vcn;
			r->len -= dlen;
			if (r->lcn != SPARSE_LCN)
				r->lcn += dlen;
		}

		if (!index)
			return;
	}
	r = run->runs;
	memmove(r, r + index, sizeof(*r) * (run->count - index));

	run->count -= index;

	if (!run->count) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate - Decommit the range after vcn.
 */
void run_truncate(struct runs_tree *run, CLST vcn)
{
	size_t index;

	/*
	 * If the lookup hits a range, that range has to be truncated.
	 * If the range being truncated becomes empty,
	 * it is removed entirely.
	 */
	if (run_lookup(run, vcn, &index)) {
		struct ntfs_run *r = run->runs + index;

		r->len = vcn - r->vcn;

		if (r->len > 0)
			index += 1;
	}

	/*
	 * At this point 'index' is set to the first position that
	 * should be thrown away (including index itself).
	 * Simple one - just set the limit.
	 */
	run->count = index;

	/* Do not reallocate array 'runs'. Only free if possible. */
	if (!index) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate_around - Trim head and tail if necessary.
 */
void run_truncate_around(struct runs_tree *run, CLST vcn)
{
	run_truncate_head(run, vcn);

	if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
}

/*
 * run_add_entry
 *
 * Sets the location to a known state.
 * The run to be added may overlap with an existing location.
 *
 * Return: false if out of memory.
 */
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft)
{
	size_t used, index;
	struct ntfs_run *r;
	bool inrange;
	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
	bool should_add_tail = false;

	/*
	 * Lookup the insertion point.
	 *
	 * Execute bsearch for the entry containing
	 * the start position in question.
	 */
	inrange = run_lookup(run, vcn, &index);

	/*
	 * A shortcut here would be the case where the range was not
	 * found, but the one being added continues the previous run.
	 * In this case the existing range can be used directly
	 * as the start point.
	 */
	if (!inrange && index > 0) {
		struct ntfs_run *t = run->runs + index - 1;

		if (t->vcn + t->len == vcn &&
		    (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
		    (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
			inrange = true;
			index -= 1;
		}
	}

	/*
	 * At this point 'index' either points to the range
	 * containing the start position or to the insertion position
	 * for a new range.
	 * So first check whether the range being probed is already here.
	 */
	if (!inrange) {
requires_new_range:
		/*
		 * Range was not found.
		 * Insert at position 'index'.
		 */
		used = run->count * sizeof(struct ntfs_run);

		/*
		 * Check the allocated space.
		 * If it is not enough to hold one more entry
		 * then it will be reallocated.
		 */
		if (run->allocated < used + sizeof(struct ntfs_run)) {
			size_t bytes;
			struct ntfs_run *new_ptr;

			/* Use a power of 2 for 'bytes'. */
			if (!used) {
				bytes = 64;
			} else if (used <= 16 * PAGE_SIZE) {
				if (is_power_of_2(run->allocated))
					bytes = run->allocated << 1;
				else
					bytes = (size_t)1
						<< (2 + blksize_bits(used));
			} else {
				bytes = run->allocated + (16 * PAGE_SIZE);
			}

			WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);

			new_ptr = kvmalloc(bytes, GFP_KERNEL);

			if (!new_ptr)
				return false;

			r = new_ptr + index;
			memcpy(new_ptr, run->runs,
			       index * sizeof(struct ntfs_run));
			memcpy(r + 1, run->runs + index,
			       sizeof(struct ntfs_run) * (run->count - index));

			kvfree(run->runs);
			run->runs = new_ptr;
			run->allocated = bytes;

		} else {
			size_t i = run->count - index;

			r = run->runs + index;

			/* memmove appears to be a bottleneck here... */
			if (i > 0)
				memmove(r + 1, r, sizeof(struct ntfs_run) * i);
		}

		r->vcn = vcn;
		r->lcn = lcn;
		r->len = len;
		run->count += 1;
	} else {
		r = run->runs + index;

		/*
		 * If one of the ranges was not allocated, then we
		 * have to split the location we just matched and
		 * insert the current one.
		 * In the common case this requires the tail to be
		 * reinserted by a recursive call.
		 */
		if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
		    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
			CLST to_eat = vcn - r->vcn;
			CLST Tovcn = to_eat + len;

			should_add_tail = Tovcn < r->len;

			if (should_add_tail) {
				tail_lcn = r->lcn == SPARSE_LCN ?
						   SPARSE_LCN :
						   (r->lcn + Tovcn);
				tail_vcn = r->vcn + Tovcn;
				tail_len = r->len - Tovcn;
			}

			if (to_eat > 0) {
				r->len = to_eat;
				inrange = false;
				index += 1;
				goto requires_new_range;
			}

			/* lcn should match the one we are going to add. */
			r->lcn = lcn;
		}

		/*
		 * If the existing range fits, then we're done.
		 * Otherwise extend the found one and fall back
		 * to the range-join code.
		 */
		if (r->vcn + r->len < vcn + len)
			r->len += len - ((r->vcn + r->len) - vcn);
	}

	/*
	 * And normalize it starting from the insertion point.
	 * It's possible that no insertion is needed, if the
	 * start point lies within the range of the entry
	 * that 'index' points to.
	 */
	if (inrange && index > 0)
		index -= 1;
	run_consolidate(run, index);
	run_consolidate(run, index + 1);

	/*
	 * A special case:
	 * we have to add an extra range as a tail.
	 */
	if (should_add_tail &&
	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
		return false;

	return true;
}
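/*
 * Editor's example of the split path above: adding { vcn=4, lcn=200, len=2 }
 * into the existing run { vcn=0, lcn=100, len=8 } keeps { vcn=0, lcn=100,
 * len=4 } as the head, inserts the new mapping for [4, 6), and re-adds the
 * tail { vcn=6, lcn=106, len=2 } via the recursive call at the end.
 */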

/* run_collapse_range
 *
 * Helper for attr_collapse_range(),
 * which is a helper for fallocate(collapse_range).
 */
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index, eat;
	struct ntfs_run *r, *e, *eat_start, *eat_end;
	CLST end;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return true; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;
	end = vcn + len;

	if (vcn > r->vcn) {
		if (r->vcn + r->len <= end) {
			/* Collapse tail of run. */
			r->len = vcn - r->vcn;
		} else if (r->lcn == SPARSE_LCN) {
			/* Collapse a middle part of a sparse run. */
			r->len -= len;
		} else {
			/* Collapse a middle part of a normal run, split. */
			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
				return false;
			return run_collapse_range(run, vcn, len);
		}

		r += 1;
	}

	eat_start = r;
	eat_end = r;

	for (; r < e; r++) {
		CLST d;

		if (r->vcn >= end) {
			r->vcn -= len;
			continue;
		}

		if (r->vcn + r->len <= end) {
			/* Eat this run. */
			eat_end = r + 1;
			continue;
		}

		d = end - r->vcn;
		if (r->lcn != SPARSE_LCN)
			r->lcn += d;
		r->len -= d;
		r->vcn -= len - d;
	}

	eat = eat_end - eat_start;
	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
	run->count -= eat;

	return true;
}
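/*
 * Editor's example: collapsing vcn=2, len=4 out of the runs
 * { vcn=0, len=4, lcn=100 } and { vcn=4, len=4, lcn=200 } trims the first
 * run to { vcn=0, len=2, lcn=100 } and turns the second into
 * { vcn=2, len=2, lcn=202 }: two of its clusters were eaten and the rest
 * shifted left by the collapsed length.
 */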

/* run_insert_range
 *
 * Helper for attr_insert_range(),
 * which is a helper for fallocate(insert_range).
 */
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index;
	struct ntfs_run *r, *e;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return false; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;

	if (vcn > r->vcn)
		r += 1;

	for (; r < e; r++)
		r->vcn += len;

	r = run->runs + index;

	if (vcn > r->vcn) {
		/* Split the fragment. */
		CLST len1 = vcn - r->vcn;
		CLST len2 = r->len - len1;
		CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);

		r->len = len1;

		if (!run_add_entry(run, vcn + len, lcn2, len2, false))
			return false;
	}

	if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
		return false;

	return true;
}
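/*
 * Editor's example: inserting a hole of len=2 at vcn=2 into
 * { vcn=0, len=4, lcn=100 } first shifts any following runs right by 2,
 * then splits the hit run into { vcn=0, len=2, lcn=100 } and
 * { vcn=4, len=2, lcn=102 }, and finally adds { vcn=2, len=2, SPARSE_LCN }
 * in between.
 */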

/*
 * run_get_entry - Return the index-th mapped region.
 */
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
		   CLST *lcn, CLST *len)
{
	const struct ntfs_run *r;

	if (index >= run->count)
		return false;

	r = run->runs + index;

	if (!r->len)
		return false;

	if (vcn)
		*vcn = r->vcn;
	if (lcn)
		*lcn = r->lcn;
	if (len)
		*len = r->len;
	return true;
}

/*
 * run_packed_size - Calculate the size of a packed int64.
 */
#ifdef __BIG_ENDIAN
static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n + sizeof(n) - 1;

	if (n >= 0) {
		if (p[-7] || p[-6] || p[-5] || p[-4])
			p -= 4;
		if (p[-3] || p[-2])
			p -= 2;
		if (p[-1])
			p -= 1;
		if (p[0] & 0x80)
			p -= 1;
	} else {
		if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
		    p[-4] != 0xff)
			p -= 4;
		if (p[-3] != 0xff || p[-2] != 0xff)
			p -= 2;
		if (p[-1] != 0xff)
			p -= 1;
		if (!(p[0] & 0x80))
			p -= 1;
	}
	return (const u8 *)&n + sizeof(n) - p;
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		run_buf[7] = p[0];
		fallthrough;
	case 7:
		run_buf[6] = p[1];
		fallthrough;
	case 6:
		run_buf[5] = p[2];
		fallthrough;
	case 5:
		run_buf[4] = p[3];
		fallthrough;
	case 4:
		run_buf[3] = p[4];
		fallthrough;
	case 3:
		run_buf[2] = p[5];
		fallthrough;
	case 2:
		run_buf[1] = p[6];
		fallthrough;
	case 1:
		run_buf[0] = p[7];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		p[0] = run_buf[7];
		fallthrough;
	case 7:
		p[1] = run_buf[6];
		fallthrough;
	case 6:
		p[2] = run_buf[5];
		fallthrough;
	case 5:
		p[3] = run_buf[4];
		fallthrough;
	case 4:
		p[4] = run_buf[3];
		fallthrough;
	case 3:
		p[5] = run_buf[2];
		fallthrough;
	case 2:
		p[6] = run_buf[1];
		fallthrough;
	case 1:
		p[7] = run_buf[0];
	}
	return v;
}

#else

static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n;

	if (n >= 0) {
		if (p[7] || p[6] || p[5] || p[4])
			p += 4;
		if (p[3] || p[2])
			p += 2;
		if (p[1])
			p += 1;
		if (p[0] & 0x80)
			p += 1;
	} else {
		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
		    p[4] != 0xff)
			p += 4;
		if (p[3] != 0xff || p[2] != 0xff)
			p += 2;
		if (p[1] != 0xff)
			p += 1;
		if (!(p[0] & 0x80))
			p += 1;
	}

	return 1 + p - (const u8 *)&n;
}
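/*
 * Editor's worked examples for the size computation above:
 * run_packed_size(0x20) == 1 (fits in one byte with the sign bit clear);
 * run_packed_size(0x80) == 2 (bit 7 set would read back as negative, so
 * an extra 0x00 byte is needed);
 * run_packed_size(-1)   == 1 (a single 0xff byte sign-extends correctly).
 */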

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	/* memcpy(run_buf, &v, size); Is it faster? */
	switch (size) {
	case 8:
		run_buf[7] = p[7];
		fallthrough;
	case 7:
		run_buf[6] = p[6];
		fallthrough;
	case 6:
		run_buf[5] = p[5];
		fallthrough;
	case 5:
		run_buf[4] = p[4];
		fallthrough;
	case 4:
		run_buf[3] = p[3];
		fallthrough;
	case 3:
		run_buf[2] = p[2];
		fallthrough;
	case 2:
		run_buf[1] = p[1];
		fallthrough;
	case 1:
		run_buf[0] = p[0];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	/* memcpy(&v, run_buf, size); Is it faster? */
	switch (size) {
	case 8:
		p[7] = run_buf[7];
		fallthrough;
	case 7:
		p[6] = run_buf[6];
		fallthrough;
	case 6:
		p[5] = run_buf[5];
		fallthrough;
	case 5:
		p[4] = run_buf[4];
		fallthrough;
	case 4:
		p[3] = run_buf[3];
		fallthrough;
	case 3:
		p[2] = run_buf[2];
		fallthrough;
	case 2:
		p[1] = run_buf[1];
		fallthrough;
	case 1:
		p[0] = run_buf[0];
	}
	return v;
}
#endif

/*
 * run_pack - Pack runs into a buffer.
 *
 * packed_vcns - How many clusters (VCNs) we have packed.
 * packed_size - How many bytes of run_buf we have used.
 */
int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
	     u32 run_buf_size, CLST *packed_vcns)
{
	CLST next_vcn, vcn, lcn;
	CLST prev_lcn = 0;
	CLST evcn1 = svcn + len;
	const struct ntfs_run *r, *r_end;
	int packed_size = 0;
	size_t i;
	s64 dlcn;
	int offset_size, size_size, tmp;

	*packed_vcns = 0;

	if (!len)
		goto out;

	/* Check that all required entries [svcn, evcn1) are available. */
	if (!run_lookup(run, svcn, &i))
		return -ENOENT;

	r_end = run->runs + run->count;
	r = run->runs + i;

	for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
	     next_vcn = r->vcn + r->len) {
		if (++r >= r_end || r->vcn != next_vcn)
			return -ENOENT;
	}

	/* Repeat the cycle above and pack runs. Assume no errors. */
	r = run->runs + i;
	len = svcn - r->vcn;
	vcn = svcn;
	lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
	len = r->len - len;

	for (;;) {
		next_vcn = vcn + len;
		if (next_vcn > evcn1)
			len = evcn1 - vcn;

		/* How many bytes are required to pack len. */
		size_size = run_packed_size(len);

		/* offset_size - How many bytes the packed dlcn takes. */
		if (lcn == SPARSE_LCN) {
			offset_size = 0;
			dlcn = 0;
		} else {
			/* NOTE: lcn can be less than prev_lcn! */
			dlcn = (s64)lcn - prev_lcn;
			offset_size = run_packed_size(dlcn);
			prev_lcn = lcn;
		}

		tmp = run_buf_size - packed_size - 2 - offset_size;
		if (tmp <= 0)
			goto out;

		/* Can we store this entire run? */
		if (tmp < size_size)
			goto out;

		if (run_buf) {
			/* Pack run header. */
			run_buf[0] = ((u8)(size_size | (offset_size << 4)));
			run_buf += 1;

			/* Pack the length of the run. */
			run_pack_s64(run_buf, size_size, len);

			run_buf += size_size;
			/* Pack the offset from the previous LCN. */
			run_pack_s64(run_buf, offset_size, dlcn);
			run_buf += offset_size;
		}

		packed_size += 1 + offset_size + size_size;
		*packed_vcns += len;

		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
			goto out;

		r += 1;
		vcn = r->vcn;
		lcn = r->lcn;
		len = r->len;
	}

out:
	/* Store the last zero. */
	if (run_buf)
		run_buf[0] = 0;

	return packed_size + 1;
}
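/*
 * Editor's example of the on-disk encoding produced above (the standard
 * NTFS mapping-pairs format): a run of len 0x18 starting at lcn 0x5634
 * with prev_lcn 0 packs as "21 18 34 56" - header 0x21 means a 1-byte
 * length and a 2-byte lcn delta, both little-endian. A sparse run packs
 * with offset_size 0, e.g. "01 18" for 0x18 sparse clusters.
 */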

/*
 * run_unpack - Unpack packed runs from @run_buf.
 *
 * Return: Error if negative, or the real number of used bytes.
 */
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
	       int run_buf_size)
{
	u64 prev_lcn, vcn64, lcn, next_vcn;
	const u8 *run_last, *run_0;
	bool is_mft = ino == MFT_REC_MFT;

	if (run_buf_size < 0)
		return -EINVAL;

	/* Check for empty. */
	if (evcn + 1 == svcn)
		return 0;

	if (evcn < svcn)
		return -EINVAL;

	run_0 = run_buf;
	run_last = run_buf + run_buf_size;
	prev_lcn = 0;
	vcn64 = svcn;

	/* Read all runs in the chain. */
	while (run_buf < run_last) {
		/* size_size - How many bytes the packed len takes. */
		u8 size_size = *run_buf & 0xF;
		/* offset_size - How many bytes the packed dlcn takes. */
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (!size_size)
			break;

		/*
		 * Unpack runs.
		 * NOTE: Runs are stored in little-endian order,
		 * "len" is an unsigned value, "dlcn" is signed.
		 * A large positive number requires 5 bytes to store,
		 * e.g.: 05 FF 7E FF FF 00 00 00
		 */
		if (size_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		/* Skip size_size. */
		run_buf += size_size;

		if (!len)
			return -EINVAL;

		if (!offset_size)
			lcn = SPARSE_LCN64;
		else if (offset_size <= 8) {
			s64 dlcn;

			/* Initial value of dlcn is -1 or 0. */
			dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
			/* Skip offset_size. */
			run_buf += offset_size;

			if (!dlcn)
				return -EINVAL;
			lcn = prev_lcn + dlcn;
			prev_lcn = lcn;
		} else
			return -EINVAL;

		next_vcn = vcn64 + len;
		/* Check boundary. */
		if (next_vcn > evcn + 1)
			return -EINVAL;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
			ntfs_err(
				sbi->sb,
				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
				vcn64, lcn, len);
			return -EOPNOTSUPP;
		}
#endif
		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
			/* LCN range is out of volume. */
			return -EINVAL;
		}

		if (!run)
			; /* Called from check_attr(fslog.c) to check the run. */
		else if (run == RUN_DEALLOCATE) {
			/*
			 * Called from ni_delete_all to free clusters
			 * without storing them in the run.
			 */
			if (lcn != SPARSE_LCN64)
				mark_as_free_ex(sbi, lcn, len, true);
		} else if (vcn64 >= vcn) {
			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
				return -ENOMEM;
		} else if (next_vcn > vcn) {
			u64 dlen = vcn - vcn64;

			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
					   is_mft))
				return -ENOMEM;
		}

		vcn64 = next_vcn;
	}

	if (vcn64 != evcn + 1) {
		/* Not the expected length of unpacked runs. */
		return -EINVAL;
	}

	return run_buf - run_0;
}
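/*
 * Editor's example of the sign extension above: the pair "11 08 F0"
 * (1-byte length 0x08, 1-byte delta 0xF0) has bit 7 of the delta set, so
 * dlcn starts at -1 and unpacks to -0x10: the run sits 16 clusters
 * *before* the previous one. Deltas are relative, which keeps typical
 * mapping pairs short.
 */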

#ifdef NTFS3_CHECK_FREE_CLST
/*
 * run_unpack_ex - Unpack packed runs from "run_buf".
 *
 * Checks that the unpacked runs are marked as used in the bitmap.
 *
 * Return: Error if negative, or the real number of used bytes.
 */
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
		  int run_buf_size)
{
	int ret, err;
	CLST next_vcn, lcn, len;
	size_t index, done;
	bool ok, zone;
	struct wnd_bitmap *wnd;

	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
	if (ret <= 0)
		return ret;

	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
		return ret;

	if (ino == MFT_REC_BADCLUST)
		return ret;

	next_vcn = vcn = svcn;
	wnd = &sbi->used.bitmap;

	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
	     next_vcn <= evcn;
	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
		if (!ok || next_vcn != vcn)
			return -EINVAL;

		next_vcn = vcn + len;

		if (lcn == SPARSE_LCN)
			continue;

		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
			continue;

		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
		zone = max(wnd->zone_bit, lcn) < min(wnd->zone_end, lcn + len);
		/* Check for free blocks. */
		ok = !zone && wnd_is_used(wnd, lcn, len);
		up_read(&wnd->rw_lock);
		if (ok)
			continue;

		/* Looks like the volume is corrupted. */
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		if (!down_write_trylock(&wnd->rw_lock))
			continue;

		if (zone) {
			/*
			 * Range [lcn, lcn + len) intersects with the zone.
			 * To avoid complexity with the zone, just turn it off.
			 */
			wnd_zone_set(wnd, 0, 0);
		}

		/* Mark all zero bits as used in range [lcn, lcn+len). */
		err = wnd_set_used_safe(wnd, lcn, len, &done);
		if (zone) {
			/* Restore the zone. Lock the mft run. */
			struct rw_semaphore *lock;

			lock = is_mounted(sbi) ? &sbi->mft.ni->file.run_lock :
						 NULL;
			if (lock)
				down_read(lock);
			ntfs_refresh_zone(sbi);
			if (lock)
				up_read(lock);
		}
		up_write(&wnd->rw_lock);
		if (err)
			return err;
	}

	return ret;
}
#endif

/*
 * run_get_highest_vcn
 *
 * Return the highest vcn from a mapping pairs array.
 * It is used while replaying the log file.
 */
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
{
	u64 vcn64 = vcn;
	u8 size_size;

	while ((size_size = *run_buf & 0xF)) {
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (size_size > 8 || offset_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		if (!len)
			return -EINVAL;

		run_buf += size_size + offset_size;
		vcn64 += len;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (vcn64 > 0x100000000ull)
			return -EINVAL;
#endif
	}

	*highest_vcn = vcn64 - 1;
	return 0;
}

/*
 * run_clone
 *
 * Make a copy of run.
 */
int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
{
	size_t bytes = run->count * sizeof(struct ntfs_run);

	if (bytes > new_run->allocated) {
		struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);

		if (!new_ptr)
			return -ENOMEM;

		kvfree(new_run->runs);
		new_run->runs = new_ptr;
		new_run->allocated = bytes;
	}

	memcpy(new_run->runs, run->runs, bytes);
	new_run->count = run->count;
	return 0;
}