1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7  */
8 
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16 
17 /*
18  * NTFS_MIN_LOG2_OF_CLUMP and NTFS_MAX_LOG2_OF_CLUMP may be defined
19  * externally to tune the preallocation algorithm.
20  */
21 #ifndef NTFS_MIN_LOG2_OF_CLUMP
22 #define NTFS_MIN_LOG2_OF_CLUMP 16
23 #endif
24 
25 #ifndef NTFS_MAX_LOG2_OF_CLUMP
26 #define NTFS_MAX_LOG2_OF_CLUMP 26
27 #endif
28 
29 // 16M
30 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
31 // 16G
32 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
33 
34 static inline u64 get_pre_allocated(u64 size)
35 {
36 	u32 clump;
37 	u8 align_shift;
38 	u64 ret;
39 
40 	if (size <= NTFS_CLUMP_MIN) {
41 		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
42 		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
43 	} else if (size >= NTFS_CLUMP_MAX) {
44 		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
45 		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
46 	} else {
47 		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
48 			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
49 		clump = 1u << align_shift;
50 	}
51 
52 	ret = ((size + clump - 1) >> align_shift) << align_shift;
53 
54 	return ret;
55 }
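
/*
 * Worked example for get_pre_allocated() above (a sketch, assuming the
 * default clump bounds): for size == 100000 bytes we have
 * size <= NTFS_CLUMP_MIN, so clump == 64K and align_shift == 16;
 * the size is rounded up to the next 64K boundary, giving
 * get_pre_allocated(100000) == 131072.
 */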
56 
57 /*
58  * attr_load_runs - Load all runs stored in @attr.
59  */
60 static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
61 			  struct runs_tree *run, const CLST *vcn)
62 {
63 	int err;
64 	CLST svcn = le64_to_cpu(attr->nres.svcn);
65 	CLST evcn = le64_to_cpu(attr->nres.evcn);
66 	u32 asize;
67 	u16 run_off;
68 
69 	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
70 		return 0;
71 
72 	if (vcn && (evcn < *vcn || *vcn < svcn))
73 		return -EINVAL;
74 
75 	asize = le32_to_cpu(attr->size);
76 	run_off = le16_to_cpu(attr->nres.run_off);
77 
78 	if (run_off > asize)
79 		return -EINVAL;
80 
81 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
82 			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
83 			    asize - run_off);
84 	if (err < 0)
85 		return err;
86 
87 	return 0;
88 }
89 
90 /*
91  * run_deallocate_ex - Deallocate clusters.
92  */
93 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
94 			     CLST vcn, CLST len, CLST *done, bool trim)
95 {
96 	int err = 0;
97 	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
98 	size_t idx;
99 
100 	if (!len)
101 		goto out;
102 
103 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
104 failed:
105 		run_truncate(run, vcn0);
106 		err = -EINVAL;
107 		goto out;
108 	}
109 
110 	for (;;) {
111 		if (clen > len)
112 			clen = len;
113 
114 		if (!clen) {
115 			err = -EINVAL;
116 			goto out;
117 		}
118 
119 		if (lcn != SPARSE_LCN) {
120 			if (sbi) {
121 				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
122 				mark_as_free_ex(sbi, lcn, clen, trim);
123 			}
124 			dn += clen;
125 		}
126 
127 		len -= clen;
128 		if (!len)
129 			break;
130 
131 		vcn_next = vcn + clen;
132 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
133 		    vcn != vcn_next) {
134 			/* Save memory - don't load entire run. */
135 			goto failed;
136 		}
137 	}
138 
139 out:
140 	if (done)
141 		*done += dn;
142 
143 	return err;
144 }
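
/*
 * Example for run_deallocate_ex() above (a sketch with assumed mappings):
 * if the run maps VCNs 0-3 to LCN 100 and VCNs 4-9 to SPARSE_LCN, then
 * run_deallocate_ex(sbi, run, 0, 10, &done, false) frees clusters
 * 100-103, skips the sparse fragment and adds 4 to *done.
 */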
145 
146 /*
147  * attr_allocate_clusters - Find free space, mark it as used and store in @run.
148  */
149 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
150 			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
151 			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
152 			   CLST *new_lcn, CLST *new_len)
153 {
154 	int err;
155 	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
156 	size_t cnt = run->count;
157 
158 	for (;;) {
159 		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
160 					       opt);
161 
162 		if (err == -ENOSPC && pre) {
163 			pre = 0;
164 			if (*pre_alloc)
165 				*pre_alloc = 0;
166 			continue;
167 		}
168 
169 		if (err)
170 			goto out;
171 
172 		if (vcn == vcn0) {
173 			/* Return the first fragment. */
174 			if (new_lcn)
175 				*new_lcn = lcn;
176 			if (new_len)
177 				*new_len = flen;
178 		}
179 
180 		/* Add new fragment into run storage. */
181 		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
182 			/* Undo last 'ntfs_look_for_free_space' */
183 			mark_as_free_ex(sbi, lcn, len, false);
184 			err = -ENOMEM;
185 			goto out;
186 		}
187 
188 		if (opt & ALLOCATE_ZERO) {
189 			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
190 
191 			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
192 						   (sector_t)lcn << shift,
193 						   (sector_t)flen << shift,
194 						   GFP_NOFS, 0);
195 			if (err)
196 				goto out;
197 		}
198 
199 		vcn += flen;
200 
201 		if (flen >= len || (opt & ALLOCATE_MFT) ||
202 		    (fr && run->count - cnt >= fr)) {
203 			*alen = vcn - vcn0;
204 			return 0;
205 		}
206 
207 		len -= flen;
208 	}
209 
210 out:
211 	/* Undo 'ntfs_look_for_free_space' */
212 	if (vcn - vcn0) {
213 		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
214 		run_truncate(run, vcn0);
215 	}
216 
217 	return err;
218 }
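
/*
 * Usage sketch for attr_allocate_clusters() above (illustrative only):
 * allocate 8 clusters with no hint, no preallocation and no fragment
 * limit:
 *
 *	CLST alen;
 *	err = attr_allocate_clusters(sbi, run, 0, 0, 8, NULL, ALLOCATE_DEF,
 *				     &alen, 0, NULL, NULL);
 *
 * On success 'alen' holds the number of clusters actually reserved,
 * possibly spread over several fragments appended to 'run'.
 */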
219 
220 /*
221  * attr_make_nonresident
222  *
223  * If @page is not NULL, it already contains resident data
224  * and is locked (called from ni_write_frame()).
225  */
226 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
227 			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
228 			  u64 new_size, struct runs_tree *run,
229 			  struct ATTRIB **ins_attr, struct page *page)
230 {
231 	struct ntfs_sb_info *sbi;
232 	struct ATTRIB *attr_s;
233 	struct MFT_REC *rec;
234 	u32 used, asize, rsize, aoff, align;
235 	bool is_data;
236 	CLST len, alen;
237 	char *next;
238 	int err;
239 
240 	if (attr->non_res) {
241 		*ins_attr = attr;
242 		return 0;
243 	}
244 
245 	sbi = mi->sbi;
246 	rec = mi->mrec;
247 	attr_s = NULL;
248 	used = le32_to_cpu(rec->used);
249 	asize = le32_to_cpu(attr->size);
250 	next = Add2Ptr(attr, asize);
251 	aoff = PtrOffset(rec, attr);
252 	rsize = le32_to_cpu(attr->res.data_size);
253 	is_data = attr->type == ATTR_DATA && !attr->name_len;
254 
255 	align = sbi->cluster_size;
256 	if (is_attr_compressed(attr))
257 		align <<= COMPRESSION_UNIT;
258 	len = (rsize + align - 1) >> sbi->cluster_bits;
259 
260 	run_init(run);
261 
262 	/* Make a copy of original attribute. */
263 	attr_s = kmemdup(attr, asize, GFP_NOFS);
264 	if (!attr_s) {
265 		err = -ENOMEM;
266 		goto out;
267 	}
268 
269 	if (!len) {
270 		/* Empty resident -> Empty nonresident. */
271 		alen = 0;
272 	} else {
273 		const char *data = resident_data(attr);
274 
275 		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
276 					     ALLOCATE_DEF, &alen, 0, NULL,
277 					     NULL);
278 		if (err)
279 			goto out1;
280 
281 		if (!rsize) {
282 			/* Empty resident -> Non empty nonresident. */
283 		} else if (!is_data) {
284 			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
285 			if (err)
286 				goto out2;
287 		} else if (!page) {
288 			char *kaddr;
289 
290 			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
291 			if (!page) {
292 				err = -ENOMEM;
293 				goto out2;
294 			}
295 			kaddr = kmap_atomic(page);
296 			memcpy(kaddr, data, rsize);
297 			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
298 			kunmap_atomic(kaddr);
299 			flush_dcache_page(page);
300 			SetPageUptodate(page);
301 			set_page_dirty(page);
302 			unlock_page(page);
303 			put_page(page);
304 		}
305 	}
306 
307 	/* Remove original attribute. */
308 	used -= asize;
309 	memmove(attr, Add2Ptr(attr, asize), used - aoff);
310 	rec->used = cpu_to_le32(used);
311 	mi->dirty = true;
312 	if (le)
313 		al_remove_le(ni, le);
314 
315 	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
316 				    attr_s->name_len, run, 0, alen,
317 				    attr_s->flags, &attr, NULL, NULL);
318 	if (err)
319 		goto out3;
320 
321 	kfree(attr_s);
322 	attr->nres.data_size = cpu_to_le64(rsize);
323 	attr->nres.valid_size = attr->nres.data_size;
324 
325 	*ins_attr = attr;
326 
327 	if (is_data)
328 		ni->ni_flags &= ~NI_FLAG_RESIDENT;
329 
330 	/* Resident attribute becomes nonresident. */
331 	return 0;
332 
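/*
 * Undo ladder: out3 re-inserts the saved resident attribute copy,
 * out2 frees the newly allocated clusters, out1 frees the copy.
 */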
333 out3:
334 	attr = Add2Ptr(rec, aoff);
335 	memmove(next, attr, used - aoff);
336 	memcpy(attr, attr_s, asize);
337 	rec->used = cpu_to_le32(used + asize);
338 	mi->dirty = true;
339 out2:
340 	/* Undo: do not trim newly allocated clusters. */
341 	run_deallocate(sbi, run, false);
342 	run_close(run);
343 out1:
344 	kfree(attr_s);
345 out:
346 	return err;
347 }
348 
349 /*
350  * attr_set_size_res - Helper for attr_set_size().
351  */
352 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
353 			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
354 			     u64 new_size, struct runs_tree *run,
355 			     struct ATTRIB **ins_attr)
356 {
357 	struct ntfs_sb_info *sbi = mi->sbi;
358 	struct MFT_REC *rec = mi->mrec;
359 	u32 used = le32_to_cpu(rec->used);
360 	u32 asize = le32_to_cpu(attr->size);
361 	u32 aoff = PtrOffset(rec, attr);
362 	u32 rsize = le32_to_cpu(attr->res.data_size);
363 	u32 tail = used - aoff - asize;
364 	char *next = Add2Ptr(attr, asize);
365 	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
366 
367 	if (dsize < 0) {
368 		memmove(next + dsize, next, tail);
369 	} else if (dsize > 0) {
370 		if (used + dsize > sbi->max_bytes_per_attr)
371 			return attr_make_nonresident(ni, attr, le, mi, new_size,
372 						     run, ins_attr, NULL);
373 
374 		memmove(next + dsize, next, tail);
375 		memset(next, 0, dsize);
376 	}
377 
378 	if (new_size > rsize)
379 		memset(Add2Ptr(resident_data(attr), rsize), 0,
380 		       new_size - rsize);
381 
382 	rec->used = cpu_to_le32(used + dsize);
383 	attr->size = cpu_to_le32(asize + dsize);
384 	attr->res.data_size = cpu_to_le32(new_size);
385 	mi->dirty = true;
386 	*ins_attr = attr;
387 
388 	return 0;
389 }
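
/*
 * Worked example for attr_set_size_res() above (a sketch): growing a
 * resident attribute from rsize == 10 to new_size == 100 bytes gives
 * dsize == ALIGN(100, 8) - ALIGN(10, 8) == 104 - 16 == 88, so the tail
 * of the record is shifted down by 88 bytes and the new area is zeroed.
 */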
390 
391 /*
392  * attr_set_size - Change the size of an attribute.
393  *
394  * Extend:
395  *   - Sparse/compressed: No clusters are allocated.
396  *   - Normal: Append newly allocated and preallocated clusters.
397  * Shrink:
398  *   - Nothing is deallocated if @keep_prealloc is set.
399  */
400 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
401 		  const __le16 *name, u8 name_len, struct runs_tree *run,
402 		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
403 		  struct ATTRIB **ret)
404 {
405 	int err = 0;
406 	struct ntfs_sb_info *sbi = ni->mi.sbi;
407 	u8 cluster_bits = sbi->cluster_bits;
408 	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
409 		      !name_len;
410 	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
411 	struct ATTRIB *attr = NULL, *attr_b;
412 	struct ATTR_LIST_ENTRY *le, *le_b;
413 	struct mft_inode *mi, *mi_b;
414 	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
415 	CLST next_svcn, pre_alloc = -1, done = 0;
416 	bool is_ext, is_bad = false;
417 	bool dirty = false;
418 	u32 align;
419 	struct MFT_REC *rec;
420 
421 again:
422 	alen = 0;
423 	le_b = NULL;
424 	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
425 			      &mi_b);
426 	if (!attr_b) {
427 		err = -ENOENT;
428 		goto bad_inode;
429 	}
430 
431 	if (!attr_b->non_res) {
432 		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
433 					&attr_b);
434 		if (err)
435 			return err;
436 
437 		/* Return if file is still resident. */
438 		if (!attr_b->non_res) {
439 			dirty = true;
440 			goto ok1;
441 		}
442 
443 		/* Layout of records may be changed, so do a full search. */
444 		goto again;
445 	}
446 
447 	is_ext = is_attr_ext(attr_b);
448 	align = sbi->cluster_size;
449 	if (is_ext)
450 		align <<= attr_b->nres.c_unit;
451 
452 	old_valid = le64_to_cpu(attr_b->nres.valid_size);
453 	old_size = le64_to_cpu(attr_b->nres.data_size);
454 	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
455 
456 again_1:
457 	old_alen = old_alloc >> cluster_bits;
458 
459 	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
460 	new_alen = new_alloc >> cluster_bits;
461 
462 	if (keep_prealloc && new_size < old_size) {
463 		attr_b->nres.data_size = cpu_to_le64(new_size);
464 		mi_b->dirty = dirty = true;
465 		goto ok;
466 	}
467 
468 	vcn = old_alen - 1;
469 
470 	svcn = le64_to_cpu(attr_b->nres.svcn);
471 	evcn = le64_to_cpu(attr_b->nres.evcn);
472 
473 	if (svcn <= vcn && vcn <= evcn) {
474 		attr = attr_b;
475 		le = le_b;
476 		mi = mi_b;
477 	} else if (!le_b) {
478 		err = -EINVAL;
479 		goto bad_inode;
480 	} else {
481 		le = le_b;
482 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
483 				    &mi);
484 		if (!attr) {
485 			err = -EINVAL;
486 			goto bad_inode;
487 		}
488 
489 next_le_1:
490 		svcn = le64_to_cpu(attr->nres.svcn);
491 		evcn = le64_to_cpu(attr->nres.evcn);
492 	}
493 	/*
494 	 * Here we have:
495 	 * attr,mi,le - last attribute segment (containing 'vcn').
496 	 * attr_b,mi_b,le_b - base (primary) attribute segment.
497 	 */
498 next_le:
499 	rec = mi->mrec;
500 	err = attr_load_runs(attr, ni, run, NULL);
501 	if (err)
502 		goto out;
503 
504 	if (new_size > old_size) {
505 		CLST to_allocate;
506 		size_t free;
507 
508 		if (new_alloc <= old_alloc) {
509 			attr_b->nres.data_size = cpu_to_le64(new_size);
510 			mi_b->dirty = dirty = true;
511 			goto ok;
512 		}
513 
514 		/*
515 		 * Add clusters. In the simple case we have to:
516 		 *  - allocate space (vcn, lcn, len)
517 		 *  - update packed run in 'mi'
518 		 *  - update attr->nres.evcn
519 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
520 		 */
521 		to_allocate = new_alen - old_alen;
522 add_alloc_in_same_attr_seg:
523 		lcn = 0;
524 		if (is_mft) {
525 			/* MFT allocates clusters from MFT zone. */
526 			pre_alloc = 0;
527 		} else if (is_ext) {
528 			/* No preallocation for sparse/compressed. */
529 			pre_alloc = 0;
530 		} else if (pre_alloc == -1) {
531 			pre_alloc = 0;
532 			if (type == ATTR_DATA && !name_len &&
533 			    sbi->options->prealloc) {
534 				pre_alloc = bytes_to_cluster(
535 						    sbi, get_pre_allocated(
536 								 new_size)) -
537 					    new_alen;
538 			}
539 
540 			/* Get the last LCN to allocate from. */
541 			if (old_alen &&
542 			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
543 				lcn = SPARSE_LCN;
544 			}
545 
546 			if (lcn == SPARSE_LCN)
547 				lcn = 0;
548 			else if (lcn)
549 				lcn += 1;
550 
551 			free = wnd_zeroes(&sbi->used.bitmap);
552 			if (to_allocate > free) {
553 				err = -ENOSPC;
554 				goto out;
555 			}
556 
557 			if (pre_alloc && to_allocate + pre_alloc > free)
558 				pre_alloc = 0;
559 		}
560 
561 		vcn = old_alen;
562 
563 		if (is_ext) {
564 			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
565 					   false)) {
566 				err = -ENOMEM;
567 				goto out;
568 			}
569 			alen = to_allocate;
570 		} else {
571 			/* A packed run entry takes ~3 bytes per fragment. */
572 			err = attr_allocate_clusters(
573 				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
574 				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
575 				is_mft ? 0 :
576 					 (sbi->record_size -
577 					  le32_to_cpu(rec->used) + 8) /
578 							 3 +
579 						 1,
580 				NULL, NULL);
581 			if (err)
582 				goto out;
583 		}
584 
585 		done += alen;
586 		vcn += alen;
587 		if (to_allocate > alen)
588 			to_allocate -= alen;
589 		else
590 			to_allocate = 0;
591 
592 pack_runs:
593 		err = mi_pack_runs(mi, attr, run, vcn - svcn);
594 		if (err)
595 			goto undo_1;
596 
597 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
598 		new_alloc_tmp = (u64)next_svcn << cluster_bits;
599 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
600 		mi_b->dirty = dirty = true;
601 
602 		if (next_svcn >= vcn && !to_allocate) {
603 			/* Normal way. Update attribute and exit. */
604 			attr_b->nres.data_size = cpu_to_le64(new_size);
605 			goto ok;
606 		}
607 
608 		/* For the MFT, stop after adding at least two records' worth of clusters to avoid a recursive loop. */
609 		if (is_mft && next_svcn == vcn &&
610 		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
611 			new_size = new_alloc_tmp;
612 			attr_b->nres.data_size = attr_b->nres.alloc_size;
613 			goto ok;
614 		}
615 
616 		if (le32_to_cpu(rec->used) < sbi->record_size) {
617 			old_alen = next_svcn;
618 			evcn = old_alen - 1;
619 			goto add_alloc_in_same_attr_seg;
620 		}
621 
622 		attr_b->nres.data_size = attr_b->nres.alloc_size;
623 		if (new_alloc_tmp < old_valid)
624 			attr_b->nres.valid_size = attr_b->nres.data_size;
625 
626 		if (type == ATTR_LIST) {
627 			err = ni_expand_list(ni);
628 			if (err)
629 				goto undo_2;
630 			if (next_svcn < vcn)
631 				goto pack_runs;
632 
633 			/* Layout of records is changed. */
634 			goto again;
635 		}
636 
637 		if (!ni->attr_list.size) {
638 			err = ni_create_attr_list(ni);
639 			/* In case of error layout of records is not changed. */
640 			if (err)
641 				goto undo_2;
642 			/* Layout of records is changed. */
643 		}
644 
645 		if (next_svcn >= vcn) {
646 			/* This is MFT data, repeat. */
647 			goto again;
648 		}
649 
650 		/* Insert new attribute segment. */
651 		err = ni_insert_nonresident(ni, type, name, name_len, run,
652 					    next_svcn, vcn - next_svcn,
653 					    attr_b->flags, &attr, &mi, NULL);
654 
655 		/*
656 		 * Layout of records may be changed.
657 		 * Find base attribute to update.
658 		 */
659 		le_b = NULL;
660 		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
661 				      NULL, &mi_b);
662 		if (!attr_b) {
663 			err = -EINVAL;
664 			goto bad_inode;
665 		}
666 
667 		if (err) {
668 			/* ni_insert_nonresident failed. */
669 			attr = NULL;
670 			goto undo_2;
671 		}
672 
673 		if (!is_mft)
674 			run_truncate_head(run, evcn + 1);
675 
676 		svcn = le64_to_cpu(attr->nres.svcn);
677 		evcn = le64_to_cpu(attr->nres.evcn);
678 
679 		/*
680 		 * The attribute is in a consistent state.
681 		 * Save this point to restore to if the next steps fail.
682 		 */
683 		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
684 		attr_b->nres.valid_size = attr_b->nres.data_size =
685 			attr_b->nres.alloc_size = cpu_to_le64(old_size);
686 		mi_b->dirty = dirty = true;
687 		goto again_1;
688 	}
689 
690 	if (new_size != old_size ||
691 	    (new_alloc != old_alloc && !keep_prealloc)) {
692 		/*
693 		 * Truncate clusters. In the simple case we have to:
694 		 *  - update packed run in 'mi'
695 		 *  - update attr->nres.evcn
696 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
697 		 *  - mark and trim clusters as free (vcn, lcn, len)
698 		 */
699 		CLST dlen = 0;
700 
701 		vcn = max(svcn, new_alen);
702 		new_alloc_tmp = (u64)vcn << cluster_bits;
703 
704 		if (vcn > svcn) {
705 			err = mi_pack_runs(mi, attr, run, vcn - svcn);
706 			if (err)
707 				goto out;
708 		} else if (le && le->vcn) {
709 			u16 le_sz = le16_to_cpu(le->size);
710 
711 			/*
712 			 * NOTE: List entries for one attribute are always
713 			 * the same size. We deal with the last entry
714 			 * (vcn == 0), and it is not the first in the entries
715 			 * array (the list entry for the std info attribute is always first).
716 			 * So it is safe to step back.
717 			 */
718 			mi_remove_attr(NULL, mi, attr);
719 
720 			if (!al_remove_le(ni, le)) {
721 				err = -EINVAL;
722 				goto bad_inode;
723 			}
724 
725 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
726 		} else {
727 			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
728 			mi->dirty = true;
729 		}
730 
731 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
732 
733 		if (vcn == new_alen) {
734 			attr_b->nres.data_size = cpu_to_le64(new_size);
735 			if (new_size < old_valid)
736 				attr_b->nres.valid_size =
737 					attr_b->nres.data_size;
738 		} else {
739 			if (new_alloc_tmp <=
740 			    le64_to_cpu(attr_b->nres.data_size))
741 				attr_b->nres.data_size =
742 					attr_b->nres.alloc_size;
743 			if (new_alloc_tmp <
744 			    le64_to_cpu(attr_b->nres.valid_size))
745 				attr_b->nres.valid_size =
746 					attr_b->nres.alloc_size;
747 		}
748 		mi_b->dirty = dirty = true;
749 
750 		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
751 					true);
752 		if (err)
753 			goto out;
754 
755 		if (is_ext) {
756 			/* dlen == the number of actually deallocated clusters. */
757 			le64_sub_cpu(&attr_b->nres.total_size,
758 				     ((u64)dlen << cluster_bits));
759 		}
760 
761 		run_truncate(run, vcn);
762 
763 		if (new_alloc_tmp <= new_alloc)
764 			goto ok;
765 
766 		old_size = new_alloc_tmp;
767 		vcn = svcn - 1;
768 
769 		if (le == le_b) {
770 			attr = attr_b;
771 			mi = mi_b;
772 			evcn = svcn - 1;
773 			svcn = 0;
774 			goto next_le;
775 		}
776 
777 		if (le->type != type || le->name_len != name_len ||
778 		    memcmp(le_name(le), name, name_len * sizeof(short))) {
779 			err = -EINVAL;
780 			goto bad_inode;
781 		}
782 
783 		err = ni_load_mi(ni, le, &mi);
784 		if (err)
785 			goto out;
786 
787 		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
788 		if (!attr) {
789 			err = -EINVAL;
790 			goto bad_inode;
791 		}
792 		goto next_le_1;
793 	}
794 
795 ok:
796 	if (new_valid) {
797 		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
798 
799 		if (attr_b->nres.valid_size != valid) {
800 			attr_b->nres.valid_size = valid;
801 			mi_b->dirty = true;
802 		}
803 	}
804 
805 ok1:
806 	if (ret)
807 		*ret = attr_b;
808 
809 	if (((type == ATTR_DATA && !name_len) ||
810 	     (type == ATTR_ALLOC && name == I30_NAME))) {
811 		/* Update the inode byte count via inode_set_bytes. */
812 		if (attr_b->non_res) {
813 			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
814 			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
815 				inode_set_bytes(&ni->vfs_inode, new_alloc);
816 				dirty = true;
817 			}
818 		}
819 
820 		/* Don't forget to update duplicate information in parent. */
821 		if (dirty) {
822 			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
823 			mark_inode_dirty(&ni->vfs_inode);
824 		}
825 	}
826 
827 	return 0;
828 
829 undo_2:
830 	vcn -= alen;
831 	attr_b->nres.data_size = cpu_to_le64(old_size);
832 	attr_b->nres.valid_size = cpu_to_le64(old_valid);
833 	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
834 
835 	/* Restore 'attr' and 'mi'. */
836 	if (attr)
837 		goto restore_run;
838 
839 	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
840 	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
841 		attr = attr_b;
842 		le = le_b;
843 		mi = mi_b;
844 	} else if (!le_b) {
845 		err = -EINVAL;
846 		goto bad_inode;
847 	} else {
848 		le = le_b;
849 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
850 				    &svcn, &mi);
851 		if (!attr)
852 			goto bad_inode;
853 	}
854 
855 restore_run:
856 	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
857 		is_bad = true;
858 
859 undo_1:
860 	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
861 
862 	run_truncate(run, vcn);
863 out:
864 	if (is_bad) {
865 bad_inode:
866 		_ntfs_bad_inode(&ni->vfs_inode);
867 	}
868 	return err;
869 }
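
/*
 * Usage sketch for attr_set_size() above (illustrative only): truncate
 * the unnamed $DATA attribute to 'vbo' bytes, keeping preallocation and
 * clamping the valid size, as the collapse path below does:
 *
 *	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
 *			    &new_valid, true, NULL);
 */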
870 
871 /*
872  * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
873  *
874  * @new == NULL means just get the current mapping for 'vcn'.
875  * @new != NULL means allocate a real cluster if 'vcn' maps to a hole.
876  * @zero - zero out newly allocated clusters.
877  *
878  *  NOTE:
879  *  - @new != NULL is used only for sparse or compressed attributes.
880  *  - Newly allocated clusters are zeroed via blkdev_issue_zeroout.
881  */
882 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
883 			CLST *len, bool *new, bool zero)
884 {
885 	int err = 0;
886 	struct runs_tree *run = &ni->file.run;
887 	struct ntfs_sb_info *sbi;
888 	u8 cluster_bits;
889 	struct ATTRIB *attr = NULL, *attr_b;
890 	struct ATTR_LIST_ENTRY *le, *le_b;
891 	struct mft_inode *mi, *mi_b;
892 	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
893 	CLST alloc, evcn;
894 	unsigned fr;
895 	u64 total_size, total_size0;
896 	int step = 0;
897 
898 	if (new)
899 		*new = false;
900 
901 	/* Try to find in cache. */
902 	down_read(&ni->file.run_lock);
903 	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
904 		*len = 0;
905 	up_read(&ni->file.run_lock);
906 
907 	if (*len) {
908 		if (*lcn != SPARSE_LCN || !new)
909 			return 0; /* Fast normal way without allocation. */
910 		else if (clen > *len)
911 			clen = *len;
912 	}
913 
914 	/* No cluster in cache, or we need to allocate a cluster in a hole. */
915 	sbi = ni->mi.sbi;
916 	cluster_bits = sbi->cluster_bits;
917 
918 	ni_lock(ni);
919 	down_write(&ni->file.run_lock);
920 
921 	le_b = NULL;
922 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
923 	if (!attr_b) {
924 		err = -ENOENT;
925 		goto out;
926 	}
927 
928 	if (!attr_b->non_res) {
929 		*lcn = RESIDENT_LCN;
930 		*len = 1;
931 		goto out;
932 	}
933 
934 	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
935 	if (vcn >= asize) {
936 		if (new) {
937 			err = -EINVAL;
938 		} else {
939 			*len = 1;
940 			*lcn = SPARSE_LCN;
941 		}
942 		goto out;
943 	}
944 
945 	svcn = le64_to_cpu(attr_b->nres.svcn);
946 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
947 
948 	attr = attr_b;
949 	le = le_b;
950 	mi = mi_b;
951 
952 	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
953 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
954 				    &mi);
955 		if (!attr) {
956 			err = -EINVAL;
957 			goto out;
958 		}
959 		svcn = le64_to_cpu(attr->nres.svcn);
960 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
961 	}
962 
963 	/* Load the actual mapping into the run cache. */
964 	err = attr_load_runs(attr, ni, run, NULL);
965 	if (err)
966 		goto out;
967 
968 	if (!*len) {
969 		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
970 			if (*lcn != SPARSE_LCN || !new)
971 				goto ok; /* Slow normal way without allocation. */
972 
973 			if (clen > *len)
974 				clen = *len;
975 		} else if (!new) {
976 			/* Here we may return -ENOENT.
977 			 * In any case the caller gets zero length. */
978 			goto ok;
979 		}
980 	}
981 
982 	if (!is_attr_ext(attr_b)) {
983 		/* The code below is only for sparse or compressed attributes. */
984 		err = -EINVAL;
985 		goto out;
986 	}
987 
988 	vcn0 = vcn;
989 	to_alloc = clen;
990 	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
991 	/* Allocate frame-aligned clusters.
992 	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed.
993 	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
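	/*
	 * Frame alignment example (a sketch): with c_unit == 4,
	 * clst_per_frame == 16 and cmask == ~15, a request for vcn0 == 18,
	 * clen == 3 is widened to vcn == 16 and to_alloc == 16, i.e. one
	 * whole frame.
	 */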
994 	if (attr_b->nres.c_unit) {
995 		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
996 		CLST cmask = ~(clst_per_frame - 1);
997 
998 		/* Get frame-aligned vcn and to_alloc. */
999 		vcn = vcn0 & cmask;
1000 		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1001 		if (fr < clst_per_frame)
1002 			fr = clst_per_frame;
1003 		zero = true;
1004 
1005 		/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
1006 		if (vcn < svcn || evcn1 <= vcn) {
1007 			/* Load attribute for truncated vcn. */
1008 			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
1009 					    &vcn, &mi);
1010 			if (!attr) {
1011 				err = -EINVAL;
1012 				goto out;
1013 			}
1014 			svcn = le64_to_cpu(attr->nres.svcn);
1015 			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1016 			err = attr_load_runs(attr, ni, run, NULL);
1017 			if (err)
1018 				goto out;
1019 		}
1020 	}
1021 
1022 	if (vcn + to_alloc > asize)
1023 		to_alloc = asize - vcn;
1024 
1025 	/* Get the last LCN to allocate from. */
1026 	hint = 0;
1027 
1028 	if (vcn > evcn1) {
1029 		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1030 				   false)) {
1031 			err = -ENOMEM;
1032 			goto out;
1033 		}
1034 	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1035 		hint = -1;
1036 	}
1037 
1038 	/* Allocate and zeroout new clusters. */
1039 	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
1040 				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
1041 				     fr, lcn, len);
1042 	if (err)
1043 		goto out;
1044 	*new = true;
1045 	step = 1;
1046 
1047 	end = vcn + alen;
1048 	/* Save 'total_size0' to restore if error. */
1049 	total_size0 = le64_to_cpu(attr_b->nres.total_size);
1050 	total_size = total_size0 + ((u64)alen << cluster_bits);
1051 
1052 	if (vcn != vcn0) {
1053 		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1054 			err = -EINVAL;
1055 			goto out;
1056 		}
1057 		if (*lcn == SPARSE_LCN) {
1058 			/* Internal error. Should not happen. */
1059 			WARN_ON(1);
1060 			err = -EINVAL;
1061 			goto out;
1062 		}
1063 		/* Check the case when vcn0 + len overlaps the newly allocated clusters. */
1064 		if (vcn0 + *len > end)
1065 			*len = end - vcn0;
1066 	}
1067 
1068 repack:
1069 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1070 	if (err)
1071 		goto out;
1072 
1073 	attr_b->nres.total_size = cpu_to_le64(total_size);
1074 	inode_set_bytes(&ni->vfs_inode, total_size);
1075 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1076 
1077 	mi_b->dirty = true;
1078 	mark_inode_dirty(&ni->vfs_inode);
1079 
1080 	/* Stored [vcn : next_svcn) from [vcn : end). */
1081 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1082 
1083 	if (end <= evcn1) {
1084 		if (next_svcn == evcn1) {
1085 			/* Normal way. Update attribute and exit. */
1086 			goto ok;
1087 		}
1088 		/* Add a new segment [next_svcn : evcn1). */
1089 		if (!ni->attr_list.size) {
1090 			err = ni_create_attr_list(ni);
1091 			if (err)
1092 				goto undo1;
1093 			/* Layout of records is changed. */
1094 			le_b = NULL;
1095 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1096 					      0, NULL, &mi_b);
1097 			if (!attr_b) {
1098 				err = -ENOENT;
1099 				goto out;
1100 			}
1101 
1102 			attr = attr_b;
1103 			le = le_b;
1104 			mi = mi_b;
1105 			goto repack;
1106 		}
1107 	}
1108 
1109 	/*
1110 	 * The code below may require an additional cluster (to extend the
1111 	 * attribute list) and/or one MFT record.
1112 	 * It is too complex to undo operations if -ENOSPC occurs deep
1113 	 * inside 'ni_insert_nonresident'.
1114 	 * Return -ENOSPC in advance if there is no free cluster and no free MFT record.
1115 	 */
1116 	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
1117 		/* Undo step 1. */
1118 		err = -ENOSPC;
1119 		goto undo1;
1120 	}
1121 
1122 	step = 2;
1123 	svcn = evcn1;
1124 
1125 	/* Estimate next attribute. */
1126 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1127 
1128 	if (!attr) {
1129 		/* Insert new attribute segment. */
1130 		goto ins_ext;
1131 	}
1132 
1133 	/* Try to update an existing attribute segment. */
1134 	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1135 	evcn = le64_to_cpu(attr->nres.evcn);
1136 
1137 	if (end < next_svcn)
1138 		end = next_svcn;
1139 	while (end > evcn) {
1140 		/* Remove segment [svcn : evcn]. */
1141 		mi_remove_attr(NULL, mi, attr);
1142 
1143 		if (!al_remove_le(ni, le)) {
1144 			err = -EINVAL;
1145 			goto out;
1146 		}
1147 
1148 		if (evcn + 1 >= alloc) {
1149 			/* Last attribute segment. */
1150 			evcn1 = evcn + 1;
1151 			goto ins_ext;
1152 		}
1153 
1154 		if (ni_load_mi(ni, le, &mi)) {
1155 			attr = NULL;
1156 			goto out;
1157 		}
1158 
1159 		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1160 		if (!attr) {
1161 			err = -EINVAL;
1162 			goto out;
1163 		}
1164 		svcn = le64_to_cpu(attr->nres.svcn);
1165 		evcn = le64_to_cpu(attr->nres.evcn);
1166 	}
1167 
1168 	if (end < svcn)
1169 		end = svcn;
1170 
1171 	err = attr_load_runs(attr, ni, run, &end);
1172 	if (err)
1173 		goto out;
1174 
1175 	evcn1 = evcn + 1;
1176 	attr->nres.svcn = cpu_to_le64(next_svcn);
1177 	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1178 	if (err)
1179 		goto out;
1180 
1181 	le->vcn = cpu_to_le64(next_svcn);
1182 	ni->attr_list.dirty = true;
1183 	mi->dirty = true;
1184 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1185 
1186 ins_ext:
1187 	if (evcn1 > next_svcn) {
1188 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1189 					    next_svcn, evcn1 - next_svcn,
1190 					    attr_b->flags, &attr, &mi, NULL);
1191 		if (err)
1192 			goto out;
1193 	}
1194 ok:
1195 	run_truncate_around(run, vcn);
1196 out:
1197 	if (err && step > 1) {
1198 		/* Too complex to restore. */
1199 		_ntfs_bad_inode(&ni->vfs_inode);
1200 	}
1201 	up_write(&ni->file.run_lock);
1202 	ni_unlock(ni);
1203 
1204 	return err;
1205 
1206 undo1:
1207 	/* Undo step1. */
1208 	attr_b->nres.total_size = cpu_to_le64(total_size0);
1209 	inode_set_bytes(&ni->vfs_inode, total_size0);
1210 
1211 	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
1212 	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
1213 	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1214 		_ntfs_bad_inode(&ni->vfs_inode);
1215 	}
1216 	goto out;
1217 }
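
/*
 * Usage sketch for attr_data_get_block() above (illustrative only):
 *
 *	CLST lcn, len;
 *	bool new = false;
 *	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new, true);
 *
 * With a non-NULL 'new', a hole at 'vcn' is replaced by real zeroed
 * clusters; with new == NULL the current mapping is returned without
 * any allocation.
 */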
1218 
1219 int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1220 {
1221 	u64 vbo;
1222 	struct ATTRIB *attr;
1223 	u32 data_size;
1224 
1225 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1226 	if (!attr)
1227 		return -EINVAL;
1228 
1229 	if (attr->non_res)
1230 		return E_NTFS_NONRESIDENT;
1231 
1232 	vbo = page->index << PAGE_SHIFT;
1233 	data_size = le32_to_cpu(attr->res.data_size);
1234 	if (vbo < data_size) {
1235 		const char *data = resident_data(attr);
1236 		char *kaddr = kmap_atomic(page);
1237 		u32 use = data_size - vbo;
1238 
1239 		if (use > PAGE_SIZE)
1240 			use = PAGE_SIZE;
1241 
1242 		memcpy(kaddr, data + vbo, use);
1243 		memset(kaddr + use, 0, PAGE_SIZE - use);
1244 		kunmap_atomic(kaddr);
1245 		flush_dcache_page(page);
1246 		SetPageUptodate(page);
1247 	} else if (!PageUptodate(page)) {
1248 		zero_user_segment(page, 0, PAGE_SIZE);
1249 		SetPageUptodate(page);
1250 	}
1251 
1252 	return 0;
1253 }
1254 
1255 int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1256 {
1257 	u64 vbo;
1258 	struct mft_inode *mi;
1259 	struct ATTRIB *attr;
1260 	u32 data_size;
1261 
1262 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1263 	if (!attr)
1264 		return -EINVAL;
1265 
1266 	if (attr->non_res) {
1267 		/* Return special error code to check this case. */
1268 		return E_NTFS_NONRESIDENT;
1269 	}
1270 
1271 	vbo = page->index << PAGE_SHIFT;
1272 	data_size = le32_to_cpu(attr->res.data_size);
1273 	if (vbo < data_size) {
1274 		char *data = resident_data(attr);
1275 		char *kaddr = kmap_atomic(page);
1276 		u32 use = data_size - vbo;
1277 
1278 		if (use > PAGE_SIZE)
1279 			use = PAGE_SIZE;
1280 		memcpy(data + vbo, kaddr, use);
1281 		kunmap_atomic(kaddr);
1282 		mi->dirty = true;
1283 	}
1284 	ni->i_valid = data_size;
1285 
1286 	return 0;
1287 }
1288 
1289 /*
1290  * attr_load_runs_vcn - Load runs with VCN.
1291  */
1292 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1293 		       const __le16 *name, u8 name_len, struct runs_tree *run,
1294 		       CLST vcn)
1295 {
1296 	struct ATTRIB *attr;
1297 	int err;
1298 	CLST svcn, evcn;
1299 	u16 ro;
1300 
1301 	if (!ni) {
1302 		/* Is record corrupted? */
1303 		return -ENOENT;
1304 	}
1305 
1306 	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1307 	if (!attr) {
1308 		/* Is record corrupted? */
1309 		return -ENOENT;
1310 	}
1311 
1312 	svcn = le64_to_cpu(attr->nres.svcn);
1313 	evcn = le64_to_cpu(attr->nres.evcn);
1314 
1315 	if (evcn < vcn || vcn < svcn) {
1316 		/* Is record corrupted? */
1317 		return -EINVAL;
1318 	}
1319 
1320 	ro = le16_to_cpu(attr->nres.run_off);
1321 
1322 	if (ro > le32_to_cpu(attr->size))
1323 		return -EINVAL;
1324 
1325 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1326 			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1327 	if (err < 0)
1328 		return err;
1329 	return 0;
1330 }
1331 
1332 /*
1333  * attr_load_runs_range - Load runs for the given range [from, to).
1334  */
1335 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1336 			 const __le16 *name, u8 name_len, struct runs_tree *run,
1337 			 u64 from, u64 to)
1338 {
1339 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1340 	u8 cluster_bits = sbi->cluster_bits;
1341 	CLST vcn;
1342 	CLST vcn_last = (to - 1) >> cluster_bits;
1343 	CLST lcn, clen;
1344 	int err;
1345 
1346 	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1347 		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1348 			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1349 						 vcn);
1350 			if (err)
1351 				return err;
1352 			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1353 		}
1354 	}
1355 
1356 	return 0;
1357 }
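
/*
 * Example for attr_load_runs_range() above (a sketch, assuming 4K
 * clusters): attr_load_runs_range(ni, ATTR_DATA, NULL, 0, run, 0, 16384)
 * walks VCNs 0-3 and loads any attribute segment not yet cached in 'run'.
 */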
1358 
1359 #ifdef CONFIG_NTFS3_LZX_XPRESS
1360 /*
1361  * attr_wof_frame_info
1362  *
1363  * Read the header of an Xpress/LZX file to get info about a frame.
1364  */
1365 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1366 			struct runs_tree *run, u64 frame, u64 frames,
1367 			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1368 {
1369 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1370 	u64 vbo[2], off[2], wof_size;
1371 	u32 voff;
1372 	u8 bytes_per_off;
1373 	char *addr;
1374 	struct page *page;
1375 	int i, err;
1376 	__le32 *off32;
1377 	__le64 *off64;
1378 
1379 	if (ni->vfs_inode.i_size < 0x100000000ull) {
1380 		/* File starts with an array of 32-bit offsets. */
1381 		bytes_per_off = sizeof(__le32);
1382 		vbo[1] = frame << 2;
1383 		*vbo_data = frames << 2;
1384 	} else {
1385 		/* File starts with an array of 64-bit offsets. */
1386 		bytes_per_off = sizeof(__le64);
1387 		vbo[1] = frame << 3;
1388 		*vbo_data = frames << 3;
1389 	}
1390 
1391 	/*
1392 	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1393 	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1394 	 */
1395 	if (!attr->non_res) {
1396 		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1397 			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1398 			return -EINVAL;
1399 		}
1400 		addr = resident_data(attr);
1401 
1402 		if (bytes_per_off == sizeof(__le32)) {
1403 			off32 = Add2Ptr(addr, vbo[1]);
1404 			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1405 			off[1] = le32_to_cpu(off32[0]);
1406 		} else {
1407 			off64 = Add2Ptr(addr, vbo[1]);
1408 			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1409 			off[1] = le64_to_cpu(off64[0]);
1410 		}
1411 
1412 		*vbo_data += off[0];
1413 		*ondisk_size = off[1] - off[0];
1414 		return 0;
1415 	}
1416 
1417 	wof_size = le64_to_cpu(attr->nres.data_size);
1418 	down_write(&ni->file.run_lock);
1419 	page = ni->file.offs_page;
1420 	if (!page) {
1421 		page = alloc_page(GFP_KERNEL);
1422 		if (!page) {
1423 			err = -ENOMEM;
1424 			goto out;
1425 		}
1426 		page->index = -1;
1427 		ni->file.offs_page = page;
1428 	}
1429 	lock_page(page);
1430 	addr = page_address(page);
1431 
1432 	if (vbo[1]) {
1433 		voff = vbo[1] & (PAGE_SIZE - 1);
1434 		vbo[0] = vbo[1] - bytes_per_off;
1435 		i = 0;
1436 	} else {
1437 		voff = 0;
1438 		vbo[0] = 0;
1439 		off[0] = 0;
1440 		i = 1;
1441 	}
1442 
1443 	do {
1444 		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1445 
1446 		if (index != page->index) {
1447 			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1448 			u64 to = min(from + PAGE_SIZE, wof_size);
1449 
1450 			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1451 						   ARRAY_SIZE(WOF_NAME), run,
1452 						   from, to);
1453 			if (err)
1454 				goto out1;
1455 
1456 			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1457 					     to - from, REQ_OP_READ);
1458 			if (err) {
1459 				page->index = -1;
1460 				goto out1;
1461 			}
1462 			page->index = index;
1463 		}
1464 
1465 		if (i) {
1466 			if (bytes_per_off == sizeof(__le32)) {
1467 				off32 = Add2Ptr(addr, voff);
1468 				off[1] = le32_to_cpu(*off32);
1469 			} else {
1470 				off64 = Add2Ptr(addr, voff);
1471 				off[1] = le64_to_cpu(*off64);
1472 			}
1473 		} else if (!voff) {
1474 			if (bytes_per_off == sizeof(__le32)) {
1475 				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1476 				off[0] = le32_to_cpu(*off32);
1477 			} else {
1478 				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1479 				off[0] = le64_to_cpu(*off64);
1480 			}
1481 		} else {
1482 			/* Two values in one page. */
1483 			if (bytes_per_off == sizeof(__le32)) {
1484 				off32 = Add2Ptr(addr, voff);
1485 				off[0] = le32_to_cpu(off32[-1]);
1486 				off[1] = le32_to_cpu(off32[0]);
1487 			} else {
1488 				off64 = Add2Ptr(addr, voff);
1489 				off[0] = le64_to_cpu(off64[-1]);
1490 				off[1] = le64_to_cpu(off64[0]);
1491 			}
1492 			break;
1493 		}
1494 	} while (++i < 2);
1495 
1496 	*vbo_data += off[0];
1497 	*ondisk_size = off[1] - off[0];
1498 
1499 out1:
1500 	unlock_page(page);
1501 out:
1502 	up_write(&ni->file.run_lock);
1503 	return err;
1504 }
1505 #endif
1506 
1507 /*
1508  * attr_is_frame_compressed - Used to detect compressed frame.
1509  */
1510 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1511 			     CLST frame, CLST *clst_data)
1512 {
1513 	int err;
1514 	u32 clst_frame;
1515 	CLST clen, lcn, vcn, alen, slen, vcn_next;
1516 	size_t idx;
1517 	struct runs_tree *run;
1518 
1519 	*clst_data = 0;
1520 
1521 	if (!is_attr_compressed(attr))
1522 		return 0;
1523 
1524 	if (!attr->non_res)
1525 		return 0;
1526 
1527 	clst_frame = 1u << attr->nres.c_unit;
1528 	vcn = frame * clst_frame;
1529 	run = &ni->file.run;
1530 
1531 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1532 		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1533 					 attr->name_len, run, vcn);
1534 		if (err)
1535 			return err;
1536 
1537 		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1538 			return -EINVAL;
1539 	}
1540 
1541 	if (lcn == SPARSE_LCN) {
1542 		/* Sparse frame. */
1543 		return 0;
1544 	}
1545 
1546 	if (clen >= clst_frame) {
1547 		/*
1548 		 * The frame is not compressed because
1549 		 * it does not contain any sparse clusters.
1550 		 */
1551 		*clst_data = clst_frame;
1552 		return 0;
1553 	}
1554 
1555 	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1556 	slen = 0;
1557 	*clst_data = clen;
1558 
1559 	/*
1560 	 * The frame is compressed if *clst_data + slen >= clst_frame.
1561 	 * Check next fragments.
1562 	 */
1563 	while ((vcn += clen) < alen) {
1564 		vcn_next = vcn;
1565 
1566 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1567 		    vcn_next != vcn) {
1568 			err = attr_load_runs_vcn(ni, attr->type,
1569 						 attr_name(attr),
1570 						 attr->name_len, run, vcn_next);
1571 			if (err)
1572 				return err;
1573 			vcn = vcn_next;
1574 
1575 			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1576 				return -EINVAL;
1577 		}
1578 
1579 		if (lcn == SPARSE_LCN) {
1580 			slen += clen;
1581 		} else {
1582 			if (slen) {
1583 				/*
1584 				 * Data clusters + sparse clusters are
1585 				 * not enough for the frame.
1586 				 */
1587 				return -EINVAL;
1588 			}
1589 			*clst_data += clen;
1590 		}
1591 
1592 		if (*clst_data + slen >= clst_frame) {
1593 			if (!slen) {
1594 				/*
1595 				 * There are no sparse clusters in this frame,
1596 				 * so it is not compressed.
1597 				 */
1598 				*clst_data = clst_frame;
1599 			} else {
1600 				/* Frame is compressed. */
1601 			}
1602 			break;
1603 		}
1604 	}
1605 
1606 	return 0;
1607 }
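
/*
 * Example for attr_is_frame_compressed() above (a sketch): with
 * c_unit == 4 a frame spans 16 clusters; if the run maps 10 data
 * clusters followed by at least 6 sparse clusters, *clst_data ends up
 * as 10 and the frame is considered compressed.
 */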
1608 
1609 /*
1610  * attr_allocate_frame - Allocate/free clusters for @frame.
1611  *
1612  * Assumed: down_write(&ni->file.run_lock);
1613  */
1614 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1615 			u64 new_valid)
1616 {
1617 	int err = 0;
1618 	struct runs_tree *run = &ni->file.run;
1619 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1620 	struct ATTRIB *attr = NULL, *attr_b;
1621 	struct ATTR_LIST_ENTRY *le, *le_b;
1622 	struct mft_inode *mi, *mi_b;
1623 	CLST svcn, evcn1, next_svcn, len;
1624 	CLST vcn, end, clst_data;
1625 	u64 total_size, valid_size, data_size;
1626 
1627 	le_b = NULL;
1628 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1629 	if (!attr_b)
1630 		return -ENOENT;
1631 
1632 	if (!is_attr_ext(attr_b))
1633 		return -EINVAL;
1634 
1635 	vcn = frame << NTFS_LZNT_CUNIT;
1636 	total_size = le64_to_cpu(attr_b->nres.total_size);
1637 
1638 	svcn = le64_to_cpu(attr_b->nres.svcn);
1639 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1640 	data_size = le64_to_cpu(attr_b->nres.data_size);
1641 
1642 	if (svcn <= vcn && vcn < evcn1) {
1643 		attr = attr_b;
1644 		le = le_b;
1645 		mi = mi_b;
1646 	} else if (!le_b) {
1647 		err = -EINVAL;
1648 		goto out;
1649 	} else {
1650 		le = le_b;
1651 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1652 				    &mi);
1653 		if (!attr) {
1654 			err = -EINVAL;
1655 			goto out;
1656 		}
1657 		svcn = le64_to_cpu(attr->nres.svcn);
1658 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1659 	}
1660 
1661 	err = attr_load_runs(attr, ni, run, NULL);
1662 	if (err)
1663 		goto out;
1664 
1665 	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1666 	if (err)
1667 		goto out;
1668 
1669 	total_size -= (u64)clst_data << sbi->cluster_bits;
1670 
1671 	len = bytes_to_cluster(sbi, compr_size);
1672 
1673 	if (len == clst_data)
1674 		goto out;
1675 
1676 	if (len < clst_data) {
1677 		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1678 					NULL, true);
1679 		if (err)
1680 			goto out;
1681 
1682 		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1683 				   false)) {
1684 			err = -ENOMEM;
1685 			goto out;
1686 		}
1687 		end = vcn + clst_data;
1688 		/* Run contains updated range [vcn + len : end). */
1689 	} else {
1690 		CLST alen, hint = 0;
1691 		/* Get the last LCN to allocate from. */
1692 		if (vcn + clst_data &&
1693 		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1694 				      NULL)) {
1695 			hint = -1;
1696 		}
1697 
1698 		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1699 					     hint + 1, len - clst_data, NULL,
1700 					     ALLOCATE_DEF, &alen, 0, NULL,
1701 					     NULL);
1702 		if (err)
1703 			goto out;
1704 
1705 		end = vcn + len;
1706 		/* Run contains updated range [vcn + clst_data : end). */
1707 	}
1708 
1709 	total_size += (u64)len << sbi->cluster_bits;
1710 
1711 repack:
1712 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1713 	if (err)
1714 		goto out;
1715 
1716 	attr_b->nres.total_size = cpu_to_le64(total_size);
1717 	inode_set_bytes(&ni->vfs_inode, total_size);
1718 
1719 	mi_b->dirty = true;
1720 	mark_inode_dirty(&ni->vfs_inode);
1721 
1722 	/* Stored [vcn : next_svcn) from [vcn : end). */
1723 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1724 
1725 	if (end <= evcn1) {
1726 		if (next_svcn == evcn1) {
1727 			/* Normal way. Update attribute and exit. */
1728 			goto ok;
1729 		}
1730 		/* Add a new segment [next_svcn : evcn1). */
1731 		if (!ni->attr_list.size) {
1732 			err = ni_create_attr_list(ni);
1733 			if (err)
1734 				goto out;
1735 			/* Layout of records is changed. */
1736 			le_b = NULL;
1737 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1738 					      0, NULL, &mi_b);
1739 			if (!attr_b)
1740 				return -ENOENT;
1741 
1742 			attr = attr_b;
1743 			le = le_b;
1744 			mi = mi_b;
1745 			goto repack;
1746 		}
1747 	}
1748 
1749 	svcn = evcn1;
1750 
1751 	/* Estimate next attribute. */
1752 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1753 
1754 	if (attr) {
1755 		CLST alloc = bytes_to_cluster(
1756 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1757 		CLST evcn = le64_to_cpu(attr->nres.evcn);
1758 
1759 		if (end < next_svcn)
1760 			end = next_svcn;
1761 		while (end > evcn) {
1762 			/* Remove segment [svcn : evcn]. */
1763 			mi_remove_attr(NULL, mi, attr);
1764 
1765 			if (!al_remove_le(ni, le)) {
1766 				err = -EINVAL;
1767 				goto out;
1768 			}
1769 
1770 			if (evcn + 1 >= alloc) {
1771 				/* Last attribute segment. */
1772 				evcn1 = evcn + 1;
1773 				goto ins_ext;
1774 			}
1775 
1776 			if (ni_load_mi(ni, le, &mi)) {
1777 				attr = NULL;
1778 				goto out;
1779 			}
1780 
1781 			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1782 					    &le->id);
1783 			if (!attr) {
1784 				err = -EINVAL;
1785 				goto out;
1786 			}
1787 			svcn = le64_to_cpu(attr->nres.svcn);
1788 			evcn = le64_to_cpu(attr->nres.evcn);
1789 		}
1790 
1791 		if (end < svcn)
1792 			end = svcn;
1793 
1794 		err = attr_load_runs(attr, ni, run, &end);
1795 		if (err)
1796 			goto out;
1797 
1798 		evcn1 = evcn + 1;
1799 		attr->nres.svcn = cpu_to_le64(next_svcn);
1800 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1801 		if (err)
1802 			goto out;
1803 
1804 		le->vcn = cpu_to_le64(next_svcn);
1805 		ni->attr_list.dirty = true;
1806 		mi->dirty = true;
1807 
1808 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1809 	}
1810 ins_ext:
1811 	if (evcn1 > next_svcn) {
1812 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1813 					    next_svcn, evcn1 - next_svcn,
1814 					    attr_b->flags, &attr, &mi, NULL);
1815 		if (err)
1816 			goto out;
1817 	}
1818 ok:
1819 	run_truncate_around(run, vcn);
1820 out:
1821 	if (new_valid > data_size)
1822 		new_valid = data_size;
1823 
1824 	valid_size = le64_to_cpu(attr_b->nres.valid_size);
1825 	if (new_valid != valid_size) {
1826 		attr_b->nres.valid_size = cpu_to_le64(new_valid);
1827 		mi_b->dirty = true;
1828 	}
1829 
1830 	return err;
1831 }
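
/*
 * Note (an assumption based on the locking contract above):
 * attr_allocate_frame() is meant for the compressed write path, e.g.
 * ni_write_frame(), which resizes one compression frame to exactly
 * 'compr_size' bytes after (re)compressing it.
 */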
1832 
1833 /*
1834  * attr_collapse_range - Collapse range in file.
1835  */
1836 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1837 {
1838 	int err = 0;
1839 	struct runs_tree *run = &ni->file.run;
1840 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1841 	struct ATTRIB *attr = NULL, *attr_b;
1842 	struct ATTR_LIST_ENTRY *le, *le_b;
1843 	struct mft_inode *mi, *mi_b;
1844 	CLST svcn, evcn1, len, dealloc, alen;
1845 	CLST vcn, end;
1846 	u64 valid_size, data_size, alloc_size, total_size;
1847 	u32 mask;
1848 	__le16 a_flags;
1849 
1850 	if (!bytes)
1851 		return 0;
1852 
1853 	le_b = NULL;
1854 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1855 	if (!attr_b)
1856 		return -ENOENT;
1857 
1858 	if (!attr_b->non_res) {
1859 		/* Attribute is resident. Nothing to do? */
1860 		return 0;
1861 	}
1862 
1863 	data_size = le64_to_cpu(attr_b->nres.data_size);
1864 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1865 	a_flags = attr_b->flags;
1866 
1867 	if (is_attr_ext(attr_b)) {
1868 		total_size = le64_to_cpu(attr_b->nres.total_size);
1869 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1870 	} else {
1871 		total_size = alloc_size;
1872 		mask = sbi->cluster_mask;
1873 	}
1874 
1875 	if ((vbo & mask) || (bytes & mask)) {
1876 		/* Only cluster-aligned ranges can be collapsed. */
1877 		return -EINVAL;
1878 	}
1879 
1880 	if (vbo > data_size)
1881 		return -EINVAL;
1882 
1883 	down_write(&ni->file.run_lock);
1884 
1885 	if (vbo + bytes >= data_size) {
1886 		u64 new_valid = min(ni->i_valid, vbo);
1887 
1888 		/* Simple truncate file at 'vbo'. */
1889 		truncate_setsize(&ni->vfs_inode, vbo);
1890 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1891 				    &new_valid, true, NULL);
1892 
1893 		if (!err && new_valid < ni->i_valid)
1894 			ni->i_valid = new_valid;
1895 
1896 		goto out;
1897 	}
1898 
1899 	/*
1900 	 * Enumerate all attribute segments and collapse.
1901 	 */
1902 	alen = alloc_size >> sbi->cluster_bits;
1903 	vcn = vbo >> sbi->cluster_bits;
1904 	len = bytes >> sbi->cluster_bits;
1905 	end = vcn + len;
1906 	dealloc = 0;
1907 
1908 	svcn = le64_to_cpu(attr_b->nres.svcn);
1909 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1910 
1911 	if (svcn <= vcn && vcn < evcn1) {
1912 		attr = attr_b;
1913 		le = le_b;
1914 		mi = mi_b;
1915 	} else if (!le_b) {
1916 		err = -EINVAL;
1917 		goto out;
1918 	} else {
1919 		le = le_b;
1920 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1921 				    &mi);
1922 		if (!attr) {
1923 			err = -EINVAL;
1924 			goto out;
1925 		}
1926 
1927 		svcn = le64_to_cpu(attr->nres.svcn);
1928 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1929 	}
1930 
1931 	for (;;) {
1932 		if (svcn >= end) {
1933 			/* Shift VCN. */
1934 			attr->nres.svcn = cpu_to_le64(svcn - len);
1935 			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1936 			if (le) {
1937 				le->vcn = attr->nres.svcn;
1938 				ni->attr_list.dirty = true;
1939 			}
1940 			mi->dirty = true;
1941 		} else if (svcn < vcn || end < evcn1) {
1942 			CLST vcn1, eat, next_svcn;
1943 
1944 			/* Collapse a part of this attribute segment. */
1945 			err = attr_load_runs(attr, ni, run, &svcn);
1946 			if (err)
1947 				goto out;
1948 			vcn1 = max(vcn, svcn);
1949 			eat = min(end, evcn1) - vcn1;
1950 
1951 			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1952 						true);
1953 			if (err)
1954 				goto out;
1955 
1956 			if (!run_collapse_range(run, vcn1, eat)) {
1957 				err = -ENOMEM;
1958 				goto out;
1959 			}
1960 
1961 			if (svcn >= vcn) {
1962 				/* Shift VCN */
1963 				attr->nres.svcn = cpu_to_le64(vcn);
1964 				if (le) {
1965 					le->vcn = attr->nres.svcn;
1966 					ni->attr_list.dirty = true;
1967 				}
1968 			}
1969 
1970 			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1971 			if (err)
1972 				goto out;
1973 
1974 			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1975 			if (next_svcn + eat < evcn1) {
1976 				err = ni_insert_nonresident(
1977 					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1978 					evcn1 - eat - next_svcn, a_flags, &attr,
1979 					&mi, &le);
1980 				if (err)
1981 					goto out;
1982 
1983 				/* Layout of records may be changed. */
1984 				attr_b = NULL;
1985 			}
1986 
1987 			/* Free all allocated memory. */
1988 			run_truncate(run, 0);
1989 		} else {
1990 			u16 le_sz;
1991 			u16 roff = le16_to_cpu(attr->nres.run_off);
1992 
1993 			if (roff > le32_to_cpu(attr->size)) {
1994 				err = -EINVAL;
1995 				goto out;
1996 			}
1997 
1998 			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
1999 				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
2000 				      le32_to_cpu(attr->size) - roff);
2001 
2002 			/* Delete this attribute segment. */
2003 			mi_remove_attr(NULL, mi, attr);
2004 			if (!le)
2005 				break;
2006 
2007 			le_sz = le16_to_cpu(le->size);
2008 			if (!al_remove_le(ni, le)) {
2009 				err = -EINVAL;
2010 				goto out;
2011 			}
2012 
2013 			if (evcn1 >= alen)
2014 				break;
2015 
2016 			if (!svcn) {
2017 				/* Load next record that contains this attribute. */
2018 				if (ni_load_mi(ni, le, &mi)) {
2019 					err = -EINVAL;
2020 					goto out;
2021 				}
2022 
2023 				/* Look for required attribute. */
2024 				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
2025 						    0, &le->id);
2026 				if (!attr) {
2027 					err = -EINVAL;
2028 					goto out;
2029 				}
2030 				goto next_attr;
2031 			}
2032 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
2033 		}
2034 
2035 		if (evcn1 >= alen)
2036 			break;
2037 
2038 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2039 		if (!attr) {
2040 			err = -EINVAL;
2041 			goto out;
2042 		}
2043 
2044 next_attr:
2045 		svcn = le64_to_cpu(attr->nres.svcn);
2046 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2047 	}
2048 
2049 	if (!attr_b) {
2050 		le_b = NULL;
2051 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2052 				      &mi_b);
2053 		if (!attr_b) {
2054 			err = -ENOENT;
2055 			goto out;
2056 		}
2057 	}
2058 
2059 	data_size -= bytes;
2060 	valid_size = ni->i_valid;
2061 	if (vbo + bytes <= valid_size)
2062 		valid_size -= bytes;
2063 	else if (vbo < valid_size)
2064 		valid_size = vbo;
2065 
2066 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
2067 	attr_b->nres.data_size = cpu_to_le64(data_size);
2068 	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
2069 	total_size -= (u64)dealloc << sbi->cluster_bits;
2070 	if (is_attr_ext(attr_b))
2071 		attr_b->nres.total_size = cpu_to_le64(total_size);
2072 	mi_b->dirty = true;
2073 
2074 	/* Update inode size. */
2075 	ni->i_valid = valid_size;
2076 	ni->vfs_inode.i_size = data_size;
2077 	inode_set_bytes(&ni->vfs_inode, total_size);
2078 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2079 	mark_inode_dirty(&ni->vfs_inode);
2080 
2081 out:
2082 	up_write(&ni->file.run_lock);
2083 	if (err)
2084 		_ntfs_bad_inode(&ni->vfs_inode);
2085 
2086 	return err;
2087 }
2088 
/*
 * attr_punch_hole
 *
 * Not for normal files; only sparse/compressed attributes are supported.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
	u64 total_size, alloc_size;
	u32 mask;
	__le16 a_flags;
	struct runs_tree run2;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: Punching beyond the allocation is allowed; nothing to do. */
		return 0;
	}

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;

	if ((vbo & mask) || (bytes & mask)) {
		/* We would have to zero unaligned range(s). */
		if (frame_size == NULL) {
			/* The caller insists the range is aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}
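	/*
	 * From here on both vbo and bytes are frame-aligned, so the hole
	 * consists of whole clusters. On E_NTFS_NOTALIGNED above, the
	 * caller is expected to zero the unaligned head/tail itself
	 * (one frame is *frame_size bytes).
	 */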

	down_write(&ni->file.run_lock);
	run_init(&run2);
	run_truncate(run, 0);

	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	hole = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	a_flags = attr_b->flags;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}
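	/*
	 * 'attr' now covers the segment containing 'vcn'. Segments are
	 * walked in ascending vcn order; each iteration handles the
	 * overlap of [vcn, end) with the current segment [svcn, evcn1).
	 */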

	while (svcn < end) {
		CLST vcn1, zero, hole2 = hole;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto done;
		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		/*
		 * Check the range [vcn1, vcn1 + zero).
		 * Calculate how many clusters there are.
		 * Don't do any destructive actions.
		 */
		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
		if (err)
			goto done;

		/* Check if the required range is already a hole. */
		if (hole2 == hole)
			goto next_attr;

		/* Make a clone of the run so we can undo. */
		err = run_clone(run, &run2);
		if (err)
			goto done;

		/* Make the range [vcn1, vcn1 + zero) a sparse hole. */
		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
			err = -ENOMEM;
			goto done;
		}

		/* Update the run in the attribute segment. */
		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
		if (err)
			goto done;
		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		if (next_svcn < evcn1) {
			/* Insert a new attribute segment. */
			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
						    next_svcn,
						    evcn1 - next_svcn, a_flags,
						    &attr, &mi, &le);
			if (err)
				goto undo_punch;

			/* Layout of records may have changed. */
			attr_b = NULL;
		}

		/* The real deallocation. Should not fail. */
		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);

next_attr:
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		/* Get the next attribute segment. */
		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

done:
	if (!hole)
		goto out;

	if (!attr_b) {
		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}
	}

	total_size -= (u64)hole << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_close(&run2);
	up_write(&ni->file.run_lock);
	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_punch:
	/*
	 * Restore packed runs.
	 * 'mi_pack_runs' should not fail because we restore the original runs.
	 */
	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
		goto bad_inode;

	goto done;
}

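/*
 * NOTE: attr_punch_hole uses a dry-run-then-commit pattern: the first
 * run_deallocate_ex(NULL, ...) pass only counts clusters, the runs are
 * cloned into 'run2', and clusters are freed for real only after
 * mi_pack_runs/ni_insert_nonresident have succeeded, so a failure can
 * be rolled back via 'undo_punch'.
 */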
/*
 * attr_insert_range - Insert a range (hole) into a file.
 * Not for normal files; only sparse/compressed attributes are supported.
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST vcn, svcn, evcn1, len, next_svcn;
	u64 data_size, alloc_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b)) {
		/* This was already checked by the caller; see fallocate. */
		return -EOPNOTSUPP;
	}

	if (!attr_b->non_res) {
		data_size = le32_to_cpu(attr_b->res.data_size);
		alloc_size = data_size;
		mask = sbi->cluster_mask; /* cluster_size - 1 */
	} else {
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	}

	if (vbo > data_size) {
		/* Inserting a range beyond the file size is not allowed. */
		return -EINVAL;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only frame-aligned ranges can be inserted. */
		return -EINVAL;
	}

	/*
	 * valid_size <= data_size <= alloc_size.
	 * Check that the new alloc_size does not exceed the maximum possible.
	 */
	if (bytes > sbi->maxbytes_sparse - alloc_size)
		return -EFBIG;

	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
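	/* From here on we work in whole clusters: insert [vcn, vcn + len). */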

	down_write(&ni->file.run_lock);

	if (!attr_b->non_res) {
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
				    data_size + bytes, NULL, false, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err)
			goto out;

		if (!attr_b->non_res) {
			/* Still resident. */
			char *data = Add2Ptr(attr_b,
					     le16_to_cpu(attr_b->res.data_off));

			/*
			 * Shift the original payload ('data_size' bytes before
			 * the resize) up by 'bytes' and zero the gap at vbo.
			 * Note: vbo is aligned and vbo <= data_size here.
			 */
			memmove(data + vbo + bytes, data + vbo,
				data_size - vbo);
			memset(data + vbo, 0, bytes);
			goto done;
		}

		/* The resident attribute has become nonresident. */
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	}
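	/*
	 * The attribute is nonresident now; insert the hole by shifting
	 * whole cluster runs instead of moving bytes.
	 */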

	/*
	 * Enumerate all attribute segments and shift start vcn.
	 */
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	run_truncate(run, 0); /* Clear cached values. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!run_insert_range(run, vcn, len)) {
		err = -ENOMEM;
		goto out;
	}

	/* Try to pack as much as possible into the current record. */
	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
	if (err)
		goto out;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

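	/*
	 * Shift the start/end vcns of every following unnamed $DATA
	 * segment by 'len' clusters and keep the attribute list in sync.
	 */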
	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_add_cpu(&attr->nres.svcn, len);
		le64_add_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	if (next_svcn < evcn1 + len) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 + len - next_svcn,
					    a_flags, NULL, NULL, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. Try to undo. */
			goto undo_insert_range;
		}
	}

	/*
	 * Update the primary attribute segment.
	 */
	if (vbo <= ni->i_valid)
		ni->i_valid += bytes;

	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);

	/* ni->i_valid may temporarily differ from the on-disk valid_size. */
	if (ni->i_valid > data_size + bytes)
		attr_b->nres.valid_size = attr_b->nres.data_size;
	else
		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
	mi_b->dirty = true;

done:
	ni->vfs_inode.i_size += bytes;
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_truncate(run, 0); /* Clear cached values. */

	up_write(&ni->file.run_lock);

	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_insert_range:
	/*
	 * ni_insert_nonresident failed: collapse the runs back and shift
	 * the following segments down again, mirroring the steps above.
	 */
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr)
			goto bad_inode;

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (attr_load_runs(attr, ni, run, NULL))
		goto bad_inode;

	if (!run_collapse_range(run, vcn, len))
		goto bad_inode;

	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
		goto bad_inode;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_sub_cpu(&attr->nres.svcn, len);
		le64_sub_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	goto out;
}

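/*
 * NOTE: attr_insert_range is the backend for fallocate() with
 * FALLOC_FL_INSERT_RANGE (illustrative; see ntfs_fallocate):
 *
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, vbo, bytes);
 *
 * i_size grows by 'bytes' and existing data at or after 'vbo' moves up.
 */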