1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7  */
8 
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16 
17 /*
18  * External NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP may be defined
19  * to tune the preallocation algorithm.
20  */
21 #ifndef NTFS_MIN_LOG2_OF_CLUMP
22 #define NTFS_MIN_LOG2_OF_CLUMP 16
23 #endif
24 
25 #ifndef NTFS_MAX_LOG2_OF_CLUMP
26 #define NTFS_MAX_LOG2_OF_CLUMP 26
27 #endif
28 
29 // 16M
30 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
31 // 16G
32 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
33 
34 static inline u64 get_pre_allocated(u64 size)
35 {
36 	u32 clump;
37 	u8 align_shift;
38 	u64 ret;
39 
40 	if (size <= NTFS_CLUMP_MIN) {
41 		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
42 		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
43 	} else if (size >= NTFS_CLUMP_MAX) {
44 		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
45 		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
46 	} else {
47 		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
48 			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
49 		clump = 1u << align_shift;
50 	}
51 
52 	ret = ((size + clump - 1) >> align_shift) << align_shift;
53 
54 	return ret;
55 }
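/*
 * Worked example (illustrative only, assuming the default clump bounds):
 * for size = 1 GiB = 1ull << 30, size >> (8 + 16) == 64 and __ffs(64) == 6,
 * so align_shift = 16 - 1 + 6 = 21 and clump = 2 MiB. The requested size is
 * rounded up to the next 2 MiB boundary:
 *
 *	get_pre_allocated(1ull << 30);       // 1 GiB (already aligned)
 *	get_pre_allocated((1ull << 30) + 1); // 1 GiB + 2 MiB
 */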
56 
57 /*
58  * attr_load_runs - Load all runs stored in @attr.
59  */
60 static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
61 			  struct runs_tree *run, const CLST *vcn)
62 {
63 	int err;
64 	CLST svcn = le64_to_cpu(attr->nres.svcn);
65 	CLST evcn = le64_to_cpu(attr->nres.evcn);
66 	u32 asize;
67 	u16 run_off;
68 
69 	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
70 		return 0;
71 
72 	if (vcn && (evcn < *vcn || *vcn < svcn))
73 		return -EINVAL;
74 
75 	asize = le32_to_cpu(attr->size);
76 	run_off = le16_to_cpu(attr->nres.run_off);
77 
78 	if (run_off > asize)
79 		return -EINVAL;
80 
81 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
82 			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
83 			    asize - run_off);
84 	if (err < 0)
85 		return err;
86 
87 	return 0;
88 }
89 
90 /*
91  * run_deallocate_ex - Deallocate clusters.
92  */
93 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
94 			     CLST vcn, CLST len, CLST *done, bool trim)
95 {
96 	int err = 0;
97 	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
98 	size_t idx;
99 
100 	if (!len)
101 		goto out;
102 
103 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
104 failed:
105 		run_truncate(run, vcn0);
106 		err = -EINVAL;
107 		goto out;
108 	}
109 
110 	for (;;) {
111 		if (clen > len)
112 			clen = len;
113 
114 		if (!clen) {
115 			err = -EINVAL;
116 			goto out;
117 		}
118 
119 		if (lcn != SPARSE_LCN) {
120 			if (sbi) {
121 				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
122 				mark_as_free_ex(sbi, lcn, clen, trim);
123 			}
124 			dn += clen;
125 		}
126 
127 		len -= clen;
128 		if (!len)
129 			break;
130 
131 		vcn_next = vcn + clen;
132 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
133 		    vcn != vcn_next) {
134 			/* Save memory - don't load entire run. */
135 			goto failed;
136 		}
137 	}
138 
139 out:
140 	if (done)
141 		*done += dn;
142 
143 	return err;
144 }
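/*
 * Illustrative note (not driver code): deallocating 8 clusters at vcn 0
 * from a run mapped as (lcn 100, len 5) + (SPARSE_LCN, len 3) frees only
 * the bitmap range [100, 105); *done is incremented by 5 because sparse
 * fragments occupy no clusters on disk.
 */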
145 
146 /*
147  * attr_allocate_clusters - Find free space, mark it as used and store in @run.
148  */
149 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
150 			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
151 			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
152 			   CLST *new_lcn, CLST *new_len)
153 {
154 	int err;
155 	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
156 	size_t cnt = run->count;
157 
158 	for (;;) {
159 		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
160 					       opt);
161 
162 		if (err == -ENOSPC && pre) {
163 			pre = 0;
164 			if (*pre_alloc)
165 				*pre_alloc = 0;
166 			continue;
167 		}
168 
169 		if (err)
170 			goto out;
171 
172 		if (vcn == vcn0) {
173 			/* Return the first fragment. */
174 			if (new_lcn)
175 				*new_lcn = lcn;
176 			if (new_len)
177 				*new_len = flen;
178 		}
179 
180 		/* Add new fragment into run storage. */
181 		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
182 			/* Undo last 'ntfs_look_for_free_space' */
183 			mark_as_free_ex(sbi, lcn, len, false);
184 			err = -ENOMEM;
185 			goto out;
186 		}
187 
188 		if (opt & ALLOCATE_ZERO) {
189 			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;
190 
191 			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
192 						   (sector_t)lcn << shift,
193 						   (sector_t)flen << shift,
194 						   GFP_NOFS, 0);
195 			if (err)
196 				goto out;
197 		}
198 
199 		vcn += flen;
200 
201 		if (flen >= len || (opt & ALLOCATE_MFT) ||
202 		    (fr && run->count - cnt >= fr)) {
203 			*alen = vcn - vcn0;
204 			return 0;
205 		}
206 
207 		len -= flen;
208 	}
209 
210 out:
211 	/* Undo 'ntfs_look_for_free_space' */
212 	if (vcn - vcn0) {
213 		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
214 		run_truncate(run, vcn0);
215 	}
216 
217 	return err;
218 }
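/*
 * Minimal usage sketch (hypothetical values): allocate 16 clusters at the
 * start of a run, with no preallocation and no fragment limit:
 *
 *	CLST alen, lcn0, len0;
 *	int err = attr_allocate_clusters(sbi, run, 0, 0, 16, NULL,
 *					 ALLOCATE_DEF, &alen, 0, &lcn0, &len0);
 *
 * On success, alen holds the number of clusters actually reserved and
 * (lcn0, len0) describes the first allocated fragment.
 */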
219 
220 /*
221  * attr_make_nonresident
222  *
223  * If @page is not NULL, it already contains resident data and is locked
224  * (called from ni_write_frame()).
225  */
226 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
227 			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
228 			  u64 new_size, struct runs_tree *run,
229 			  struct ATTRIB **ins_attr, struct page *page)
230 {
231 	struct ntfs_sb_info *sbi;
232 	struct ATTRIB *attr_s;
233 	struct MFT_REC *rec;
234 	u32 used, asize, rsize, aoff;
235 	bool is_data;
236 	CLST len, alen;
237 	char *next;
238 	int err;
239 
240 	if (attr->non_res) {
241 		*ins_attr = attr;
242 		return 0;
243 	}
244 
245 	sbi = mi->sbi;
246 	rec = mi->mrec;
247 	attr_s = NULL;
248 	used = le32_to_cpu(rec->used);
249 	asize = le32_to_cpu(attr->size);
250 	next = Add2Ptr(attr, asize);
251 	aoff = PtrOffset(rec, attr);
252 	rsize = le32_to_cpu(attr->res.data_size);
253 	is_data = attr->type == ATTR_DATA && !attr->name_len;
254 
255 	/* len - how many clusters are required to store 'rsize' bytes. */
256 	if (is_attr_compressed(attr)) {
257 		u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
258 		len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
259 	} else {
260 		len = bytes_to_cluster(sbi, rsize);
261 	}
262 
263 	run_init(run);
264 
265 	/* Make a copy of original attribute. */
266 	attr_s = kmemdup(attr, asize, GFP_NOFS);
267 	if (!attr_s) {
268 		err = -ENOMEM;
269 		goto out;
270 	}
271 
272 	if (!len) {
273 		/* Empty resident -> Empty nonresident. */
274 		alen = 0;
275 	} else {
276 		const char *data = resident_data(attr);
277 
278 		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
279 					     ALLOCATE_DEF, &alen, 0, NULL,
280 					     NULL);
281 		if (err)
282 			goto out1;
283 
284 		if (!rsize) {
285 			/* Empty resident -> Non-empty nonresident. */
286 		} else if (!is_data) {
287 			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
288 			if (err)
289 				goto out2;
290 		} else if (!page) {
291 			char *kaddr;
292 
293 			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
294 			if (!page) {
295 				err = -ENOMEM;
296 				goto out2;
297 			}
298 			kaddr = kmap_atomic(page);
299 			memcpy(kaddr, data, rsize);
300 			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
301 			kunmap_atomic(kaddr);
302 			flush_dcache_page(page);
303 			SetPageUptodate(page);
304 			set_page_dirty(page);
305 			unlock_page(page);
306 			put_page(page);
307 		}
308 	}
309 
310 	/* Remove original attribute. */
311 	used -= asize;
312 	memmove(attr, Add2Ptr(attr, asize), used - aoff);
313 	rec->used = cpu_to_le32(used);
314 	mi->dirty = true;
315 	if (le)
316 		al_remove_le(ni, le);
317 
318 	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
319 				    attr_s->name_len, run, 0, alen,
320 				    attr_s->flags, &attr, NULL, NULL);
321 	if (err)
322 		goto out3;
323 
324 	kfree(attr_s);
325 	attr->nres.data_size = cpu_to_le64(rsize);
326 	attr->nres.valid_size = attr->nres.data_size;
327 
328 	*ins_attr = attr;
329 
330 	if (is_data)
331 		ni->ni_flags &= ~NI_FLAG_RESIDENT;
332 
333 	/* Resident attribute becomes non-resident. */
334 	return 0;
335 
336 out3:
337 	attr = Add2Ptr(rec, aoff);
338 	memmove(next, attr, used - aoff);
339 	memcpy(attr, attr_s, asize);
340 	rec->used = cpu_to_le32(used + asize);
341 	mi->dirty = true;
342 out2:
343 	/* Undo: do not trim newly allocated clusters. */
344 	run_deallocate(sbi, run, false);
345 	run_close(run);
346 out1:
347 	kfree(attr_s);
348 out:
349 	return err;
350 }
351 
352 /*
353  * attr_set_size_res - Helper for attr_set_size().
354  */
355 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
356 			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
357 			     u64 new_size, struct runs_tree *run,
358 			     struct ATTRIB **ins_attr)
359 {
360 	struct ntfs_sb_info *sbi = mi->sbi;
361 	struct MFT_REC *rec = mi->mrec;
362 	u32 used = le32_to_cpu(rec->used);
363 	u32 asize = le32_to_cpu(attr->size);
364 	u32 aoff = PtrOffset(rec, attr);
365 	u32 rsize = le32_to_cpu(attr->res.data_size);
366 	u32 tail = used - aoff - asize;
367 	char *next = Add2Ptr(attr, asize);
368 	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
369 
370 	if (dsize < 0) {
371 		memmove(next + dsize, next, tail);
372 	} else if (dsize > 0) {
373 		if (used + dsize > sbi->max_bytes_per_attr)
374 			return attr_make_nonresident(ni, attr, le, mi, new_size,
375 						     run, ins_attr, NULL);
376 
377 		memmove(next + dsize, next, tail);
378 		memset(next, 0, dsize);
379 	}
380 
381 	if (new_size > rsize)
382 		memset(Add2Ptr(resident_data(attr), rsize), 0,
383 		       new_size - rsize);
384 
385 	rec->used = cpu_to_le32(used + dsize);
386 	attr->size = cpu_to_le32(asize + dsize);
387 	attr->res.data_size = cpu_to_le32(new_size);
388 	mi->dirty = true;
389 	*ins_attr = attr;
390 
391 	return 0;
392 }
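/*
 * Illustrative arithmetic (not driver code): growing a resident attribute
 * from rsize = 10 to new_size = 20 gives
 * dsize = ALIGN(20, 8) - ALIGN(10, 8) = 24 - 16 = 8, so the record tail is
 * shifted right by 8 bytes, the gap is zeroed, and rec->used and attr->size
 * each grow by 8 while attr->res.data_size becomes 20.
 */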
393 
394 /*
395  * attr_set_size - Change the size of attribute.
396  *
397  * Extend:
398  *   - Sparse/compressed: No allocated clusters.
399  *   - Normal: Append allocated and preallocated new clusters.
400  * Shrink:
401  *   - No deallocate if @keep_prealloc is set.
402  */
403 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
404 		  const __le16 *name, u8 name_len, struct runs_tree *run,
405 		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
406 		  struct ATTRIB **ret)
407 {
408 	int err = 0;
409 	struct ntfs_sb_info *sbi = ni->mi.sbi;
410 	u8 cluster_bits = sbi->cluster_bits;
411 	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
412 		      !name_len;
413 	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
414 	struct ATTRIB *attr = NULL, *attr_b;
415 	struct ATTR_LIST_ENTRY *le, *le_b;
416 	struct mft_inode *mi, *mi_b;
417 	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
418 	CLST next_svcn, pre_alloc = -1, done = 0;
419 	bool is_ext, is_bad = false;
420 	bool dirty = false;
421 	u32 align;
422 	struct MFT_REC *rec;
423 
424 again:
425 	alen = 0;
426 	le_b = NULL;
427 	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
428 			      &mi_b);
429 	if (!attr_b) {
430 		err = -ENOENT;
431 		goto bad_inode;
432 	}
433 
434 	if (!attr_b->non_res) {
435 		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
436 					&attr_b);
437 		if (err)
438 			return err;
439 
440 		/* Return if file is still resident. */
441 		if (!attr_b->non_res) {
442 			dirty = true;
443 			goto ok1;
444 		}
445 
446 		/* Layout of records may be changed, so do a full search. */
447 		goto again;
448 	}
449 
450 	is_ext = is_attr_ext(attr_b);
451 	align = sbi->cluster_size;
452 	if (is_ext)
453 		align <<= attr_b->nres.c_unit;
454 
455 	old_valid = le64_to_cpu(attr_b->nres.valid_size);
456 	old_size = le64_to_cpu(attr_b->nres.data_size);
457 	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
458 
459 again_1:
460 	old_alen = old_alloc >> cluster_bits;
461 
462 	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
463 	new_alen = new_alloc >> cluster_bits;
464 
465 	if (keep_prealloc && new_size < old_size) {
466 		attr_b->nres.data_size = cpu_to_le64(new_size);
467 		mi_b->dirty = dirty = true;
468 		goto ok;
469 	}
470 
471 	vcn = old_alen - 1;
472 
473 	svcn = le64_to_cpu(attr_b->nres.svcn);
474 	evcn = le64_to_cpu(attr_b->nres.evcn);
475 
476 	if (svcn <= vcn && vcn <= evcn) {
477 		attr = attr_b;
478 		le = le_b;
479 		mi = mi_b;
480 	} else if (!le_b) {
481 		err = -EINVAL;
482 		goto bad_inode;
483 	} else {
484 		le = le_b;
485 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
486 				    &mi);
487 		if (!attr) {
488 			err = -EINVAL;
489 			goto bad_inode;
490 		}
491 
492 next_le_1:
493 		svcn = le64_to_cpu(attr->nres.svcn);
494 		evcn = le64_to_cpu(attr->nres.evcn);
495 	}
496 	/*
497 	 * Here we have:
498 	 * attr,mi,le - last attribute segment (containing 'vcn').
499 	 * attr_b,mi_b,le_b - base (primary) attribute segment.
500 	 */
501 next_le:
502 	rec = mi->mrec;
503 	err = attr_load_runs(attr, ni, run, NULL);
504 	if (err)
505 		goto out;
506 
507 	if (new_size > old_size) {
508 		CLST to_allocate;
509 		size_t free;
510 
511 		if (new_alloc <= old_alloc) {
512 			attr_b->nres.data_size = cpu_to_le64(new_size);
513 			mi_b->dirty = dirty = true;
514 			goto ok;
515 		}
516 
517 		/*
518 		 * Add clusters. In simple case we have to:
519 		 *  - allocate space (vcn, lcn, len)
520 		 *  - update packed run in 'mi'
521 		 *  - update attr->nres.evcn
522 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
523 		 */
524 		to_allocate = new_alen - old_alen;
525 add_alloc_in_same_attr_seg:
526 		lcn = 0;
527 		if (is_mft) {
528 			/* MFT allocates clusters from MFT zone. */
529 			pre_alloc = 0;
530 		} else if (is_ext) {
531 			/* No preallocation for sparse/compressed. */
532 			pre_alloc = 0;
533 		} else if (pre_alloc == -1) {
534 			pre_alloc = 0;
535 			if (type == ATTR_DATA && !name_len &&
536 			    sbi->options->prealloc) {
537 				pre_alloc = bytes_to_cluster(
538 						    sbi, get_pre_allocated(
539 								 new_size)) -
540 					    new_alen;
541 			}
542 
543 			/* Get the last LCN to allocate from. */
544 			if (old_alen &&
545 			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
546 				lcn = SPARSE_LCN;
547 			}
548 
549 			if (lcn == SPARSE_LCN)
550 				lcn = 0;
551 			else if (lcn)
552 				lcn += 1;
553 
554 			free = wnd_zeroes(&sbi->used.bitmap);
555 			if (to_allocate > free) {
556 				err = -ENOSPC;
557 				goto out;
558 			}
559 
560 			if (pre_alloc && to_allocate + pre_alloc > free)
561 				pre_alloc = 0;
562 		}
563 
564 		vcn = old_alen;
565 
566 		if (is_ext) {
567 			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
568 					   false)) {
569 				err = -ENOMEM;
570 				goto out;
571 			}
572 			alen = to_allocate;
573 		} else {
574 			/* ~3 bytes per fragment. */
575 			err = attr_allocate_clusters(
576 				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
577 				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
578 				is_mft ? 0 :
579 					 (sbi->record_size -
580 					  le32_to_cpu(rec->used) + 8) /
581 							 3 +
582 						 1,
583 				NULL, NULL);
584 			if (err)
585 				goto out;
586 		}
587 
588 		done += alen;
589 		vcn += alen;
590 		if (to_allocate > alen)
591 			to_allocate -= alen;
592 		else
593 			to_allocate = 0;
594 
595 pack_runs:
596 		err = mi_pack_runs(mi, attr, run, vcn - svcn);
597 		if (err)
598 			goto undo_1;
599 
600 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
601 		new_alloc_tmp = (u64)next_svcn << cluster_bits;
602 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
603 		mi_b->dirty = dirty = true;
604 
605 		if (next_svcn >= vcn && !to_allocate) {
606 			/* Normal way. Update attribute and exit. */
607 			attr_b->nres.data_size = cpu_to_le64(new_size);
608 			goto ok;
609 		}
610 
611 		/* At least two MFT records to avoid a recursive loop. */
612 		if (is_mft && next_svcn == vcn &&
613 		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
614 			new_size = new_alloc_tmp;
615 			attr_b->nres.data_size = attr_b->nres.alloc_size;
616 			goto ok;
617 		}
618 
619 		if (le32_to_cpu(rec->used) < sbi->record_size) {
620 			old_alen = next_svcn;
621 			evcn = old_alen - 1;
622 			goto add_alloc_in_same_attr_seg;
623 		}
624 
625 		attr_b->nres.data_size = attr_b->nres.alloc_size;
626 		if (new_alloc_tmp < old_valid)
627 			attr_b->nres.valid_size = attr_b->nres.data_size;
628 
629 		if (type == ATTR_LIST) {
630 			err = ni_expand_list(ni);
631 			if (err)
632 				goto undo_2;
633 			if (next_svcn < vcn)
634 				goto pack_runs;
635 
636 			/* Layout of records is changed. */
637 			goto again;
638 		}
639 
640 		if (!ni->attr_list.size) {
641 			err = ni_create_attr_list(ni);
642 			/* In case of error, the layout of records is not changed. */
643 			if (err)
644 				goto undo_2;
645 			/* Layout of records is changed. */
646 		}
647 
648 		if (next_svcn >= vcn) {
649 			/* This is MFT data, repeat. */
650 			goto again;
651 		}
652 
653 		/* Insert new attribute segment. */
654 		err = ni_insert_nonresident(ni, type, name, name_len, run,
655 					    next_svcn, vcn - next_svcn,
656 					    attr_b->flags, &attr, &mi, NULL);
657 
658 		/*
659 		 * Layout of records may be changed.
660 		 * Find base attribute to update.
661 		 */
662 		le_b = NULL;
663 		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
664 				      NULL, &mi_b);
665 		if (!attr_b) {
666 			err = -EINVAL;
667 			goto bad_inode;
668 		}
669 
670 		if (err) {
671 			/* ni_insert_nonresident failed. */
672 			attr = NULL;
673 			goto undo_2;
674 		}
675 
676 		/* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
677 		if (ni->mi.rno != MFT_REC_MFT)
678 			run_truncate_head(run, evcn + 1);
679 
680 		svcn = le64_to_cpu(attr->nres.svcn);
681 		evcn = le64_to_cpu(attr->nres.evcn);
682 
683 		/*
684 		 * Attribute is in a consistent state.
685 		 * Save this point to restore to if the next steps fail.
686 		 */
687 		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
688 		attr_b->nres.valid_size = attr_b->nres.data_size =
689 			attr_b->nres.alloc_size = cpu_to_le64(old_size);
690 		mi_b->dirty = dirty = true;
691 		goto again_1;
692 	}
693 
694 	if (new_size != old_size ||
695 	    (new_alloc != old_alloc && !keep_prealloc)) {
696 		/*
697 		 * Truncate clusters. In simple case we have to:
698 		 *  - update packed run in 'mi'
699 		 *  - update attr->nres.evcn
700 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
701 		 *  - mark and trim clusters as free (vcn, lcn, len)
702 		 */
703 		CLST dlen = 0;
704 
705 		vcn = max(svcn, new_alen);
706 		new_alloc_tmp = (u64)vcn << cluster_bits;
707 
708 		if (vcn > svcn) {
709 			err = mi_pack_runs(mi, attr, run, vcn - svcn);
710 			if (err)
711 				goto out;
712 		} else if (le && le->vcn) {
713 			u16 le_sz = le16_to_cpu(le->size);
714 
715 			/*
716 			 * NOTE: List entries for one attribute are always
717 			 * the same size. We deal with the last entry (vcn==0),
718 			 * which is not the first in the entries array
719 			 * (the list entry for the std attribute is always first).
720 			 * So it is safe to step back.
721 			 */
722 			mi_remove_attr(NULL, mi, attr);
723 
724 			if (!al_remove_le(ni, le)) {
725 				err = -EINVAL;
726 				goto bad_inode;
727 			}
728 
729 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
730 		} else {
731 			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
732 			mi->dirty = true;
733 		}
734 
735 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
736 
737 		if (vcn == new_alen) {
738 			attr_b->nres.data_size = cpu_to_le64(new_size);
739 			if (new_size < old_valid)
740 				attr_b->nres.valid_size =
741 					attr_b->nres.data_size;
742 		} else {
743 			if (new_alloc_tmp <=
744 			    le64_to_cpu(attr_b->nres.data_size))
745 				attr_b->nres.data_size =
746 					attr_b->nres.alloc_size;
747 			if (new_alloc_tmp <
748 			    le64_to_cpu(attr_b->nres.valid_size))
749 				attr_b->nres.valid_size =
750 					attr_b->nres.alloc_size;
751 		}
752 		mi_b->dirty = dirty = true;
753 
754 		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
755 					true);
756 		if (err)
757 			goto out;
758 
759 		if (is_ext) {
760 			/* dlen - the number of clusters actually deallocated. */
761 			le64_sub_cpu(&attr_b->nres.total_size,
762 				     ((u64)dlen << cluster_bits));
763 		}
764 
765 		run_truncate(run, vcn);
766 
767 		if (new_alloc_tmp <= new_alloc)
768 			goto ok;
769 
770 		old_size = new_alloc_tmp;
771 		vcn = svcn - 1;
772 
773 		if (le == le_b) {
774 			attr = attr_b;
775 			mi = mi_b;
776 			evcn = svcn - 1;
777 			svcn = 0;
778 			goto next_le;
779 		}
780 
781 		if (le->type != type || le->name_len != name_len ||
782 		    memcmp(le_name(le), name, name_len * sizeof(short))) {
783 			err = -EINVAL;
784 			goto bad_inode;
785 		}
786 
787 		err = ni_load_mi(ni, le, &mi);
788 		if (err)
789 			goto out;
790 
791 		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
792 		if (!attr) {
793 			err = -EINVAL;
794 			goto bad_inode;
795 		}
796 		goto next_le_1;
797 	}
798 
799 ok:
800 	if (new_valid) {
801 		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
802 
803 		if (attr_b->nres.valid_size != valid) {
804 			attr_b->nres.valid_size = valid;
805 			mi_b->dirty = true;
806 		}
807 	}
808 
809 ok1:
810 	if (ret)
811 		*ret = attr_b;
812 
813 	if (((type == ATTR_DATA && !name_len) ||
814 	     (type == ATTR_ALLOC && name == I30_NAME))) {
815 		/* Update inode_set_bytes. */
816 		if (attr_b->non_res) {
817 			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
818 			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
819 				inode_set_bytes(&ni->vfs_inode, new_alloc);
820 				dirty = true;
821 			}
822 		}
823 
824 		/* Don't forget to update duplicate information in parent. */
825 		if (dirty) {
826 			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
827 			mark_inode_dirty(&ni->vfs_inode);
828 		}
829 	}
830 
831 	return 0;
832 
833 undo_2:
834 	vcn -= alen;
835 	attr_b->nres.data_size = cpu_to_le64(old_size);
836 	attr_b->nres.valid_size = cpu_to_le64(old_valid);
837 	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
838 
839 	/* Restore 'attr' and 'mi'. */
840 	if (attr)
841 		goto restore_run;
842 
843 	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
844 	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
845 		attr = attr_b;
846 		le = le_b;
847 		mi = mi_b;
848 	} else if (!le_b) {
849 		err = -EINVAL;
850 		goto bad_inode;
851 	} else {
852 		le = le_b;
853 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
854 				    &svcn, &mi);
855 		if (!attr)
856 			goto bad_inode;
857 	}
858 
859 restore_run:
860 	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
861 		is_bad = true;
862 
863 undo_1:
864 	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
865 
866 	run_truncate(run, vcn);
867 out:
868 	if (is_bad) {
869 bad_inode:
870 		_ntfs_bad_inode(&ni->vfs_inode);
871 	}
872 	return err;
873 }
874 
875 /*
876  * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
877  *
878  * @new == NULL means just to get current mapping for 'vcn'
879  * @new != NULL means allocate real cluster if 'vcn' maps to hole
880  * @zero - zero out newly allocated clusters
881  *
882  *  NOTE:
883  *  - @new != NULL is used only for sparse or compressed attributes.
884  *  - Newly allocated clusters are zeroed via blkdev_issue_zeroout.
885  */
886 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
887 			CLST *len, bool *new, bool zero)
888 {
889 	int err = 0;
890 	struct runs_tree *run = &ni->file.run;
891 	struct ntfs_sb_info *sbi;
892 	u8 cluster_bits;
893 	struct ATTRIB *attr, *attr_b;
894 	struct ATTR_LIST_ENTRY *le, *le_b;
895 	struct mft_inode *mi, *mi_b;
896 	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
897 	CLST alloc, evcn;
898 	unsigned fr;
899 	u64 total_size, total_size0;
900 	int step = 0;
901 
902 	if (new)
903 		*new = false;
904 
905 	/* Try to find in cache. */
906 	down_read(&ni->file.run_lock);
907 	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
908 		*len = 0;
909 	up_read(&ni->file.run_lock);
910 
911 	if (*len && (*lcn != SPARSE_LCN || !new))
912 		return 0; /* Fast normal way without allocation. */
913 
914 	/* No cluster in cache or we need to allocate cluster in hole. */
915 	sbi = ni->mi.sbi;
916 	cluster_bits = sbi->cluster_bits;
917 
918 	ni_lock(ni);
919 	down_write(&ni->file.run_lock);
920 
921 	/* Repeat the code above (under write lock). */
922 	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
923 		*len = 0;
924 
925 	if (*len) {
926 		if (*lcn != SPARSE_LCN || !new)
927 			goto out; /* normal way without allocation. */
928 		if (clen > *len)
929 			clen = *len;
930 	}
931 
932 	le_b = NULL;
933 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
934 	if (!attr_b) {
935 		err = -ENOENT;
936 		goto out;
937 	}
938 
939 	if (!attr_b->non_res) {
940 		*lcn = RESIDENT_LCN;
941 		*len = 1;
942 		goto out;
943 	}
944 
945 	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
946 	if (vcn >= asize) {
947 		if (new) {
948 			err = -EINVAL;
949 		} else {
950 			*len = 1;
951 			*lcn = SPARSE_LCN;
952 		}
953 		goto out;
954 	}
955 
956 	svcn = le64_to_cpu(attr_b->nres.svcn);
957 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
958 
959 	attr = attr_b;
960 	le = le_b;
961 	mi = mi_b;
962 
963 	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
964 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
965 				    &mi);
966 		if (!attr) {
967 			err = -EINVAL;
968 			goto out;
969 		}
970 		svcn = le64_to_cpu(attr->nres.svcn);
971 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
972 	}
973 
974 	/* Load the actual run information into the cache. */
975 	err = attr_load_runs(attr, ni, run, NULL);
976 	if (err)
977 		goto out;
978 
979 	/* Check for compressed frame. */
980 	err = attr_is_frame_compressed(ni, attr_b, vcn >> NTFS_LZNT_CUNIT,
981 				       &hint, run);
982 	if (err)
983 		goto out;
984 
985 	if (hint) {
986 		/* If the frame is compressed, don't touch it. */
987 		*lcn = COMPRESSED_LCN;
988 		/* Length to the end of the frame. */
989 		*len = NTFS_LZNT_CLUSTERS - (vcn & (NTFS_LZNT_CLUSTERS - 1));
990 		err = 0;
991 		goto out;
992 	}
993 
994 	if (!*len) {
995 		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
996 			if (*lcn != SPARSE_LCN || !new)
997 				goto ok; /* Slow normal way without allocation. */
998 
999 			if (clen > *len)
1000 				clen = *len;
1001 		} else if (!new) {
1002 			/* Here we may return -ENOENT.
1003 			 * In any case the caller gets zero length. */
1004 			goto ok;
1005 		}
1006 	}
1007 
1008 	if (!is_attr_ext(attr_b)) {
1009 		/* The code below is only for sparse or compressed attributes. */
1010 		err = -EINVAL;
1011 		goto out;
1012 	}
1013 
1014 	vcn0 = vcn;
1015 	to_alloc = clen;
1016 	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
1017 	/* Allocate frame-aligned clusters.
1018 	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed.
1019 	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
1020 	if (attr_b->nres.c_unit) {
1021 		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
1022 		CLST cmask = ~(clst_per_frame - 1);
1023 
1024 		/* Get frame aligned vcn and to_alloc. */
1025 		vcn = vcn0 & cmask;
1026 		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
1027 		if (fr < clst_per_frame)
1028 			fr = clst_per_frame;
1029 		zero = true;
1030 
1031 		/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
1032 		if (vcn < svcn || evcn1 <= vcn) {
1033 			struct ATTRIB *attr2;
1034 			/* Load runs for truncated vcn. */
1035 			attr2 = ni_find_attr(ni, attr_b, &le_b, ATTR_DATA, NULL,
1036 					     0, &vcn, &mi);
1037 			if (!attr2) {
1038 				err = -EINVAL;
1039 				goto out;
1040 			}
1041 			evcn1 = le64_to_cpu(attr2->nres.evcn) + 1;
1042 			err = attr_load_runs(attr2, ni, run, NULL);
1043 			if (err)
1044 				goto out;
1045 		}
1046 	}
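	/*
	 * Example (illustrative): with c_unit == 4 (16 clusters per frame),
	 * a request for vcn0 = 21, clen = 3 is widened to vcn = 16,
	 * to_alloc = 16, so whole frames are always allocated and zeroed
	 * together.
	 */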
1047 
1048 	if (vcn + to_alloc > asize)
1049 		to_alloc = asize - vcn;
1050 
1051 	/* Get the last LCN to allocate from. */
1052 	hint = 0;
1053 
1054 	if (vcn > evcn1) {
1055 		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1056 				   false)) {
1057 			err = -ENOMEM;
1058 			goto out;
1059 		}
1060 	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1061 		hint = -1;
1062 	}
1063 
1064 	/* Allocate and zeroout new clusters. */
1065 	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
1066 				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
1067 				     fr, lcn, len);
1068 	if (err)
1069 		goto out;
1070 	*new = true;
1071 	step = 1;
1072 
1073 	end = vcn + alen;
1074 	/* Save 'total_size0' to restore if error. */
1075 	total_size0 = le64_to_cpu(attr_b->nres.total_size);
1076 	total_size = total_size0 + ((u64)alen << cluster_bits);
1077 
1078 	if (vcn != vcn0) {
1079 		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
1080 			err = -EINVAL;
1081 			goto out;
1082 		}
1083 		if (*lcn == SPARSE_LCN) {
1084 			/* Internal error. Should not happen. */
1085 			WARN_ON(1);
1086 			err = -EINVAL;
1087 			goto out;
1088 		}
1089 		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
1090 		if (vcn0 + *len > end)
1091 			*len = end - vcn0;
1092 	}
1093 
1094 repack:
1095 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1096 	if (err)
1097 		goto out;
1098 
1099 	attr_b->nres.total_size = cpu_to_le64(total_size);
1100 	inode_set_bytes(&ni->vfs_inode, total_size);
1101 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1102 
1103 	mi_b->dirty = true;
1104 	mark_inode_dirty(&ni->vfs_inode);
1105 
1106 	/* Stored [vcn : next_svcn) from [vcn : end). */
1107 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1108 
1109 	if (end <= evcn1) {
1110 		if (next_svcn == evcn1) {
1111 			/* Normal way. Update attribute and exit. */
1112 			goto ok;
1113 		}
1114 		/* Add new segment [next_svcn : evcn1). */
1115 		if (!ni->attr_list.size) {
1116 			err = ni_create_attr_list(ni);
1117 			if (err)
1118 				goto undo1;
1119 			/* Layout of records is changed. */
1120 			le_b = NULL;
1121 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1122 					      0, NULL, &mi_b);
1123 			if (!attr_b) {
1124 				err = -ENOENT;
1125 				goto out;
1126 			}
1127 
1128 			attr = attr_b;
1129 			le = le_b;
1130 			mi = mi_b;
1131 			goto repack;
1132 		}
1133 	}
1134 
1135 	/*
1136 	 * The code below may require an additional cluster (to extend the
1137 	 * attribute list) and/or one more MFT record.
1138 	 * It is too complex to undo operations if -ENOSPC occurs deep inside
1139 	 * 'ni_insert_nonresident'.
1140 	 * Return -ENOSPC in advance if there is no free cluster and no free MFT record.
1141 	 */
1142 	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
1143 		/* Undo step 1. */
1144 		err = -ENOSPC;
1145 		goto undo1;
1146 	}
1147 
1148 	step = 2;
1149 	svcn = evcn1;
1150 
1151 	/* Estimate next attribute. */
1152 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1153 
1154 	if (!attr) {
1155 		/* Insert new attribute segment. */
1156 		goto ins_ext;
1157 	}
1158 
1159 	/* Try to update an existing attribute segment. */
1160 	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
1161 	evcn = le64_to_cpu(attr->nres.evcn);
1162 
1163 	if (end < next_svcn)
1164 		end = next_svcn;
1165 	while (end > evcn) {
1166 		/* Remove segment [svcn : evcn). */
1167 		mi_remove_attr(NULL, mi, attr);
1168 
1169 		if (!al_remove_le(ni, le)) {
1170 			err = -EINVAL;
1171 			goto out;
1172 		}
1173 
1174 		if (evcn + 1 >= alloc) {
1175 			/* Last attribute segment. */
1176 			evcn1 = evcn + 1;
1177 			goto ins_ext;
1178 		}
1179 
1180 		if (ni_load_mi(ni, le, &mi)) {
1181 			attr = NULL;
1182 			goto out;
1183 		}
1184 
1185 		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
1186 		if (!attr) {
1187 			err = -EINVAL;
1188 			goto out;
1189 		}
1190 		svcn = le64_to_cpu(attr->nres.svcn);
1191 		evcn = le64_to_cpu(attr->nres.evcn);
1192 	}
1193 
1194 	if (end < svcn)
1195 		end = svcn;
1196 
1197 	err = attr_load_runs(attr, ni, run, &end);
1198 	if (err)
1199 		goto out;
1200 
1201 	evcn1 = evcn + 1;
1202 	attr->nres.svcn = cpu_to_le64(next_svcn);
1203 	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1204 	if (err)
1205 		goto out;
1206 
1207 	le->vcn = cpu_to_le64(next_svcn);
1208 	ni->attr_list.dirty = true;
1209 	mi->dirty = true;
1210 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1211 
1212 ins_ext:
1213 	if (evcn1 > next_svcn) {
1214 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1215 					    next_svcn, evcn1 - next_svcn,
1216 					    attr_b->flags, &attr, &mi, NULL);
1217 		if (err)
1218 			goto out;
1219 	}
1220 ok:
1221 	run_truncate_around(run, vcn);
1222 out:
1223 	if (err && step > 1) {
1224 		/* Too complex to restore. */
1225 		_ntfs_bad_inode(&ni->vfs_inode);
1226 	}
1227 	up_write(&ni->file.run_lock);
1228 	ni_unlock(ni);
1229 
1230 	return err;
1231 
1232 undo1:
1233 	/* Undo step 1. */
1234 	attr_b->nres.total_size = cpu_to_le64(total_size0);
1235 	inode_set_bytes(&ni->vfs_inode, total_size0);
1236 
1237 	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
1238 	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
1239 	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
1240 		_ntfs_bad_inode(&ni->vfs_inode);
1241 	}
1242 	goto out;
1243 }
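/*
 * Usage sketch (hypothetical caller): map one cluster for writing,
 * allocating it if the file is sparse at that position:
 *
 *	CLST lcn, len;
 *	bool new;
 *	int err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new, true);
 *
 * On return, lcn is a real cluster (or RESIDENT_LCN / COMPRESSED_LCN for
 * the special cases handled above) and new reports whether a hole was just
 * filled.
 */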
1244 
1245 int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1246 {
1247 	u64 vbo;
1248 	struct ATTRIB *attr;
1249 	u32 data_size;
1250 
1251 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1252 	if (!attr)
1253 		return -EINVAL;
1254 
1255 	if (attr->non_res)
1256 		return E_NTFS_NONRESIDENT;
1257 
1258 	vbo = page->index << PAGE_SHIFT;
1259 	data_size = le32_to_cpu(attr->res.data_size);
1260 	if (vbo < data_size) {
1261 		const char *data = resident_data(attr);
1262 		char *kaddr = kmap_atomic(page);
1263 		u32 use = data_size - vbo;
1264 
1265 		if (use > PAGE_SIZE)
1266 			use = PAGE_SIZE;
1267 
1268 		memcpy(kaddr, data + vbo, use);
1269 		memset(kaddr + use, 0, PAGE_SIZE - use);
1270 		kunmap_atomic(kaddr);
1271 		flush_dcache_page(page);
1272 		SetPageUptodate(page);
1273 	} else if (!PageUptodate(page)) {
1274 		zero_user_segment(page, 0, PAGE_SIZE);
1275 		SetPageUptodate(page);
1276 	}
1277 
1278 	return 0;
1279 }
1280 
1281 int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1282 {
1283 	u64 vbo;
1284 	struct mft_inode *mi;
1285 	struct ATTRIB *attr;
1286 	u32 data_size;
1287 
1288 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1289 	if (!attr)
1290 		return -EINVAL;
1291 
1292 	if (attr->non_res) {
1293 		/* Return special error code to check this case. */
1294 		return E_NTFS_NONRESIDENT;
1295 	}
1296 
1297 	vbo = page->index << PAGE_SHIFT;
1298 	data_size = le32_to_cpu(attr->res.data_size);
1299 	if (vbo < data_size) {
1300 		char *data = resident_data(attr);
1301 		char *kaddr = kmap_atomic(page);
1302 		u32 use = data_size - vbo;
1303 
1304 		if (use > PAGE_SIZE)
1305 			use = PAGE_SIZE;
1306 		memcpy(data + vbo, kaddr, use);
1307 		kunmap_atomic(kaddr);
1308 		mi->dirty = true;
1309 	}
1310 	ni->i_valid = data_size;
1311 
1312 	return 0;
1313 }
1314 
1315 /*
1316  * attr_load_runs_vcn - Load runs with VCN.
1317  */
1318 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1319 		       const __le16 *name, u8 name_len, struct runs_tree *run,
1320 		       CLST vcn)
1321 {
1322 	struct ATTRIB *attr;
1323 	int err;
1324 	CLST svcn, evcn;
1325 	u16 ro;
1326 
1327 	if (!ni) {
1328 		/* Is record corrupted? */
1329 		return -ENOENT;
1330 	}
1331 
1332 	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1333 	if (!attr) {
1334 		/* Is record corrupted? */
1335 		return -ENOENT;
1336 	}
1337 
1338 	svcn = le64_to_cpu(attr->nres.svcn);
1339 	evcn = le64_to_cpu(attr->nres.evcn);
1340 
1341 	if (evcn < vcn || vcn < svcn) {
1342 		/* Is record corrupted? */
1343 		return -EINVAL;
1344 	}
1345 
1346 	ro = le16_to_cpu(attr->nres.run_off);
1347 
1348 	if (ro > le32_to_cpu(attr->size))
1349 		return -EINVAL;
1350 
1351 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1352 			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1353 	if (err < 0)
1354 		return err;
1355 	return 0;
1356 }
1357 
1358 /*
1359  * attr_load_runs_range - Load runs for given range [from to).
1360  */
1361 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1362 			 const __le16 *name, u8 name_len, struct runs_tree *run,
1363 			 u64 from, u64 to)
1364 {
1365 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1366 	u8 cluster_bits = sbi->cluster_bits;
1367 	CLST vcn;
1368 	CLST vcn_last = (to - 1) >> cluster_bits;
1369 	CLST lcn, clen;
1370 	int err;
1371 
1372 	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1373 		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1374 			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1375 						 vcn);
1376 			if (err)
1377 				return err;
1378 			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1379 		}
1380 	}
1381 
1382 	return 0;
1383 }
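/*
 * Usage sketch (hypothetical): make sure byte range [0, 64K) of the unnamed
 * data stream is mapped before doing I/O on it:
 *
 *	int err = attr_load_runs_range(ni, ATTR_DATA, NULL, 0,
 *				       &ni->file.run, 0, 0x10000);
 */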
1384 
1385 #ifdef CONFIG_NTFS3_LZX_XPRESS
1386 /*
1387  * attr_wof_frame_info
1388  *
1389  * Read header of Xpress/LZX file to get info about frame.
1390  */
1391 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1392 			struct runs_tree *run, u64 frame, u64 frames,
1393 			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1394 {
1395 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1396 	u64 vbo[2], off[2], wof_size;
1397 	u32 voff;
1398 	u8 bytes_per_off;
1399 	char *addr;
1400 	struct page *page;
1401 	int i, err;
1402 	__le32 *off32;
1403 	__le64 *off64;
1404 
1405 	if (ni->vfs_inode.i_size < 0x100000000ull) {
1406 		/* File starts with an array of 32-bit offsets. */
1407 		bytes_per_off = sizeof(__le32);
1408 		vbo[1] = frame << 2;
1409 		*vbo_data = frames << 2;
1410 	} else {
1411 		/* File starts with an array of 64-bit offsets. */
1412 		bytes_per_off = sizeof(__le64);
1413 		vbo[1] = frame << 3;
1414 		*vbo_data = frames << 3;
1415 	}
1416 
1417 	/*
1418 	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1419 	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1420 	 */
1421 	if (!attr->non_res) {
1422 		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1423 			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1424 			return -EINVAL;
1425 		}
1426 		addr = resident_data(attr);
1427 
1428 		if (bytes_per_off == sizeof(__le32)) {
1429 			off32 = Add2Ptr(addr, vbo[1]);
1430 			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1431 			off[1] = le32_to_cpu(off32[0]);
1432 		} else {
1433 			off64 = Add2Ptr(addr, vbo[1]);
1434 			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1435 			off[1] = le64_to_cpu(off64[0]);
1436 		}
1437 
1438 		*vbo_data += off[0];
1439 		*ondisk_size = off[1] - off[0];
1440 		return 0;
1441 	}
1442 
1443 	wof_size = le64_to_cpu(attr->nres.data_size);
1444 	down_write(&ni->file.run_lock);
1445 	page = ni->file.offs_page;
1446 	if (!page) {
1447 		page = alloc_page(GFP_KERNEL);
1448 		if (!page) {
1449 			err = -ENOMEM;
1450 			goto out;
1451 		}
1452 		page->index = -1;
1453 		ni->file.offs_page = page;
1454 	}
1455 	lock_page(page);
1456 	addr = page_address(page);
1457 
1458 	if (vbo[1]) {
1459 		voff = vbo[1] & (PAGE_SIZE - 1);
1460 		vbo[0] = vbo[1] - bytes_per_off;
1461 		i = 0;
1462 	} else {
1463 		voff = 0;
1464 		vbo[0] = 0;
1465 		off[0] = 0;
1466 		i = 1;
1467 	}
1468 
1469 	do {
1470 		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1471 
1472 		if (index != page->index) {
1473 			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1474 			u64 to = min(from + PAGE_SIZE, wof_size);
1475 
1476 			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1477 						   ARRAY_SIZE(WOF_NAME), run,
1478 						   from, to);
1479 			if (err)
1480 				goto out1;
1481 
1482 			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1483 					     to - from, REQ_OP_READ);
1484 			if (err) {
1485 				page->index = -1;
1486 				goto out1;
1487 			}
1488 			page->index = index;
1489 		}
1490 
1491 		if (i) {
1492 			if (bytes_per_off == sizeof(__le32)) {
1493 				off32 = Add2Ptr(addr, voff);
1494 				off[1] = le32_to_cpu(*off32);
1495 			} else {
1496 				off64 = Add2Ptr(addr, voff);
1497 				off[1] = le64_to_cpu(*off64);
1498 			}
1499 		} else if (!voff) {
1500 			if (bytes_per_off == sizeof(__le32)) {
1501 				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1502 				off[0] = le32_to_cpu(*off32);
1503 			} else {
1504 				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1505 				off[0] = le64_to_cpu(*off64);
1506 			}
1507 		} else {
1508 			/* Two values in one page. */
1509 			if (bytes_per_off == sizeof(__le32)) {
1510 				off32 = Add2Ptr(addr, voff);
1511 				off[0] = le32_to_cpu(off32[-1]);
1512 				off[1] = le32_to_cpu(off32[0]);
1513 			} else {
1514 				off64 = Add2Ptr(addr, voff);
1515 				off[0] = le64_to_cpu(off64[-1]);
1516 				off[1] = le64_to_cpu(off64[0]);
1517 			}
1518 			break;
1519 		}
1520 	} while (++i < 2);
1521 
1522 	*vbo_data += off[0];
1523 	*ondisk_size = off[1] - off[0];
1524 
1525 out1:
1526 	unlock_page(page);
1527 out:
1528 	up_write(&ni->file.run_lock);
1529 	return err;
1530 }
1531 #endif
1532 
1533 /*
1534  * attr_is_frame_compressed - Used to detect compressed frame.
1535  *
1536  * attr - base (primary) attribute segment.
1537  * run  - run to use, usually == &ni->file.run.
1538  * Only base segments contains valid 'attr->nres.c_unit'
1539  * Only base segments contain a valid 'attr->nres.c_unit'.
1540 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1541 			     CLST frame, CLST *clst_data, struct runs_tree *run)
1542 {
1543 	int err;
1544 	u32 clst_frame;
1545 	CLST clen, lcn, vcn, alen, slen, vcn_next;
1546 	size_t idx;
1547 
1548 	*clst_data = 0;
1549 
1550 	if (!is_attr_compressed(attr))
1551 		return 0;
1552 
1553 	if (!attr->non_res)
1554 		return 0;
1555 
1556 	clst_frame = 1u << attr->nres.c_unit;
1557 	vcn = frame * clst_frame;
1558 
1559 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1560 		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1561 					 attr->name_len, run, vcn);
1562 		if (err)
1563 			return err;
1564 
1565 		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1566 			return -EINVAL;
1567 	}
1568 
1569 	if (lcn == SPARSE_LCN) {
1570 		/* Sparse frame. */
1571 		return 0;
1572 	}
1573 
1574 	if (clen >= clst_frame) {
1575 		/*
1576 		 * The frame is not compressed because
1577 		 * it does not contain any sparse clusters.
1578 		 */
1579 		*clst_data = clst_frame;
1580 		return 0;
1581 	}
1582 
1583 	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1584 	slen = 0;
1585 	*clst_data = clen;
1586 
1587 	/*
1588 	 * The frame is compressed if *clst_data + slen >= clst_frame.
1589 	 * Check next fragments.
1590 	 */
1591 	while ((vcn += clen) < alen) {
1592 		vcn_next = vcn;
1593 
1594 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1595 		    vcn_next != vcn) {
1596 			err = attr_load_runs_vcn(ni, attr->type,
1597 						 attr_name(attr),
1598 						 attr->name_len, run, vcn_next);
1599 			if (err)
1600 				return err;
1601 			vcn = vcn_next;
1602 
1603 			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1604 				return -EINVAL;
1605 		}
1606 
1607 		if (lcn == SPARSE_LCN) {
1608 			slen += clen;
1609 		} else {
1610 			if (slen) {
1611 				/*
1612 				 * Data clusters after sparse clusters:
1613 				 * not enough for a full frame.
1614 				 */
1615 				return -EINVAL;
1616 			}
1617 			*clst_data += clen;
1618 		}
1619 
1620 		if (*clst_data + slen >= clst_frame) {
1621 			if (!slen) {
1622 				/*
1623 				 * There are no sparse clusters in this frame,
1624 				 * so it is not compressed.
1625 				 */
1626 				*clst_data = clst_frame;
1627 			} else {
1628 				/* Frame is compressed. */
1629 			}
1630 			break;
1631 		}
1632 	}
1633 
1634 	return 0;
1635 }
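/*
 * Worked example (illustrative): with c_unit == 4 a frame spans 16
 * clusters. If the run for frame 0 is (lcn 200, len 10) followed by
 * (SPARSE_LCN, len 6), then clst_data + slen reaches 16 with slen != 0,
 * so the frame is compressed and *clst_data == 10. A mapped fragment of
 * len >= 16 would instead mean the frame is stored uncompressed.
 */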
1636 
1637 /*
1638  * attr_allocate_frame - Allocate/free clusters for @frame.
1639  *
1640  * Assumed: down_write(&ni->file.run_lock);
1641  */
1642 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1643 			u64 new_valid)
1644 {
1645 	int err = 0;
1646 	struct runs_tree *run = &ni->file.run;
1647 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1648 	struct ATTRIB *attr = NULL, *attr_b;
1649 	struct ATTR_LIST_ENTRY *le, *le_b;
1650 	struct mft_inode *mi, *mi_b;
1651 	CLST svcn, evcn1, next_svcn, len;
1652 	CLST vcn, end, clst_data;
1653 	u64 total_size, valid_size, data_size;
1654 
1655 	le_b = NULL;
1656 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1657 	if (!attr_b)
1658 		return -ENOENT;
1659 
1660 	if (!is_attr_ext(attr_b))
1661 		return -EINVAL;
1662 
1663 	vcn = frame << NTFS_LZNT_CUNIT;
1664 	total_size = le64_to_cpu(attr_b->nres.total_size);
1665 
1666 	svcn = le64_to_cpu(attr_b->nres.svcn);
1667 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1668 	data_size = le64_to_cpu(attr_b->nres.data_size);
1669 
1670 	if (svcn <= vcn && vcn < evcn1) {
1671 		attr = attr_b;
1672 		le = le_b;
1673 		mi = mi_b;
1674 	} else if (!le_b) {
1675 		err = -EINVAL;
1676 		goto out;
1677 	} else {
1678 		le = le_b;
1679 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1680 				    &mi);
1681 		if (!attr) {
1682 			err = -EINVAL;
1683 			goto out;
1684 		}
1685 		svcn = le64_to_cpu(attr->nres.svcn);
1686 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1687 	}
1688 
1689 	err = attr_load_runs(attr, ni, run, NULL);
1690 	if (err)
1691 		goto out;
1692 
1693 	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data, run);
1694 	if (err)
1695 		goto out;
1696 
1697 	total_size -= (u64)clst_data << sbi->cluster_bits;
1698 
1699 	len = bytes_to_cluster(sbi, compr_size);
1700 
1701 	if (len == clst_data)
1702 		goto out;
1703 
1704 	if (len < clst_data) {
1705 		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1706 					NULL, true);
1707 		if (err)
1708 			goto out;
1709 
1710 		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1711 				   false)) {
1712 			err = -ENOMEM;
1713 			goto out;
1714 		}
1715 		end = vcn + clst_data;
1716 		/* Run contains updated range [vcn + len : end). */
1717 	} else {
1718 		CLST alen, hint = 0;
1719 		/* Get the last LCN to allocate from. */
1720 		if (vcn + clst_data &&
1721 		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1722 				      NULL)) {
1723 			hint = -1;
1724 		}
1725 
1726 		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1727 					     hint + 1, len - clst_data, NULL,
1728 					     ALLOCATE_DEF, &alen, 0, NULL,
1729 					     NULL);
1730 		if (err)
1731 			goto out;
1732 
1733 		end = vcn + len;
1734 		/* Run contains updated range [vcn + clst_data : end). */
1735 	}
1736 
1737 	total_size += (u64)len << sbi->cluster_bits;
1738 
1739 repack:
1740 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1741 	if (err)
1742 		goto out;
1743 
1744 	attr_b->nres.total_size = cpu_to_le64(total_size);
1745 	inode_set_bytes(&ni->vfs_inode, total_size);
1746 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1747 
1748 	mi_b->dirty = true;
1749 	mark_inode_dirty(&ni->vfs_inode);
1750 
1751 	/* Stored [vcn : next_svcn) from [vcn : end). */
1752 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1753 
1754 	if (end <= evcn1) {
1755 		if (next_svcn == evcn1) {
1756 			/* Normal way. Update attribute and exit. */
1757 			goto ok;
1758 		}
1759 		/* Add new segment [next_svcn : evcn1). */
1760 		if (!ni->attr_list.size) {
1761 			err = ni_create_attr_list(ni);
1762 			if (err)
1763 				goto out;
1764 			/* Layout of records is changed. */
1765 			le_b = NULL;
1766 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1767 					      0, NULL, &mi_b);
1768 			if (!attr_b) {
1769 				err = -ENOENT;
1770 				goto out;
1771 			}
1772 
1773 			attr = attr_b;
1774 			le = le_b;
1775 			mi = mi_b;
1776 			goto repack;
1777 		}
1778 	}
1779 
1780 	svcn = evcn1;
1781 
1782 	/* Estimate next attribute. */
1783 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1784 
1785 	if (attr) {
1786 		CLST alloc = bytes_to_cluster(
1787 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1788 		CLST evcn = le64_to_cpu(attr->nres.evcn);
1789 
1790 		if (end < next_svcn)
1791 			end = next_svcn;
1792 		while (end > evcn) {
1793 			/* Remove segment [svcn : evcn). */
1794 			mi_remove_attr(NULL, mi, attr);
1795 
1796 			if (!al_remove_le(ni, le)) {
1797 				err = -EINVAL;
1798 				goto out;
1799 			}
1800 
1801 			if (evcn + 1 >= alloc) {
1802 				/* Last attribute segment. */
1803 				evcn1 = evcn + 1;
1804 				goto ins_ext;
1805 			}
1806 
1807 			if (ni_load_mi(ni, le, &mi)) {
1808 				attr = NULL;
1809 				goto out;
1810 			}
1811 
1812 			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1813 					    &le->id);
1814 			if (!attr) {
1815 				err = -EINVAL;
1816 				goto out;
1817 			}
1818 			svcn = le64_to_cpu(attr->nres.svcn);
1819 			evcn = le64_to_cpu(attr->nres.evcn);
1820 		}
1821 
1822 		if (end < svcn)
1823 			end = svcn;
1824 
1825 		err = attr_load_runs(attr, ni, run, &end);
1826 		if (err)
1827 			goto out;
1828 
1829 		evcn1 = evcn + 1;
1830 		attr->nres.svcn = cpu_to_le64(next_svcn);
1831 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1832 		if (err)
1833 			goto out;
1834 
1835 		le->vcn = cpu_to_le64(next_svcn);
1836 		ni->attr_list.dirty = true;
1837 		mi->dirty = true;
1838 
1839 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1840 	}
1841 ins_ext:
1842 	if (evcn1 > next_svcn) {
1843 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1844 					    next_svcn, evcn1 - next_svcn,
1845 					    attr_b->flags, &attr, &mi, NULL);
1846 		if (err)
1847 			goto out;
1848 	}
1849 ok:
1850 	run_truncate_around(run, vcn);
1851 out:
1852 	if (attr_b) {
1853 		if (new_valid > data_size)
1854 			new_valid = data_size;
1855 
1856 		valid_size = le64_to_cpu(attr_b->nres.valid_size);
1857 		if (new_valid != valid_size) {
1858 			attr_b->nres.valid_size = cpu_to_le64(new_valid);
1859 			mi_b->dirty = true;
1860 		}
1861 	}
1862 
1863 	return err;
1864 }
1865 
1866 /*
1867  * attr_collapse_range - Collapse range in file.
1868  */
1869 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1870 {
1871 	int err = 0;
1872 	struct runs_tree *run = &ni->file.run;
1873 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1874 	struct ATTRIB *attr = NULL, *attr_b;
1875 	struct ATTR_LIST_ENTRY *le, *le_b;
1876 	struct mft_inode *mi, *mi_b;
1877 	CLST svcn, evcn1, len, dealloc, alen;
1878 	CLST vcn, end;
1879 	u64 valid_size, data_size, alloc_size, total_size;
1880 	u32 mask;
1881 	__le16 a_flags;
1882 
1883 	if (!bytes)
1884 		return 0;
1885 
1886 	le_b = NULL;
1887 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1888 	if (!attr_b)
1889 		return -ENOENT;
1890 
1891 	if (!attr_b->non_res) {
1892 		/* Attribute is resident. Nothing to do? */
1893 		return 0;
1894 	}
1895 
1896 	data_size = le64_to_cpu(attr_b->nres.data_size);
1897 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1898 	a_flags = attr_b->flags;
1899 
1900 	if (is_attr_ext(attr_b)) {
1901 		total_size = le64_to_cpu(attr_b->nres.total_size);
1902 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1903 	} else {
1904 		total_size = alloc_size;
1905 		mask = sbi->cluster_mask;
1906 	}
1907 
1908 	if ((vbo & mask) || (bytes & mask)) {
1909 		/* Only cluster-aligned ranges may be collapsed. */
1910 		return -EINVAL;
1911 	}
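	/*
	 * Example (illustrative): with 4K clusters and c_unit == 4 the mask
	 * is 0xffff, so vbo and bytes must both be multiples of 64K (one
	 * compression frame); non-ext attributes only need cluster alignment.
	 */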
1912 
1913 	if (vbo > data_size)
1914 		return -EINVAL;
1915 
1916 	down_write(&ni->file.run_lock);
1917 
1918 	if (vbo + bytes >= data_size) {
1919 		u64 new_valid = min(ni->i_valid, vbo);
1920 
1921 		/* Simply truncate the file at 'vbo'. */
1922 		truncate_setsize(&ni->vfs_inode, vbo);
1923 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1924 				    &new_valid, true, NULL);
1925 
1926 		if (!err && new_valid < ni->i_valid)
1927 			ni->i_valid = new_valid;
1928 
1929 		goto out;
1930 	}
1931 
1932 	/*
1933 	 * Enumerate all attribute segments and collapse.
1934 	 */
1935 	alen = alloc_size >> sbi->cluster_bits;
1936 	vcn = vbo >> sbi->cluster_bits;
1937 	len = bytes >> sbi->cluster_bits;
1938 	end = vcn + len;
1939 	dealloc = 0;
1940 
1941 	svcn = le64_to_cpu(attr_b->nres.svcn);
1942 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1943 
1944 	if (svcn <= vcn && vcn < evcn1) {
1945 		attr = attr_b;
1946 		le = le_b;
1947 		mi = mi_b;
1948 	} else if (!le_b) {
1949 		err = -EINVAL;
1950 		goto out;
1951 	} else {
1952 		le = le_b;
1953 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1954 				    &mi);
1955 		if (!attr) {
1956 			err = -EINVAL;
1957 			goto out;
1958 		}
1959 
1960 		svcn = le64_to_cpu(attr->nres.svcn);
1961 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1962 	}
1963 
1964 	for (;;) {
1965 		if (svcn >= end) {
1966 			/* Segment is after the collapsed range: shift VCNs down by 'len'. */
1967 			attr->nres.svcn = cpu_to_le64(svcn - len);
1968 			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1969 			if (le) {
1970 				le->vcn = attr->nres.svcn;
1971 				ni->attr_list.dirty = true;
1972 			}
1973 			mi->dirty = true;
1974 		} else if (svcn < vcn || end < evcn1) {
1975 			CLST vcn1, eat, next_svcn;
1976 
1977 			/* Collapse a part of this attribute segment. */
1978 			err = attr_load_runs(attr, ni, run, &svcn);
1979 			if (err)
1980 				goto out;
1981 			vcn1 = max(vcn, svcn);
1982 			eat = min(end, evcn1) - vcn1;
1983 
1984 			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1985 						true);
1986 			if (err)
1987 				goto out;
1988 
1989 			if (!run_collapse_range(run, vcn1, eat)) {
1990 				err = -ENOMEM;
1991 				goto out;
1992 			}
1993 
1994 			if (svcn >= vcn) {
1995 				/* Shift VCN. */
1996 				attr->nres.svcn = cpu_to_le64(vcn);
1997 				if (le) {
1998 					le->vcn = attr->nres.svcn;
1999 					ni->attr_list.dirty = true;
2000 				}
2001 			}
2002 
2003 			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
2004 			if (err)
2005 				goto out;
2006 
2007 			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2008 			if (next_svcn + eat < evcn1) {
2009 				err = ni_insert_nonresident(
2010 					ni, ATTR_DATA, NULL, 0, run, next_svcn,
2011 					evcn1 - eat - next_svcn, a_flags, &attr,
2012 					&mi, &le);
2013 				if (err)
2014 					goto out;
2015 
2016 				/* Layout of records may be changed. */
2017 				attr_b = NULL;
2018 			}
2019 
2020 			/* Free all allocated memory. */
2021 			run_truncate(run, 0);
2022 		} else {
2023 			u16 le_sz;
2024 			u16 roff = le16_to_cpu(attr->nres.run_off);
2025 
2026 			if (roff > le32_to_cpu(attr->size)) {
2027 				err = -EINVAL;
2028 				goto out;
2029 			}
2030 
2031 			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
2032 				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
2033 				      le32_to_cpu(attr->size) - roff);
2034 
2035 			/* Delete this attribute segment. */
2036 			mi_remove_attr(NULL, mi, attr);
2037 			if (!le)
2038 				break;
2039 
2040 			le_sz = le16_to_cpu(le->size);
2041 			if (!al_remove_le(ni, le)) {
2042 				err = -EINVAL;
2043 				goto out;
2044 			}
2045 
2046 			if (evcn1 >= alen)
2047 				break;
2048 
2049 			if (!svcn) {
2050 				/* Load next record that contains this attribute. */
2051 				if (ni_load_mi(ni, le, &mi)) {
2052 					err = -EINVAL;
2053 					goto out;
2054 				}
2055 
2056 				/* Look for required attribute. */
2057 				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
2058 						    0, &le->id);
2059 				if (!attr) {
2060 					err = -EINVAL;
2061 					goto out;
2062 				}
2063 				goto next_attr;
2064 			}
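			/*
			 * al_remove_le() shifted the following list entries
			 * down by 'le_sz' bytes; step 'le' back so the
			 * enumeration below does not skip an entry.
			 */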
2065 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
2066 		}
2067 
2068 		if (evcn1 >= alen)
2069 			break;
2070 
2071 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2072 		if (!attr) {
2073 			err = -EINVAL;
2074 			goto out;
2075 		}
2076 
2077 next_attr:
2078 		svcn = le64_to_cpu(attr->nres.svcn);
2079 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2080 	}
2081 
2082 	if (!attr_b) {
2083 		le_b = NULL;
2084 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2085 				      &mi_b);
2086 		if (!attr_b) {
2087 			err = -ENOENT;
2088 			goto out;
2089 		}
2090 	}
2091 
2092 	data_size -= bytes;
2093 	valid_size = ni->i_valid;
2094 	if (vbo + bytes <= valid_size)
2095 		valid_size -= bytes;
2096 	else if (vbo < valid_size)
2097 		valid_size = vbo;
2098 
2099 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
2100 	attr_b->nres.data_size = cpu_to_le64(data_size);
2101 	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
2102 	total_size -= (u64)dealloc << sbi->cluster_bits;
2103 	if (is_attr_ext(attr_b))
2104 		attr_b->nres.total_size = cpu_to_le64(total_size);
2105 	mi_b->dirty = true;
2106 
2107 	/* Update inode size. */
2108 	ni->i_valid = valid_size;
2109 	i_size_write(&ni->vfs_inode, data_size);
2110 	inode_set_bytes(&ni->vfs_inode, total_size);
2111 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2112 	mark_inode_dirty(&ni->vfs_inode);
2113 
2114 out:
2115 	up_write(&ni->file.run_lock);
2116 	if (err)
2117 		_ntfs_bad_inode(&ni->vfs_inode);
2118 
2119 	return err;
2120 }
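
#if 0
/*
 * Illustrative sketch only (not built): how a caller such as the
 * FALLOC_FL_COLLAPSE_RANGE path of fallocate might invoke the function
 * above. The wrapper name is hypothetical.
 */
static int example_collapse(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	/* 'vbo' and 'bytes' must be cluster (or frame) aligned, see above. */
	return attr_collapse_range(ni, vbo, bytes);
}
#endif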
2121 
2122 /*
2123  * attr_punch_hole
2124  *
2125  * Not for normal files.
2126  */
2127 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2128 {
2129 	int err = 0;
2130 	struct runs_tree *run = &ni->file.run;
2131 	struct ntfs_sb_info *sbi = ni->mi.sbi;
2132 	struct ATTRIB *attr = NULL, *attr_b;
2133 	struct ATTR_LIST_ENTRY *le, *le_b;
2134 	struct mft_inode *mi, *mi_b;
2135 	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2136 	u64 total_size, alloc_size;
2137 	u32 mask;
2138 	__le16 a_flags;
2139 	struct runs_tree run2;
2140 
2141 	if (!bytes)
2142 		return 0;
2143 
2144 	le_b = NULL;
2145 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2146 	if (!attr_b)
2147 		return -ENOENT;
2148 
2149 	if (!attr_b->non_res) {
2150 		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2151 		u32 from, to;
2152 
2153 		if (vbo > data_size)
2154 			return 0;
2155 
2156 		from = vbo;
2157 		to = min_t(u64, vbo + bytes, data_size);
2158 		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2159 		return 0;
2160 	}
2161 
2162 	if (!is_attr_ext(attr_b))
2163 		return -EOPNOTSUPP;
2164 
2165 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2166 	total_size = le64_to_cpu(attr_b->nres.total_size);
2167 
2168 	if (vbo >= alloc_size) {
2169 		/* NOTE: Punching beyond the allocated size is allowed (no-op). */
2170 		return 0;
2171 	}
2172 
2173 	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2174 
2175 	bytes += vbo;
2176 	if (bytes > alloc_size)
2177 		bytes = alloc_size;
2178 	bytes -= vbo;
2179 
2180 	if ((vbo & mask) || (bytes & mask)) {
2181 		/* The unaligned head/tail must be zeroed, not deallocated. */
2182 		if (frame_size == NULL) {
2183 			/* Caller insists range is aligned. */
2184 			return -EINVAL;
2185 		}
2186 		*frame_size = mask + 1;
2187 		return E_NTFS_NOTALIGNED;
2188 	}
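	/*
	 * Note: on E_NTFS_NOTALIGNED the frame size (mask + 1) is returned in
	 * *frame_size and the caller is expected to zero the unaligned
	 * head/tail itself, punching only the frame-aligned middle.
	 */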
2189 
2190 	down_write(&ni->file.run_lock);
2191 	run_init(&run2);
2192 	run_truncate(run, 0);
2193 
2194 	/*
2195 	 * Enumerate all attribute segments and punch hole where necessary.
2196 	 */
2197 	alen = alloc_size >> sbi->cluster_bits;
2198 	vcn = vbo >> sbi->cluster_bits;
2199 	len = bytes >> sbi->cluster_bits;
2200 	end = vcn + len;
2201 	hole = 0;
2202 
2203 	svcn = le64_to_cpu(attr_b->nres.svcn);
2204 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2205 	a_flags = attr_b->flags;
2206 
2207 	if (svcn <= vcn && vcn < evcn1) {
2208 		attr = attr_b;
2209 		le = le_b;
2210 		mi = mi_b;
2211 	} else if (!le_b) {
2212 		err = -EINVAL;
2213 		goto bad_inode;
2214 	} else {
2215 		le = le_b;
2216 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2217 				    &mi);
2218 		if (!attr) {
2219 			err = -EINVAL;
2220 			goto bad_inode;
2221 		}
2222 
2223 		svcn = le64_to_cpu(attr->nres.svcn);
2224 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2225 	}
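	/*
	 * Per-segment plan: first count the allocated clusters in the range
	 * without freeing them (run_deallocate_ex() with sbi == NULL), clone
	 * the run into 'run2' for undo, map the range to SPARSE_LCN, repack
	 * the runs, and only then free the clusters for real.
	 */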
2226 
2227 	while (svcn < end) {
2228 		CLST vcn1, zero, hole2 = hole;
2229 
2230 		err = attr_load_runs(attr, ni, run, &svcn);
2231 		if (err)
2232 			goto done;
2233 		vcn1 = max(vcn, svcn);
2234 		zero = min(end, evcn1) - vcn1;
2235 
2236 		/*
2237 		 * Check range [vcn1 + zero).
2238 		 * Calculate how many clusters there are.
2239 		 * Don't do any destructive actions.
2240 		 */
2241 		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2242 		if (err)
2243 			goto done;
2244 
2245 		/* Check if required range is already hole. */
2246 		if (hole2 == hole)
2247 			goto next_attr;
2248 
2249 		/* Make a clone of run to undo. */
2250 		err = run_clone(run, &run2);
2251 		if (err)
2252 			goto done;
2253 
2254 		/* Make a hole range (sparse) [vcn1 + zero). */
2255 		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2256 			err = -ENOMEM;
2257 			goto done;
2258 		}
2259 
2260 		/* Update run in attribute segment. */
2261 		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2262 		if (err)
2263 			goto done;
2264 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2265 		if (next_svcn < evcn1) {
2266 			/* Insert new attribute segment. */
2267 			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2268 						    next_svcn,
2269 						    evcn1 - next_svcn, a_flags,
2270 						    &attr, &mi, &le);
2271 			if (err)
2272 				goto undo_punch;
2273 
2274 			/* Layout of records may have changed. */
2275 			attr_b = NULL;
2276 		}
2277 
2278 		/* Real deallocate. Should not fail. */
2279 		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2280 
2281 next_attr:
2282 		/* Free all allocated memory. */
2283 		run_truncate(run, 0);
2284 
2285 		if (evcn1 >= alen)
2286 			break;
2287 
2288 		/* Get next attribute segment. */
2289 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2290 		if (!attr) {
2291 			err = -EINVAL;
2292 			goto bad_inode;
2293 		}
2294 
2295 		svcn = le64_to_cpu(attr->nres.svcn);
2296 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2297 	}
2298 
2299 done:
2300 	if (!hole)
2301 		goto out;
2302 
2303 	if (!attr_b) {
2304 		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2305 				      &mi_b);
2306 		if (!attr_b) {
2307 			err = -EINVAL;
2308 			goto bad_inode;
2309 		}
2310 	}
2311 
2312 	total_size -= (u64)hole << sbi->cluster_bits;
2313 	attr_b->nres.total_size = cpu_to_le64(total_size);
2314 	mi_b->dirty = true;
2315 
2316 	/* Update inode size. */
2317 	inode_set_bytes(&ni->vfs_inode, total_size);
2318 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2319 	mark_inode_dirty(&ni->vfs_inode);
2320 
2321 out:
2322 	run_close(&run2);
2323 	up_write(&ni->file.run_lock);
2324 	return err;
2325 
2326 bad_inode:
2327 	_ntfs_bad_inode(&ni->vfs_inode);
2328 	goto out;
2329 
2330 undo_punch:
2331 	/*
2332 	 * Restore packed runs.
2333 	 * 'mi_pack_runs' should not fail because we restore the original runs.
2334 	 */
2335 	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2336 		goto bad_inode;
2337 
2338 	goto done;
2339 }
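
#if 0
/*
 * Illustrative sketch only (not built): the expected calling convention of
 * attr_punch_hole(). 'zero_unaligned_range' is a hypothetical helper named
 * here just for the example.
 */
static int example_punch(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	u32 frame_size;
	int err = attr_punch_hole(ni, vbo, bytes, &frame_size);

	if (err == E_NTFS_NOTALIGNED) {
		/* Zero the unaligned edges; 'frame_size' gives the alignment. */
		err = zero_unaligned_range(ni, vbo, bytes, frame_size);
	}
	return err;
}
#endif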
2340 
2341 /*
2342  * attr_insert_range - Insert range (hole) in file.
2343  * Not for normal files.
2344  */
2345 int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2346 {
2347 	int err = 0;
2348 	struct runs_tree *run = &ni->file.run;
2349 	struct ntfs_sb_info *sbi = ni->mi.sbi;
2350 	struct ATTRIB *attr = NULL, *attr_b;
2351 	struct ATTR_LIST_ENTRY *le, *le_b;
2352 	struct mft_inode *mi, *mi_b;
2353 	CLST vcn, svcn, evcn1, len, next_svcn;
2354 	u64 data_size, alloc_size;
2355 	u32 mask;
2356 	__le16 a_flags;
2357 
2358 	if (!bytes)
2359 		return 0;
2360 
2361 	le_b = NULL;
2362 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2363 	if (!attr_b)
2364 		return -ENOENT;
2365 
2366 	if (!is_attr_ext(attr_b)) {
2367 		/* Already checked by the caller; see fallocate. */
2368 		return -EOPNOTSUPP;
2369 	}
2370 
2371 	if (!attr_b->non_res) {
2372 		data_size = le32_to_cpu(attr_b->res.data_size);
2373 		alloc_size = data_size;
2374 		mask = sbi->cluster_mask; /* cluster_size - 1 */
2375 	} else {
2376 		data_size = le64_to_cpu(attr_b->nres.data_size);
2377 		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2378 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2379 	}
2380 
2381 	if (vbo > data_size) {
2382 		/* Inserting a range beyond the end of the file is not allowed. */
2383 		return -EINVAL;
2384 	}
2385 
2386 	if ((vbo & mask) || (bytes & mask)) {
2387 		/* Only frame-aligned ranges can be inserted. */
2388 		return -EINVAL;
2389 	}
2390 
2391 	/*
2392 	 * valid_size <= data_size <= alloc_size
2393 	 * Check that the new alloc_size will not exceed the maximum possible.
2394 	 */
2395 	if (bytes > sbi->maxbytes_sparse - alloc_size)
2396 		return -EFBIG;
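	/*
	 * The limit test is written as 'bytes > limit - alloc_size' rather
	 * than 'alloc_size + bytes > limit' so that the u64 sum cannot wrap.
	 */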
2397 
2398 	vcn = vbo >> sbi->cluster_bits;
2399 	len = bytes >> sbi->cluster_bits;
2400 
2401 	down_write(&ni->file.run_lock);
2402 
2403 	if (!attr_b->non_res) {
2404 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2405 				    data_size + bytes, NULL, false, NULL);
2406 
2407 		le_b = NULL;
2408 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2409 				      &mi_b);
2410 		if (!attr_b) {
2411 			err = -EINVAL;
2412 			goto bad_inode;
2413 		}
2414 
2415 		if (err)
2416 			goto out;
2417 
2418 		if (!attr_b->non_res) {
2419 			/* Still resident: shift the tail up, zero the gap at 'vbo'. */
2420 			char *data = Add2Ptr(attr_b,
2421 					     le16_to_cpu(attr_b->res.data_off));
2422 
2423 			memmove(data + vbo + bytes, data + vbo, data_size - vbo);
2424 			memset(data + vbo, 0, bytes);
2425 			goto done;
2426 		}
2427 
2428 		/* The resident attribute became non-resident. */
2429 		data_size = le64_to_cpu(attr_b->nres.data_size);
2430 		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2431 	}
2432 
2433 	/*
2434 	 * Enumerate all attribute segments and shift start vcn.
2435 	 */
2436 	a_flags = attr_b->flags;
2437 	svcn = le64_to_cpu(attr_b->nres.svcn);
2438 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2439 
2440 	if (svcn <= vcn && vcn < evcn1) {
2441 		attr = attr_b;
2442 		le = le_b;
2443 		mi = mi_b;
2444 	} else if (!le_b) {
2445 		err = -EINVAL;
2446 		goto bad_inode;
2447 	} else {
2448 		le = le_b;
2449 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2450 				    &mi);
2451 		if (!attr) {
2452 			err = -EINVAL;
2453 			goto bad_inode;
2454 		}
2455 
2456 		svcn = le64_to_cpu(attr->nres.svcn);
2457 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2458 	}
2459 
2460 	run_truncate(run, 0); /* clear cached values. */
2461 	err = attr_load_runs(attr, ni, run, NULL);
2462 	if (err)
2463 		goto out;
2464 
2465 	if (!run_insert_range(run, vcn, len)) {
2466 		err = -ENOMEM;
2467 		goto out;
2468 	}
2469 
2470 	/* Try to pack in current record as much as possible. */
2471 	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2472 	if (err)
2473 		goto out;
2474 
2475 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2476 
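	/*
	 * Every remaining unnamed $DATA segment starts after the insertion
	 * point, so shifting its [svcn, evcn] up by 'len' is sufficient; the
	 * run data inside those segments does not change.
	 */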
2477 	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2478 	       attr->type == ATTR_DATA && !attr->name_len) {
2479 		le64_add_cpu(&attr->nres.svcn, len);
2480 		le64_add_cpu(&attr->nres.evcn, len);
2481 		if (le) {
2482 			le->vcn = attr->nres.svcn;
2483 			ni->attr_list.dirty = true;
2484 		}
2485 		mi->dirty = true;
2486 	}
2487 
2488 	if (next_svcn < evcn1 + len) {
2489 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2490 					    next_svcn, evcn1 + len - next_svcn,
2491 					    a_flags, NULL, NULL, NULL);
2492 
2493 		le_b = NULL;
2494 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2495 				      &mi_b);
2496 		if (!attr_b) {
2497 			err = -EINVAL;
2498 			goto bad_inode;
2499 		}
2500 
2501 		if (err) {
2502 			/* ni_insert_nonresident failed. Try to undo. */
2503 			goto undo_insert_range;
2504 		}
2505 	}
2506 
2507 	/*
2508 	 * Update primary attribute segment.
2509 	 */
2510 	if (vbo <= ni->i_valid)
2511 		ni->i_valid += bytes;
2512 
2513 	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2514 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2515 
2516 	/* ni->i_valid may temporarily exceed the new data_size. */
2517 	if (ni->i_valid > data_size + bytes)
2518 		attr_b->nres.valid_size = attr_b->nres.data_size;
2519 	else
2520 		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2521 	mi_b->dirty = true;
2522 
2523 done:
2524 	i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
2525 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2526 	mark_inode_dirty(&ni->vfs_inode);
2527 
2528 out:
2529 	run_truncate(run, 0); /* clear cached values. */
2530 
2531 	up_write(&ni->file.run_lock);
2532 
2533 	return err;
2534 
2535 bad_inode:
2536 	_ntfs_bad_inode(&ni->vfs_inode);
2537 	goto out;
2538 
2539 undo_insert_range:
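	/*
	 * Undo reverses the steps above: reload the runs, cut the inserted
	 * 'len' clusters back out, repack, and shift the following $DATA
	 * segments back down by 'len'.
	 */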
2540 	svcn = le64_to_cpu(attr_b->nres.svcn);
2541 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2542 
2543 	if (svcn <= vcn && vcn < evcn1) {
2544 		attr = attr_b;
2545 		le = le_b;
2546 		mi = mi_b;
2547 	} else if (!le_b) {
2548 		goto bad_inode;
2549 	} else {
2550 		le = le_b;
2551 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2552 				    &mi);
2553 		if (!attr) {
2554 			goto bad_inode;
2555 		}
2556 
2557 		svcn = le64_to_cpu(attr->nres.svcn);
2558 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2559 	}
2560 
2561 	if (attr_load_runs(attr, ni, run, NULL))
2562 		goto bad_inode;
2563 
2564 	if (!run_collapse_range(run, vcn, len))
2565 		goto bad_inode;
2566 
2567 	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2568 		goto bad_inode;
2569 
2570 	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2571 	       attr->type == ATTR_DATA && !attr->name_len) {
2572 		le64_sub_cpu(&attr->nres.svcn, len);
2573 		le64_sub_cpu(&attr->nres.evcn, len);
2574 		if (le) {
2575 			le->vcn = attr->nres.svcn;
2576 			ni->attr_list.dirty = true;
2577 		}
2578 		mi->dirty = true;
2579 	}
2580 
2581 	goto out;
2582 }
2583