// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

// clang-format off
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

// clang-format on

/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if fixups did not match.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
			    le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL; /* Native chkntfs returns ok! */
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}
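
/*
 * Usage sketch (illustrative only, not called from this file): the two
 * helpers above are always paired around disk I/O on a fixup-protected
 * record. 'rec', 'lbo' and 'bytes' below are hypothetical.
 *
 *	// Seal every sector tail before the record goes to disk.
 *	if (!ntfs_fix_pre_write(&rec->rhdr, bytes))
 *		return -EINVAL;
 *	err = ntfs_sb_write(sb, lbo, bytes, rec, 0);
 *
 *	// After reading it back, verify and restore the sector tails.
 *	err = ntfs_sb_read(sb, lbo, bytes, rec);
 *	if (!err)
 *		err = ntfs_fix_post_read(&rec->rhdr, bytes, false);
 *	if (err == -E_NTFS_FIXUP)
 *		; // Torn write detected; the record needs to be repaired.
 */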

/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend because of NTFS version");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}

int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill LogFile with -1 if it is initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_look_for_free_space - Look for a free space in bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always in use, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check for too big a request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}
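
/*
 * Caller sketch (hypothetical, abbreviated error handling): request 16
 * clusters anywhere on the volume. On success the range [new_lcn,
 * new_lcn + new_len) is already marked used in the bitmap; with
 * ALLOCATE_MFT the helper may return fewer clusters than requested,
 * capped by the current MFT zone length.
 *
 *	CLST new_lcn, new_len;
 *
 *	// lcn == 0 means "start from the cached next_free_lcn hint".
 *	err = ntfs_look_for_free_space(sbi, 0, 16, &new_lcn, &new_len,
 *				       ALLOCATE_DEF);
 */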

/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
}
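
/*
 * Worked example for the fallback check above (all numbers hypothetical):
 * with 1K MFT records (record_bits == 10) and 8K clusters, a request for
 * mlen == 24 records needs
 *
 *	bytes_to_cluster(sbi, 24 << 10) == bytes_to_cluster(sbi, 24K) == 3
 *
 * clusters, so the call succeeds only if 'avail' (free clusters minus
 * the reserved zone and the 'clen' request) is at least 3.
 */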

/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records;
 * see attrib.c: "at least two MFT to avoid recursive loop".
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}
	/* No MFT zone. Find the free MFT record nearest to 0. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area
		 * [MFT_REC_RESERVED, MFT_REC_FREE) == [11, 16).
		 * The MFT bitmap always marks this area as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						le32_to_cpu(ref.low));
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request is for a general-purpose record. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}

/*
 * ntfs_mark_rec_free - Mark the record as free.
 * is_mft - true if we are changing the MFT.
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything while the MFT zone is still non-empty. */
	if (wnd_zone_len(wnd))
		return 0;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN of the MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}
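
/*
 * Illustrative numbers (hypothetical volume): if the last $MFT run ends
 * at LCN 0x3fff and sbi->zone_max == 0x1000, the refreshed zone becomes
 * [lcn_s, lcn_s + zlen) with zlen <= 0x1000, depending on where and how
 * much free space wnd_find() found after the run. Ordinary allocations
 * then steer clear of this window so that $MFT can grow contiguously.
 */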

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
		return;

	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;

		err = wait ? sync_dirty_buffer(bh2) : 0;

		put_bh(bh2);
		if (err)
			return;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}

/*
 * ntfs_bad_inode
 *
 * Marks the inode as bad and marks the fs as 'dirty'.
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	ntfs_inode_err(inode, "%s", hint);
	make_bad_inode(inode);
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	__le16 info_flags;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
		mi->dirty = true;
	}
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/*
	 * If we used wait=1, sync_inode_metadata waits for the I/O for the
	 * inode to finish. It hangs when media is removed.
	 * So wait=0 is sent down to sync_inode_metadata
	 * and filemap_fdatawrite is used for the data blocks.
	 */
	err = sync_inode_metadata(&ni->vfs_inode, 0);
	if (!err)
		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);

	return err;
}

/*
 * security_hash - Calculate a hash of the security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}
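
/*
 * The hash above is a rotate-left-by-3 accumulator over little-endian
 * 32-bit words: hash = rol32(hash, 3) + word. Tiny worked example
 * (illustrative): for the words { 0x00000001, 0x00000002 },
 *
 *	step 1: rol32(0x00000000, 3) + 1 == 0x00000001
 *	step 2: rol32(0x00000001, 3) + 2 == 0x0000000A
 *
 * Trailing bytes beyond a multiple of four are ignored ('bytes >>= 2').
 */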

int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}
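
/*
 * Offset math example for the loop above (hypothetical values): with a
 * 4K block size, lbo == 0x1234 gives block == 1 (0x1234 >> 12) and
 * off == 0x234, so the first iteration copies op == 0x1000 - 0x234 ==
 * 0xdcc bytes; every later block is consumed from offset 0 for up to a
 * full block.
 */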

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}
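
/*
 * Typical call pattern (sketch; 'rec' and 'nb' are hypothetical locals):
 *
 *	err = ntfs_read_bh(sbi, &ni->file.run, vbo, &rec->rhdr,
 *			   sbi->record_size, &nb);
 *	if (err == -E_NTFS_FIXUP) {
 *		// The record was read but its fixups did not match
 *		// (e.g. a torn write); the caller decides whether to
 *		// repair and rewrite it via ntfs_write_bh(..., &nb, 0).
 *	}
 */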

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   enum req_op op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to a 512-byte boundary. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk LogFile range with -1;
 * this marks the LogFile as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (!err)
		err = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);
out:
	unlock_page(fill);
	put_page(fill);

	return err;
}

int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}
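
/*
 * Worked example (hypothetical run): with 4K clusters (cluster_bits ==
 * 12) and a run entry mapping VCN 2 -> LCN 100 for 8 clusters,
 * vbo == 0x2345 resolves as
 *
 *	off    == 0x2345 & 0xfff        == 0x345
 *	*lbo   == (100 << 12) + 0x345   == 0x64345
 *	*bytes == (8 << 12) - 0x345     == 0x7cbb
 *
 * For a SPARSE_LCN entry *lbo is -1: the range has no backing storage.
 */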

struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
			    false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);

static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}
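
/*
 * Example (illustrative): the well-known SID S-1-5-32-544 has
 * SubAuthorityCount == 2, so sid_length() == 8 + 2 * 4 == 16 bytes:
 * an 8-byte fixed header (revision, count, 6-byte identifier authority)
 * plus two 32-bit sub-authorities.
 */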

/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}

bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
		goto out;
	}

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
		goto out;
	}

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security at the end of bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}

/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < SIZEOF_SECURITY_HDR) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
		/*
		 * Looks like too big a security descriptor;
		 * 0x10000 is an arbitrary upper bound.
		 */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - SIZEOF_SECURITY_HDR;

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       SIZEOF_SECURITY_HDR,
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}

/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor. When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip over the mirror portion.
 */
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	u32 next, left;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	if (inserted)
		*inserted = false;
	*security_id = SECURITY_ID_INVALID;

	/* Allocate a temporary buffer. */
2092 	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2093 	if (!d_security)
2094 		return -ENOMEM;
2095 
2096 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2097 
2098 	fnd_sdh = fnd_get();
2099 	if (!fnd_sdh) {
2100 		err = -ENOMEM;
2101 		goto out;
2102 	}
2103 
2104 	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2105 	if (!root_sdh) {
2106 		err = -EINVAL;
2107 		goto out;
2108 	}
2109 
2110 	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2111 	if (!root_sii) {
2112 		err = -EINVAL;
2113 		goto out;
2114 	}
2115 
2116 	/*
2117 	 * Check if such security already exists.
2118 	 * Use "SDH" and hash -> to get the offset in "SDS".
2119 	 */
2120 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2121 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2122 			fnd_sdh);
2123 	if (err)
2124 		goto out;
2125 
2126 	while (e) {
2127 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2128 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2129 					       le64_to_cpu(e->sec_hdr.off),
2130 					       d_security, new_sec_size, NULL);
2131 			if (err)
2132 				goto out;
2133 
2134 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2135 			    d_security->key.hash == hash_key.hash &&
2136 			    !memcmp(d_security + 1, sd, size_sd)) {
2137 				*security_id = d_security->key.sec_id;
2138 				/* Such security already exists. */
2139 				err = 0;
2140 				goto out;
2141 			}
2142 		}
2143 
2144 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2145 				     (struct NTFS_DE **)&e, fnd_sdh);
2146 		if (err)
2147 			goto out;
2148 
2149 		if (!e || e->key.hash != hash_key.hash)
2150 			break;
2151 	}
2152 
2153 	/* Zero unused space. */
2154 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2155 	left = SecurityDescriptorsBlockSize - next;
2156 
2157 	/* Zero gap until SecurityDescriptorsBlockSize. */
2158 	if (left < new_sec_size) {
2159 		/* Zero "left" bytes from sbi->security.next_off. */
2160 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2161 	}
2162 
2163 	/* Zero the tail of the previous descriptor (not implemented). */
2164 	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2165 	/*
2166 	 * Example:
2167 	 * 0x40438 == ni->vfs_inode.i_size
2168 	 * 0x00440 == sbi->security.next_off
2169 	 * need to zero [0x438-0x440), i.e.:
2170 	 * if (next > used) {
2171 	 *	u32 tozero = next - used;
2172 	 *	zero "tozero" bytes from sbi->security.next_off - tozero
2173 	 * }
2174 	 */
2175 
2176 	/* Format new security descriptor. */
2177 	d_security->key.hash = hash_key.hash;
2178 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2179 	d_security->off = cpu_to_le64(sbi->security.next_off);
2180 	d_security->size = cpu_to_le32(new_sec_size);
2181 	memcpy(d_security + 1, sd, size_sd);
2182 
2183 	/* Write main SDS bucket. */
2184 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2185 				d_security, aligned_sec_size, 0);
2187 	if (err)
2188 		goto out;
2189 
2190 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2191 	new_sds_size = mirr_off + aligned_sec_size;
2192 
2193 	if (new_sds_size > ni->vfs_inode.i_size) {
2194 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2195 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2196 				    new_sds_size, &new_sds_size, false, NULL);
2197 		if (err)
2198 			goto out;
2199 	}
2200 
2201 	/* Write the mirror copy of the SDS bucket. */
2202 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2203 				aligned_sec_size, 0);
2204 	if (err)
2205 		goto out;
2206 
2207 	/* Fill SII entry. */
2208 	sii_e.de.view.data_off =
2209 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2210 	sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2211 	sii_e.de.view.res = 0;
2212 	sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2213 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2214 	sii_e.de.flags = 0;
2215 	sii_e.de.res = 0;
2216 	sii_e.sec_id = d_security->key.sec_id;
2217 	memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2218 
2219 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2220 	if (err)
2221 		goto out;
2222 
2223 	/* Fill SDH entry. */
2224 	sdh_e.de.view.data_off =
2225 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2226 	sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2227 	sdh_e.de.view.res = 0;
2228 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2229 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2230 	sdh_e.de.flags = 0;
2231 	sdh_e.de.res = 0;
2232 	sdh_e.key.hash = d_security->key.hash;
2233 	sdh_e.key.sec_id = d_security->key.sec_id;
2234 	memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
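	/* Trailing "II" padding, matching what Windows writes after SDH entries. */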
2235 	sdh_e.magic[0] = cpu_to_le16('I');
2236 	sdh_e.magic[1] = cpu_to_le16('I');
2237 
2238 	fnd_clear(fnd_sdh);
2239 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2240 				fnd_sdh, 0);
2241 	if (err)
2242 		goto out;
2243 
2244 	*security_id = d_security->key.sec_id;
2245 	if (inserted)
2246 		*inserted = true;
2247 
2248 	/* Update Id and offset for next descriptor. */
2249 	sbi->security.next_id += 1;
2250 	sbi->security.next_off += aligned_sec_size;
2251 
2252 out:
2253 	fnd_put(fnd_sdh);
2254 	mark_inode_dirty(&ni->vfs_inode);
2255 	ni_unlock(ni);
2256 	kfree(d_security);
2257 
2258 	return err;
2259 }
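
/*
 * Editor's sketch (hypothetical helper, not driver code): the $SDS offset
 * arithmetic referenced in the comment above ntfs_insert_security().
 */
static inline u64 sds_next_off_example(u64 next_off, u32 sec_size)
{
	/* Offset within the current 256K block and the space left in it. */
	u32 used = next_off & (SecurityDescriptorsBlockSize - 1);
	u32 left = SecurityDescriptorsBlockSize - used;

	/*
	 * If the descriptor does not fit, skip the rest of this block plus
	 * the whole 256K mirror block that follows it. E.g. with
	 * next_off == 0x3FF00 and sec_size == 0x200:
	 * 0x3FF00 + 0x40000 + 0x100 == 0x80000.
	 */
	if (left < sec_size)
		next_off += SecurityDescriptorsBlockSize + left;

	return next_off;
}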
2260 
2261 /*
2262  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2263  */
2264 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2265 {
2266 	int err;
2267 	struct ntfs_inode *ni = sbi->reparse.ni;
2268 	struct ntfs_index *indx = &sbi->reparse.index_r;
2269 	struct ATTRIB *attr;
2270 	struct ATTR_LIST_ENTRY *le;
2271 	const struct INDEX_ROOT *root_r;
2272 
2273 	if (!ni)
2274 		return 0;
2275 
2276 	le = NULL;
2277 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2278 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2279 	if (!attr) {
2280 		err = -EINVAL;
2281 		goto out;
2282 	}
2283 
2284 	root_r = resident_data(attr);
2285 	if (root_r->type != ATTR_ZERO ||
2286 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2287 		err = -EINVAL;
2288 		goto out;
2289 	}
2290 
2291 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2292 	if (err)
2293 		goto out;
2294 
2295 out:
2296 	return err;
2297 }
2298 
2299 /*
2300  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2301  */
2302 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2303 {
2304 	int err;
2305 	struct ntfs_inode *ni = sbi->objid.ni;
2306 	struct ntfs_index *indx = &sbi->objid.index_o;
2307 	struct ATTRIB *attr;
2308 	struct ATTR_LIST_ENTRY *le;
2309 	const struct INDEX_ROOT *root;
2310 
2311 	if (!ni)
2312 		return 0;
2313 
2314 	le = NULL;
2315 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2316 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2317 	if (!attr) {
2318 		err = -EINVAL;
2319 		goto out;
2320 	}
2321 
2322 	root = resident_data(attr);
2323 	if (root->type != ATTR_ZERO ||
2324 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2325 		err = -EINVAL;
2326 		goto out;
2327 	}
2328 
2329 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2330 	if (err)
2331 		goto out;
2332 
2333 out:
2334 	return err;
2335 }
2336 
2337 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2338 {
2339 	int err;
2340 	struct ntfs_inode *ni = sbi->objid.ni;
2341 	struct ntfs_index *indx = &sbi->objid.index_o;
2342 
2343 	if (!ni)
2344 		return -EINVAL;
2345 
2346 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2347 
2348 	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2349 
2350 	mark_inode_dirty(&ni->vfs_inode);
2351 	ni_unlock(ni);
2352 
2353 	return err;
2354 }
2355 
2356 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2357 			const struct MFT_REF *ref)
2358 {
2359 	int err;
2360 	struct ntfs_inode *ni = sbi->reparse.ni;
2361 	struct ntfs_index *indx = &sbi->reparse.index_r;
2362 	struct NTFS_DE_R re;
2363 
2364 	if (!ni)
2365 		return -EINVAL;
2366 
2367 	memset(&re, 0, sizeof(re));
2368 
2369 	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2370 	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2371 	re.de.key_size = cpu_to_le16(sizeof(re.key));
2372 
2373 	re.key.ReparseTag = rtag;
2374 	memcpy(&re.key.ref, ref, sizeof(*ref));
2375 
2376 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2377 
2378 	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2379 
2380 	mark_inode_dirty(&ni->vfs_inode);
2381 	ni_unlock(ni);
2382 
2383 	return err;
2384 }
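
/*
 * Editor's note: the $R index entry built above carries no payload beyond
 * its key; de.view.data_off points at the zeroed tail, and de.key_size
 * covers the whole REPARSE_KEY (reparse tag plus MFT reference), so each
 * reparse point is registered in $Extend/$Reparse as a bare (tag, ref) pair.
 */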
2385 
2386 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2387 			const struct MFT_REF *ref)
2388 {
2389 	int err, diff;
2390 	struct ntfs_inode *ni = sbi->reparse.ni;
2391 	struct ntfs_index *indx = &sbi->reparse.index_r;
2392 	struct ntfs_fnd *fnd = NULL;
2393 	struct REPARSE_KEY rkey;
2394 	struct NTFS_DE_R *re;
2395 	struct INDEX_ROOT *root_r;
2396 
2397 	if (!ni)
2398 		return -EINVAL;
2399 
2400 	rkey.ReparseTag = rtag;
2401 	rkey.ref = *ref;
2402 
2403 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2404 
2405 	if (rtag) {
2406 		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2407 		goto out1;
2408 	}
2409 
2410 	fnd = fnd_get();
2411 	if (!fnd) {
2412 		err = -ENOMEM;
2413 		goto out1;
2414 	}
2415 
2416 	root_r = indx_get_root(indx, ni, NULL, NULL);
2417 	if (!root_r) {
2418 		err = -EINVAL;
2419 		goto out;
2420 	}
2421 
2422 	/* The "1" forces the comparison to ignore rkey.ReparseTag. */
2423 	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2424 			(struct NTFS_DE **)&re, fnd);
2425 	if (err)
2426 		goto out;
2427 
2428 	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2429 		/* Should be impossible; the volume may be corrupt. */
2430 		goto out;
2431 	}
2432 
2433 	memcpy(&rkey, &re->key, sizeof(rkey));
2434 
2435 	fnd_put(fnd);
2436 	fnd = NULL;
2437 
2438 	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2439 	if (err)
2440 		goto out;
2441 
2442 out:
2443 	fnd_put(fnd);
2444 
2445 out1:
2446 	mark_inode_dirty(&ni->vfs_inode);
2447 	ni_unlock(ni);
2448 
2449 	return err;
2450 }
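
/*
 * Editor's note: ntfs_remove_reparse() has two modes. With a non-zero rtag
 * the (tag, ref) key is deleted directly. With rtag == 0 the entry is first
 * located by MFT reference alone (the "1" passed to indx_find() makes the
 * key comparison ignore the tag), and the complete key read back from the
 * index is then used for the delete.
 */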
2451 
2452 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2453 					  CLST len)
2454 {
2455 	ntfs_unmap_meta(sbi->sb, lcn, len);
2456 	ntfs_discard(sbi, lcn, len);
2457 }
2458 
2459 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2460 {
2461 	CLST end, i, zone_len, zlen;
2462 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2463 
2464 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2465 	if (!wnd_is_used(wnd, lcn, len)) {
2466 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2467 
2468 		end = lcn + len;
2469 		len = 0;
2470 		for (i = lcn; i < end; i++) {
2471 			if (wnd_is_used(wnd, i, 1)) {
2472 				if (!len)
2473 					lcn = i;
2474 				len += 1;
2475 				continue;
2476 			}
2477 
2478 			if (!len)
2479 				continue;
2480 
2481 			if (trim)
2482 				ntfs_unmap_and_discard(sbi, lcn, len);
2483 
2484 			wnd_set_free(wnd, lcn, len);
2485 			len = 0;
2486 		}
2487 
2488 		if (!len)
2489 			goto out;
2490 	}
2491 
2492 	if (trim)
2493 		ntfs_unmap_and_discard(sbi, lcn, len);
2494 	wnd_set_free(wnd, lcn, len);
2495 
2496 	/* Append to the MFT zone, if possible. */
2497 	zone_len = wnd_zone_len(wnd);
2498 	zlen = min(zone_len + len, sbi->zone_max);
2499 
2500 	if (zlen == zone_len) {
2501 		/* MFT zone already has maximum size. */
2502 	} else if (!zone_len) {
2503 		/* Create MFT zone only if 'zlen' is large enough. */
2504 		if (zlen == sbi->zone_max)
2505 			wnd_zone_set(wnd, lcn, zlen);
2506 	} else {
2507 		CLST zone_lcn = wnd_zone_bit(wnd);
2508 
2509 		if (lcn + len == zone_lcn) {
2510 			/* Extend the MFT zone backward over the freed run. */
2511 			wnd_zone_set(wnd, lcn, zlen);
2512 		} else if (zone_lcn + zone_len == lcn) {
2513 			/* Extend the MFT zone forward over the freed run. */
2514 			wnd_zone_set(wnd, zone_lcn, zlen);
2515 		}
2516 	}
2517 
2518 out:
2519 	up_write(&wnd->rw_lock);
2520 }
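
/*
 * Editor's note, on assumed numbers: with zone_max == 0x1000 and an existing
 * MFT zone [0x800, 0x900), freeing [0x900, 0x980) takes the
 * "zone_lcn + zone_len == lcn" branch above and the zone becomes
 * [0x800, 0x980); freeing [0x780, 0x800) instead takes the
 * "lcn + len == zone_lcn" branch and the zone becomes [0x780, 0x900).
 * In both cases zlen caps the zone at zone_max clusters.
 */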
2521 
2522 /*
2523  * run_deallocate - Deallocate clusters.
2524  */
2525 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2526 {
2527 	CLST lcn, len;
2528 	size_t idx = 0;
2529 
2530 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2531 		if (lcn == SPARSE_LCN)
2532 			continue;
2533 
2534 		mark_as_free_ex(sbi, lcn, len, trim);
2535 	}
2536 
2537 	return 0;
2538 }
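
/*
 * Editor's note: run_deallocate() walks every (lcn, len) extent of the run,
 * skips SPARSE_LCN holes and returns the clusters to the volume bitmap via
 * mark_as_free_ex(); with trim == true the freed ranges are also unmapped
 * and discarded.
 */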
2539 
2540 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2541 {
2542 	int i, ch;
2543 
2544 	/* Check for forbidden characters. */
2545 	for (i = 0; i < fname->len; ++i) {
2546 		ch = le16_to_cpu(fname->name[i]);
2547 
2548 		/* Control characters. */
2549 		if (ch < 0x20)
2550 			return true;
2551 
2552 		switch (ch) {
2553 		/* Disallowed by Windows. */
2554 		case '\\':
2555 		case '/':
2556 		case ':':
2557 		case '*':
2558 		case '?':
2559 		case '<':
2560 		case '>':
2561 		case '|':
2562 		case '\"':
2563 			return true;
2564 
2565 		default:
2566 			/* Allowed character. */
2567 			break;
2568 		}
2569 	}
2570 
2571 	/* File names cannot end with a space or a period. */
2572 	if (fname->len > 0) {
2573 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2574 		if (ch == ' ' || ch == '.')
2575 			return true;
2576 	}
2577 
2578 	return false;
2579 }
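
/*
 * Editor's note: names rejected above include "a<b" and "x:y" (forbidden
 * characters), any name containing a code point below 0x20, and "name." or
 * "name " (trailing period or space).
 */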
2580 
2581 static inline bool is_reserved_name(struct ntfs_sb_info *sbi,
2582 				    const struct le_str *fname)
2583 {
2584 	int port_digit;
2585 	const __le16 *name = fname->name;
2586 	int len = fname->len;
2587 	u16 *upcase = sbi->upcase;
2588 
2589 	/* Check for 3-character reserved device names. */
2590 	/* The name by itself or with any extension is forbidden. */
2591 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2592 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2593 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2594 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2595 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2596 			return true;
2597 
2598 	/* Check for 4-character reserved names (port name followed by 1..9). */
2599 	/* The name by itself or with any extension is forbidden. */
2600 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2601 		port_digit = le16_to_cpu(name[3]);
2602 		if (port_digit >= '1' && port_digit <= '9')
2603 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2604 					    false) ||
2605 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2606 					    false))
2607 				return true;
2608 	}
2609 
2610 	return false;
2611 }
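
/*
 * Editor's note: names rejected above include "CON", "nul.txt" and "AUX.a.b"
 * (3-character device names, bare or with any extension, matched
 * case-insensitively via the upcase table), plus "COM1" and "lpt9.log"
 * (port names followed by a digit 1..9). "COM10" and "LPT0" are not
 * reserved and pass this check.
 */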
2612 
2613 /*
2614  * valid_windows_name - Check if a file name is valid in Windows.
2615  */
2616 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2617 {
2618 	return !name_has_forbidden_chars(fname) &&
2619 	       !is_reserved_name(sbi, fname);
2620 }
2621