xref: /openbmc/linux/fs/ntfs3/fsntfs.c (revision f3531d1a)
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

// clang-format off
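/*
 * Well-known metadata names. The struct cpu_str entries are
 * length-prefixed names in CPU byte order; the raw __le16 arrays below
 * are on-disk little-endian attribute and index names.
 */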
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

// clang-format on

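/*
 * NTFS protects multi-sector records with an update sequence array:
 * the last two bytes of every 512-byte sector are overwritten with the
 * current update sequence number, and the displaced words are saved in
 * the array at @fix_off. ntfs_fix_pre_write() installs these fixups;
 * ntfs_fix_post_read() verifies and removes them.
 */
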
/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if fixups need to be updated.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
		    : le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL; /* Native chkntfs returns ok! */
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}

/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend because NTFS version < 3");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend.");
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId. */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota. */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse. */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl. */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}

int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* $LogFile must be smaller than 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is too big");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use the MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* The log file should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill $LogFile with -1 if it was initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_look_for_free_space - Look for free space in the bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always in use, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from the MFT zone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check for a too-large request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
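	/*
	 * Example: zlen == 1000 and len == 300 give ztrim == 500 (len is
	 * clamped to at least half of the zone), so the zone shrinks to
	 * 500 clusters unless that would drop below NTFS_MIN_MFT_ZONE.
	 */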

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}

/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

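	/* Otherwise the new records must fit in the general cluster pool. */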
	return avail >= bytes_to_cluster(sbi, (u64)mlen << sbi->record_bits);
}

/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records;
 * see attrib.c: "at least two MFT to avoid recursive loop".
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the free MFT record nearest to '0'. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT. */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area [11-16) ==
		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
		 * marks it as used.
		 */
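		/*
		 * A reserved record may be reused only if it is a base
		 * record with no hard links that still has a standard
		 * information attribute and no file name - exactly what
		 * the loop below checks before clearing its bit.
		 */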
		if (!sbi->mft.reserved_bitmap) {
			/* Create the internal bitmap for these 5 records once per session. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						le32_to_cpu(ref.low));
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit_le(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
		zbit = find_next_zero_bit_le(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request is to get a record for general purpose. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit_le(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}

/*
 * ntfs_mark_rec_free - Mark record as free.
 * is_mft - true if we are changing MFT
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit_le(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything if the MFT zone is not empty. */
	if (wnd_zone_len(wnd))
		return 0;

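	/* The MFT size in clusters is the VCN just past its last cluster. */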
	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN for MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize;
	sector_t block1, block2;
	u32 bytes;

	if (!sb)
		return;

	blocksize = sb->s_blocksize;

	if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
		return;

	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;

		err = wait ? sync_dirty_buffer(bh2) : 0;

		put_bh(bh2);
		if (err)
			return;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}

/*
 * ntfs_bad_inode
 *
 * Mark the inode as bad and the filesystem as 'dirty'.
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	ntfs_inode_err(inode, "%s", hint);
	make_bad_inode(inode);
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;

	/*
	 * Do not change the state if the fs was marked really dirty.
	 * Do not change the state if the fs is already dirty (or clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check the cached value: nothing to do if the dirty bit already matches. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	sbi->volume.flags = info->flags;
	mi->dirty = true;
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/*
	 * If we used wait=1, sync_inode_metadata waits for the I/O for the
	 * inode to finish. It hangs when media is removed.
	 * So wait=0 is sent down to sync_inode_metadata
	 * and filemap_fdatawrite is used for the data blocks.
	 */
	err = sync_inode_metadata(&ni->vfs_inode, 0);
	if (!err)
		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);

	return err;
}

/*
 * security_hash - Calculates a hash of a security descriptor.
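 *
 * The hash accumulates the descriptor as a sequence of little-endian
 * 32-bit words: hash = rol32(hash, 3) + le32_to_cpu(word). Any 1-3
 * trailing bytes are ignored because @bytes is truncated to whole words.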
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}

int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

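	/* Copy block by block; only the first block starts at a nonzero @off. */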
	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use the boot block's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

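		/* Bump the update sequence number once, in the first buffer. */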
		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   enum req_op op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to 512-byte boundaries; lbo briefly holds the aligned end offset. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
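		/* If a bio is already in flight, chain it to the new one and submit it. */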
		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk $LogFile range with -1;
 * this marks the log file as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (!err)
		err = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);
out:
	unlock_page(fill);
	put_page(fill);

	return err;
}

int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}

struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
			    false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);

static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}

/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}

bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure.");
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
	if (!root_sdh || root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) + le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err)
		goto out;

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
	if (!root_sii || root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) + le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err)
		goto out;

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security at the end of bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}

/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < SIZEOF_SECURITY_HDR) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
		/* Security descriptor looks too big; 0x10000 is an arbitrary limit. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - SIZEOF_SECURITY_HDR;

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

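	/* The header stored in the SDS stream must match the SII entry. */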
	if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       SIZEOF_SECURITY_HDR,
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}

/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor.  When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip over the mirror portion.
 */
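/*
 * SDS stream layout (SecurityDescriptorsBlockSize == 256K):
 *
 *   [0K,   256K)  security descriptors
 *   [256K, 512K)  mirror of the block above
 *   [512K, 768K)  security descriptors
 *   ...
 */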
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	u32 next, left;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	if (inserted)
		*inserted = false;
	*security_id = SECURITY_ID_INVALID;

	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	if (!d_security)
		return -ENOMEM;

2090 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2091 
2092 	fnd_sdh = fnd_get();
2093 	if (!fnd_sdh) {
2094 		err = -ENOMEM;
2095 		goto out;
2096 	}
2097 
2098 	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2099 	if (!root_sdh) {
2100 		err = -EINVAL;
2101 		goto out;
2102 	}
2103 
2104 	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2105 	if (!root_sii) {
2106 		err = -EINVAL;
2107 		goto out;
2108 	}
2109 
2110 	/*
2111 	 * Check if such security already exists.
2112 	 * Use "SDH" and hash -> to get the offset in "SDS".
2113 	 */
2114 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2115 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2116 			fnd_sdh);
2117 	if (err)
2118 		goto out;
2119 
2120 	while (e) {
2121 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2122 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2123 					       le64_to_cpu(e->sec_hdr.off),
2124 					       d_security, new_sec_size, NULL);
2125 			if (err)
2126 				goto out;
2127 
2128 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2129 			    d_security->key.hash == hash_key.hash &&
2130 			    !memcmp(d_security + 1, sd, size_sd)) {
2131 				*security_id = d_security->key.sec_id;
2132 				/* Such security already exists. */
2133 				err = 0;
2134 				goto out;
2135 			}
2136 		}
2137 
2138 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2139 				     (struct NTFS_DE **)&e, fnd_sdh);
2140 		if (err)
2141 			goto out;
2142 
2143 		if (!e || e->key.hash != hash_key.hash)
2144 			break;
2145 	}
2146 
2147 	/* Zero unused space. */
2148 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2149 	left = SecurityDescriptorsBlockSize - next;
2150 
2151 	/* Zero gap until SecurityDescriptorsBlockSize. */
2152 	if (left < new_sec_size) {
2153 		/* Zero "left" bytes from sbi->security.next_off. */
2154 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2155 	}
2156 
2157 	/* Zero tail of previous security. */
2158 	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2159 
2160 	/*
2161 	 * Example:
2162 	 * 0x40438 == ni->vfs_inode.i_size
2163 	 * 0x00440 == sbi->security.next_off
2164 	 * need to zero [0x438-0x440)
2165 	 * if (next > used) {
2166 	 *  u32 tozero = next - used;
2167 	 *  zero "tozero" bytes from sbi->security.next_off - tozero
2168 	 */
2169 
2170 	/* Format new security descriptor. */
2171 	d_security->key.hash = hash_key.hash;
2172 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2173 	d_security->off = cpu_to_le64(sbi->security.next_off);
2174 	d_security->size = cpu_to_le32(new_sec_size);
2175 	memcpy(d_security + 1, sd, size_sd);
2176 
2177 	/* Write main SDS bucket. */
2178 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2179 				d_security, aligned_sec_size, 0);
2181 	if (err)
2182 		goto out;
2183 
2184 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2185 	new_sds_size = mirr_off + aligned_sec_size;
2186 
2187 	if (new_sds_size > ni->vfs_inode.i_size) {
2188 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2189 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2190 				    new_sds_size, &new_sds_size, false, NULL);
2191 		if (err)
2192 			goto out;
2193 	}
2194 
2195 	/* Write copy SDS bucket. */
2196 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2197 				aligned_sec_size, 0);
2198 	if (err)
2199 		goto out;
2200 
2201 	/* Fill SII entry. */
2202 	sii_e.de.view.data_off =
2203 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2204 	sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2205 	sii_e.de.view.res = 0;
2206 	sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2207 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2208 	sii_e.de.flags = 0;
2209 	sii_e.de.res = 0;
2210 	sii_e.sec_id = d_security->key.sec_id;
2211 	memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2212 
2213 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2214 	if (err)
2215 		goto out;
2216 
2217 	/* Fill SDH entry. */
2218 	sdh_e.de.view.data_off =
2219 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2220 	sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2221 	sdh_e.de.view.res = 0;
2222 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2223 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2224 	sdh_e.de.flags = 0;
2225 	sdh_e.de.res = 0;
2226 	sdh_e.key.hash = d_security->key.hash;
2227 	sdh_e.key.sec_id = d_security->key.sec_id;
2228 	memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2229 	sdh_e.magic[0] = cpu_to_le16('I');
2230 	sdh_e.magic[1] = cpu_to_le16('I');
2231 
2232 	fnd_clear(fnd_sdh);
2233 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2234 				fnd_sdh, 0);
2235 	if (err)
2236 		goto out;
2237 
2238 	*security_id = d_security->key.sec_id;
2239 	if (inserted)
2240 		*inserted = true;
2241 
2242 	/* Update Id and offset for next descriptor. */
2243 	sbi->security.next_id += 1;
2244 	sbi->security.next_off += aligned_sec_size;
2245 
2246 out:
2247 	fnd_put(fnd_sdh);
2248 	mark_inode_dirty(&ni->vfs_inode);
2249 	ni_unlock(ni);
2250 	kfree(d_security);
2251 
2252 	return err;
2253 }
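
/*
 * Illustrative sketch, not part of the driver: $SDS stores every
 * SecurityDescriptorsBlockSize (256K) chunk of descriptors twice, with
 * the mirror copy placed right after the primary chunk. That is why
 * ntfs_insert_security() writes each descriptor at next_off and again
 * at next_off + SecurityDescriptorsBlockSize. Assuming this layout, a
 * helper mapping a primary offset to its mirror could look like:
 */
static inline u64 ntfs_sds_mirror_off_example(u64 off)
{
	return off + SecurityDescriptorsBlockSize;
}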
2254 
2255 /*
2256  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2257  */
2258 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2259 {
2260 	int err;
2261 	struct ntfs_inode *ni = sbi->reparse.ni;
2262 	struct ntfs_index *indx = &sbi->reparse.index_r;
2263 	struct ATTRIB *attr;
2264 	struct ATTR_LIST_ENTRY *le;
2265 	const struct INDEX_ROOT *root_r;
2266 
2267 	if (!ni)
2268 		return 0;
2269 
2270 	le = NULL;
2271 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2272 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2273 	if (!attr) {
2274 		err = -EINVAL;
2275 		goto out;
2276 	}
2277 
2278 	root_r = resident_data(attr);
2279 	if (root_r->type != ATTR_ZERO ||
2280 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2281 		err = -EINVAL;
2282 		goto out;
2283 	}
2284 
2285 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2288 
2289 out:
2290 	return err;
2291 }
2292 
2293 /*
2294  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2295  */
2296 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2297 {
2298 	int err;
2299 	struct ntfs_inode *ni = sbi->objid.ni;
2300 	struct ntfs_index *indx = &sbi->objid.index_o;
2301 	struct ATTRIB *attr;
2302 	struct ATTR_LIST_ENTRY *le;
2303 	const struct INDEX_ROOT *root;
2304 
2305 	if (!ni)
2306 		return 0;
2307 
2308 	le = NULL;
2309 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2310 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2311 	if (!attr) {
2312 		err = -EINVAL;
2313 		goto out;
2314 	}
2315 
2316 	root = resident_data(attr);
2317 	if (root->type != ATTR_ZERO ||
2318 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2319 		err = -EINVAL;
2320 		goto out;
2321 	}
2322 
2323 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2326 
2327 out:
2328 	return err;
2329 }
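
/*
 * Usage sketch (hypothetical): both initializers return 0 when the
 * corresponding $Extend inode is absent, so a mount path may call them
 * back to back and fail on the first real error.
 */
static int ntfs_extend_init_example(struct ntfs_sb_info *sbi)
{
	int err = ntfs_reparse_init(sbi);

	return err ? err : ntfs_objid_init(sbi);
}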
2330 
2331 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2332 {
2333 	int err;
2334 	struct ntfs_inode *ni = sbi->objid.ni;
2335 	struct ntfs_index *indx = &sbi->objid.index_o;
2336 
2337 	if (!ni)
2338 		return -EINVAL;
2339 
2340 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2341 
2342 	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2343 
2344 	mark_inode_dirty(&ni->vfs_inode);
2345 	ni_unlock(ni);
2346 
2347 	return err;
2348 }
2349 
2350 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2351 			const struct MFT_REF *ref)
2352 {
2353 	int err;
2354 	struct ntfs_inode *ni = sbi->reparse.ni;
2355 	struct ntfs_index *indx = &sbi->reparse.index_r;
2356 	struct NTFS_DE_R re;
2357 
2358 	if (!ni)
2359 		return -EINVAL;
2360 
2361 	memset(&re, 0, sizeof(re));
2362 
2363 	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2364 	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2365 	re.de.key_size = cpu_to_le16(sizeof(re.key));
2366 
2367 	re.key.ReparseTag = rtag;
2368 	memcpy(&re.key.ref, ref, sizeof(*ref));
2369 
2370 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2371 
2372 	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2373 
2374 	mark_inode_dirty(&ni->vfs_inode);
2375 	ni_unlock(ni);
2376 
2377 	return err;
2378 }
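
/*
 * Usage sketch (hypothetical): register a freshly written reparse point
 * in $Extend/$Reparse; 'rtag' is the tag stored in the ATTR_REPARSE
 * attribute of 'ni'.
 */
static int ntfs_register_reparse_example(struct ntfs_sb_info *sbi,
					 struct ntfs_inode *ni, __le32 rtag)
{
	struct MFT_REF ref;

	mi_get_ref(&ni->mi, &ref);
	return ntfs_insert_reparse(sbi, rtag, &ref);
}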
2379 
2380 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2381 			const struct MFT_REF *ref)
2382 {
2383 	int err, diff;
2384 	struct ntfs_inode *ni = sbi->reparse.ni;
2385 	struct ntfs_index *indx = &sbi->reparse.index_r;
2386 	struct ntfs_fnd *fnd = NULL;
2387 	struct REPARSE_KEY rkey;
2388 	struct NTFS_DE_R *re;
2389 	struct INDEX_ROOT *root_r;
2390 
2391 	if (!ni)
2392 		return -EINVAL;
2393 
2394 	rkey.ReparseTag = rtag;
2395 	rkey.ref = *ref;
2396 
2397 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2398 
2399 	if (rtag) {
2400 		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2401 		goto out1;
2402 	}
2403 
2404 	fnd = fnd_get();
2405 	if (!fnd) {
2406 		err = -ENOMEM;
2407 		goto out1;
2408 	}
2409 
2410 	root_r = indx_get_root(indx, ni, NULL, NULL);
2411 	if (!root_r) {
2412 		err = -EINVAL;
2413 		goto out;
2414 	}
2415 
2416 	/* 1 forces indx_find() to ignore rkey.ReparseTag when comparing keys. */
2417 	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2418 			(struct NTFS_DE **)&re, fnd);
2419 	if (err)
2420 		goto out;
2421 
2422 	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2423 		/* Impossible. Looks like the volume is corrupt. */
2424 		goto out;
2425 	}
2426 
2427 	memcpy(&rkey, &re->key, sizeof(rkey));
2428 
2429 	fnd_put(fnd);
2430 	fnd = NULL;
2431 
2432 	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2435 
2436 out:
2437 	fnd_put(fnd);
2438 
2439 out1:
2440 	mark_inode_dirty(&ni->vfs_inode);
2441 	ni_unlock(ni);
2442 
2443 	return err;
2444 }
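
/*
 * Usage sketch (hypothetical): when the tag of the entry being removed
 * is unknown, rtag == 0 makes ntfs_remove_reparse() look the entry up
 * by its MFT reference alone before deleting it.
 */
static int ntfs_unregister_reparse_example(struct ntfs_sb_info *sbi,
					   struct ntfs_inode *ni)
{
	struct MFT_REF ref;

	mi_get_ref(&ni->mi, &ref);
	return ntfs_remove_reparse(sbi, 0, &ref);
}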
2445 
2446 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2447 					  CLST len)
2448 {
2449 	ntfs_unmap_meta(sbi->sb, lcn, len);
2450 	ntfs_discard(sbi, lcn, len);
2451 }
2452 
2453 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2454 {
2455 	CLST end, i, zone_len, zlen;
2456 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2457 
2458 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2459 	if (!wnd_is_used(wnd, lcn, len)) {
2460 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2461 
2462 		end = lcn + len;
2463 		len = 0;
2464 		for (i = lcn; i < end; i++) {
2465 			if (wnd_is_used(wnd, i, 1)) {
2466 				if (!len)
2467 					lcn = i;
2468 				len += 1;
2469 				continue;
2470 			}
2471 
2472 			if (!len)
2473 				continue;
2474 
2475 			if (trim)
2476 				ntfs_unmap_and_discard(sbi, lcn, len);
2477 
2478 			wnd_set_free(wnd, lcn, len);
2479 			len = 0;
2480 		}
2481 
2482 		if (!len)
2483 			goto out;
2484 	}
2485 
2486 	if (trim)
2487 		ntfs_unmap_and_discard(sbi, lcn, len);
2488 	wnd_set_free(wnd, lcn, len);
2489 
2490 	/* Append to MFT zone, if possible. */
2491 	zone_len = wnd_zone_len(wnd);
2492 	zlen = min(zone_len + len, sbi->zone_max);
2493 
2494 	if (zlen == zone_len) {
2495 		/* MFT zone already has maximum size. */
2496 	} else if (!zone_len) {
2497 		/* Create MFT zone only if 'zlen' is large enough. */
2498 		if (zlen == sbi->zone_max)
2499 			wnd_zone_set(wnd, lcn, zlen);
2500 	} else {
2501 		CLST zone_lcn = wnd_zone_bit(wnd);
2502 
2503 		if (lcn + len == zone_lcn) {
2504 			/* Append into head MFT zone. */
2505 			wnd_zone_set(wnd, lcn, zlen);
2506 		} else if (zone_lcn + zone_len == lcn) {
2507 			/* Append into tail MFT zone. */
2508 			wnd_zone_set(wnd, zone_lcn, zlen);
2509 		}
2510 	}
2511 
2512 out:
2513 	up_write(&wnd->rw_lock);
2514 }
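
/*
 * Worked example (illustrative): with sbi->zone_max == 0x1000, an
 * existing zone [0x8000, 0x8800) and a freed extent [0x7800, 0x8000),
 * lcn + len == zone_lcn, so the zone grows head-wards to
 * [0x7800, 0x8800), i.e. the maximum 0x1000 clusters.
 */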
2515 
2516 /*
2517  * run_deallocate - Deallocate clusters.
2518  */
2519 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2520 {
2521 	CLST lcn, len;
2522 	size_t idx = 0;
2523 
2524 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2525 		if (lcn == SPARSE_LCN)
2526 			continue;
2527 
2528 		mark_as_free_ex(sbi, lcn, len, trim);
2529 	}
2530 
2531 	return 0;
2532 }
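
/*
 * Usage sketch (hypothetical): release the clusters of a run that is no
 * longer needed, discarding them on the device, then free the in-memory
 * run itself.
 */
static int ntfs_drop_run_example(struct ntfs_sb_info *sbi,
				 struct runs_tree *run)
{
	int err = run_deallocate(sbi, run, true);

	run_close(run);
	return err;
}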
2533 
2534 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2535 {
2536 	int i, ch;
2537 
2538 	/* Check for forbidden characters. */
2539 	for (i = 0; i < fname->len; ++i) {
2540 		ch = le16_to_cpu(fname->name[i]);
2541 
2542 		/* Control characters. */
2543 		if (ch < 0x20)
2544 			return true;
2545 
2546 		switch (ch) {
2547 		/* Disallowed by Windows. */
2548 		case '\\':
2549 		case '/':
2550 		case ':':
2551 		case '*':
2552 		case '?':
2553 		case '<':
2554 		case '>':
2555 		case '|':
2556 		case '\"':
2557 			return true;
2558 
2559 		default:
2560 			/* Allowed character. */
2561 			break;
2562 		}
2563 	}
2564 
2565 	/* File names cannot end with a space or a dot. */
2566 	if (fname->len > 0) {
2567 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2568 		if (ch == ' ' || ch == '.')
2569 			return true;
2570 	}
2571 
2572 	return false;
2573 }
2574 
2575 static inline bool is_reserved_name(struct ntfs_sb_info *sbi,
2576 				    const struct le_str *fname)
2577 {
2578 	int port_digit;
2579 	const __le16 *name = fname->name;
2580 	int len = fname->len;
2581 	u16 *upcase = sbi->upcase;
2582 
2583 	/* Check for 3-char reserved names (device names). */
2584 	/* The name by itself or with any extension is forbidden. */
2585 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2586 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2587 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2588 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2589 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2590 			return true;
2591 
2592 	/* Check for 4-char reserved names (port name followed by 1..9). */
2593 	/* The name by itself or with any extension is forbidden. */
2594 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2595 		port_digit = le16_to_cpu(name[3]);
2596 		if (port_digit >= '1' && port_digit <= '9')
2597 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase, false) ||
2598 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase, false))
2599 				return true;
2600 	}
2601 
2602 	return false;
2603 }
2604 
2605 /*
2606  * valid_windows_name - Check if a file name is valid in Windows.
2607  */
2608 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2609 {
2610 	return !name_has_forbidden_chars(fname) &&
2611 	       !is_reserved_name(sbi, fname);
2612 }
2613
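/*
 * Usage sketch (hypothetical): a create path can use this check to
 * reject names that Windows would refuse, as the "windows_names" mount
 * option does.
 */
static int ntfs_check_name_example(struct ntfs_sb_info *sbi,
				   const struct le_str *uni)
{
	return valid_windows_name(sbi, uni) ? 0 : -EINVAL;
}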