// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/nls.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

// clang-format off
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

// clang-format on

/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}
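
/*
 * Worked example of the fixup (update sequence) mechanism above
 * (a minimal sketch; the offsets are illustrative only): for a
 * 1024-byte record with fix_off == 0x30 and fix_num == 3, the Update
 * Sequence Array at offset 0x30 holds the USN followed by one saved
 * word per 512-byte sector. ntfs_fix_pre_write() bumps the USN and
 * effectively does:
 *
 *	u16 *usa = Add2Ptr(rhdr, 0x30);		// usa[0] is the USN
 *	u16 *tail1 = Add2Ptr(rhdr, 512 - 2);	// last word of sector 0
 *	u16 *tail2 = Add2Ptr(rhdr, 1024 - 2);	// last word of sector 1
 *	usa[1] = *tail1; *tail1 = usa[0];
 *	usa[2] = *tail2; *tail2 = usa[0];
 *
 * ntfs_fix_post_read() reverses this: any sector tail that no longer
 * equals usa[0] indicates a torn write, reported as -E_NTFS_FIXUP.
 */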

/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skipping $Extend due to NTFS version");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
		inode = NULL;
		goto out;
	}
	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}

int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);
	/* LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;
	/* Fill LogFile with -1 if it is initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_look_for_free_space - Look for a free space in bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always in use, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);
	/* Check for too big a request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}
	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
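
	/*
	 * Worked example (numbers are illustrative only): with a zone of
	 * zlen == 0x1000 clusters and a request of len == 0x300, ztrim
	 * clamps up to zlen2 == 0x800, so new_zlen == 0x800 clusters stay
	 * reserved for the MFT. A huge request (len >= zlen) shrinks the
	 * zone down to NTFS_MIN_MFT_ZONE, never below it.
	 */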

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}

/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
}
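
/*
 * Worked example for the fallback above (illustrative sizes, assuming
 * 1K MFT records, i.e. record_bits == 10, and 4K clusters): a request
 * for mlen == 16 records that cannot be served from the MFT bitmap
 * needs bytes_to_cluster(sbi, 16 << 10) == 4 spare clusters in 'avail'
 * so that $MFT::DATA itself can grow to hold the new records.
 */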

/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records;
 * see attrib.c: "at least two MFT to avoid recursive loop".
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}
	/* No MFT zone. Find the free MFT record nearest to 0. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area [11-16) ==
		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
		 * marks it as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						le32_to_cpu(ref.low));
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request is to get a record for general purposes. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}

/*
 * ntfs_mark_rec_free - Mark record as free.
 * is_mft - true if we are changing MFT
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything unless we have a non-empty MFT zone. */
	if (wnd_zone_len(wnd))
		return 0;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN for the MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
	    unlikely(ntfs3_forced_shutdown(sb)))
		return;

	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;

		err = wait ? sync_dirty_buffer(bh2) : 0;

		put_bh(bh2);
		if (err)
			return;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}

/*
 * ntfs_bad_inode
 *
 * Marks inode as bad and marks fs as 'dirty'
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	ntfs_inode_err(inode, "%s", hint);
	make_bad_inode(inode);
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	__le16 info_flags;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
		mi->dirty = true;
	}
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty_sync(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/* write mft record on disk. */
	err = _ni_write_inode(&ni->vfs_inode, 1);

	return err;
}

/*
 * security_hash - Calculates a hash of security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}
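
/*
 * Worked example (illustrative only): for a descriptor whose first two
 * little-endian 32-bit words are 1 and 2, security_hash() computes
 *	hash = rol32(0, 3) + 1 = 0x00000001
 *	hash = rol32(1, 3) + 2 = 0x0000000A
 * i.e. each step rotates the accumulator left by 3 bits and adds the
 * next word; any trailing bytes (bytes % 4) do not contribute. This
 * value is used as the $SDH index key for descriptor deduplication.
 */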

/*
 * simple wrapper for sb_bread_unmovable.
 */
struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
{
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct buffer_head *bh;

	if (unlikely(block >= sbi->volume.blocks)) {
		/* prevent generic message "attempt to access beyond end of device" */
		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
			 (u64)block << sb->s_blocksize_bits);
		return NULL;
	}

	bh = sb_bread_unmovable(sb, block);
	if (bh)
		return bh;

	ntfs_err(sb, "failed to read volume at offset 0x%llx",
		 (u64)block << sb->s_blocksize_bits);
	return NULL;
}

int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);

		if (!bh)
			return -EIO;

		if (op > bytes)
			op = bytes;

		memcpy(buffer, bh->b_data + off, op);

		put_bh(bh);

		bytes -= op;
		buffer = Add2Ptr(buffer, op);
	}

	return 0;
}
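
/*
 * Worked example for the loop above (illustrative only): with a 4K
 * block size (s_blocksize_bits == 12), a read of 6000 bytes at
 * lbo == 0x1F00 touches block 1 first (off == 0xF00, so the first
 * chunk is op == 0x100 bytes), then block 2 in full and block 3 in
 * part: the loop splits an arbitrary byte range into per-block
 * memcpy() segments, resetting 'off' to 0 after the first block.
 */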

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use absolute boot's 'MFTCluster' to read record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   enum req_op op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Round the range [vbo, vbo + bytes) out to 512-byte boundaries. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;
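
	/*
	 * Worked example (illustrative only): vbo == 0x10f0, bytes == 0x20
	 * becomes vbo == 0x1000, bytes == 0x200. The request grows to cover
	 * whole 512-byte sectors, since bios operate on sector granularity.
	 */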

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk logfile range with -1; this marks the logfile as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (!err)
		err = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);
out:
	unlock_page(fill);
	put_page(fill);

	return err;
}

int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}
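
/*
 * Worked example for the VBO -> LBO mapping above (illustrative only):
 * with 4K clusters (cluster_bits == 12) and a run entry mapping
 * vcn 2 -> lcn 100 for 3 clusters, vbo == 0x2321 yields
 * *lbo == (100 << 12) + 0x321 and *bytes == (3 << 12) - 0x321, i.e.
 * the count of contiguous bytes readable before the next run boundary.
 */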

struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
				  enum RECORD_FLAG flag)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);

static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}
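
/*
 * For example, a SID with two subauthorities (such as S-1-5-32-544)
 * occupies the 8 fixed header bytes plus 2 * sizeof(__le32) == 16
 * bytes total; struct_size() computes exactly that from
 * SubAuthorityCount, with built-in overflow checking.
 */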

/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}

bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
		goto out;
	}

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
		goto out;
	}

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security at the end of bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}

/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < sizeof(struct SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
		/* Security descriptor looks too big; 0x10000 is an arbitrary upper bound. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - sizeof(struct SECURITY_HDR);

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       sizeof(struct SECURITY_HDR),
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}

/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor.  When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip
 * over the mirror portion.
 */
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	u32 next, left;

	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);

	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;

	if (inserted)
		*inserted = false;
	*security_id = SECURITY_ID_INVALID;

	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	if (!d_security)
		return -ENOMEM;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sdh = fnd_get();
	if (!fnd_sdh) {
		err = -ENOMEM;
		goto out;
	}

	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
	if (!root_sdh) {
		err = -EINVAL;
		goto out;
	}

	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Check if such security already exists.
	 * Use "SDH" and the hash to get the offset in "SDS".
2138 	 */
2139 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2140 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2141 			fnd_sdh);
2142 	if (err)
2143 		goto out;
2144 
2145 	while (e) {
2146 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2147 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2148 					       le64_to_cpu(e->sec_hdr.off),
2149 					       d_security, new_sec_size, NULL);
2150 			if (err)
2151 				goto out;
2152 
2153 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2154 			    d_security->key.hash == hash_key.hash &&
2155 			    !memcmp(d_security + 1, sd, size_sd)) {
2156 				*security_id = d_security->key.sec_id;
2157 				/* Such security already exists. */
2158 				err = 0;
2159 				goto out;
2160 			}
2161 		}
2162 
2163 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2164 				     (struct NTFS_DE **)&e, fnd_sdh);
2165 		if (err)
2166 			goto out;
2167 
2168 		if (!e || e->key.hash != hash_key.hash)
2169 			break;
2170 	}
2171 
2172 	/* Account for the unused space in the current 256K chunk. */
2173 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2174 	left = SecurityDescriptorsBlockSize - next;
2175 
2176 	/* Skip to the next chunk if the new descriptor does not fit. */
2177 	if (left < new_sec_size) {
2178 		/* Skip "left" gap bytes plus the 256K mirror chunk. */
2179 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2180 	}
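	/*
	 * Illustrative numbers (an assumption, not from the source): with
	 * next_off == 0x3ff00, the current chunk has left == 0x100 bytes.
	 * A descriptor with new_sec_size == 0x200 does not fit, so next_off
	 * becomes 0x3ff00 + 0x40000 + 0x100 == 0x80000: the start of the
	 * next main chunk, just past the mirror chunk [0x40000, 0x80000).
	 */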
2181 
2182 	/* Zero tail of previous security. */
2183 	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2184 
2185 	/*
2186 	 * Example:
2187 	 * 0x40438 == ni->vfs_inode.i_size
2188 	 * 0x00440 == sbi->security.next_off
2189 	 * The tail [0x438, 0x440) would need zeroing, e.g.:
2190 	 *	if (next > used)
2191 	 *		zero (next - used) bytes starting at
2192 	 *		(sbi->security.next_off - (next - used))
2193 	 */
2194 
2195 	/* Format new security descriptor. */
2196 	d_security->key.hash = hash_key.hash;
2197 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2198 	d_security->off = cpu_to_le64(sbi->security.next_off);
2199 	d_security->size = cpu_to_le32(new_sec_size);
2200 	memcpy(d_security + 1, sd, size_sd);
2201 
2202 	/* Write main SDS bucket. */
2203 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2204 				d_security, aligned_sec_size, 0);
2205 
2206 	if (err)
2207 		goto out;
2208 
2209 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2210 	new_sds_size = mirr_off + aligned_sec_size;
2211 
2212 	if (new_sds_size > ni->vfs_inode.i_size) {
2213 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2214 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2215 				    new_sds_size, &new_sds_size, false, NULL);
2216 		if (err)
2217 			goto out;
2218 	}
2219 
2220 	/* Write copy SDS bucket. */
2221 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2222 				aligned_sec_size, 0);
2223 	if (err)
2224 		goto out;
2225 
2226 	/* Fill SII entry. */
2227 	sii_e.de.view.data_off =
2228 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2229 	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2230 	sii_e.de.view.res = 0;
2231 	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
2232 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2233 	sii_e.de.flags = 0;
2234 	sii_e.de.res = 0;
2235 	sii_e.sec_id = d_security->key.sec_id;
2236 	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2237 
2238 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2239 	if (err)
2240 		goto out;
2241 
2242 	/* Fill SDH entry. */
2243 	sdh_e.de.view.data_off =
2244 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2245 	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2246 	sdh_e.de.view.res = 0;
2247 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2248 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2249 	sdh_e.de.flags = 0;
2250 	sdh_e.de.res = 0;
2251 	sdh_e.key.hash = d_security->key.hash;
2252 	sdh_e.key.sec_id = d_security->key.sec_id;
2253 	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2254 	sdh_e.magic[0] = cpu_to_le16('I');
2255 	sdh_e.magic[1] = cpu_to_le16('I');
2256 
2257 	fnd_clear(fnd_sdh);
2258 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2259 				fnd_sdh, 0);
2260 	if (err)
2261 		goto out;
2262 
2263 	*security_id = d_security->key.sec_id;
2264 	if (inserted)
2265 		*inserted = true;
2266 
2267 	/* Update Id and offset for next descriptor. */
2268 	sbi->security.next_id += 1;
2269 	sbi->security.next_off += aligned_sec_size;
2270 
2271 out:
2272 	fnd_put(fnd_sdh);
2273 	mark_inode_dirty(&ni->vfs_inode);
2274 	ni_unlock(ni);
2275 	kfree(d_security);
2276 
2277 	return err;
2278 }
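/*
 * A minimal caller sketch (an illustration, not code from this file):
 * ntfs_insert_security() either returns the id of an existing identical
 * descriptor or appends a new one, reporting which case via *inserted:
 *
 *	__le32 sec_id;
 *	bool is_new;
 *	int err = ntfs_insert_security(sbi, sd, size_sd, &sec_id, &is_new);
 *
 *	// On success, sec_id can be stored in the file's standard
 *	// information; use_security_id() is a hypothetical helper.
 *	if (!err)
 *		use_security_id(ni, sec_id);
 */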
2279 
2280 /*
2281  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2282  */
2283 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2284 {
2285 	int err;
2286 	struct ntfs_inode *ni = sbi->reparse.ni;
2287 	struct ntfs_index *indx = &sbi->reparse.index_r;
2288 	struct ATTRIB *attr;
2289 	struct ATTR_LIST_ENTRY *le;
2290 	const struct INDEX_ROOT *root_r;
2291 
2292 	if (!ni)
2293 		return 0;
2294 
2295 	le = NULL;
2296 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2297 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2298 	if (!attr) {
2299 		err = -EINVAL;
2300 		goto out;
2301 	}
2302 
2303 	root_r = resident_data(attr);
2304 	if (root_r->type != ATTR_ZERO ||
2305 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2306 		err = -EINVAL;
2307 		goto out;
2308 	}
2309 
2310 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2311 	if (err)
2312 		goto out;
2313 
2314 out:
2315 	return err;
2316 }
2317 
2318 /*
2319  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2320  */
2321 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2322 {
2323 	int err;
2324 	struct ntfs_inode *ni = sbi->objid.ni;
2325 	struct ntfs_index *indx = &sbi->objid.index_o;
2326 	struct ATTRIB *attr;
2327 	struct ATTR_LIST_ENTRY *le;
2328 	const struct INDEX_ROOT *root;
2329 
2330 	if (!ni)
2331 		return 0;
2332 
2333 	le = NULL;
2334 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2335 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2336 	if (!attr) {
2337 		err = -EINVAL;
2338 		goto out;
2339 	}
2340 
2341 	root = resident_data(attr);
2342 	if (root->type != ATTR_ZERO ||
2343 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2344 		err = -EINVAL;
2345 		goto out;
2346 	}
2347 
2348 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2349 	if (err)
2350 		goto out;
2351 
2352 out:
2353 	return err;
2354 }
2355 
2356 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2357 {
2358 	int err;
2359 	struct ntfs_inode *ni = sbi->objid.ni;
2360 	struct ntfs_index *indx = &sbi->objid.index_o;
2361 
2362 	if (!ni)
2363 		return -EINVAL;
2364 
2365 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2366 
2367 	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2368 
2369 	mark_inode_dirty(&ni->vfs_inode);
2370 	ni_unlock(ni);
2371 
2372 	return err;
2373 }
2374 
2375 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2376 			const struct MFT_REF *ref)
2377 {
2378 	int err;
2379 	struct ntfs_inode *ni = sbi->reparse.ni;
2380 	struct ntfs_index *indx = &sbi->reparse.index_r;
2381 	struct NTFS_DE_R re;
2382 
2383 	if (!ni)
2384 		return -EINVAL;
2385 
2386 	memset(&re, 0, sizeof(re));
2387 
2388 	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2389 	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2390 	re.de.key_size = cpu_to_le16(sizeof(re.key));
2391 
2392 	re.key.ReparseTag = rtag;
2393 	memcpy(&re.key.ref, ref, sizeof(*ref));
2394 
2395 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2396 
2397 	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2398 
2399 	mark_inode_dirty(&ni->vfs_inode);
2400 	ni_unlock(ni);
2401 
2402 	return err;
2403 }
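/*
 * Usage sketch (an assumption about callers, not code from this file):
 * when a reparse point is set on an inode, its tag and MFT reference are
 * recorded in $Extend/$Reparse:
 *
 *	struct MFT_REF ref;
 *	int err;
 *
 *	mi_get_ref(&ni->mi, &ref); // reference of the base MFT record
 *	err = ntfs_insert_reparse(sbi, rp->ReparseTag, &ref);
 */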
2404 
2405 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2406 			const struct MFT_REF *ref)
2407 {
2408 	int err, diff;
2409 	struct ntfs_inode *ni = sbi->reparse.ni;
2410 	struct ntfs_index *indx = &sbi->reparse.index_r;
2411 	struct ntfs_fnd *fnd = NULL;
2412 	struct REPARSE_KEY rkey;
2413 	struct NTFS_DE_R *re;
2414 	struct INDEX_ROOT *root_r;
2415 
2416 	if (!ni)
2417 		return -EINVAL;
2418 
2419 	rkey.ReparseTag = rtag;
2420 	rkey.ref = *ref;
2421 
2422 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2423 
2424 	if (rtag) {
2425 		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2426 		goto out1;
2427 	}
2428 
2429 	fnd = fnd_get();
2430 	if (!fnd) {
2431 		err = -ENOMEM;
2432 		goto out1;
2433 	}
2434 
2435 	root_r = indx_get_root(indx, ni, NULL, NULL);
2436 	if (!root_r) {
2437 		err = -EINVAL;
2438 		goto out;
2439 	}
2440 
2441 	/* Passing 1 forces the key compare to ignore rkey.ReparseTag. */
2442 	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2443 			(struct NTFS_DE **)&re, fnd);
2444 	if (err)
2445 		goto out;
2446 
2447 	if (!re || memcmp(&re->key.ref, ref, sizeof(*ref))) {
2448 		/* Entry missing or mismatched; the volume looks corrupt. */
2449 		goto out;
2450 	}
2451 
2452 	memcpy(&rkey, &re->key, sizeof(rkey));
2453 
2454 	fnd_put(fnd);
2455 	fnd = NULL;
2456 
2457 	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2458 	if (err)
2459 		goto out;
2460 
2461 out:
2462 	fnd_put(fnd);
2463 
2464 out1:
2465 	mark_inode_dirty(&ni->vfs_inode);
2466 	ni_unlock(ni);
2467 
2468 	return err;
2469 }
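/*
 * Usage note (illustrative): a caller that still knows the tag deletes by
 * the full key in one step, while passing rtag == 0 makes the function
 * look the entry up by MFT reference alone before deleting:
 *
 *	ntfs_remove_reparse(sbi, rp->ReparseTag, &ref); // tag known
 *	ntfs_remove_reparse(sbi, 0, &ref);              // tag unknown
 */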
2470 
2471 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2472 					  CLST len)
2473 {
2474 	ntfs_unmap_meta(sbi->sb, lcn, len);
2475 	ntfs_discard(sbi, lcn, len);
2476 }
2477 
2478 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2479 {
2480 	CLST end, i, zone_len, zlen;
2481 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2482 	bool dirty = false;
2483 
2484 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2485 	if (!wnd_is_used(wnd, lcn, len)) {
2486 		/* Mark volume as dirty outside of wnd->rw_lock. */
2487 		dirty = true;
2488 
2489 		end = lcn + len;
2490 		len = 0;
2491 		for (i = lcn; i < end; i++) {
2492 			if (wnd_is_used(wnd, i, 1)) {
2493 				if (!len)
2494 					lcn = i;
2495 				len += 1;
2496 				continue;
2497 			}
2498 
2499 			if (!len)
2500 				continue;
2501 
2502 			if (trim)
2503 				ntfs_unmap_and_discard(sbi, lcn, len);
2504 
2505 			wnd_set_free(wnd, lcn, len);
2506 			len = 0;
2507 		}
2508 
2509 		if (!len)
2510 			goto out;
2511 	}
2512 
2513 	if (trim)
2514 		ntfs_unmap_and_discard(sbi, lcn, len);
2515 	wnd_set_free(wnd, lcn, len);
2516 
2517 	/* Append to MFT zone, if possible. */
2518 	zone_len = wnd_zone_len(wnd);
2519 	zlen = min(zone_len + len, sbi->zone_max);
2520 
2521 	if (zlen == zone_len) {
2522 		/* MFT zone already has maximum size. */
2523 	} else if (!zone_len) {
2524 		/* Create MFT zone only if 'zlen' is large enough. */
2525 		if (zlen == sbi->zone_max)
2526 			wnd_zone_set(wnd, lcn, zlen);
2527 	} else {
2528 		CLST zone_lcn = wnd_zone_bit(wnd);
2529 
2530 		if (lcn + len == zone_lcn) {
2531 			/* Append into head MFT zone. */
2532 			wnd_zone_set(wnd, lcn, zlen);
2533 		} else if (zone_lcn + zone_len == lcn) {
2534 			/* Append into tail MFT zone. */
2535 			wnd_zone_set(wnd, zone_lcn, zlen);
2536 		}
2537 	}
2538 
2539 out:
2540 	up_write(&wnd->rw_lock);
2541 	if (dirty)
2542 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2543 }
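/*
 * Illustrative example (assumed numbers): with sbi->zone_max == 0x1000
 * and an existing MFT zone [0x8000, 0x8800), freeing [0x7c00, 0x8000)
 * touches the zone head, so the zone grows to [0x7c00, 0x8800); freeing
 * [0x8800, 0x8c00) instead extends the tail to [0x8000, 0x8c00). A freed
 * range adjacent to neither end leaves the zone unchanged.
 */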
2544 
2545 /*
2546  * run_deallocate - Deallocate clusters.
2547  */
2548 int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2549 		   bool trim)
2550 {
2551 	CLST lcn, len;
2552 	size_t idx = 0;
2553 
2554 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2555 		if (lcn == SPARSE_LCN)
2556 			continue;
2557 
2558 		mark_as_free_ex(sbi, lcn, len, trim);
2559 	}
2560 
2561 	return 0;
2562 }
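/*
 * For illustration (an assumed run list): the fragments
 * (lcn 100, len 8), (SPARSE_LCN, len 4) and (lcn 50, len 2) free the
 * cluster ranges [100, 108) and [50, 52); the sparse fragment owns no
 * real clusters and is skipped.
 */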
2563 
2564 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2565 {
2566 	int i, ch;
2567 
2568 	/* Check for forbidden characters. */
2569 	for (i = 0; i < fname->len; ++i) {
2570 		ch = le16_to_cpu(fname->name[i]);
2571 
2572 		/* Control characters. */
2573 		if (ch < 0x20)
2574 			return true;
2575 
2576 		switch (ch) {
2577 		/* Disallowed by Windows. */
2578 		case '\\':
2579 		case '/':
2580 		case ':':
2581 		case '*':
2582 		case '?':
2583 		case '<':
2584 		case '>':
2585 		case '|':
2586 		case '\"':
2587 			return true;
2588 
2589 		default:
2590 			/* Allowed character. */
2591 			break;
2592 		}
2593 	}
2594 
2595 	/* File names cannot end with a space or a period. */
2596 	if (fname->len > 0) {
2597 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2598 		if (ch == ' ' || ch == '.')
2599 			return true;
2600 	}
2601 
2602 	return false;
2603 }
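/*
 * For illustration (assumed names): "a:b", "what?" and "name." are all
 * rejected by the checks above, while "a.b" and "v1.2.3" pass; only the
 * final character may not be ' ' or '.'.
 */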
2604 
2605 static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2606 				    const struct le_str *fname)
2607 {
2608 	int port_digit;
2609 	const __le16 *name = fname->name;
2610 	int len = fname->len;
2611 	const u16 *upcase = sbi->upcase;
2612 
2613 	/* Check for 3-char reserved names (device names). */
2614 	/* The name by itself or with any extension is forbidden. */
2615 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2616 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2617 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2618 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2619 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2620 			return true;
2621 
2622 	/* Check for 4-char reserved names (a port name followed by 1..9). */
2623 	/* The name by itself or with any extension is forbidden. */
2624 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2625 		port_digit = le16_to_cpu(name[3]);
2626 		if (port_digit >= '1' && port_digit <= '9')
2627 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2628 					    false) ||
2629 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2630 					    false))
2631 				return true;
2632 	}
2633 
2634 	return false;
2635 }
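/*
 * For illustration (assumed names): "CON", "NUL.txt" and "COM1.log" are
 * all reserved by the checks above, while "CONSOLE", "COM0" and "LPT"
 * are not; the comparison is case-insensitive via the volume upcase
 * table.
 */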
2636 
2637 /*
2638  * valid_windows_name - Check if a file name is valid in Windows.
2639  */
2640 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2641 {
2642 	return !name_has_forbidden_chars(fname) &&
2643 	       !is_reserved_name(sbi, fname);
2644 }
2645 
2646 /*
2647  * ntfs_set_label - Update the current NTFS label.
2648  */
2649 int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2650 {
2651 	int err;
2652 	struct ATTRIB *attr;
2653 	struct ntfs_inode *ni = sbi->volume.ni;
2654 	const u8 max_ulen = 0x80; /* TODO: use attrdef to get maximum length */
2655 	/* Allocate PATH_MAX bytes. */
2656 	struct cpu_str *uni = __getname();
2657 
2658 	if (!uni)
2659 		return -ENOMEM;
2660 
2661 	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2662 				UTF16_LITTLE_ENDIAN);
2663 	if (err < 0)
2664 		goto out;
2665 
2666 	if (uni->len > max_ulen) {
2667 		ntfs_warn(sbi->sb, "new label is too long");
2668 		err = -EFBIG;
2669 		goto out;
2670 	}
2671 
2672 	ni_lock(ni);
2673 
2674 	/* Ignore any errors. */
2675 	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2676 
2677 	err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
2678 				 0, &attr, NULL, NULL);
2679 	if (err < 0)
2680 		goto unlock_out;
2681 
2682 	/* Write the new label into the on-disk structure. */
2683 	memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
2684 
2685 	/* Update the cached value of the current label. */
2686 	if (len >= ARRAY_SIZE(sbi->volume.label))
2687 		len = ARRAY_SIZE(sbi->volume.label) - 1;
2688 	memcpy(sbi->volume.label, label, len);
2689 	sbi->volume.label[len] = 0;
2690 	mark_inode_dirty_sync(&ni->vfs_inode);
2691 
2692 unlock_out:
2693 	ni_unlock(ni);
2694 
2695 	if (!err)
2696 		err = _ni_write_inode(&ni->vfs_inode, 0);
2697 
2698 out:
2699 	__putname(uni);
2700 	return err;
2701 }
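/*
 * A minimal usage sketch (an assumption, not from this file): the label
 * arrives as an NLS-encoded string, e.g. from an ioctl, and is converted
 * to UTF-16 internally:
 *
 *	u8 new_label[] = "backup";
 *	int err = ntfs_set_label(sbi, new_label, sizeof(new_label) - 1);
 *
 * On success, both the $VOLUME_NAME attribute and the cached
 * sbi->volume.label hold the new value.
 */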