xref: /openbmc/linux/fs/ntfs3/fsntfs.c (revision aded0023)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  */
7 
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
10 #include <linux/fs.h>
11 #include <linux/kernel.h>
12 #include <linux/nls.h>
13 
14 #include "debug.h"
15 #include "ntfs.h"
16 #include "ntfs_fs.h"
17 
18 // clang-format off
19 const struct cpu_str NAME_MFT = {
20 	4, 0, { '$', 'M', 'F', 'T' },
21 };
22 const struct cpu_str NAME_MIRROR = {
23 	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
24 };
25 const struct cpu_str NAME_LOGFILE = {
26 	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
27 };
28 const struct cpu_str NAME_VOLUME = {
29 	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
30 };
31 const struct cpu_str NAME_ATTRDEF = {
32 	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
33 };
34 const struct cpu_str NAME_ROOT = {
35 	1, 0, { '.' },
36 };
37 const struct cpu_str NAME_BITMAP = {
38 	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
39 };
40 const struct cpu_str NAME_BOOT = {
41 	5, 0, { '$', 'B', 'o', 'o', 't' },
42 };
43 const struct cpu_str NAME_BADCLUS = {
44 	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
45 };
46 const struct cpu_str NAME_QUOTA = {
47 	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
48 };
49 const struct cpu_str NAME_SECURE = {
50 	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
51 };
52 const struct cpu_str NAME_UPCASE = {
53 	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
54 };
55 const struct cpu_str NAME_EXTEND = {
56 	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
57 };
58 const struct cpu_str NAME_OBJID = {
59 	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
60 };
61 const struct cpu_str NAME_REPARSE = {
62 	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
63 };
64 const struct cpu_str NAME_USNJRNL = {
65 	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
66 };
67 const __le16 BAD_NAME[4] = {
68 	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
69 };
70 const __le16 I30_NAME[4] = {
71 	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
72 };
73 const __le16 SII_NAME[4] = {
74 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
75 };
76 const __le16 SDH_NAME[4] = {
77 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
78 };
79 const __le16 SDS_NAME[4] = {
80 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81 };
82 const __le16 SO_NAME[2] = {
83 	cpu_to_le16('$'), cpu_to_le16('O'),
84 };
85 const __le16 SQ_NAME[2] = {
86 	cpu_to_le16('$'), cpu_to_le16('Q'),
87 };
88 const __le16 SR_NAME[2] = {
89 	cpu_to_le16('$'), cpu_to_le16('R'),
90 };
91 
92 #ifdef CONFIG_NTFS3_LZX_XPRESS
93 const __le16 WOF_NAME[17] = {
94 	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
95 	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
96 	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
97 	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
98 	cpu_to_le16('a'),
99 };
100 #endif
101 
102 static const __le16 CON_NAME[3] = {
103 	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
104 };
105 
106 static const __le16 NUL_NAME[3] = {
107 	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
108 };
109 
110 static const __le16 AUX_NAME[3] = {
111 	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
112 };
113 
114 static const __le16 PRN_NAME[3] = {
115 	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
116 };
117 
118 static const __le16 COM_NAME[3] = {
119 	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
120 };
121 
122 static const __le16 LPT_NAME[3] = {
123 	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
124 };
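
/*
 * CON, NUL, AUX, PRN, COM and LPT above are the MS-DOS reserved device
 * names; Windows refuses to create files with these names (e.g. "CON",
 * "COM1", "LPT1"), so these arrays are presumably used by the filename
 * validation code in this driver.
 */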
125 
126 // clang-format on
127 
128 /*
129  * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
130  */
131 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
132 {
133 	u16 *fixup, *ptr;
134 	u16 sample;
135 	u16 fo = le16_to_cpu(rhdr->fix_off);
136 	u16 fn = le16_to_cpu(rhdr->fix_num);
137 
138 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
139 	    fn * SECTOR_SIZE > bytes) {
140 		return false;
141 	}
142 
143 	/* Get fixup pointer. */
144 	fixup = Add2Ptr(rhdr, fo);
145 
146 	if (*fixup >= 0x7FFF)
147 		*fixup = 1;
148 	else
149 		*fixup += 1;
150 
151 	sample = *fixup;
152 
153 	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
154 
155 	while (fn--) {
156 		*++fixup = *ptr;
157 		*ptr = sample;
158 		ptr += SECTOR_SIZE / sizeof(short);
159 	}
160 	return true;
161 }
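
/*
 * A worked example of the fixup (update sequence) mechanism, assuming a
 * 2-sector (1024-byte) record with fix_num == 3:
 *
 *   fixup[0] = USN (update sequence number, bumped on every write)
 *   fixup[1] = original last word of sector 0
 *   fixup[2] = original last word of sector 1
 *
 * Before the write, the last word of each 512-byte sector is saved into
 * the fixup array and overwritten with the USN. A torn (partial) write is
 * then detectable on read: any sector whose last word no longer equals
 * the USN was not written, and ntfs_fix_post_read() reports -E_NTFS_FIXUP.
 */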
162 
163 /*
164  * ntfs_fix_post_read - Remove fixups after reading from disk.
165  *
166  * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
167  */
168 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
169 		       bool simple)
170 {
171 	int ret;
172 	u16 *fixup, *ptr;
173 	u16 sample, fo, fn;
174 
175 	fo = le16_to_cpu(rhdr->fix_off);
176 	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
177 		      le16_to_cpu(rhdr->fix_num);
178 
179 	/* Check errors. */
180 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
181 	    fn * SECTOR_SIZE > bytes) {
182 		return -E_NTFS_CORRUPT;
183 	}
184 
185 	/* Get fixup pointer. */
186 	fixup = Add2Ptr(rhdr, fo);
187 	sample = *fixup;
188 	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
189 	ret = 0;
190 
191 	while (fn--) {
192 		/* Test current word. */
193 		if (*ptr != sample) {
194 			/* Fixup does not match! Is this a serious error? */
195 			ret = -E_NTFS_FIXUP;
196 		}
197 
198 		/* Replace fixup. */
199 		*ptr = *++fixup;
200 		ptr += SECTOR_SIZE / sizeof(short);
201 	}
202 
203 	return ret;
204 }
205 
206 /*
207  * ntfs_extend_init - Load $Extend file.
208  */
209 int ntfs_extend_init(struct ntfs_sb_info *sbi)
210 {
211 	int err;
212 	struct super_block *sb = sbi->sb;
213 	struct inode *inode, *inode2;
214 	struct MFT_REF ref;
215 
216 	if (sbi->volume.major_ver < 3) {
217 		ntfs_notice(sb, "Skip $Extend: NTFS version is less than 3.");
218 		return 0;
219 	}
220 
221 	ref.low = cpu_to_le32(MFT_REC_EXTEND);
222 	ref.high = 0;
223 	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
224 	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
225 	if (IS_ERR(inode)) {
226 		err = PTR_ERR(inode);
227 		ntfs_err(sb, "Failed to load $Extend (%d).", err);
228 		inode = NULL;
229 		goto out;
230 	}
231 
232 	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
233 	if (!S_ISDIR(inode->i_mode)) {
234 		err = -EINVAL;
235 		goto out;
236 	}
237 
238 	/* Try to find $ObjId */
239 	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
240 	if (inode2 && !IS_ERR(inode2)) {
241 		if (is_bad_inode(inode2)) {
242 			iput(inode2);
243 		} else {
244 			sbi->objid.ni = ntfs_i(inode2);
245 			sbi->objid_no = inode2->i_ino;
246 		}
247 	}
248 
249 	/* Try to find $Quota */
250 	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
251 	if (inode2 && !IS_ERR(inode2)) {
252 		sbi->quota_no = inode2->i_ino;
253 		iput(inode2);
254 	}
255 
256 	/* Try to find $Reparse */
257 	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
258 	if (inode2 && !IS_ERR(inode2)) {
259 		sbi->reparse.ni = ntfs_i(inode2);
260 		sbi->reparse_no = inode2->i_ino;
261 	}
262 
263 	/* Try to find $UsnJrnl */
264 	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
265 	if (inode2 && !IS_ERR(inode2)) {
266 		sbi->usn_jrnl_no = inode2->i_ino;
267 		iput(inode2);
268 	}
269 
270 	err = 0;
271 out:
272 	iput(inode);
273 	return err;
274 }
275 
276 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
277 {
278 	int err = 0;
279 	struct super_block *sb = sbi->sb;
280 	bool initialized = false;
281 	struct MFT_REF ref;
282 	struct inode *inode;
283 
284 	/* $LogFile must be smaller than 4GB. */
285 	if (ni->vfs_inode.i_size >= 0x100000000ull) {
286 		ntfs_err(sb, "\x24LogFile is larger than 4G.");
287 		err = -EINVAL;
288 		goto out;
289 	}
290 
291 	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
292 
293 	ref.low = cpu_to_le32(MFT_REC_MFT);
294 	ref.high = 0;
295 	ref.seq = cpu_to_le16(1);
296 
297 	inode = ntfs_iget5(sb, &ref, NULL);
298 
299 	if (IS_ERR(inode))
300 		inode = NULL;
301 
302 	if (!inode) {
303 		/* Try to use MFT copy. */
304 		u64 t64 = sbi->mft.lbo;
305 
306 		sbi->mft.lbo = sbi->mft.lbo2;
307 		inode = ntfs_iget5(sb, &ref, NULL);
308 		sbi->mft.lbo = t64;
309 		if (IS_ERR(inode))
310 			inode = NULL;
311 	}
312 
313 	if (!inode) {
314 		err = -EINVAL;
315 		ntfs_err(sb, "Failed to load $MFT.");
316 		goto out;
317 	}
318 
319 	sbi->mft.ni = ntfs_i(inode);
320 
321 	/* LogFile should not contain an attribute list. */
322 	err = ni_load_all_mi(sbi->mft.ni);
323 	if (!err)
324 		err = log_replay(ni, &initialized);
325 
326 	iput(inode);
327 	sbi->mft.ni = NULL;
328 
329 	sync_blockdev(sb->s_bdev);
330 	invalidate_bdev(sb->s_bdev);
331 
332 	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
333 		err = 0;
334 		goto out;
335 	}
336 
337 	if (sb_rdonly(sb) || !initialized)
338 		goto out;
339 
340 	/* Fill LogFile with -1 if it is initialized. */
341 	err = ntfs_bio_fill_1(sbi, &ni->file.run);
342 
343 out:
344 	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
345 
346 	return err;
347 }
348 
349 /*
350  * ntfs_look_for_free_space - Look for a free space in bitmap.
351  */
352 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
353 			     CLST *new_lcn, CLST *new_len,
354 			     enum ALLOCATE_OPT opt)
355 {
356 	int err;
357 	CLST alen;
358 	struct super_block *sb = sbi->sb;
359 	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
360 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
361 
362 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
363 	if (opt & ALLOCATE_MFT) {
364 		zlen = wnd_zone_len(wnd);
365 
366 		if (!zlen) {
367 			err = ntfs_refresh_zone(sbi);
368 			if (err)
369 				goto up_write;
370 
371 			zlen = wnd_zone_len(wnd);
372 		}
373 
374 		if (!zlen) {
375 			ntfs_err(sbi->sb, "no free space to extend mft");
376 			err = -ENOSPC;
377 			goto up_write;
378 		}
379 
380 		lcn = wnd_zone_bit(wnd);
381 		alen = min_t(CLST, len, zlen);
382 
383 		wnd_zone_set(wnd, lcn + alen, zlen - alen);
384 
385 		err = wnd_set_used(wnd, lcn, alen);
386 		if (err)
387 			goto up_write;
388 
389 		alcn = lcn;
390 		goto space_found;
391 	}
392 	/*
393 	 * Because cluster 0 is always in use, lcn == 0 means that we should
394 	 * use the cached value of 'next_free_lcn' to improve performance.
395 	 */
396 	if (!lcn)
397 		lcn = sbi->used.next_free_lcn;
398 
399 	if (lcn >= wnd->nbits)
400 		lcn = 0;
401 
402 	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
403 	if (alen)
404 		goto space_found;
405 
406 	/* Try to use clusters from MftZone. */
407 	zlen = wnd_zone_len(wnd);
408 	zeroes = wnd_zeroes(wnd);
409 
410 	/* Check for a too-large request. */
411 	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
412 		err = -ENOSPC;
413 		goto up_write;
414 	}
415 
416 	/* How many clusters to cut from the zone. */
417 	zlcn = wnd_zone_bit(wnd);
418 	zlen2 = zlen >> 1;
419 	ztrim = clamp_val(len, zlen2, zlen);
420 	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
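	/*
	 * Illustrative numbers: with zlen == 100 and len == 30,
	 * ztrim = clamp_val(30, 50, 100) = 50, so at least half of the zone
	 * is released, and new_zlen = max(100 - 50, NTFS_MIN_MFT_ZONE).
	 */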
421 
422 	wnd_zone_set(wnd, zlcn, new_zlen);
423 
424 	/* Allocate contiguous clusters. */
425 	alen = wnd_find(wnd, len, 0,
426 			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
427 	if (!alen) {
428 		err = -ENOSPC;
429 		goto up_write;
430 	}
431 
432 space_found:
433 	err = 0;
434 	*new_len = alen;
435 	*new_lcn = alcn;
436 
437 	ntfs_unmap_meta(sb, alcn, alen);
438 
439 	/* Set hint for next requests. */
440 	if (!(opt & ALLOCATE_MFT))
441 		sbi->used.next_free_lcn = alcn + alen;
442 up_write:
443 	up_write(&wnd->rw_lock);
444 	return err;
445 }
446 
447 /*
448  * ntfs_check_for_free_space
449  *
450  * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
451  */
452 bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
453 {
454 	size_t free, zlen, avail;
455 	struct wnd_bitmap *wnd;
456 
457 	wnd = &sbi->used.bitmap;
458 	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
459 	free = wnd_zeroes(wnd);
460 	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
461 	up_read(&wnd->rw_lock);
462 
463 	if (free < zlen + clen)
464 		return false;
465 
466 	avail = free - (zlen + clen);
467 
468 	wnd = &sbi->mft.bitmap;
469 	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
470 	free = wnd_zeroes(wnd);
471 	zlen = wnd_zone_len(wnd);
472 	up_read(&wnd->rw_lock);
473 
474 	if (free >= zlen + mlen)
475 		return true;
476 
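	/*
	 * The MFT bitmap had no room, so check whether the clusters left in
	 * $Bitmap can back an MFT extension of 'mlen' records:
	 * mlen << record_bits is their size in bytes, rounded up to whole
	 * clusters.
	 */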
477 	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
478 }
479 
480 /*
481  * ntfs_extend_mft - Allocate additional MFT records.
482  *
483  * sbi->mft.bitmap is locked for write.
484  *
485  * NOTE: recursive:
486  *	ntfs_look_free_mft ->
487  *	ntfs_extend_mft ->
488  *	attr_set_size ->
489  *	ni_insert_nonresident ->
490  *	ni_insert_attr ->
491  *	ni_ins_attr_ext ->
492  *	ntfs_look_free_mft ->
493  *	ntfs_extend_mft
494  *
495  * To avoid recursion, always allocate space for two new MFT records;
496  * see attrib.c: "at least two MFT to avoid recursive loop".
497  */
498 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
499 {
500 	int err;
501 	struct ntfs_inode *ni = sbi->mft.ni;
502 	size_t new_mft_total;
503 	u64 new_mft_bytes, new_bitmap_bytes;
504 	struct ATTRIB *attr;
505 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
506 
507 	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
508 	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
509 
510 	/* Step 1: Resize $MFT::DATA. */
511 	down_write(&ni->file.run_lock);
512 	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
513 			    new_mft_bytes, NULL, false, &attr);
514 
515 	if (err) {
516 		up_write(&ni->file.run_lock);
517 		goto out;
518 	}
519 
520 	attr->nres.valid_size = attr->nres.data_size;
521 	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
522 	ni->mi.dirty = true;
523 
524 	/* Step 2: Resize $MFT::BITMAP. */
525 	new_bitmap_bytes = bitmap_size(new_mft_total);
526 
527 	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
528 			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
529 
530 	/* Refresh MFT Zone if necessary. */
531 	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
532 
533 	ntfs_refresh_zone(sbi);
534 
535 	up_write(&sbi->used.bitmap.rw_lock);
536 	up_write(&ni->file.run_lock);
537 
538 	if (err)
539 		goto out;
540 
541 	err = wnd_extend(wnd, new_mft_total);
542 
543 	if (err)
544 		goto out;
545 
546 	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
547 
548 	err = _ni_write_inode(&ni->vfs_inode, 0);
549 out:
550 	return err;
551 }
552 
553 /*
554  * ntfs_look_free_mft - Look for a free MFT record.
555  */
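/*
 * Contract, as implemented below: if @mft is true, the caller is
 * extending the MFT itself and already holds sbi->mft.bitmap locked for
 * write; otherwise the lock is taken (and released) here and a
 * general-purpose record (normally *rno >= MFT_REC_USER) is returned.
 */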
556 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
557 		       struct ntfs_inode *ni, struct mft_inode **mi)
558 {
559 	int err = 0;
560 	size_t zbit, zlen, from, to, fr;
561 	size_t mft_total;
562 	struct MFT_REF ref;
563 	struct super_block *sb = sbi->sb;
564 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
565 	u32 ir;
566 
567 	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
568 		      MFT_REC_FREE - MFT_REC_RESERVED);
569 
570 	if (!mft)
571 		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
572 
573 	zlen = wnd_zone_len(wnd);
574 
575 	/* Always reserve space for MFT. */
576 	if (zlen) {
577 		if (mft) {
578 			zbit = wnd_zone_bit(wnd);
579 			*rno = zbit;
580 			wnd_zone_set(wnd, zbit + 1, zlen - 1);
581 		}
582 		goto found;
583 	}
584 
585 	/* No MFT zone. Find the free MFT record nearest to 0. */
586 	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
587 		/* Resize MFT */
588 		mft_total = wnd->nbits;
589 
590 		err = ntfs_extend_mft(sbi);
591 		if (!err) {
592 			zbit = mft_total;
593 			goto reserve_mft;
594 		}
595 
596 		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
597 			goto out;
598 
599 		err = 0;
600 
601 		/*
602 		 * Look for a free record in the reserved area [11-16) ==
603 		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
604 		 * marks this area as used.
605 		 */
606 		if (!sbi->mft.reserved_bitmap) {
607 			/* Create the internal 5-bit bitmap once per session. */
608 			sbi->mft.reserved_bitmap = 0xFF;
609 
610 			ref.high = 0;
611 			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
612 				struct inode *i;
613 				struct ntfs_inode *ni;
614 				struct MFT_REC *mrec;
615 
616 				ref.low = cpu_to_le32(ir);
617 				ref.seq = cpu_to_le16(ir);
618 
619 				i = ntfs_iget5(sb, &ref, NULL);
620 				if (IS_ERR(i)) {
621 next:
622 					ntfs_notice(
623 						sb,
624 						"Invalid reserved record %x",
625 						ref.low);
626 					continue;
627 				}
628 				if (is_bad_inode(i)) {
629 					iput(i);
630 					goto next;
631 				}
632 
633 				ni = ntfs_i(i);
634 
635 				mrec = ni->mi.mrec;
636 
637 				if (!is_rec_base(mrec))
638 					goto next;
639 
640 				if (mrec->hard_links)
641 					goto next;
642 
643 				if (!ni_std(ni))
644 					goto next;
645 
646 				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
647 						 NULL, 0, NULL, NULL))
648 					goto next;
649 
650 				__clear_bit(ir - MFT_REC_RESERVED,
651 					    &sbi->mft.reserved_bitmap);
652 			}
653 		}
654 
655 		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
656 		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
657 					  MFT_REC_FREE, MFT_REC_RESERVED);
658 		if (zbit >= MFT_REC_FREE) {
659 			sbi->mft.next_reserved = MFT_REC_FREE;
660 			goto out;
661 		}
662 
663 		zlen = 1;
664 		sbi->mft.next_reserved = zbit;
665 	} else {
666 reserve_mft:
667 		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
668 		if (zbit + zlen > wnd->nbits)
669 			zlen = wnd->nbits - zbit;
670 
671 		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
672 			zlen -= 1;
673 
674 		/* [zbit, zbit + zlen) will be used for MFT itself. */
675 		from = sbi->mft.used;
676 		if (from < zbit)
677 			from = zbit;
678 		to = zbit + zlen;
679 		if (from < to) {
680 			ntfs_clear_mft_tail(sbi, from, to);
681 			sbi->mft.used = to;
682 		}
683 	}
684 
685 	if (mft) {
686 		*rno = zbit;
687 		zbit += 1;
688 		zlen -= 1;
689 	}
690 
691 	wnd_zone_set(wnd, zbit, zlen);
692 
693 found:
694 	if (!mft) {
695 		/* The request is to get a record for general purposes. */
696 		if (sbi->mft.next_free < MFT_REC_USER)
697 			sbi->mft.next_free = MFT_REC_USER;
698 
699 		for (;;) {
700 			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
701 			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
702 				sbi->mft.next_free = sbi->mft.bitmap.nbits;
703 			} else {
704 				*rno = fr;
705 				sbi->mft.next_free = *rno + 1;
706 				break;
707 			}
708 
709 			err = ntfs_extend_mft(sbi);
710 			if (err)
711 				goto out;
712 		}
713 	}
714 
715 	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
716 		err = -ENOMEM;
717 		goto out;
718 	}
719 
720 	/* We have found a record that is not reserved for the next MFT. */
721 	if (*rno >= MFT_REC_FREE)
722 		wnd_set_used(wnd, *rno, 1);
723 	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
724 		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
725 
726 out:
727 	if (!mft)
728 		up_write(&wnd->rw_lock);
729 
730 	return err;
731 }
732 
733 /*
734  * ntfs_mark_rec_free - Mark record as free.
735  * is_mft - true if we are changing the MFT itself (the bitmap lock is already held).
736  */
737 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
738 {
739 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
740 
741 	if (!is_mft)
742 		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
743 	if (rno >= wnd->nbits)
744 		goto out;
745 
746 	if (rno >= MFT_REC_FREE) {
747 		if (!wnd_is_used(wnd, rno, 1))
748 			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
749 		else
750 			wnd_set_free(wnd, rno, 1);
751 	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
752 		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
753 	}
754 
755 	if (rno < wnd_zone_bit(wnd))
756 		wnd_zone_set(wnd, rno, 1);
757 	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
758 		sbi->mft.next_free = rno;
759 
760 out:
761 	if (!is_mft)
762 		up_write(&wnd->rw_lock);
763 }
764 
765 /*
766  * ntfs_clear_mft_tail - Format empty records [from, to).
767  *
768  * sbi->mft.bitmap is locked for write.
769  */
770 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
771 {
772 	int err;
773 	u32 rs;
774 	u64 vbo;
775 	struct runs_tree *run;
776 	struct ntfs_inode *ni;
777 
778 	if (from >= to)
779 		return 0;
780 
781 	rs = sbi->record_size;
782 	ni = sbi->mft.ni;
783 	run = &ni->file.run;
784 
785 	down_read(&ni->file.run_lock);
786 	vbo = (u64)from * rs;
787 	for (; from < to; from++, vbo += rs) {
788 		struct ntfs_buffers nb;
789 
790 		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
791 		if (err)
792 			goto out;
793 
794 		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
795 		nb_put(&nb);
796 		if (err)
797 			goto out;
798 	}
799 
800 out:
801 	sbi->mft.used = from;
802 	up_read(&ni->file.run_lock);
803 	return err;
804 }
805 
806 /*
807  * ntfs_refresh_zone - Refresh MFT zone.
808  *
809  * sbi->used.bitmap is locked for rw.
810  * sbi->mft.bitmap is locked for write.
811  * sbi->mft.ni->file.run_lock for write.
812  */
813 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
814 {
815 	CLST lcn, vcn, len;
816 	size_t lcn_s, zlen;
817 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
818 	struct ntfs_inode *ni = sbi->mft.ni;
819 
820 	/* Do not change anything if the MFT zone is already non-empty. */
821 	if (wnd_zone_len(wnd))
822 		return 0;
823 
824 	vcn = bytes_to_cluster(sbi,
825 			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
826 
827 	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
828 		lcn = SPARSE_LCN;
829 
830 	/* We should always find the last LCN of the MFT. */
831 	if (lcn == SPARSE_LCN)
832 		return -EINVAL;
833 
834 	lcn_s = lcn + 1;
835 
836 	/* Try to allocate clusters after last MFT run. */
837 	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
838 	wnd_zone_set(wnd, lcn_s, zlen);
839 
840 	return 0;
841 }
842 
843 /*
844  * ntfs_update_mftmirr - Update $MFTMirr data.
845  */
846 void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
847 {
848 	int err;
849 	struct super_block *sb = sbi->sb;
850 	u32 blocksize, bytes;
851 	sector_t block1, block2;
852 
853 	/*
854 	 * sb can be NULL here. In this case sbi->flags should be 0 too.
855 	 */
856 	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
857 		return;
858 
859 	blocksize = sb->s_blocksize;
860 	bytes = sbi->mft.recs_mirr << sbi->record_bits;
861 	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
862 	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
863 
864 	for (; bytes >= blocksize; bytes -= blocksize) {
865 		struct buffer_head *bh1, *bh2;
866 
867 		bh1 = sb_bread(sb, block1++);
868 		if (!bh1)
869 			return;
870 
871 		bh2 = sb_getblk(sb, block2++);
872 		if (!bh2) {
873 			put_bh(bh1);
874 			return;
875 		}
876 
877 		if (buffer_locked(bh2))
878 			__wait_on_buffer(bh2);
879 
880 		lock_buffer(bh2);
881 		memcpy(bh2->b_data, bh1->b_data, blocksize);
882 		set_buffer_uptodate(bh2);
883 		mark_buffer_dirty(bh2);
884 		unlock_buffer(bh2);
885 
886 		put_bh(bh1);
887 		bh1 = NULL;
888 
889 		err = wait ? sync_dirty_buffer(bh2) : 0;
890 
891 		put_bh(bh2);
892 		if (err)
893 			return;
894 	}
895 
896 	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
897 }
898 
899 /*
900  * ntfs_bad_inode
901  *
902  * Mark the inode as bad and mark the fs as 'dirty'.
903  */
904 void ntfs_bad_inode(struct inode *inode, const char *hint)
905 {
906 	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
907 
908 	ntfs_inode_err(inode, "%s", hint);
909 	make_bad_inode(inode);
910 	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
911 }
912 
913 /*
914  * ntfs_set_state
915  *
916  * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
917  * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
918  * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
919  */
920 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
921 {
922 	int err;
923 	struct ATTRIB *attr;
924 	struct VOLUME_INFO *info;
925 	struct mft_inode *mi;
926 	struct ntfs_inode *ni;
927 	__le16 info_flags;
928 
929 	/*
930 	 * Do not change the state if the fs was real_dirty.
931 	 * Do not change the state if the fs is already dirty (clear).
932 	 * Do not change anything if mounted read-only.
933 	 */
934 	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
935 		return 0;
936 
937 	/* Check cached value. */
938 	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
939 	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
940 		return 0;
941 
942 	ni = sbi->volume.ni;
943 	if (!ni)
944 		return -EINVAL;
945 
946 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
947 
948 	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
949 	if (!attr) {
950 		err = -EINVAL;
951 		goto out;
952 	}
953 
954 	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
955 	if (!info) {
956 		err = -EINVAL;
957 		goto out;
958 	}
959 
960 	info_flags = info->flags;
961 
962 	switch (dirty) {
963 	case NTFS_DIRTY_ERROR:
964 		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
965 		sbi->volume.real_dirty = true;
966 		fallthrough;
967 	case NTFS_DIRTY_DIRTY:
968 		info->flags |= VOLUME_FLAG_DIRTY;
969 		break;
970 	case NTFS_DIRTY_CLEAR:
971 		info->flags &= ~VOLUME_FLAG_DIRTY;
972 		break;
973 	}
974 	/* Cache current volume flags. */
975 	if (info_flags != info->flags) {
976 		sbi->volume.flags = info->flags;
977 		mi->dirty = true;
978 	}
979 	err = 0;
980 
981 out:
982 	ni_unlock(ni);
983 	if (err)
984 		return err;
985 
986 	mark_inode_dirty(&ni->vfs_inode);
987 	/* verify(!ntfs_update_mftmirr()); */
988 
989 	/*
990 	 * If we used wait=1, sync_inode_metadata would wait for the inode's
991 	 * I/O to finish, which hangs when the media is removed.
992 	 * So wait=0 is passed down to sync_inode_metadata
993 	 * and filemap_fdatawrite is used for the data blocks.
994 	 */
995 	err = sync_inode_metadata(&ni->vfs_inode, 0);
996 	if (!err)
997 		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
998 
999 	return err;
1000 }
1001 
1002 /*
1003  * security_hash - Calculate the hash of a security descriptor.
1004  */
1005 static inline __le32 security_hash(const void *sd, size_t bytes)
1006 {
1007 	u32 hash = 0;
1008 	const __le32 *ptr = sd;
1009 
1010 	bytes >>= 2;
1011 	while (bytes--)
1012 		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
1013 	return cpu_to_le32(hash);
1014 }
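
/*
 * The expression above is a rotate: ((hash >> 29) | (hash << 3)) is
 * rol32(hash, 3), so the hash is "rotate left by 3, then add the next
 * little-endian dword". For a buffer of two dwords a and b:
 *
 *   hash = rol32(0, 3) + a = a;
 *   hash = rol32(a, 3) + b;
 *
 * Note that 'bytes' is truncated down to a multiple of 4, so trailing
 * bytes do not contribute to the hash.
 */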
1015 
1016 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
1017 {
1018 	struct block_device *bdev = sb->s_bdev;
1019 	u32 blocksize = sb->s_blocksize;
1020 	u64 block = lbo >> sb->s_blocksize_bits;
1021 	u32 off = lbo & (blocksize - 1);
1022 	u32 op = blocksize - off;
1023 
1024 	for (; bytes; block += 1, off = 0, op = blocksize) {
1025 		struct buffer_head *bh = __bread(bdev, block, blocksize);
1026 
1027 		if (!bh)
1028 			return -EIO;
1029 
1030 		if (op > bytes)
1031 			op = bytes;
1032 
1033 		memcpy(buffer, bh->b_data + off, op);
1034 
1035 		put_bh(bh);
1036 
1037 		bytes -= op;
1038 		buffer = Add2Ptr(buffer, op);
1039 	}
1040 
1041 	return 0;
1042 }
1043 
1044 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1045 		  const void *buf, int wait)
1046 {
1047 	u32 blocksize = sb->s_blocksize;
1048 	struct block_device *bdev = sb->s_bdev;
1049 	sector_t block = lbo >> sb->s_blocksize_bits;
1050 	u32 off = lbo & (blocksize - 1);
1051 	u32 op = blocksize - off;
1052 	struct buffer_head *bh;
1053 
1054 	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1055 		wait = 1;
1056 
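	/*
	 * Partial blocks need read-modify-write (__bread) so that bytes
	 * outside [off, off + op) are preserved; full blocks are taken with
	 * __getblk and overwritten outright. A NULL @buf fills the range
	 * with 0xFF bytes instead of copying.
	 */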
1057 	for (; bytes; block += 1, off = 0, op = blocksize) {
1058 		if (op > bytes)
1059 			op = bytes;
1060 
1061 		if (op < blocksize) {
1062 			bh = __bread(bdev, block, blocksize);
1063 			if (!bh) {
1064 				ntfs_err(sb, "failed to read block %llx",
1065 					 (u64)block);
1066 				return -EIO;
1067 			}
1068 		} else {
1069 			bh = __getblk(bdev, block, blocksize);
1070 			if (!bh)
1071 				return -ENOMEM;
1072 		}
1073 
1074 		if (buffer_locked(bh))
1075 			__wait_on_buffer(bh);
1076 
1077 		lock_buffer(bh);
1078 		if (buf) {
1079 			memcpy(bh->b_data + off, buf, op);
1080 			buf = Add2Ptr(buf, op);
1081 		} else {
1082 			memset(bh->b_data + off, -1, op);
1083 		}
1084 
1085 		set_buffer_uptodate(bh);
1086 		mark_buffer_dirty(bh);
1087 		unlock_buffer(bh);
1088 
1089 		if (wait) {
1090 			int err = sync_dirty_buffer(bh);
1091 
1092 			if (err) {
1093 				ntfs_err(
1094 					sb,
1095 					"failed to sync buffer at block %llx, error %d",
1096 					(u64)block, err);
1097 				put_bh(bh);
1098 				return err;
1099 			}
1100 		}
1101 
1102 		put_bh(bh);
1103 
1104 		bytes -= op;
1105 	}
1106 	return 0;
1107 }
1108 
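/*
 * The *_run helpers below walk an NTFS runlist: an array of extents that
 * maps virtual cluster numbers (VCN) to logical cluster numbers (LCN).
 * The common pattern is run_lookup_entry() for the first extent, then
 * run_get_entry() with a consecutive-VCN check for each following extent;
 * a VCN gap means the runlist is incomplete (-ENOENT), and SPARSE_LCN
 * marks a hole with no disk location (-EINVAL here).
 */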
1109 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1110 		      u64 vbo, const void *buf, size_t bytes, int sync)
1111 {
1112 	struct super_block *sb = sbi->sb;
1113 	u8 cluster_bits = sbi->cluster_bits;
1114 	u32 off = vbo & sbi->cluster_mask;
1115 	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1116 	u64 lbo, len;
1117 	size_t idx;
1118 
1119 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1120 		return -ENOENT;
1121 
1122 	if (lcn == SPARSE_LCN)
1123 		return -EINVAL;
1124 
1125 	lbo = ((u64)lcn << cluster_bits) + off;
1126 	len = ((u64)clen << cluster_bits) - off;
1127 
1128 	for (;;) {
1129 		u32 op = min_t(u64, len, bytes);
1130 		int err = ntfs_sb_write(sb, lbo, op, buf, sync);
1131 
1132 		if (err)
1133 			return err;
1134 
1135 		bytes -= op;
1136 		if (!bytes)
1137 			break;
1138 
1139 		vcn_next = vcn + clen;
1140 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1141 		    vcn != vcn_next)
1142 			return -ENOENT;
1143 
1144 		if (lcn == SPARSE_LCN)
1145 			return -EINVAL;
1146 
1147 		if (buf)
1148 			buf = Add2Ptr(buf, op);
1149 
1150 		lbo = ((u64)lcn << cluster_bits);
1151 		len = ((u64)clen << cluster_bits);
1152 	}
1153 
1154 	return 0;
1155 }
1156 
1157 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1158 				   const struct runs_tree *run, u64 vbo)
1159 {
1160 	struct super_block *sb = sbi->sb;
1161 	u8 cluster_bits = sbi->cluster_bits;
1162 	CLST lcn;
1163 	u64 lbo;
1164 
1165 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1166 		return ERR_PTR(-ENOENT);
1167 
1168 	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1169 
1170 	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1171 }
1172 
1173 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1174 		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1175 {
1176 	int err;
1177 	struct super_block *sb = sbi->sb;
1178 	u32 blocksize = sb->s_blocksize;
1179 	u8 cluster_bits = sbi->cluster_bits;
1180 	u32 off = vbo & sbi->cluster_mask;
1181 	u32 nbh = 0;
1182 	CLST vcn_next, vcn = vbo >> cluster_bits;
1183 	CLST lcn, clen;
1184 	u64 lbo, len;
1185 	size_t idx;
1186 	struct buffer_head *bh;
1187 
1188 	if (!run) {
1189 		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1190 		if (vbo > MFT_REC_VOL * sbi->record_size) {
1191 			err = -ENOENT;
1192 			goto out;
1193 		}
1194 
1195 		/* Use the boot block's absolute 'MFTCluster' to read the record. */
1196 		lbo = vbo + sbi->mft.lbo;
1197 		len = sbi->record_size;
1198 	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1199 		err = -ENOENT;
1200 		goto out;
1201 	} else {
1202 		if (lcn == SPARSE_LCN) {
1203 			err = -EINVAL;
1204 			goto out;
1205 		}
1206 
1207 		lbo = ((u64)lcn << cluster_bits) + off;
1208 		len = ((u64)clen << cluster_bits) - off;
1209 	}
1210 
1211 	off = lbo & (blocksize - 1);
1212 	if (nb) {
1213 		nb->off = off;
1214 		nb->bytes = bytes;
1215 	}
1216 
1217 	for (;;) {
1218 		u32 len32 = len >= bytes ? bytes : len;
1219 		sector_t block = lbo >> sb->s_blocksize_bits;
1220 
1221 		do {
1222 			u32 op = blocksize - off;
1223 
1224 			if (op > len32)
1225 				op = len32;
1226 
1227 			bh = ntfs_bread(sb, block);
1228 			if (!bh) {
1229 				err = -EIO;
1230 				goto out;
1231 			}
1232 
1233 			if (buf) {
1234 				memcpy(buf, bh->b_data + off, op);
1235 				buf = Add2Ptr(buf, op);
1236 			}
1237 
1238 			if (!nb) {
1239 				put_bh(bh);
1240 			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
1241 				err = -EINVAL;
1242 				goto out;
1243 			} else {
1244 				nb->bh[nbh++] = bh;
1245 				nb->nbufs = nbh;
1246 			}
1247 
1248 			bytes -= op;
1249 			if (!bytes)
1250 				return 0;
1251 			len32 -= op;
1252 			block += 1;
1253 			off = 0;
1254 
1255 		} while (len32);
1256 
1257 		vcn_next = vcn + clen;
1258 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1259 		    vcn != vcn_next) {
1260 			err = -ENOENT;
1261 			goto out;
1262 		}
1263 
1264 		if (lcn == SPARSE_LCN) {
1265 			err = -EINVAL;
1266 			goto out;
1267 		}
1268 
1269 		lbo = ((u64)lcn << cluster_bits);
1270 		len = ((u64)clen << cluster_bits);
1271 	}
1272 
1273 out:
1274 	if (!nbh)
1275 		return err;
1276 
1277 	while (nbh) {
1278 		put_bh(nb->bh[--nbh]);
1279 		nb->bh[nbh] = NULL;
1280 	}
1281 
1282 	nb->nbufs = 0;
1283 	return err;
1284 }
1285 
1286 /*
1287  * ntfs_read_bh
1288  *
1289  * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1290  */
1291 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1292 		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1293 		 struct ntfs_buffers *nb)
1294 {
1295 	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1296 
1297 	if (err)
1298 		return err;
1299 	return ntfs_fix_post_read(rhdr, nb->bytes, true);
1300 }
1301 
1302 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1303 		u32 bytes, struct ntfs_buffers *nb)
1304 {
1305 	int err = 0;
1306 	struct super_block *sb = sbi->sb;
1307 	u32 blocksize = sb->s_blocksize;
1308 	u8 cluster_bits = sbi->cluster_bits;
1309 	CLST vcn_next, vcn = vbo >> cluster_bits;
1310 	u32 off;
1311 	u32 nbh = 0;
1312 	CLST lcn, clen;
1313 	u64 lbo, len;
1314 	size_t idx;
1315 
1316 	nb->bytes = bytes;
1317 
1318 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1319 		err = -ENOENT;
1320 		goto out;
1321 	}
1322 
1323 	off = vbo & sbi->cluster_mask;
1324 	lbo = ((u64)lcn << cluster_bits) + off;
1325 	len = ((u64)clen << cluster_bits) - off;
1326 
1327 	nb->off = off = lbo & (blocksize - 1);
1328 
1329 	for (;;) {
1330 		u32 len32 = min_t(u64, len, bytes);
1331 		sector_t block = lbo >> sb->s_blocksize_bits;
1332 
1333 		do {
1334 			u32 op;
1335 			struct buffer_head *bh;
1336 
1337 			if (nbh >= ARRAY_SIZE(nb->bh)) {
1338 				err = -EINVAL;
1339 				goto out;
1340 			}
1341 
1342 			op = blocksize - off;
1343 			if (op > len32)
1344 				op = len32;
1345 
1346 			if (op == blocksize) {
1347 				bh = sb_getblk(sb, block);
1348 				if (!bh) {
1349 					err = -ENOMEM;
1350 					goto out;
1351 				}
1352 				if (buffer_locked(bh))
1353 					__wait_on_buffer(bh);
1354 				set_buffer_uptodate(bh);
1355 			} else {
1356 				bh = ntfs_bread(sb, block);
1357 				if (!bh) {
1358 					err = -EIO;
1359 					goto out;
1360 				}
1361 			}
1362 
1363 			nb->bh[nbh++] = bh;
1364 			bytes -= op;
1365 			if (!bytes) {
1366 				nb->nbufs = nbh;
1367 				return 0;
1368 			}
1369 
1370 			block += 1;
1371 			len32 -= op;
1372 			off = 0;
1373 		} while (len32);
1374 
1375 		vcn_next = vcn + clen;
1376 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1377 		    vcn != vcn_next) {
1378 			err = -ENOENT;
1379 			goto out;
1380 		}
1381 
1382 		lbo = ((u64)lcn << cluster_bits);
1383 		len = ((u64)clen << cluster_bits);
1384 	}
1385 
1386 out:
1387 	while (nbh) {
1388 		put_bh(nb->bh[--nbh]);
1389 		nb->bh[nbh] = NULL;
1390 	}
1391 
1392 	nb->nbufs = 0;
1393 
1394 	return err;
1395 }
1396 
1397 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1398 		  struct ntfs_buffers *nb, int sync)
1399 {
1400 	int err = 0;
1401 	struct super_block *sb = sbi->sb;
1402 	u32 block_size = sb->s_blocksize;
1403 	u32 bytes = nb->bytes;
1404 	u32 off = nb->off;
1405 	u16 fo = le16_to_cpu(rhdr->fix_off);
1406 	u16 fn = le16_to_cpu(rhdr->fix_num);
1407 	u32 idx;
1408 	__le16 *fixup;
1409 	__le16 sample;
1410 
1411 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1412 	    fn * SECTOR_SIZE > bytes) {
1413 		return -EINVAL;
1414 	}
1415 
1416 	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1417 		u32 op = block_size - off;
1418 		char *bh_data;
1419 		struct buffer_head *bh = nb->bh[idx];
1420 		__le16 *ptr, *end_data;
1421 
1422 		if (op > bytes)
1423 			op = bytes;
1424 
1425 		if (buffer_locked(bh))
1426 			__wait_on_buffer(bh);
1427 
1428 		lock_buffer(bh);
1429 
1430 		bh_data = bh->b_data + off;
1431 		end_data = Add2Ptr(bh_data, op);
1432 		memcpy(bh_data, rhdr, op);
1433 
1434 		if (!idx) {
1435 			u16 t16;
1436 
1437 			fixup = Add2Ptr(bh_data, fo);
1438 			sample = *fixup;
1439 			t16 = le16_to_cpu(sample);
1440 			if (t16 >= 0x7FFF) {
1441 				sample = *fixup = cpu_to_le16(1);
1442 			} else {
1443 				sample = cpu_to_le16(t16 + 1);
1444 				*fixup = sample;
1445 			}
1446 
1447 			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
1448 		}
1449 
1450 		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1451 
1452 		do {
1453 			*++fixup = *ptr;
1454 			*ptr = sample;
1455 			ptr += SECTOR_SIZE / sizeof(short);
1456 		} while (ptr < end_data);
1457 
1458 		set_buffer_uptodate(bh);
1459 		mark_buffer_dirty(bh);
1460 		unlock_buffer(bh);
1461 
1462 		if (sync) {
1463 			int err2 = sync_dirty_buffer(bh);
1464 
1465 			if (!err && err2)
1466 				err = err2;
1467 		}
1468 
1469 		bytes -= op;
1470 		rhdr = Add2Ptr(rhdr, op);
1471 	}
1472 
1473 	return err;
1474 }
1475 
1476 /*
1477  * ntfs_bio_pages - Read/write pages from/to disk.
1478  */
1479 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1480 		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1481 		   enum req_op op)
1482 {
1483 	int err = 0;
1484 	struct bio *new, *bio = NULL;
1485 	struct super_block *sb = sbi->sb;
1486 	struct block_device *bdev = sb->s_bdev;
1487 	struct page *page;
1488 	u8 cluster_bits = sbi->cluster_bits;
1489 	CLST lcn, clen, vcn, vcn_next;
1490 	u32 add, off, page_idx;
1491 	u64 lbo, len;
1492 	size_t run_idx;
1493 	struct blk_plug plug;
1494 
1495 	if (!bytes)
1496 		return 0;
1497 
1498 	blk_start_plug(&plug);
1499 
1500 	/* Align vbo and bytes to 512-byte boundaries. */
1501 	lbo = (vbo + bytes + 511) & ~511ull;
1502 	vbo = vbo & ~511ull;
1503 	bytes = lbo - vbo;
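	/*
	 * E.g. vbo = 0x3f0, bytes = 0x30: the end 0x420 rounds up to 0x600,
	 * vbo rounds down to 0x200, and bytes becomes 0x400.
	 */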
1504 
1505 	vcn = vbo >> cluster_bits;
1506 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1507 		err = -ENOENT;
1508 		goto out;
1509 	}
1510 	off = vbo & sbi->cluster_mask;
1511 	page_idx = 0;
1512 	page = pages[0];
1513 
1514 	for (;;) {
1515 		lbo = ((u64)lcn << cluster_bits) + off;
1516 		len = ((u64)clen << cluster_bits) - off;
1517 new_bio:
1518 		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
1519 		if (bio) {
1520 			bio_chain(bio, new);
1521 			submit_bio(bio);
1522 		}
1523 		bio = new;
1524 		bio->bi_iter.bi_sector = lbo >> 9;
1525 
1526 		while (len) {
1527 			off = vbo & (PAGE_SIZE - 1);
1528 			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
1529 
1530 			if (bio_add_page(bio, page, add, off) < add)
1531 				goto new_bio;
1532 
1533 			if (bytes <= add)
1534 				goto out;
1535 			bytes -= add;
1536 			vbo += add;
1537 
1538 			if (add + off == PAGE_SIZE) {
1539 				page_idx += 1;
1540 				if (WARN_ON(page_idx >= nr_pages)) {
1541 					err = -EINVAL;
1542 					goto out;
1543 				}
1544 				page = pages[page_idx];
1545 			}
1546 
1547 			if (len <= add)
1548 				break;
1549 			len -= add;
1550 			lbo += add;
1551 		}
1552 
1553 		vcn_next = vcn + clen;
1554 		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1555 		    vcn != vcn_next) {
1556 			err = -ENOENT;
1557 			goto out;
1558 		}
1559 		off = 0;
1560 	}
1561 out:
1562 	if (bio) {
1563 		if (!err)
1564 			err = submit_bio_wait(bio);
1565 		bio_put(bio);
1566 	}
1567 	blk_finish_plug(&plug);
1568 
1569 	return err;
1570 }
1571 
1572 /*
1573  * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1574  *
1575  * Fill the on-disk logfile range with -1;
1576  * this marks the logfile as empty.
1577  */
1578 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1579 {
1580 	int err = 0;
1581 	struct super_block *sb = sbi->sb;
1582 	struct block_device *bdev = sb->s_bdev;
1583 	u8 cluster_bits = sbi->cluster_bits;
1584 	struct bio *new, *bio = NULL;
1585 	CLST lcn, clen;
1586 	u64 lbo, len;
1587 	size_t run_idx;
1588 	struct page *fill;
1589 	void *kaddr;
1590 	struct blk_plug plug;
1591 
1592 	fill = alloc_page(GFP_KERNEL);
1593 	if (!fill)
1594 		return -ENOMEM;
1595 
1596 	kaddr = kmap_atomic(fill);
1597 	memset(kaddr, -1, PAGE_SIZE);
1598 	kunmap_atomic(kaddr);
1599 	flush_dcache_page(fill);
1600 	lock_page(fill);
1601 
1602 	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1603 		err = -ENOENT;
1604 		goto out;
1605 	}
1606 
1607 	/*
1608 	 * TODO: Try blkdev_issue_write_same.
1609 	 */
1610 	blk_start_plug(&plug);
1611 	do {
1612 		lbo = (u64)lcn << cluster_bits;
1613 		len = (u64)clen << cluster_bits;
1614 new_bio:
1615 		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
1616 		if (bio) {
1617 			bio_chain(bio, new);
1618 			submit_bio(bio);
1619 		}
1620 		bio = new;
1621 		bio->bi_iter.bi_sector = lbo >> 9;
1622 
1623 		for (;;) {
1624 			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1625 
1626 			if (bio_add_page(bio, fill, add, 0) < add)
1627 				goto new_bio;
1628 
1629 			lbo += add;
1630 			if (len <= add)
1631 				break;
1632 			len -= add;
1633 		}
1634 	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1635 
1636 	if (!err)
1637 		err = submit_bio_wait(bio);
1638 	bio_put(bio);
1639 
1640 	blk_finish_plug(&plug);
1641 out:
1642 	unlock_page(fill);
1643 	put_page(fill);
1644 
1645 	return err;
1646 }
1647 
1648 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1649 		    u64 vbo, u64 *lbo, u64 *bytes)
1650 {
1651 	u32 off;
1652 	CLST lcn, len;
1653 	u8 cluster_bits = sbi->cluster_bits;
1654 
1655 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1656 		return -ENOENT;
1657 
1658 	off = vbo & sbi->cluster_mask;
1659 	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1660 	*bytes = ((u64)len << cluster_bits) - off;
1661 
1662 	return 0;
1663 }
1664 
1665 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
1666 				  enum RECORD_FLAG flag)
1667 {
1668 	int err = 0;
1669 	struct super_block *sb = sbi->sb;
1670 	struct inode *inode = new_inode(sb);
1671 	struct ntfs_inode *ni;
1672 
1673 	if (!inode)
1674 		return ERR_PTR(-ENOMEM);
1675 
1676 	ni = ntfs_i(inode);
1677 
1678 	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
1679 	if (err)
1680 		goto out;
1681 
1682 	inode->i_ino = rno;
1683 	if (insert_inode_locked(inode) < 0) {
1684 		err = -EIO;
1685 		goto out;
1686 	}
1687 
1688 out:
1689 	if (err) {
1690 		make_bad_inode(inode);
1691 		iput(inode);
1692 		ni = ERR_PTR(err);
1693 	}
1694 	return ni;
1695 }
1696 
1697 /*
1698  * O:BAG:BAD:(A;OICI;FA;;;WD)
1699  * Owner S-1-5-32-544 (Administrators)
1700  * Group S-1-5-32-544 (Administrators)
1701  * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1702  */
1703 const u8 s_default_security[] __aligned(8) = {
1704 	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1705 	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1706 	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1707 	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1708 	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1709 	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1710 	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1711 };
1712 
1713 static_assert(sizeof(s_default_security) == 0x50);
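
/*
 * Decoded layout of s_default_security (self-relative format):
 *   0x00: Revision 1, Control 0x8004 (SE_DACL_PRESENT | SE_SELF_RELATIVE)
 *   0x04: Owner offset 0x30, Group offset 0x40, Sacl 0, Dacl offset 0x14
 *   0x14: ACL (revision 2, size 0x1C) with one ACCESS_ALLOWED ACE,
 *         flags OICI, mask 0x001F01FF (FILE_ALL_ACCESS), SID S-1-1-0
 *   0x30: SID S-1-5-32-544 (Administrators), repeated at 0x40 as the group
 */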
1714 
1715 static inline u32 sid_length(const struct SID *sid)
1716 {
1717 	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1718 }
1719 
1720 /*
1721  * is_acl_valid
1722  *
1723  * Thanks to Mark Harmstone for the idea.
1724  */
1725 static bool is_acl_valid(const struct ACL *acl, u32 len)
1726 {
1727 	const struct ACE_HEADER *ace;
1728 	u32 i;
1729 	u16 ace_count, ace_size;
1730 
1731 	if (acl->AclRevision != ACL_REVISION &&
1732 	    acl->AclRevision != ACL_REVISION_DS) {
1733 		/*
1734 		 * This value should be ACL_REVISION, unless the ACL contains an
1735 		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1736 		 * All ACEs in an ACL must be at the same revision level.
1737 		 */
1738 		return false;
1739 	}
1740 
1741 	if (acl->Sbz1)
1742 		return false;
1743 
1744 	if (le16_to_cpu(acl->AclSize) > len)
1745 		return false;
1746 
1747 	if (acl->Sbz2)
1748 		return false;
1749 
1750 	len -= sizeof(struct ACL);
1751 	ace = (struct ACE_HEADER *)&acl[1];
1752 	ace_count = le16_to_cpu(acl->AceCount);
1753 
1754 	for (i = 0; i < ace_count; i++) {
1755 		if (len < sizeof(struct ACE_HEADER))
1756 			return false;
1757 
1758 		ace_size = le16_to_cpu(ace->AceSize);
1759 		if (len < ace_size)
1760 			return false;
1761 
1762 		len -= ace_size;
1763 		ace = Add2Ptr(ace, ace_size);
1764 	}
1765 
1766 	return true;
1767 }
1768 
1769 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1770 {
1771 	u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1772 
1773 	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1774 		return false;
1775 
1776 	if (sd->Revision != 1)
1777 		return false;
1778 
1779 	if (sd->Sbz1)
1780 		return false;
1781 
1782 	if (!(sd->Control & SE_SELF_RELATIVE))
1783 		return false;
1784 
1785 	sd_owner = le32_to_cpu(sd->Owner);
1786 	if (sd_owner) {
1787 		const struct SID *owner = Add2Ptr(sd, sd_owner);
1788 
1789 		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1790 			return false;
1791 
1792 		if (owner->Revision != 1)
1793 			return false;
1794 
1795 		if (sd_owner + sid_length(owner) > len)
1796 			return false;
1797 	}
1798 
1799 	sd_group = le32_to_cpu(sd->Group);
1800 	if (sd_group) {
1801 		const struct SID *group = Add2Ptr(sd, sd_group);
1802 
1803 		if (sd_group + offsetof(struct SID, SubAuthority) > len)
1804 			return false;
1805 
1806 		if (group->Revision != 1)
1807 			return false;
1808 
1809 		if (sd_group + sid_length(group) > len)
1810 			return false;
1811 	}
1812 
1813 	sd_sacl = le32_to_cpu(sd->Sacl);
1814 	if (sd_sacl) {
1815 		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1816 
1817 		if (sd_sacl + sizeof(struct ACL) > len)
1818 			return false;
1819 
1820 		if (!is_acl_valid(sacl, len - sd_sacl))
1821 			return false;
1822 	}
1823 
1824 	sd_dacl = le32_to_cpu(sd->Dacl);
1825 	if (sd_dacl) {
1826 		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1827 
1828 		if (sd_dacl + sizeof(struct ACL) > len)
1829 			return false;
1830 
1831 		if (!is_acl_valid(dacl, len - sd_dacl))
1832 			return false;
1833 	}
1834 
1835 	return true;
1836 }
1837 
1838 /*
1839  * ntfs_security_init - Load and parse $Secure.
1840  */
1841 int ntfs_security_init(struct ntfs_sb_info *sbi)
1842 {
1843 	int err;
1844 	struct super_block *sb = sbi->sb;
1845 	struct inode *inode;
1846 	struct ntfs_inode *ni;
1847 	struct MFT_REF ref;
1848 	struct ATTRIB *attr;
1849 	struct ATTR_LIST_ENTRY *le;
1850 	u64 sds_size;
1851 	size_t off;
1852 	struct NTFS_DE *ne;
1853 	struct NTFS_DE_SII *sii_e;
1854 	struct ntfs_fnd *fnd_sii = NULL;
1855 	const struct INDEX_ROOT *root_sii;
1856 	const struct INDEX_ROOT *root_sdh;
1857 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1858 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
1859 
1860 	ref.low = cpu_to_le32(MFT_REC_SECURE);
1861 	ref.high = 0;
1862 	ref.seq = cpu_to_le16(MFT_REC_SECURE);
1863 
1864 	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1865 	if (IS_ERR(inode)) {
1866 		err = PTR_ERR(inode);
1867 		ntfs_err(sb, "Failed to load $Secure (%d).", err);
1868 		inode = NULL;
1869 		goto out;
1870 	}
1871 
1872 	ni = ntfs_i(inode);
1873 
1874 	le = NULL;
1875 
1876 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1877 			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
1878 	if (!attr ||
1879 	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1880 	    root_sdh->type != ATTR_ZERO ||
1881 	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
1882 	    offsetof(struct INDEX_ROOT, ihdr) +
1883 			    le32_to_cpu(root_sdh->ihdr.used) >
1884 		    le32_to_cpu(attr->res.data_size)) {
1885 		ntfs_err(sb, "$Secure::$SDH is corrupted.");
1886 		err = -EINVAL;
1887 		goto out;
1888 	}
1889 
1890 	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1891 	if (err) {
1892 		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
1893 		goto out;
1894 	}
1895 
1896 	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1897 			    ARRAY_SIZE(SII_NAME), NULL, NULL);
1898 	if (!attr ||
1899 	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1900 	    root_sii->type != ATTR_ZERO ||
1901 	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
1902 	    offsetof(struct INDEX_ROOT, ihdr) +
1903 			    le32_to_cpu(root_sii->ihdr.used) >
1904 		    le32_to_cpu(attr->res.data_size)) {
1905 		ntfs_err(sb, "$Secure::$SII is corrupted.");
1906 		err = -EINVAL;
1907 		goto out;
1908 	}
1909 
1910 	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1911 	if (err) {
1912 		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
1913 		goto out;
1914 	}
1915 
1916 	fnd_sii = fnd_get();
1917 	if (!fnd_sii) {
1918 		err = -ENOMEM;
1919 		goto out;
1920 	}
1921 
1922 	sds_size = inode->i_size;
1923 
1924 	/* Find the last valid Id. */
1925 	sbi->security.next_id = SECURITY_ID_FIRST;
1926 	/* Always write new security at the end of bucket. */
1927 	/* Always write new security at the end of the bucket. */
1928 		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1929 
1930 	off = 0;
1931 	ne = NULL;
1932 
1933 	for (;;) {
1934 		u32 next_id;
1935 
1936 		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1937 		if (err || !ne)
1938 			break;
1939 
1940 		sii_e = (struct NTFS_DE_SII *)ne;
1941 		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
1942 			continue;
1943 
1944 		next_id = le32_to_cpu(sii_e->sec_id) + 1;
1945 		if (next_id >= sbi->security.next_id)
1946 			sbi->security.next_id = next_id;
1947 	}
1948 
1949 	sbi->security.ni = ni;
1950 	inode = NULL;
1951 out:
1952 	iput(inode);
1953 	fnd_put(fnd_sii);
1954 
1955 	return err;
1956 }
1957 
1958 /*
1959  * ntfs_get_security_by_id - Read security descriptor by id.
1960  */
1961 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1962 			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1963 			    size_t *size)
1964 {
1965 	int err;
1966 	int diff;
1967 	struct ntfs_inode *ni = sbi->security.ni;
1968 	struct ntfs_index *indx = &sbi->security.index_sii;
1969 	void *p = NULL;
1970 	struct NTFS_DE_SII *sii_e;
1971 	struct ntfs_fnd *fnd_sii;
1972 	struct SECURITY_HDR d_security;
1973 	const struct INDEX_ROOT *root_sii;
1974 	u32 t32;
1975 
1976 	*sd = NULL;
1977 
1978 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1979 
1980 	fnd_sii = fnd_get();
1981 	if (!fnd_sii) {
1982 		err = -ENOMEM;
1983 		goto out;
1984 	}
1985 
1986 	root_sii = indx_get_root(indx, ni, NULL, NULL);
1987 	if (!root_sii) {
1988 		err = -EINVAL;
1989 		goto out;
1990 	}
1991 
1992 	/* Try to find this SECURITY descriptor in SII indexes. */
1993 	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1994 			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
1995 	if (err)
1996 		goto out;
1997 
1998 	if (diff)
1999 		goto out;
2000 
2001 	t32 = le32_to_cpu(sii_e->sec_hdr.size);
2002 	if (t32 < sizeof(struct SECURITY_HDR)) {
2003 		err = -EINVAL;
2004 		goto out;
2005 	}
2006 
2007 	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
2008 		/* The security descriptor looks too big; 0x10000 is an arbitrary large limit. */
2009 		err = -EFBIG;
2010 		goto out;
2011 	}
2012 
2013 	*size = t32 - sizeof(struct SECURITY_HDR);
2014 
2015 	p = kmalloc(*size, GFP_NOFS);
2016 	if (!p) {
2017 		err = -ENOMEM;
2018 		goto out;
2019 	}
2020 
2021 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2022 			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2023 			       sizeof(d_security), NULL);
2024 	if (err)
2025 		goto out;
2026 
2027 	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
2028 		err = -EINVAL;
2029 		goto out;
2030 	}
2031 
2032 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2033 			       le64_to_cpu(sii_e->sec_hdr.off) +
2034 				       sizeof(struct SECURITY_HDR),
2035 			       p, *size, NULL);
2036 	if (err)
2037 		goto out;
2038 
2039 	*sd = p;
2040 	p = NULL;
2041 
2042 out:
2043 	kfree(p);
2044 	fnd_put(fnd_sii);
2045 	ni_unlock(ni);
2046 
2047 	return err;
2048 }
2049 
2050 /*
2051  * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2052  *
2053  * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2054  * and it contains a mirror copy of each security descriptor.  When writing
2055  * to a security descriptor at location X, another copy will be written at
2056  * location (X+256K).
2057  * When writing a security descriptor that will cross the 256K boundary,
2058  * the pointer will be advanced by 256K to skip
2059  * over the mirror portion.
2060  */
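/*
 * Layout implied by the comment above (buckets of 256K):
 *
 *   [bucket N: live data][bucket N+1: mirror of N][bucket N+2: live]...
 *
 * sbi->security.next_off always points into a live bucket, and every
 * write below lands twice: at next_off and at next_off + 256K.
 */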
2061 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2062 			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2063 			 u32 size_sd, __le32 *security_id, bool *inserted)
2064 {
2065 	int err, diff;
2066 	struct ntfs_inode *ni = sbi->security.ni;
2067 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2068 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
2069 	struct NTFS_DE_SDH *e;
2070 	struct NTFS_DE_SDH sdh_e;
2071 	struct NTFS_DE_SII sii_e;
2072 	struct SECURITY_HDR *d_security;
2073 	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
2074 	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2075 	struct SECURITY_KEY hash_key;
2076 	struct ntfs_fnd *fnd_sdh = NULL;
2077 	const struct INDEX_ROOT *root_sdh;
2078 	const struct INDEX_ROOT *root_sii;
2079 	u64 mirr_off, new_sds_size;
2080 	u32 next, left;
2081 
2082 	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2083 		      SecurityDescriptorsBlockSize);
2084 
2085 	hash_key.hash = security_hash(sd, size_sd);
2086 	hash_key.sec_id = SECURITY_ID_INVALID;
2087 
2088 	if (inserted)
2089 		*inserted = false;
2090 	*security_id = SECURITY_ID_INVALID;
2091 
2092 	/* Allocate a temporary buffer. */
2093 	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2094 	if (!d_security)
2095 		return -ENOMEM;
2096 
2097 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2098 
2099 	fnd_sdh = fnd_get();
2100 	if (!fnd_sdh) {
2101 		err = -ENOMEM;
2102 		goto out;
2103 	}
2104 
2105 	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2106 	if (!root_sdh) {
2107 		err = -EINVAL;
2108 		goto out;
2109 	}
2110 
2111 	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2112 	if (!root_sii) {
2113 		err = -EINVAL;
2114 		goto out;
2115 	}
2116 
2117 	/*
2118 	 * Check if such security already exists.
2119 	 * Use the "$SDH" index and the hash to get the offset in "$SDS".
2120 	 */
2121 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2122 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2123 			fnd_sdh);
2124 	if (err)
2125 		goto out;
2126 
2127 	while (e) {
2128 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2129 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2130 					       le64_to_cpu(e->sec_hdr.off),
2131 					       d_security, new_sec_size, NULL);
2132 			if (err)
2133 				goto out;
2134 
2135 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2136 			    d_security->key.hash == hash_key.hash &&
2137 			    !memcmp(d_security + 1, sd, size_sd)) {
2138 				*security_id = d_security->key.sec_id;
2139 				/* Such security already exists. */
2140 				err = 0;
2141 				goto out;
2142 			}
2143 		}
2144 
2145 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2146 				     (struct NTFS_DE **)&e, fnd_sdh);
2147 		if (err)
2148 			goto out;
2149 
2150 		if (!e || e->key.hash != hash_key.hash)
2151 			break;
2152 	}
2153 
2154 	/* Compute the space left in the current 256K bucket. */
2155 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2156 	left = SecurityDescriptorsBlockSize - next;
2157 
2158 	/* Skip to the next bucket if the new descriptor does not fit. */
2159 	if (left < new_sec_size) {
2160 		/* Jump past the rest of this bucket and its 256K mirror. */
2161 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2162 	}
2163 
2164 	/*
2165 	 * TODO: Zero the tail of the previous descriptor, e.g.:
2166 	 *
2167 	 * used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2168 	 * if (next > used) {
2169 	 *	u32 tozero = next - used;
2170 	 *	// Zero "tozero" bytes from sbi->security.next_off - tozero.
2171 	 * }
2172 	 *
2173 	 * Example: i_size == 0x40438 and next_off == 0x40440 require zeroing
2174 	 * the range [0x438, 0x440) of the current block.
2175 	 */
2176 
2177 	/* Format new security descriptor. */
2178 	d_security->key.hash = hash_key.hash;
2179 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2180 	d_security->off = cpu_to_le64(sbi->security.next_off);
2181 	d_security->size = cpu_to_le32(new_sec_size);
2182 	memcpy(d_security + 1, sd, size_sd);
2183 
2184 	/* Write main SDS bucket. */
2185 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2186 				d_security, aligned_sec_size, 0);
2187 
2188 	if (err)
2189 		goto out;
2190 
2191 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2192 	new_sds_size = mirr_off + aligned_sec_size;
2193 
2194 	if (new_sds_size > ni->vfs_inode.i_size) {
2195 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2196 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2197 				    new_sds_size, &new_sds_size, false, NULL);
2198 		if (err)
2199 			goto out;
2200 	}
2201 
2202 	/* Write the mirror copy of the SDS bucket. */
2203 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2204 				aligned_sec_size, 0);
2205 	if (err)
2206 		goto out;
2207 
2208 	/* Fill SII entry. */
2209 	sii_e.de.view.data_off =
2210 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2211 	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2212 	sii_e.de.view.res = 0;
2213 	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
2214 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2215 	sii_e.de.flags = 0;
2216 	sii_e.de.res = 0;
2217 	sii_e.sec_id = d_security->key.sec_id;
2218 	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2219 
2220 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2221 	if (err)
2222 		goto out;
2223 
2224 	/* Fill SDH entry. */
2225 	sdh_e.de.view.data_off =
2226 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2227 	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2228 	sdh_e.de.view.res = 0;
2229 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2230 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2231 	sdh_e.de.flags = 0;
2232 	sdh_e.de.res = 0;
2233 	sdh_e.key.hash = d_security->key.hash;
2234 	sdh_e.key.sec_id = d_security->key.sec_id;
2235 	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
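     	/* SDH entries end with a 2-char "II" magic, matching entries created by Windows. */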
2236 	sdh_e.magic[0] = cpu_to_le16('I');
2237 	sdh_e.magic[1] = cpu_to_le16('I');
2238 
2239 	fnd_clear(fnd_sdh);
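     	/* The non-NULL argument makes the key compare include sec_id (hashes may collide). */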
2240 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2241 				fnd_sdh, 0);
2242 	if (err)
2243 		goto out;
2244 
2245 	*security_id = d_security->key.sec_id;
2246 	if (inserted)
2247 		*inserted = true;
2248 
2249 	/* Update Id and offset for next descriptor. */
2250 	sbi->security.next_id += 1;
2251 	sbi->security.next_off += aligned_sec_size;
2252 
2253 out:
2254 	fnd_put(fnd_sdh);
2255 	mark_inode_dirty(&ni->vfs_inode);
2256 	ni_unlock(ni);
2257 	kfree(d_security);
2258 
2259 	return err;
2260 }
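
     /*
      * Usage sketch (illustrative only; "std_info->security_id" is an assumed
      * field name, and this is not a call site in this file): a caller builds
      * a self-relative security descriptor, inserts it, and stores the
      * returned little-endian id in the file's standard information:
      *
      *	__le32 sid;
      *
      *	if (!ntfs_insert_security(sbi, sd, sd_size, &sid, NULL))
      *		std_info->security_id = sid;
      */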
2261 
2262 /*
2263  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2264  */
2265 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2266 {
2267 	int err;
2268 	struct ntfs_inode *ni = sbi->reparse.ni;
2269 	struct ntfs_index *indx = &sbi->reparse.index_r;
2270 	struct ATTRIB *attr;
2271 	struct ATTR_LIST_ENTRY *le;
2272 	const struct INDEX_ROOT *root_r;
2273 
2274 	if (!ni)
2275 		return 0;
2276 
2277 	le = NULL;
2278 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2279 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2280 	if (!attr) {
2281 		err = -EINVAL;
2282 		goto out;
2283 	}
2284 
2285 	root_r = resident_data(attr);
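     	/* Non-directory ("view") indexes store a zero attribute type in the root. */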
2286 	if (root_r->type != ATTR_ZERO ||
2287 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2288 		err = -EINVAL;
2289 		goto out;
2290 	}
2291 
2292 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2295 
2296 out:
2297 	return err;
2298 }
2299 
2300 /*
2301  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2302  */
2303 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2304 {
2305 	int err;
2306 	struct ntfs_inode *ni = sbi->objid.ni;
2307 	struct ntfs_index *indx = &sbi->objid.index_o;
2308 	struct ATTRIB *attr;
2309 	struct ATTR_LIST_ENTRY *le;
2310 	const struct INDEX_ROOT *root;
2311 
2312 	if (!ni)
2313 		return 0;
2314 
2315 	le = NULL;
2316 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2317 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2318 	if (!attr) {
2319 		err = -EINVAL;
2320 		goto out;
2321 	}
2322 
2323 	root = resident_data(attr);
2324 	if (root->type != ATTR_ZERO ||
2325 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2326 		err = -EINVAL;
2327 		goto out;
2328 	}
2329 
2330 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2333 
2334 out:
2335 	return err;
2336 }
2337 
2338 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2339 {
2340 	int err;
2341 	struct ntfs_inode *ni = sbi->objid.ni;
2342 	struct ntfs_index *indx = &sbi->objid.index_o;
2343 
2344 	if (!ni)
2345 		return -EINVAL;
2346 
2347 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2348 
2349 	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2350 
2351 	mark_inode_dirty(&ni->vfs_inode);
2352 	ni_unlock(ni);
2353 
2354 	return err;
2355 }
2356 
2357 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2358 			const struct MFT_REF *ref)
2359 {
2360 	int err;
2361 	struct ntfs_inode *ni = sbi->reparse.ni;
2362 	struct ntfs_index *indx = &sbi->reparse.index_r;
2363 	struct NTFS_DE_R re;
2364 
2365 	if (!ni)
2366 		return -EINVAL;
2367 
2368 	memset(&re, 0, sizeof(re));
2369 
2370 	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2371 	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2372 	re.de.key_size = cpu_to_le16(sizeof(re.key));
2373 
2374 	re.key.ReparseTag = rtag;
2375 	memcpy(&re.key.ref, ref, sizeof(*ref));
2376 
2377 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2378 
2379 	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2380 
2381 	mark_inode_dirty(&ni->vfs_inode);
2382 	ni_unlock(ni);
2383 
2384 	return err;
2385 }
2386 
2387 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2388 			const struct MFT_REF *ref)
2389 {
2390 	int err, diff;
2391 	struct ntfs_inode *ni = sbi->reparse.ni;
2392 	struct ntfs_index *indx = &sbi->reparse.index_r;
2393 	struct ntfs_fnd *fnd = NULL;
2394 	struct REPARSE_KEY rkey;
2395 	struct NTFS_DE_R *re;
2396 	struct INDEX_ROOT *root_r;
2397 
2398 	if (!ni)
2399 		return -EINVAL;
2400 
2401 	rkey.ReparseTag = rtag;
2402 	rkey.ref = *ref;
2403 
2404 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2405 
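     	/* A nonzero tag means the caller knows the full key: delete it directly. */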
2406 	if (rtag) {
2407 		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2408 		goto out1;
2409 	}
2410 
2411 	fnd = fnd_get();
2412 	if (!fnd) {
2413 		err = -ENOMEM;
2414 		goto out1;
2415 	}
2416 
2417 	root_r = indx_get_root(indx, ni, NULL, NULL);
2418 	if (!root_r) {
2419 		err = -EINVAL;
2420 		goto out;
2421 	}
2422 
2423 	/* 1 - forces the compare function to ignore rkey.ReparseTag when matching keys. */
2424 	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2425 			(struct NTFS_DE **)&re, fnd);
2426 	if (err)
2427 		goto out;
2428 
     	if (!re) {
     		/* Not found: nothing to remove. */
     		goto out;
     	}

2429 	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2430 		/* Should be impossible; looks like the volume is corrupt. */
2431 		goto out;
2432 	}
2433 
2434 	memcpy(&rkey, &re->key, sizeof(rkey));
2435 
2436 	fnd_put(fnd);
2437 	fnd = NULL;
2438 
2439 	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2440 	if (err)
2441 		goto out;
2442 
2443 out:
2444 	fnd_put(fnd);
2445 
2446 out1:
2447 	mark_inode_dirty(&ni->vfs_inode);
2448 	ni_unlock(ni);
2449 
2450 	return err;
2451 }
2452 
2453 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2454 					  CLST len)
2455 {
2456 	ntfs_unmap_meta(sbi->sb, lcn, len);
2457 	ntfs_discard(sbi, lcn, len);
2458 }
2459 
2460 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2461 {
2462 	CLST end, i, zone_len, zlen;
2463 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2464 
2465 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2466 	if (!wnd_is_used(wnd, lcn, len)) {
2467 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
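     		/*
     		 * The run is not fully in use: walk it and free only those
     		 * clusters that are actually marked as used.
     		 */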
2468 
2469 		end = lcn + len;
2470 		len = 0;
2471 		for (i = lcn; i < end; i++) {
2472 			if (wnd_is_used(wnd, i, 1)) {
2473 				if (!len)
2474 					lcn = i;
2475 				len += 1;
2476 				continue;
2477 			}
2478 
2479 			if (!len)
2480 				continue;
2481 
2482 			if (trim)
2483 				ntfs_unmap_and_discard(sbi, lcn, len);
2484 
2485 			wnd_set_free(wnd, lcn, len);
2486 			len = 0;
2487 		}
2488 
2489 		if (!len)
2490 			goto out;
2491 	}
2492 
2493 	if (trim)
2494 		ntfs_unmap_and_discard(sbi, lcn, len);
2495 	wnd_set_free(wnd, lcn, len);
2496 
2497 	/* Append to the MFT zone, if possible. */
2498 	zone_len = wnd_zone_len(wnd);
2499 	zlen = min(zone_len + len, sbi->zone_max);
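     	/* 'zlen' is the prospective zone length, capped at sbi->zone_max. */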
2500 
2501 	if (zlen == zone_len) {
2502 		/* MFT zone already has maximum size. */
2503 	} else if (!zone_len) {
2504 		/* Create MFT zone only if 'zlen' is large enough. */
2505 		if (zlen == sbi->zone_max)
2506 			wnd_zone_set(wnd, lcn, zlen);
2507 	} else {
2508 		CLST zone_lcn = wnd_zone_bit(wnd);
2509 
2510 		if (lcn + len == zone_lcn) {
2511 			/* Prepend to the head of the MFT zone. */
2512 			wnd_zone_set(wnd, lcn, zlen);
2513 		} else if (zone_lcn + zone_len == lcn) {
2514 			/* Append to the tail of the MFT zone. */
2515 			wnd_zone_set(wnd, zone_lcn, zlen);
2516 		}
2517 	}
2518 
2519 out:
2520 	up_write(&wnd->rw_lock);
2521 }
2522 
2523 /*
2524  * run_deallocate - Deallocate clusters.
2525  */
2526 int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2527 		   bool trim)
2528 {
2529 	CLST lcn, len;
2530 	size_t idx = 0;
2531 
2532 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
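     		/* Sparse fragments occupy no clusters on disk. */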
2533 		if (lcn == SPARSE_LCN)
2534 			continue;
2535 
2536 		mark_as_free_ex(sbi, lcn, len, trim);
2537 	}
2538 
2539 	return 0;
2540 }
2541 
2542 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2543 {
2544 	int i, ch;
2545 
2546 	/* Check for forbidden characters. */
2547 	for (i = 0; i < fname->len; ++i) {
2548 		ch = le16_to_cpu(fname->name[i]);
2549 
2550 		/* Control characters. */
2551 		if (ch < 0x20)
2552 			return true;
2553 
2554 		switch (ch) {
2555 		/* Disallowed by Windows. */
2556 		case '\\':
2557 		case '/':
2558 		case ':':
2559 		case '*':
2560 		case '?':
2561 		case '<':
2562 		case '>':
2563 		case '|':
2564 		case '\"':
2565 			return true;
2566 
2567 		default:
2568 			/* Allowed character. */
2569 			break;
2570 		}
2571 	}
2572 
2573 	/* File names cannot end with a space or a period. */
2574 	if (fname->len > 0) {
2575 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2576 		if (ch == ' ' || ch == '.')
2577 			return true;
2578 	}
2579 
2580 	return false;
2581 }
2582 
2583 static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2584 				    const struct le_str *fname)
2585 {
2586 	int port_digit;
2587 	const __le16 *name = fname->name;
2588 	int len = fname->len;
2589 	const u16 *upcase = sbi->upcase;
2590 
2591 	/* Check for 3-char reserved names (device names). */
2592 	/* The name by itself or with any extension is forbidden. */
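     	/* e.g. "CON", "NUL.txt" and "aux.c" are all reserved. */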
2593 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2594 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2595 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2596 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2597 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2598 			return true;
2599 
2600 	/* Check for 4-char reserved names (port name followed by 1..9). */
2601 	/* The name by itself or with any extension is forbidden. */
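     	/* e.g. "COM1" and "LPT9.log" are reserved; "COM0" and plain "COM" are not. */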
2602 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2603 		port_digit = le16_to_cpu(name[3]);
2604 		if (port_digit >= '1' && port_digit <= '9')
2605 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2606 					    false) ||
2607 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2608 					    false))
2609 				return true;
2610 	}
2611 
2612 	return false;
2613 }
2614 
2615 /*
2616  * valid_windows_name - Check if a file name is valid in Windows.
2617  */
2618 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2619 {
2620 	return !name_has_forbidden_chars(fname) &&
2621 	       !is_reserved_name(sbi, fname);
2622 }
2623 
2624 /*
2625  * ntfs_set_label - Update the current NTFS label.
2626  */
2627 int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2628 {
2629 	int err;
2630 	struct ATTRIB *attr;
2631 	struct ntfs_inode *ni = sbi->volume.ni;
2632 	const u8 max_ulen = 0x80; /* TODO: Use $AttrDef to get the maximum length. */
2633 	/* Allocate PATH_MAX bytes. */
2634 	struct cpu_str *uni = __getname();
2635 
2636 	if (!uni)
2637 		return -ENOMEM;
2638 
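     	/* cpu_str is a 2-byte header plus u16 chars, hence (PATH_MAX - 2) / 2. */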
2639 	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2640 				UTF16_LITTLE_ENDIAN);
2641 	if (err < 0)
2642 		goto out;
2643 
2644 	if (uni->len > max_ulen) {
2645 		ntfs_warn(sbi->sb, "new label is too long");
2646 		err = -EFBIG;
2647 		goto out;
2648 	}
2649 
2650 	ni_lock(ni);
2651 
2652 	/* Remove the old label; ignore any errors. */
2653 	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2654 
2655 	err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
2656 				 0, &attr, NULL, NULL);
2657 	if (err < 0)
2658 		goto unlock_out;
2659 
2660 	/* Write the new label into the on-disk structure. */
2661 	memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
2662 
2663 	/* Update the cached copy of the current label. */
2664 	if (len >= ARRAY_SIZE(sbi->volume.label))
2665 		len = ARRAY_SIZE(sbi->volume.label) - 1;
2666 	memcpy(sbi->volume.label, label, len);
2667 	sbi->volume.label[len] = 0;
2668 	mark_inode_dirty_sync(&ni->vfs_inode);
2669 
2670 unlock_out:
2671 	ni_unlock(ni);
2672 
2673 	if (!err)
2674 		err = _ni_write_inode(&ni->vfs_inode, 0);
2675 
2676 out:
2677 	__putname(uni);
2678 	return err;
2679 }