xref: /openbmc/linux/fs/ntfs3/fsntfs.c (revision 323b0ab3f235f043c1be616ad495b57169bb4b18)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  */
7 
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
10 #include <linux/fs.h>
11 #include <linux/kernel.h>
12 #include <linux/nls.h>
13 
14 #include "debug.h"
15 #include "ntfs.h"
16 #include "ntfs_fs.h"
17 
18 // clang-format off
19 const struct cpu_str NAME_MFT = {
20 	4, 0, { '$', 'M', 'F', 'T' },
21 };
22 const struct cpu_str NAME_MIRROR = {
23 	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
24 };
25 const struct cpu_str NAME_LOGFILE = {
26 	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
27 };
28 const struct cpu_str NAME_VOLUME = {
29 	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
30 };
31 const struct cpu_str NAME_ATTRDEF = {
32 	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
33 };
34 const struct cpu_str NAME_ROOT = {
35 	1, 0, { '.' },
36 };
37 const struct cpu_str NAME_BITMAP = {
38 	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
39 };
40 const struct cpu_str NAME_BOOT = {
41 	5, 0, { '$', 'B', 'o', 'o', 't' },
42 };
43 const struct cpu_str NAME_BADCLUS = {
44 	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
45 };
46 const struct cpu_str NAME_QUOTA = {
47 	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
48 };
49 const struct cpu_str NAME_SECURE = {
50 	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
51 };
52 const struct cpu_str NAME_UPCASE = {
53 	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
54 };
55 const struct cpu_str NAME_EXTEND = {
56 	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
57 };
58 const struct cpu_str NAME_OBJID = {
59 	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
60 };
61 const struct cpu_str NAME_REPARSE = {
62 	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
63 };
64 const struct cpu_str NAME_USNJRNL = {
65 	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
66 };
67 const __le16 BAD_NAME[4] = {
68 	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
69 };
70 const __le16 I30_NAME[4] = {
71 	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
72 };
73 const __le16 SII_NAME[4] = {
74 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
75 };
76 const __le16 SDH_NAME[4] = {
77 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
78 };
79 const __le16 SDS_NAME[4] = {
80 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81 };
82 const __le16 SO_NAME[2] = {
83 	cpu_to_le16('$'), cpu_to_le16('O'),
84 };
85 const __le16 SQ_NAME[2] = {
86 	cpu_to_le16('$'), cpu_to_le16('Q'),
87 };
88 const __le16 SR_NAME[2] = {
89 	cpu_to_le16('$'), cpu_to_le16('R'),
90 };
91 
92 #ifdef CONFIG_NTFS3_LZX_XPRESS
93 const __le16 WOF_NAME[17] = {
94 	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
95 	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
96 	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
97 	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
98 	cpu_to_le16('a'),
99 };
100 #endif
101 
102 static const __le16 CON_NAME[3] = {
103 	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
104 };
105 
106 static const __le16 NUL_NAME[3] = {
107 	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
108 };
109 
110 static const __le16 AUX_NAME[3] = {
111 	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
112 };
113 
114 static const __le16 PRN_NAME[3] = {
115 	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
116 };
117 
118 static const __le16 COM_NAME[3] = {
119 	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
120 };
121 
122 static const __le16 LPT_NAME[3] = {
123 	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
124 };
125 
126 // clang-format on
127 
128 /*
129  * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
130  */
131 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
132 {
133 	u16 *fixup, *ptr;
134 	u16 sample;
135 	u16 fo = le16_to_cpu(rhdr->fix_off);
136 	u16 fn = le16_to_cpu(rhdr->fix_num);
137 
138 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
139 	    fn * SECTOR_SIZE > bytes) {
140 		return false;
141 	}
142 
143 	/* Get fixup pointer. */
144 	fixup = Add2Ptr(rhdr, fo);
145 
146 	if (*fixup >= 0x7FFF)
147 		*fixup = 1;
148 	else
149 		*fixup += 1;
150 
151 	sample = *fixup;
152 
153 	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
154 
155 	while (fn--) {
156 		*++fixup = *ptr;
157 		*ptr = sample;
158 		ptr += SECTOR_SIZE / sizeof(short);
159 	}
160 	return true;
161 }
162 
163 /*
164  * ntfs_fix_post_read - Remove fixups after reading from disk.
165  *
166  * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
167  */
168 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
169 		       bool simple)
170 {
171 	int ret;
172 	u16 *fixup, *ptr;
173 	u16 sample, fo, fn;
174 
175 	fo = le16_to_cpu(rhdr->fix_off);
176 	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
177 		      le16_to_cpu(rhdr->fix_num);
178 
179 	/* Check errors. */
180 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
181 	    fn * SECTOR_SIZE > bytes) {
182 		return -E_NTFS_CORRUPT;
183 	}
184 
185 	/* Get fixup pointer. */
186 	fixup = Add2Ptr(rhdr, fo);
187 	sample = *fixup;
188 	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
189 	ret = 0;
190 
191 	while (fn--) {
192 		/* Test current word. */
193 		if (*ptr != sample) {
194 			/* Fixup does not match! Is it a serious error? */
195 			ret = -E_NTFS_FIXUP;
196 		}
197 
198 		/* Replace fixup. */
199 		*ptr = *++fixup;
200 		ptr += SECTOR_SIZE / sizeof(short);
201 	}
202 
203 	return ret;
204 }
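
/*
 * Illustrative sketch (not driver code): how the two helpers above
 * cooperate. ntfs_fix_pre_write() bumps the update sequence counter at
 * @fix_off, saves the last __le16 of each 512-byte sector into the
 * fixup array and stamps the counter over it; ntfs_fix_post_read()
 * checks the stamps and restores the saved words. For a hypothetical
 * 1K record (fix_num == 3):
 *
 *	u16 *fixup = Add2Ptr(rhdr, le16_to_cpu(rhdr->fix_off));
 *	// fixup[0] - sequence counter, e.g. 5
 *	// fixup[1] - saved last word of sector 0
 *	// fixup[2] - saved last word of sector 1
 *	// On disk both sector tails now read 5. A torn write leaves a
 *	// mismatching tail, which post-read reports as -E_NTFS_FIXUP.
 */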
205 
206 /*
207  * ntfs_extend_init - Load $Extend file.
208  */
209 int ntfs_extend_init(struct ntfs_sb_info *sbi)
210 {
211 	int err;
212 	struct super_block *sb = sbi->sb;
213 	struct inode *inode, *inode2;
214 	struct MFT_REF ref;
215 
216 	if (sbi->volume.major_ver < 3) {
217 		ntfs_notice(sb, "Skipping $Extend because NTFS version < 3.");
218 		return 0;
219 	}
220 
221 	ref.low = cpu_to_le32(MFT_REC_EXTEND);
222 	ref.high = 0;
223 	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
224 	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
225 	if (IS_ERR(inode)) {
226 		err = PTR_ERR(inode);
227 		ntfs_err(sb, "Failed to load $Extend (%d).", err);
228 		inode = NULL;
229 		goto out;
230 	}
231 
232 	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
233 	if (!S_ISDIR(inode->i_mode)) {
234 		err = -EINVAL;
235 		goto out;
236 	}
237 
238 	/* Try to find $ObjId */
239 	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
240 	if (inode2 && !IS_ERR(inode2)) {
241 		if (is_bad_inode(inode2)) {
242 			iput(inode2);
243 		} else {
244 			sbi->objid.ni = ntfs_i(inode2);
245 			sbi->objid_no = inode2->i_ino;
246 		}
247 	}
248 
249 	/* Try to find $Quota */
250 	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
251 	if (inode2 && !IS_ERR(inode2)) {
252 		sbi->quota_no = inode2->i_ino;
253 		iput(inode2);
254 	}
255 
256 	/* Try to find $Reparse */
257 	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
258 	if (inode2 && !IS_ERR(inode2)) {
259 		sbi->reparse.ni = ntfs_i(inode2);
260 		sbi->reparse_no = inode2->i_ino;
261 	}
262 
263 	/* Try to find $UsnJrnl */
264 	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
265 	if (inode2 && !IS_ERR(inode2)) {
266 		sbi->usn_jrnl_no = inode2->i_ino;
267 		iput(inode2);
268 	}
269 
270 	err = 0;
271 out:
272 	iput(inode);
273 	return err;
274 }
275 
276 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
277 {
278 	int err = 0;
279 	struct super_block *sb = sbi->sb;
280 	bool initialized = false;
281 	struct MFT_REF ref;
282 	struct inode *inode;
283 
284 	/* Check for 4GB. */
285 	/* Check for the 4GB limit. */
286 		ntfs_err(sb, "\x24LogFile is larger than 4G.");
287 		err = -EINVAL;
288 		goto out;
289 	}
290 
291 	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
292 
293 	ref.low = cpu_to_le32(MFT_REC_MFT);
294 	ref.high = 0;
295 	ref.seq = cpu_to_le16(1);
296 
297 	inode = ntfs_iget5(sb, &ref, NULL);
298 
299 	if (IS_ERR(inode))
300 		inode = NULL;
301 
302 	if (!inode) {
303 		/* Try to use MFT copy. */
304 		u64 t64 = sbi->mft.lbo;
305 
306 		sbi->mft.lbo = sbi->mft.lbo2;
307 		inode = ntfs_iget5(sb, &ref, NULL);
308 		sbi->mft.lbo = t64;
309 		if (IS_ERR(inode))
310 			inode = NULL;
311 	}
312 
313 	if (!inode) {
314 		err = -EINVAL;
315 		ntfs_err(sb, "Failed to load $MFT.");
316 		goto out;
317 	}
318 
319 	sbi->mft.ni = ntfs_i(inode);
320 
321 	/* LogFile should not contain an attribute list. */
322 	err = ni_load_all_mi(sbi->mft.ni);
323 	if (!err)
324 		err = log_replay(ni, &initialized);
325 
326 	iput(inode);
327 	sbi->mft.ni = NULL;
328 
329 	sync_blockdev(sb->s_bdev);
330 	invalidate_bdev(sb->s_bdev);
331 
332 	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
333 		err = 0;
334 		goto out;
335 	}
336 
337 	if (sb_rdonly(sb) || !initialized)
338 		goto out;
339 
340 	/* Fill LogFile with -1 if it is initialized. */
341 	err = ntfs_bio_fill_1(sbi, &ni->file.run);
342 
343 out:
344 	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
345 
346 	return err;
347 }
348 
349 /*
350  * ntfs_look_for_free_space - Look for free space in the bitmap.
351  */
352 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
353 			     CLST *new_lcn, CLST *new_len,
354 			     enum ALLOCATE_OPT opt)
355 {
356 	int err;
357 	CLST alen;
358 	struct super_block *sb = sbi->sb;
359 	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
360 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
361 
362 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
363 	if (opt & ALLOCATE_MFT) {
364 		zlen = wnd_zone_len(wnd);
365 
366 		if (!zlen) {
367 			err = ntfs_refresh_zone(sbi);
368 			if (err)
369 				goto up_write;
370 
371 			zlen = wnd_zone_len(wnd);
372 		}
373 
374 		if (!zlen) {
375 			ntfs_err(sbi->sb, "no free space to extend mft");
376 			err = -ENOSPC;
377 			goto up_write;
378 		}
379 
380 		lcn = wnd_zone_bit(wnd);
381 		alen = min_t(CLST, len, zlen);
382 
383 		wnd_zone_set(wnd, lcn + alen, zlen - alen);
384 
385 		err = wnd_set_used(wnd, lcn, alen);
386 		if (err)
387 			goto up_write;
388 
389 		alcn = lcn;
390 		goto space_found;
391 	}
392 	/*
393 	 * Because cluster 0 is always in use, lcn == 0 means that we should
394 	 * use the cached value of 'next_free_lcn' to improve performance.
395 	 */
396 	if (!lcn)
397 		lcn = sbi->used.next_free_lcn;
398 
399 	if (lcn >= wnd->nbits)
400 		lcn = 0;
401 
402 	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
403 	if (alen)
404 		goto space_found;
405 
406 	/* Try to use clusters from MftZone. */
407 	zlen = wnd_zone_len(wnd);
408 	zeroes = wnd_zeroes(wnd);
409 
410 	/* Check for a too-big request. */
411 	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
412 		err = -ENOSPC;
413 		goto up_write;
414 	}
415 
416 	/* How many clusters to cut from the zone. */
417 	zlcn = wnd_zone_bit(wnd);
418 	zlen2 = zlen >> 1;
419 	ztrim = clamp_val(len, zlen2, zlen);
420 	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
421 
422 	wnd_zone_set(wnd, zlcn, new_zlen);
423 
424 	/* Allocate contiguous clusters. */
425 	alen = wnd_find(wnd, len, 0,
426 			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
427 	if (!alen) {
428 		err = -ENOSPC;
429 		goto up_write;
430 	}
431 
432 space_found:
433 	err = 0;
434 	*new_len = alen;
435 	*new_lcn = alcn;
436 
437 	ntfs_unmap_meta(sb, alcn, alen);
438 
439 	/* Set hint for next requests. */
440 	if (!(opt & ALLOCATE_MFT))
441 		sbi->used.next_free_lcn = alcn + alen;
442 up_write:
443 	up_write(&wnd->rw_lock);
444 	return err;
445 }
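
/*
 * Illustrative sketch (not driver code), using hypothetical numbers:
 * when the general lookup above fails and clusters are taken from the
 * MFT zone, a request of len == 0x300 against a zone of zlen == 0x1000
 * clusters is trimmed as
 *
 *	zlen2    = 0x1000 >> 1;				// 0x800
 *	ztrim    = clamp_val(0x300, zlen2, zlen);	// 0x800
 *	new_zlen = max_t(size_t, 0x1000 - 0x800, NTFS_MIN_MFT_ZONE);
 *
 * i.e. the zone yields at least half its length (more for larger
 * requests), while max_t() keeps it from shrinking below
 * NTFS_MIN_MFT_ZONE clusters.
 */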
446 
447 /*
448  * ntfs_check_for_free_space
449  *
450  * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
451  */
452 bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
453 {
454 	size_t free, zlen, avail;
455 	struct wnd_bitmap *wnd;
456 
457 	wnd = &sbi->used.bitmap;
458 	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
459 	free = wnd_zeroes(wnd);
460 	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
461 	up_read(&wnd->rw_lock);
462 
463 	if (free < zlen + clen)
464 		return false;
465 
466 	avail = free - (zlen + clen);
467 
468 	wnd = &sbi->mft.bitmap;
469 	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
470 	free = wnd_zeroes(wnd);
471 	zlen = wnd_zone_len(wnd);
472 	up_read(&wnd->rw_lock);
473 
474 	if (free >= zlen + mlen)
475 		return true;
476 
477 	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
478 }
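
/*
 * Illustrative sketch (not driver code), with hypothetical numbers:
 * free == 1000 clusters, cluster zone zlen == 100, and a request of
 * clen == 50 clusters plus mlen == 16 records of 1K each on a
 * 4K-cluster volume falls through to the last check as
 *
 *	avail = 1000 - (100 + 50);		// 850 clusters
 *	// mlen << sbi->record_bits == 16K
 *	// bytes_to_cluster(sbi, 16K) == 4 <= 850 -> true
 */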
479 
480 /*
481  * ntfs_extend_mft - Allocate additional MFT records.
482  *
483  * sbi->mft.bitmap is locked for write.
484  *
485  * NOTE: recursive:
486  *	ntfs_look_free_mft ->
487  *	ntfs_extend_mft ->
488  *	attr_set_size ->
489  *	ni_insert_nonresident ->
490  *	ni_insert_attr ->
491  *	ni_ins_attr_ext ->
492  *	ntfs_look_free_mft ->
493  *	ntfs_extend_mft
494  *
495  * To avoid recursion, always allocate space for two new MFT records;
496  * see attrib.c: "at least two MFT to avoid recursive loop".
497  */
498 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
499 {
500 	int err;
501 	struct ntfs_inode *ni = sbi->mft.ni;
502 	size_t new_mft_total;
503 	u64 new_mft_bytes, new_bitmap_bytes;
504 	struct ATTRIB *attr;
505 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
506 
507 	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
508 	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
509 
510 	/* Step 1: Resize $MFT::DATA. */
511 	down_write(&ni->file.run_lock);
512 	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
513 			    new_mft_bytes, NULL, false, &attr);
514 
515 	if (err) {
516 		up_write(&ni->file.run_lock);
517 		goto out;
518 	}
519 
520 	attr->nres.valid_size = attr->nres.data_size;
521 	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
522 	ni->mi.dirty = true;
523 
524 	/* Step 2: Resize $MFT::BITMAP. */
525 	new_bitmap_bytes = bitmap_size(new_mft_total);
526 
527 	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
528 			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
529 
530 	/* Refresh MFT Zone if necessary. */
531 	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
532 
533 	ntfs_refresh_zone(sbi);
534 
535 	up_write(&sbi->used.bitmap.rw_lock);
536 	up_write(&ni->file.run_lock);
537 
538 	if (err)
539 		goto out;
540 
541 	err = wnd_extend(wnd, new_mft_total);
542 
543 	if (err)
544 		goto out;
545 
546 	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
547 
548 	err = _ni_write_inode(&ni->vfs_inode, 0);
549 out:
550 	return err;
551 }
552 
553 /*
554  * ntfs_look_free_mft - Look for a free MFT record.
555  */
556 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
557 		       struct ntfs_inode *ni, struct mft_inode **mi)
558 {
559 	int err = 0;
560 	size_t zbit, zlen, from, to, fr;
561 	size_t mft_total;
562 	struct MFT_REF ref;
563 	struct super_block *sb = sbi->sb;
564 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
565 	u32 ir;
566 
567 	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
568 		      MFT_REC_FREE - MFT_REC_RESERVED);
569 
570 	if (!mft)
571 		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
572 
573 	zlen = wnd_zone_len(wnd);
574 
575 	/* Always reserve space for MFT. */
576 	if (zlen) {
577 		if (mft) {
578 			zbit = wnd_zone_bit(wnd);
579 			*rno = zbit;
580 			wnd_zone_set(wnd, zbit + 1, zlen - 1);
581 		}
582 		goto found;
583 	}
584 
585 	/* No MFT zone. Find the free MFT record nearest to '0'. */
586 	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
587 		/* Resize MFT */
588 		mft_total = wnd->nbits;
589 
590 		err = ntfs_extend_mft(sbi);
591 		if (!err) {
592 			zbit = mft_total;
593 			goto reserve_mft;
594 		}
595 
596 		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
597 			goto out;
598 
599 		err = 0;
600 
601 		/*
602 		 * Look for a free record in the reserved area [11-16) ==
603 		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
604 		 * marks it as used.
605 		 */
606 		if (!sbi->mft.reserved_bitmap) {
607 			/* Create the internal 5-bit bitmap once per session. */
608 			sbi->mft.reserved_bitmap = 0xFF;
609 
610 			ref.high = 0;
611 			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
612 				struct inode *i;
613 				struct ntfs_inode *ni;
614 				struct MFT_REC *mrec;
615 
616 				ref.low = cpu_to_le32(ir);
617 				ref.seq = cpu_to_le16(ir);
618 
619 				i = ntfs_iget5(sb, &ref, NULL);
620 				if (IS_ERR(i)) {
621 next:
622 					ntfs_notice(
623 						sb,
624 						"Invalid reserved record %x",
625 						ref.low);
626 					continue;
627 				}
628 				if (is_bad_inode(i)) {
629 					iput(i);
630 					goto next;
631 				}
632 
633 				ni = ntfs_i(i);
634 
635 				mrec = ni->mi.mrec;
636 
637 				if (!is_rec_base(mrec))
638 					goto next;
639 
640 				if (mrec->hard_links)
641 					goto next;
642 
643 				if (!ni_std(ni))
644 					goto next;
645 
646 				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
647 						 NULL, 0, NULL, NULL))
648 					goto next;
649 
650 				__clear_bit(ir - MFT_REC_RESERVED,
651 					    &sbi->mft.reserved_bitmap);
652 			}
653 		}
654 
655 		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
656 		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
657 					  MFT_REC_FREE, MFT_REC_RESERVED);
658 		if (zbit >= MFT_REC_FREE) {
659 			sbi->mft.next_reserved = MFT_REC_FREE;
660 			goto out;
661 		}
662 
663 		zlen = 1;
664 		sbi->mft.next_reserved = zbit;
665 	} else {
666 reserve_mft:
667 		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
668 		if (zbit + zlen > wnd->nbits)
669 			zlen = wnd->nbits - zbit;
670 
671 		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
672 			zlen -= 1;
673 
674 		/* [zbit, zbit + zlen) will be used for MFT itself. */
675 		from = sbi->mft.used;
676 		if (from < zbit)
677 			from = zbit;
678 		to = zbit + zlen;
679 		if (from < to) {
680 			ntfs_clear_mft_tail(sbi, from, to);
681 			sbi->mft.used = to;
682 		}
683 	}
684 
685 	if (mft) {
686 		*rno = zbit;
687 		zbit += 1;
688 		zlen -= 1;
689 	}
690 
691 	wnd_zone_set(wnd, zbit, zlen);
692 
693 found:
694 	if (!mft) {
695 		/* The request is for a general-purpose record. */
696 		if (sbi->mft.next_free < MFT_REC_USER)
697 			sbi->mft.next_free = MFT_REC_USER;
698 
699 		for (;;) {
700 			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
701 			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
702 				sbi->mft.next_free = sbi->mft.bitmap.nbits;
703 			} else {
704 				*rno = fr;
705 				sbi->mft.next_free = *rno + 1;
706 				break;
707 			}
708 
709 			err = ntfs_extend_mft(sbi);
710 			if (err)
711 				goto out;
712 		}
713 	}
714 
715 	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
716 		err = -ENOMEM;
717 		goto out;
718 	}
719 
720 	/* We have found a record that is not reserved for the next MFT. */
721 	if (*rno >= MFT_REC_FREE)
722 		wnd_set_used(wnd, *rno, 1);
723 	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
724 		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
725 
726 out:
727 	if (!mft)
728 		up_write(&wnd->rw_lock);
729 
730 	return err;
731 }
732 
733 /*
734  * ntfs_mark_rec_free - Mark record as free.
735  * is_mft - true if we are changing the MFT.
736  */
737 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
738 {
739 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
740 
741 	if (!is_mft)
742 		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
743 	if (rno >= wnd->nbits)
744 		goto out;
745 
746 	if (rno >= MFT_REC_FREE) {
747 		if (!wnd_is_used(wnd, rno, 1))
748 			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
749 		else
750 			wnd_set_free(wnd, rno, 1);
751 	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
752 		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
753 	}
754 
755 	if (rno < wnd_zone_bit(wnd))
756 		wnd_zone_set(wnd, rno, 1);
757 	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
758 		sbi->mft.next_free = rno;
759 
760 out:
761 	if (!is_mft)
762 		up_write(&wnd->rw_lock);
763 }
764 
765 /*
766  * ntfs_clear_mft_tail - Format empty records [from, to).
767  *
768  * sbi->mft.bitmap is locked for write.
769  */
770 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
771 {
772 	int err;
773 	u32 rs;
774 	u64 vbo;
775 	struct runs_tree *run;
776 	struct ntfs_inode *ni;
777 
778 	if (from >= to)
779 		return 0;
780 
781 	rs = sbi->record_size;
782 	ni = sbi->mft.ni;
783 	run = &ni->file.run;
784 
785 	down_read(&ni->file.run_lock);
786 	vbo = (u64)from * rs;
787 	for (; from < to; from++, vbo += rs) {
788 		struct ntfs_buffers nb;
789 
790 		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
791 		if (err)
792 			goto out;
793 
794 		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
795 		nb_put(&nb);
796 		if (err)
797 			goto out;
798 	}
799 
800 out:
801 	sbi->mft.used = from;
802 	up_read(&ni->file.run_lock);
803 	return err;
804 }
805 
806 /*
807  * ntfs_refresh_zone - Refresh MFT zone.
808  *
809  * sbi->used.bitmap is locked for rw.
810  * sbi->mft.bitmap is locked for write.
811  * sbi->mft.ni->file.run_lock for write.
812  */
813 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
814 {
815 	CLST lcn, vcn, len;
816 	size_t lcn_s, zlen;
817 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
818 	struct ntfs_inode *ni = sbi->mft.ni;
819 
820 	/* Nothing to do while the MFT zone is still non-empty. */
821 	if (wnd_zone_len(wnd))
822 		return 0;
823 
824 	vcn = bytes_to_cluster(sbi,
825 			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
826 
827 	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
828 		lcn = SPARSE_LCN;
829 
830 	/* We should always find the last LCN of the MFT. */
831 	if (lcn == SPARSE_LCN)
832 		return -EINVAL;
833 
834 	lcn_s = lcn + 1;
835 
836 	/* Try to allocate clusters after last MFT run. */
837 	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
838 	wnd_zone_set(wnd, lcn_s, zlen);
839 
840 	return 0;
841 }
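
/*
 * Illustrative sketch (not driver code): on a hypothetical volume with
 * 4K clusters, 1K records and an $MFT bitmap of 0x4000 bits, the zone
 * is rebuilt just past the last mapped MFT cluster:
 *
 *	vcn = bytes_to_cluster(sbi, 0x4000ull << 10);	// 0x1000
 *	// run_lookup_entry(.., vcn - 1, &lcn, ..) yields the last lcn;
 *	// the new zone is a free stretch found searching from lcn + 1,
 *	// at most sbi->zone_max clusters long.
 */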
842 
843 /*
844  * ntfs_update_mftmirr - Update $MFTMirr data.
845  */
846 void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
847 {
848 	int err;
849 	struct super_block *sb = sbi->sb;
850 	u32 blocksize, bytes;
851 	sector_t block1, block2;
852 
853 	/*
854 	 * sb can be NULL here. In this case sbi->flags should be 0 too.
855 	 */
856 	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
857 	    unlikely(ntfs3_forced_shutdown(sb)))
858 		return;
859 
860 	blocksize = sb->s_blocksize;
861 	bytes = sbi->mft.recs_mirr << sbi->record_bits;
862 	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
863 	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
864 
865 	for (; bytes >= blocksize; bytes -= blocksize) {
866 		struct buffer_head *bh1, *bh2;
867 
868 		bh1 = sb_bread(sb, block1++);
869 		if (!bh1)
870 			return;
871 
872 		bh2 = sb_getblk(sb, block2++);
873 		if (!bh2) {
874 			put_bh(bh1);
875 			return;
876 		}
877 
878 		if (buffer_locked(bh2))
879 			__wait_on_buffer(bh2);
880 
881 		lock_buffer(bh2);
882 		memcpy(bh2->b_data, bh1->b_data, blocksize);
883 		set_buffer_uptodate(bh2);
884 		mark_buffer_dirty(bh2);
885 		unlock_buffer(bh2);
886 
887 		put_bh(bh1);
888 		bh1 = NULL;
889 
890 		err = wait ? sync_dirty_buffer(bh2) : 0;
891 
892 		put_bh(bh2);
893 		if (err)
894 			return;
895 	}
896 
897 	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
898 }
899 
900 /*
901  * ntfs_bad_inode
902  *
903  * Mark the inode as bad and mark the fs as 'dirty'.
904  */
905 void ntfs_bad_inode(struct inode *inode, const char *hint)
906 {
907 	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
908 
909 	ntfs_inode_err(inode, "%s", hint);
910 	make_bad_inode(inode);
911 	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
912 }
913 
914 /*
915  * ntfs_set_state
916  *
917  * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
918  * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
919  * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
920  */
921 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
922 {
923 	int err;
924 	struct ATTRIB *attr;
925 	struct VOLUME_INFO *info;
926 	struct mft_inode *mi;
927 	struct ntfs_inode *ni;
928 	__le16 info_flags;
929 
930 	/*
931 	 * Do not change state if fs was real_dirty.
932 	 * Do not change state if fs is already dirty (clear).
933 	 * Do not change anything if mounted read-only.
934 	 */
935 	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
936 		return 0;
937 
938 	/* Check cached value. */
939 	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
940 	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
941 		return 0;
942 
943 	ni = sbi->volume.ni;
944 	if (!ni)
945 		return -EINVAL;
946 
947 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
948 
949 	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
950 	if (!attr) {
951 		err = -EINVAL;
952 		goto out;
953 	}
954 
955 	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
956 	if (!info) {
957 		err = -EINVAL;
958 		goto out;
959 	}
960 
961 	info_flags = info->flags;
962 
963 	switch (dirty) {
964 	case NTFS_DIRTY_ERROR:
965 		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
966 		sbi->volume.real_dirty = true;
967 		fallthrough;
968 	case NTFS_DIRTY_DIRTY:
969 		info->flags |= VOLUME_FLAG_DIRTY;
970 		break;
971 	case NTFS_DIRTY_CLEAR:
972 		info->flags &= ~VOLUME_FLAG_DIRTY;
973 		break;
974 	}
975 	/* Cache current volume flags. */
976 	if (info_flags != info->flags) {
977 		sbi->volume.flags = info->flags;
978 		mi->dirty = true;
979 	}
980 	err = 0;
981 
982 out:
983 	ni_unlock(ni);
984 	if (err)
985 		return err;
986 
987 	mark_inode_dirty_sync(&ni->vfs_inode);
988 	/* verify(!ntfs_update_mftmirr()); */
989 
990 	/* Write the MFT record to disk. */
991 	err = _ni_write_inode(&ni->vfs_inode, 1);
992 
993 	return err;
994 }
995 
996 /*
997  * security_hash - Calculate a hash of the security descriptor.
998  */
999 static inline __le32 security_hash(const void *sd, size_t bytes)
1000 {
1001 	u32 hash = 0;
1002 	const __le32 *ptr = sd;
1003 
1004 	bytes >>= 2;
1005 	while (bytes--)
1006 		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
1007 	return cpu_to_le32(hash);
1008 }
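
/*
 * Illustrative note (an assumption, not an alternative driver path):
 * the expression above is a 32-bit rotate left by 3 followed by an
 * add, so with <linux/bitops.h> it could equivalently be written as
 *
 *	hash = rol32(hash, 3) + le32_to_cpu(*ptr++);
 *
 * Any tail of @bytes not divisible by 4 is ignored by the hash.
 */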
1009 
1010 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
1011 {
1012 	struct block_device *bdev = sb->s_bdev;
1013 	u32 blocksize = sb->s_blocksize;
1014 	u64 block = lbo >> sb->s_blocksize_bits;
1015 	u32 off = lbo & (blocksize - 1);
1016 	u32 op = blocksize - off;
1017 
1018 	for (; bytes; block += 1, off = 0, op = blocksize) {
1019 		struct buffer_head *bh = __bread(bdev, block, blocksize);
1020 
1021 		if (!bh)
1022 			return -EIO;
1023 
1024 		if (op > bytes)
1025 			op = bytes;
1026 
1027 		memcpy(buffer, bh->b_data + off, op);
1028 
1029 		put_bh(bh);
1030 
1031 		bytes -= op;
1032 		buffer = Add2Ptr(buffer, op);
1033 	}
1034 
1035 	return 0;
1036 }
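
/*
 * Illustrative sketch (not driver code), with hypothetical numbers:
 * reading 0x1000 bytes at lbo == 0x1300 from a 4K-block device:
 *
 *	block = 0x1300 >> 12;		// 1
 *	off   = 0x1300 & 0xFFF;		// 0x300
 *	op    = 0x1000 - 0x300;		// 0xD00 bytes from block 1
 *	// the remaining 0x300 bytes come from block 2 at off == 0
 */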
1037 
1038 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1039 		  const void *buf, int wait)
1040 {
1041 	u32 blocksize = sb->s_blocksize;
1042 	struct block_device *bdev = sb->s_bdev;
1043 	sector_t block = lbo >> sb->s_blocksize_bits;
1044 	u32 off = lbo & (blocksize - 1);
1045 	u32 op = blocksize - off;
1046 	struct buffer_head *bh;
1047 
1048 	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1049 		wait = 1;
1050 
1051 	for (; bytes; block += 1, off = 0, op = blocksize) {
1052 		if (op > bytes)
1053 			op = bytes;
1054 
1055 		if (op < blocksize) {
1056 			bh = __bread(bdev, block, blocksize);
1057 			if (!bh) {
1058 				ntfs_err(sb, "failed to read block %llx",
1059 					 (u64)block);
1060 				return -EIO;
1061 			}
1062 		} else {
1063 			bh = __getblk(bdev, block, blocksize);
1064 			if (!bh)
1065 				return -ENOMEM;
1066 		}
1067 
1068 		if (buffer_locked(bh))
1069 			__wait_on_buffer(bh);
1070 
1071 		lock_buffer(bh);
1072 		if (buf) {
1073 			memcpy(bh->b_data + off, buf, op);
1074 			buf = Add2Ptr(buf, op);
1075 		} else {
1076 			memset(bh->b_data + off, -1, op);
1077 		}
1078 
1079 		set_buffer_uptodate(bh);
1080 		mark_buffer_dirty(bh);
1081 		unlock_buffer(bh);
1082 
1083 		if (wait) {
1084 			int err = sync_dirty_buffer(bh);
1085 
1086 			if (err) {
1087 				ntfs_err(
1088 					sb,
1089 					"failed to sync buffer at block %llx, error %d",
1090 					(u64)block, err);
1091 				put_bh(bh);
1092 				return err;
1093 			}
1094 		}
1095 
1096 		put_bh(bh);
1097 
1098 		bytes -= op;
1099 	}
1100 	return 0;
1101 }
1102 
1103 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1104 		      u64 vbo, const void *buf, size_t bytes, int sync)
1105 {
1106 	struct super_block *sb = sbi->sb;
1107 	u8 cluster_bits = sbi->cluster_bits;
1108 	u32 off = vbo & sbi->cluster_mask;
1109 	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1110 	u64 lbo, len;
1111 	size_t idx;
1112 
1113 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1114 		return -ENOENT;
1115 
1116 	if (lcn == SPARSE_LCN)
1117 		return -EINVAL;
1118 
1119 	lbo = ((u64)lcn << cluster_bits) + off;
1120 	len = ((u64)clen << cluster_bits) - off;
1121 
1122 	for (;;) {
1123 		u32 op = min_t(u64, len, bytes);
1124 		int err = ntfs_sb_write(sb, lbo, op, buf, sync);
1125 
1126 		if (err)
1127 			return err;
1128 
1129 		bytes -= op;
1130 		if (!bytes)
1131 			break;
1132 
1133 		vcn_next = vcn + clen;
1134 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1135 		    vcn != vcn_next)
1136 			return -ENOENT;
1137 
1138 		if (lcn == SPARSE_LCN)
1139 			return -EINVAL;
1140 
1141 		if (buf)
1142 			buf = Add2Ptr(buf, op);
1143 
1144 		lbo = ((u64)lcn << cluster_bits);
1145 		len = ((u64)clen << cluster_bits);
1146 	}
1147 
1148 	return 0;
1149 }
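
/*
 * Illustrative sketch (not driver code): the run walk above requires
 * the fragments to be logically contiguous. For hypothetical entries
 * {vcn 0, len 4} and {vcn 4, len 2}:
 *
 *	vcn_next = 0 + 4;	// the next fragment must start at vcn 4
 *	// run_get_entry(run, ++idx, &vcn, ..) returning vcn != vcn_next
 *	// means a hole in the mapping, reported as -ENOENT.
 */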
1150 
1151 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1152 				   const struct runs_tree *run, u64 vbo)
1153 {
1154 	struct super_block *sb = sbi->sb;
1155 	u8 cluster_bits = sbi->cluster_bits;
1156 	CLST lcn;
1157 	u64 lbo;
1158 
1159 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1160 		return ERR_PTR(-ENOENT);
1161 
1162 	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1163 
1164 	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1165 }
1166 
1167 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1168 		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1169 {
1170 	int err;
1171 	struct super_block *sb = sbi->sb;
1172 	u32 blocksize = sb->s_blocksize;
1173 	u8 cluster_bits = sbi->cluster_bits;
1174 	u32 off = vbo & sbi->cluster_mask;
1175 	u32 nbh = 0;
1176 	CLST vcn_next, vcn = vbo >> cluster_bits;
1177 	CLST lcn, clen;
1178 	u64 lbo, len;
1179 	size_t idx;
1180 	struct buffer_head *bh;
1181 
1182 	if (!run) {
1183 		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1184 		if (vbo > MFT_REC_VOL * sbi->record_size) {
1185 			err = -ENOENT;
1186 			goto out;
1187 		}
1188 
1189 		/* Use the boot block's absolute 'MFTCluster' to read the record. */
1190 		lbo = vbo + sbi->mft.lbo;
1191 		len = sbi->record_size;
1192 	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1193 		err = -ENOENT;
1194 		goto out;
1195 	} else {
1196 		if (lcn == SPARSE_LCN) {
1197 			err = -EINVAL;
1198 			goto out;
1199 		}
1200 
1201 		lbo = ((u64)lcn << cluster_bits) + off;
1202 		len = ((u64)clen << cluster_bits) - off;
1203 	}
1204 
1205 	off = lbo & (blocksize - 1);
1206 	if (nb) {
1207 		nb->off = off;
1208 		nb->bytes = bytes;
1209 	}
1210 
1211 	for (;;) {
1212 		u32 len32 = len >= bytes ? bytes : len;
1213 		sector_t block = lbo >> sb->s_blocksize_bits;
1214 
1215 		do {
1216 			u32 op = blocksize - off;
1217 
1218 			if (op > len32)
1219 				op = len32;
1220 
1221 			bh = ntfs_bread(sb, block);
1222 			if (!bh) {
1223 				err = -EIO;
1224 				goto out;
1225 			}
1226 
1227 			if (buf) {
1228 				memcpy(buf, bh->b_data + off, op);
1229 				buf = Add2Ptr(buf, op);
1230 			}
1231 
1232 			if (!nb) {
1233 				put_bh(bh);
1234 			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
1235 				err = -EINVAL;
1236 				goto out;
1237 			} else {
1238 				nb->bh[nbh++] = bh;
1239 				nb->nbufs = nbh;
1240 			}
1241 
1242 			bytes -= op;
1243 			if (!bytes)
1244 				return 0;
1245 			len32 -= op;
1246 			block += 1;
1247 			off = 0;
1248 
1249 		} while (len32);
1250 
1251 		vcn_next = vcn + clen;
1252 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1253 		    vcn != vcn_next) {
1254 			err = -ENOENT;
1255 			goto out;
1256 		}
1257 
1258 		if (lcn == SPARSE_LCN) {
1259 			err = -EINVAL;
1260 			goto out;
1261 		}
1262 
1263 		lbo = ((u64)lcn << cluster_bits);
1264 		len = ((u64)clen << cluster_bits);
1265 	}
1266 
1267 out:
1268 	if (!nbh)
1269 		return err;
1270 
1271 	while (nbh) {
1272 		put_bh(nb->bh[--nbh]);
1273 		nb->bh[nbh] = NULL;
1274 	}
1275 
1276 	nb->nbufs = 0;
1277 	return err;
1278 }
1279 
1280 /*
1281  * ntfs_read_bh
1282  *
1283  * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1284  */
1285 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1286 		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1287 		 struct ntfs_buffers *nb)
1288 {
1289 	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1290 
1291 	if (err)
1292 		return err;
1293 	return ntfs_fix_post_read(rhdr, nb->bytes, true);
1294 }
1295 
1296 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1297 		u32 bytes, struct ntfs_buffers *nb)
1298 {
1299 	int err = 0;
1300 	struct super_block *sb = sbi->sb;
1301 	u32 blocksize = sb->s_blocksize;
1302 	u8 cluster_bits = sbi->cluster_bits;
1303 	CLST vcn_next, vcn = vbo >> cluster_bits;
1304 	u32 off;
1305 	u32 nbh = 0;
1306 	CLST lcn, clen;
1307 	u64 lbo, len;
1308 	size_t idx;
1309 
1310 	nb->bytes = bytes;
1311 
1312 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1313 		err = -ENOENT;
1314 		goto out;
1315 	}
1316 
1317 	off = vbo & sbi->cluster_mask;
1318 	lbo = ((u64)lcn << cluster_bits) + off;
1319 	len = ((u64)clen << cluster_bits) - off;
1320 
1321 	nb->off = off = lbo & (blocksize - 1);
1322 
1323 	for (;;) {
1324 		u32 len32 = min_t(u64, len, bytes);
1325 		sector_t block = lbo >> sb->s_blocksize_bits;
1326 
1327 		do {
1328 			u32 op;
1329 			struct buffer_head *bh;
1330 
1331 			if (nbh >= ARRAY_SIZE(nb->bh)) {
1332 				err = -EINVAL;
1333 				goto out;
1334 			}
1335 
1336 			op = blocksize - off;
1337 			if (op > len32)
1338 				op = len32;
1339 
1340 			if (op == blocksize) {
1341 				bh = sb_getblk(sb, block);
1342 				if (!bh) {
1343 					err = -ENOMEM;
1344 					goto out;
1345 				}
1346 				if (buffer_locked(bh))
1347 					__wait_on_buffer(bh);
1348 				set_buffer_uptodate(bh);
1349 			} else {
1350 				bh = ntfs_bread(sb, block);
1351 				if (!bh) {
1352 					err = -EIO;
1353 					goto out;
1354 				}
1355 			}
1356 
1357 			nb->bh[nbh++] = bh;
1358 			bytes -= op;
1359 			if (!bytes) {
1360 				nb->nbufs = nbh;
1361 				return 0;
1362 			}
1363 
1364 			block += 1;
1365 			len32 -= op;
1366 			off = 0;
1367 		} while (len32);
1368 
1369 		vcn_next = vcn + clen;
1370 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1371 		    vcn != vcn_next) {
1372 			err = -ENOENT;
1373 			goto out;
1374 		}
1375 
1376 		lbo = ((u64)lcn << cluster_bits);
1377 		len = ((u64)clen << cluster_bits);
1378 	}
1379 
1380 out:
1381 	while (nbh) {
1382 		put_bh(nb->bh[--nbh]);
1383 		nb->bh[nbh] = NULL;
1384 	}
1385 
1386 	nb->nbufs = 0;
1387 
1388 	return err;
1389 }
1390 
1391 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1392 		  struct ntfs_buffers *nb, int sync)
1393 {
1394 	int err = 0;
1395 	struct super_block *sb = sbi->sb;
1396 	u32 block_size = sb->s_blocksize;
1397 	u32 bytes = nb->bytes;
1398 	u32 off = nb->off;
1399 	u16 fo = le16_to_cpu(rhdr->fix_off);
1400 	u16 fn = le16_to_cpu(rhdr->fix_num);
1401 	u32 idx;
1402 	__le16 *fixup;
1403 	__le16 sample;
1404 
1405 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1406 	    fn * SECTOR_SIZE > bytes) {
1407 		return -EINVAL;
1408 	}
1409 
1410 	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1411 		u32 op = block_size - off;
1412 		char *bh_data;
1413 		struct buffer_head *bh = nb->bh[idx];
1414 		__le16 *ptr, *end_data;
1415 
1416 		if (op > bytes)
1417 			op = bytes;
1418 
1419 		if (buffer_locked(bh))
1420 			__wait_on_buffer(bh);
1421 
1422 		lock_buffer(bh);
1423 
1424 		bh_data = bh->b_data + off;
1425 		end_data = Add2Ptr(bh_data, op);
1426 		memcpy(bh_data, rhdr, op);
1427 
1428 		if (!idx) {
1429 			u16 t16;
1430 
1431 			fixup = Add2Ptr(bh_data, fo);
1432 			sample = *fixup;
1433 			t16 = le16_to_cpu(sample);
1434 			if (t16 >= 0x7FFF) {
1435 				sample = *fixup = cpu_to_le16(1);
1436 			} else {
1437 				sample = cpu_to_le16(t16 + 1);
1438 				*fixup = sample;
1439 			}
1440 
1441 			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
1442 		}
1443 
1444 		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1445 
1446 		do {
1447 			*++fixup = *ptr;
1448 			*ptr = sample;
1449 			ptr += SECTOR_SIZE / sizeof(short);
1450 		} while (ptr < end_data);
1451 
1452 		set_buffer_uptodate(bh);
1453 		mark_buffer_dirty(bh);
1454 		unlock_buffer(bh);
1455 
1456 		if (sync) {
1457 			int err2 = sync_dirty_buffer(bh);
1458 
1459 			if (!err && err2)
1460 				err = err2;
1461 		}
1462 
1463 		bytes -= op;
1464 		rhdr = Add2Ptr(rhdr, op);
1465 	}
1466 
1467 	return err;
1468 }
1469 
1470 /*
1471  * ntfs_bio_pages - Read/write pages from/to disk.
1472  */
1473 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1474 		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1475 		   enum req_op op)
1476 {
1477 	int err = 0;
1478 	struct bio *new, *bio = NULL;
1479 	struct super_block *sb = sbi->sb;
1480 	struct block_device *bdev = sb->s_bdev;
1481 	struct page *page;
1482 	u8 cluster_bits = sbi->cluster_bits;
1483 	CLST lcn, clen, vcn, vcn_next;
1484 	u32 add, off, page_idx;
1485 	u64 lbo, len;
1486 	size_t run_idx;
1487 	struct blk_plug plug;
1488 
1489 	if (!bytes)
1490 		return 0;
1491 
1492 	blk_start_plug(&plug);
1493 
1494 	/* Align vbo down and vbo + bytes up to 512-byte boundaries. */
1495 	lbo = (vbo + bytes + 511) & ~511ull;
1496 	vbo = vbo & ~511ull;
1497 	bytes = lbo - vbo;
1498 
1499 	vcn = vbo >> cluster_bits;
1500 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1501 		err = -ENOENT;
1502 		goto out;
1503 	}
1504 	off = vbo & sbi->cluster_mask;
1505 	page_idx = 0;
1506 	page = pages[0];
1507 
1508 	for (;;) {
1509 		lbo = ((u64)lcn << cluster_bits) + off;
1510 		len = ((u64)clen << cluster_bits) - off;
1511 new_bio:
1512 		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
1513 		if (bio) {
1514 			bio_chain(bio, new);
1515 			submit_bio(bio);
1516 		}
1517 		bio = new;
1518 		bio->bi_iter.bi_sector = lbo >> 9;
1519 
1520 		while (len) {
1521 			off = vbo & (PAGE_SIZE - 1);
1522 			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
1523 
1524 			if (bio_add_page(bio, page, add, off) < add)
1525 				goto new_bio;
1526 
1527 			if (bytes <= add)
1528 				goto out;
1529 			bytes -= add;
1530 			vbo += add;
1531 
1532 			if (add + off == PAGE_SIZE) {
1533 				page_idx += 1;
1534 				if (WARN_ON(page_idx >= nr_pages)) {
1535 					err = -EINVAL;
1536 					goto out;
1537 				}
1538 				page = pages[page_idx];
1539 			}
1540 
1541 			if (len <= add)
1542 				break;
1543 			len -= add;
1544 			lbo += add;
1545 		}
1546 
1547 		vcn_next = vcn + clen;
1548 		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1549 		    vcn != vcn_next) {
1550 			err = -ENOENT;
1551 			goto out;
1552 		}
1553 		off = 0;
1554 	}
1555 out:
1556 	if (bio) {
1557 		if (!err)
1558 			err = submit_bio_wait(bio);
1559 		bio_put(bio);
1560 	}
1561 	blk_finish_plug(&plug);
1562 
1563 	return err;
1564 }
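
/*
 * Illustrative sketch (not driver code): when bio_add_page() cannot
 * accept another page, the partially built bio is chained to a fresh
 * one, so waiting on the last bio waits for the whole chain:
 *
 *	bio_chain(bio, new);		// 'bio' completes into 'new'
 *	submit_bio(bio);
 *	bio = new;			// continue filling 'new'
 *	...
 *	err = submit_bio_wait(bio);	// waits for all chained bios
 */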
1565 
1566 /*
1567  * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1568  *
1569  * Fill the on-disk logfile range with -1;
1570  * this marks the logfile as empty.
1571  */
1572 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1573 {
1574 	int err = 0;
1575 	struct super_block *sb = sbi->sb;
1576 	struct block_device *bdev = sb->s_bdev;
1577 	u8 cluster_bits = sbi->cluster_bits;
1578 	struct bio *new, *bio = NULL;
1579 	CLST lcn, clen;
1580 	u64 lbo, len;
1581 	size_t run_idx;
1582 	struct page *fill;
1583 	void *kaddr;
1584 	struct blk_plug plug;
1585 
1586 	fill = alloc_page(GFP_KERNEL);
1587 	if (!fill)
1588 		return -ENOMEM;
1589 
1590 	kaddr = kmap_atomic(fill);
1591 	memset(kaddr, -1, PAGE_SIZE);
1592 	kunmap_atomic(kaddr);
1593 	flush_dcache_page(fill);
1594 	lock_page(fill);
1595 
1596 	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1597 		err = -ENOENT;
1598 		goto out;
1599 	}
1600 
1601 	/*
1602 	 * TODO: Try blkdev_issue_write_same.
1603 	 */
1604 	blk_start_plug(&plug);
1605 	do {
1606 		lbo = (u64)lcn << cluster_bits;
1607 		len = (u64)clen << cluster_bits;
1608 new_bio:
1609 		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
1610 		if (bio) {
1611 			bio_chain(bio, new);
1612 			submit_bio(bio);
1613 		}
1614 		bio = new;
1615 		bio->bi_iter.bi_sector = lbo >> 9;
1616 
1617 		for (;;) {
1618 			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1619 
1620 			if (bio_add_page(bio, fill, add, 0) < add)
1621 				goto new_bio;
1622 
1623 			lbo += add;
1624 			if (len <= add)
1625 				break;
1626 			len -= add;
1627 		}
1628 	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1629 
1630 	if (!err)
1631 		err = submit_bio_wait(bio);
1632 	bio_put(bio);
1633 
1634 	blk_finish_plug(&plug);
1635 out:
1636 	unlock_page(fill);
1637 	put_page(fill);
1638 
1639 	return err;
1640 }
1641 
1642 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1643 		    u64 vbo, u64 *lbo, u64 *bytes)
1644 {
1645 	u32 off;
1646 	CLST lcn, len;
1647 	u8 cluster_bits = sbi->cluster_bits;
1648 
1649 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1650 		return -ENOENT;
1651 
1652 	off = vbo & sbi->cluster_mask;
1653 	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1654 	*bytes = ((u64)len << cluster_bits) - off;
1655 
1656 	return 0;
1657 }
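
/*
 * Illustrative sketch (not driver code): with a hypothetical run
 * mapping vcn 0 -> lcn 0x100 for 8 clusters on a 4K-cluster volume,
 * vbo == 0x2345 resolves as
 *
 *	// run_lookup_entry(.., 0x2345 >> 12, ..) -> lcn 0x102, len 6
 *	off    = 0x2345 & 0xFFF;		// 0x345
 *	*lbo   = (0x102ull << 12) + 0x345;	// 0x102345
 *	*bytes = (6ull << 12) - 0x345;		// bytes left in the run
 */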
1658 
1659 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
1660 				  enum RECORD_FLAG flag)
1661 {
1662 	int err = 0;
1663 	struct super_block *sb = sbi->sb;
1664 	struct inode *inode = new_inode(sb);
1665 	struct ntfs_inode *ni;
1666 
1667 	if (!inode)
1668 		return ERR_PTR(-ENOMEM);
1669 
1670 	ni = ntfs_i(inode);
1671 
1672 	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
1673 	if (err)
1674 		goto out;
1675 
1676 	inode->i_ino = rno;
1677 	if (insert_inode_locked(inode) < 0) {
1678 		err = -EIO;
1679 		goto out;
1680 	}
1681 
1682 out:
1683 	if (err) {
1684 		make_bad_inode(inode);
1685 		iput(inode);
1686 		ni = ERR_PTR(err);
1687 	}
1688 	return ni;
1689 }
1690 
1691 /*
1692  * O:BAG:BAD:(A;OICI;FA;;;WD)
1693  * Owner S-1-5-32-544 (Administrators)
1694  * Group S-1-5-32-544 (Administrators)
1695  * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1696  */
1697 const u8 s_default_security[] __aligned(8) = {
1698 	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1699 	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1700 	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1701 	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1702 	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1703 	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1704 	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1705 };
1706 
1707 static_assert(sizeof(s_default_security) == 0x50);
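
/*
 * Byte-level decode of the blob above (offsets derived here for
 * illustration; they are not separate driver definitions):
 *
 *	0x00 SECURITY_DESCRIPTOR_RELATIVE: Revision 1,
 *	     Control 0x8004 (SE_DACL_PRESENT | SE_SELF_RELATIVE),
 *	     Owner 0x30, Group 0x40, Sacl 0, Dacl 0x14
 *	0x14 ACL: AclRevision 2, AclSize 0x1C, AceCount 1
 *	0x1C ACE: ACCESS_ALLOWED, flags 0x03 (OICI), size 0x14,
 *	     mask 0x001F01FF (FILE_ALL_ACCESS), SID S-1-1-0 (Everyone)
 *	0x30 owner SID S-1-5-32-544; 0x40 group SID S-1-5-32-544
 */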
1708 
1709 static inline u32 sid_length(const struct SID *sid)
1710 {
1711 	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1712 }
1713 
1714 /*
1715  * is_acl_valid
1716  *
1717  * Thanks to Mark Harmstone for the idea.
1718  */
1719 static bool is_acl_valid(const struct ACL *acl, u32 len)
1720 {
1721 	const struct ACE_HEADER *ace;
1722 	u32 i;
1723 	u16 ace_count, ace_size;
1724 
1725 	if (acl->AclRevision != ACL_REVISION &&
1726 	    acl->AclRevision != ACL_REVISION_DS) {
1727 		/*
1728 		 * This value should be ACL_REVISION, unless the ACL contains an
1729 		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1730 		 * All ACEs in an ACL must be at the same revision level.
1731 		 */
1732 		return false;
1733 	}
1734 
1735 	if (acl->Sbz1)
1736 		return false;
1737 
1738 	if (le16_to_cpu(acl->AclSize) > len)
1739 		return false;
1740 
1741 	if (acl->Sbz2)
1742 		return false;
1743 
1744 	len -= sizeof(struct ACL);
1745 	ace = (struct ACE_HEADER *)&acl[1];
1746 	ace_count = le16_to_cpu(acl->AceCount);
1747 
1748 	for (i = 0; i < ace_count; i++) {
1749 		if (len < sizeof(struct ACE_HEADER))
1750 			return false;
1751 
1752 		ace_size = le16_to_cpu(ace->AceSize);
1753 		if (len < ace_size)
1754 			return false;
1755 
1756 		len -= ace_size;
1757 		ace = Add2Ptr(ace, ace_size);
1758 	}
1759 
1760 	return true;
1761 }
1762 
1763 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1764 {
1765 	u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1766 
1767 	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1768 		return false;
1769 
1770 	if (sd->Revision != 1)
1771 		return false;
1772 
1773 	if (sd->Sbz1)
1774 		return false;
1775 
1776 	if (!(sd->Control & SE_SELF_RELATIVE))
1777 		return false;
1778 
1779 	sd_owner = le32_to_cpu(sd->Owner);
1780 	if (sd_owner) {
1781 		const struct SID *owner = Add2Ptr(sd, sd_owner);
1782 
1783 		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1784 			return false;
1785 
1786 		if (owner->Revision != 1)
1787 			return false;
1788 
1789 		if (sd_owner + sid_length(owner) > len)
1790 			return false;
1791 	}
1792 
1793 	sd_group = le32_to_cpu(sd->Group);
1794 	if (sd_group) {
1795 		const struct SID *group = Add2Ptr(sd, sd_group);
1796 
1797 		if (sd_group + offsetof(struct SID, SubAuthority) > len)
1798 			return false;
1799 
1800 		if (group->Revision != 1)
1801 			return false;
1802 
1803 		if (sd_group + sid_length(group) > len)
1804 			return false;
1805 	}
1806 
1807 	sd_sacl = le32_to_cpu(sd->Sacl);
1808 	if (sd_sacl) {
1809 		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1810 
1811 		if (sd_sacl + sizeof(struct ACL) > len)
1812 			return false;
1813 
1814 		if (!is_acl_valid(sacl, len - sd_sacl))
1815 			return false;
1816 	}
1817 
1818 	sd_dacl = le32_to_cpu(sd->Dacl);
1819 	if (sd_dacl) {
1820 		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1821 
1822 		if (sd_dacl + sizeof(struct ACL) > len)
1823 			return false;
1824 
1825 		if (!is_acl_valid(dacl, len - sd_dacl))
1826 			return false;
1827 	}
1828 
1829 	return true;
1830 }
1831 
1832 /*
1833  * ntfs_security_init - Load and parse $Secure.
1834  */
1835 int ntfs_security_init(struct ntfs_sb_info *sbi)
1836 {
1837 	int err;
1838 	struct super_block *sb = sbi->sb;
1839 	struct inode *inode;
1840 	struct ntfs_inode *ni;
1841 	struct MFT_REF ref;
1842 	struct ATTRIB *attr;
1843 	struct ATTR_LIST_ENTRY *le;
1844 	u64 sds_size;
1845 	size_t off;
1846 	struct NTFS_DE *ne;
1847 	struct NTFS_DE_SII *sii_e;
1848 	struct ntfs_fnd *fnd_sii = NULL;
1849 	const struct INDEX_ROOT *root_sii;
1850 	const struct INDEX_ROOT *root_sdh;
1851 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1852 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
1853 
1854 	ref.low = cpu_to_le32(MFT_REC_SECURE);
1855 	ref.high = 0;
1856 	ref.seq = cpu_to_le16(MFT_REC_SECURE);
1857 
1858 	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1859 	if (IS_ERR(inode)) {
1860 		err = PTR_ERR(inode);
1861 		ntfs_err(sb, "Failed to load $Secure (%d).", err);
1862 		inode = NULL;
1863 		goto out;
1864 	}
1865 
1866 	ni = ntfs_i(inode);
1867 
1868 	le = NULL;
1869 
1870 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1871 			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
1872 	if (!attr ||
1873 	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1874 	    root_sdh->type != ATTR_ZERO ||
1875 	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
1876 	    offsetof(struct INDEX_ROOT, ihdr) +
1877 			    le32_to_cpu(root_sdh->ihdr.used) >
1878 		    le32_to_cpu(attr->res.data_size)) {
1879 		ntfs_err(sb, "$Secure::$SDH is corrupted.");
1880 		err = -EINVAL;
1881 		goto out;
1882 	}
1883 
1884 	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1885 	if (err) {
1886 		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
1887 		goto out;
1888 	}
1889 
1890 	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1891 			    ARRAY_SIZE(SII_NAME), NULL, NULL);
1892 	if (!attr ||
1893 	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1894 	    root_sii->type != ATTR_ZERO ||
1895 	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
1896 	    offsetof(struct INDEX_ROOT, ihdr) +
1897 			    le32_to_cpu(root_sii->ihdr.used) >
1898 		    le32_to_cpu(attr->res.data_size)) {
1899 		ntfs_err(sb, "$Secure::$SII is corrupted.");
1900 		err = -EINVAL;
1901 		goto out;
1902 	}
1903 
1904 	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1905 	if (err) {
1906 		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
1907 		goto out;
1908 	}
1909 
1910 	fnd_sii = fnd_get();
1911 	if (!fnd_sii) {
1912 		err = -ENOMEM;
1913 		goto out;
1914 	}
1915 
1916 	sds_size = inode->i_size;
1917 
1918 	/* Find the last valid Id. */
1919 	sbi->security.next_id = SECURITY_ID_FIRST;
1920 	/* Always write new security at the end of the bucket. */
1921 	sbi->security.next_off =
1922 		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1923 
1924 	off = 0;
1925 	ne = NULL;
1926 
1927 	for (;;) {
1928 		u32 next_id;
1929 
1930 		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1931 		if (err || !ne)
1932 			break;
1933 
1934 		sii_e = (struct NTFS_DE_SII *)ne;
1935 		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
1936 			continue;
1937 
1938 		next_id = le32_to_cpu(sii_e->sec_id) + 1;
1939 		if (next_id >= sbi->security.next_id)
1940 			sbi->security.next_id = next_id;
1941 	}
1942 
1943 	sbi->security.ni = ni;
1944 	inode = NULL;
1945 out:
1946 	iput(inode);
1947 	fnd_put(fnd_sii);
1948 
1949 	return err;
1950 }
1951 
1952 /*
1953  * ntfs_get_security_by_id - Read security descriptor by id.
1954  */
1955 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1956 			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1957 			    size_t *size)
1958 {
1959 	int err;
1960 	int diff;
1961 	struct ntfs_inode *ni = sbi->security.ni;
1962 	struct ntfs_index *indx = &sbi->security.index_sii;
1963 	void *p = NULL;
1964 	struct NTFS_DE_SII *sii_e;
1965 	struct ntfs_fnd *fnd_sii;
1966 	struct SECURITY_HDR d_security;
1967 	const struct INDEX_ROOT *root_sii;
1968 	u32 t32;
1969 
1970 	*sd = NULL;
1971 
1972 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1973 
1974 	fnd_sii = fnd_get();
1975 	if (!fnd_sii) {
1976 		err = -ENOMEM;
1977 		goto out;
1978 	}
1979 
1980 	root_sii = indx_get_root(indx, ni, NULL, NULL);
1981 	if (!root_sii) {
1982 		err = -EINVAL;
1983 		goto out;
1984 	}
1985 
1986 	/* Try to find this SECURITY descriptor in SII indexes. */
1987 	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1988 			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
1989 	if (err)
1990 		goto out;
1991 
1992 	if (diff)
1993 		goto out;
1994 
1995 	t32 = le32_to_cpu(sii_e->sec_hdr.size);
1996 	if (t32 < sizeof(struct SECURITY_HDR)) {
1997 		err = -EINVAL;
1998 		goto out;
1999 	}
2000 
2001 	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
2002 		/* Security descriptor looks too big; 0x10000 is an arbitrary limit. */
2003 		err = -EFBIG;
2004 		goto out;
2005 	}
2006 
2007 	*size = t32 - sizeof(struct SECURITY_HDR);
2008 
2009 	p = kmalloc(*size, GFP_NOFS);
2010 	if (!p) {
2011 		err = -ENOMEM;
2012 		goto out;
2013 	}
2014 
2015 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2016 			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2017 			       sizeof(d_security), NULL);
2018 	if (err)
2019 		goto out;
2020 
2021 	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
2022 		err = -EINVAL;
2023 		goto out;
2024 	}
2025 
2026 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2027 			       le64_to_cpu(sii_e->sec_hdr.off) +
2028 				       sizeof(struct SECURITY_HDR),
2029 			       p, *size, NULL);
2030 	if (err)
2031 		goto out;
2032 
2033 	*sd = p;
2034 	p = NULL;
2035 
2036 out:
2037 	kfree(p);
2038 	fnd_put(fnd_sii);
2039 	ni_unlock(ni);
2040 
2041 	return err;
2042 }
2043 
2044 /*
2045  * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2046  *
2047  * The Security Descriptor Stream data is organized into chunks of 256K
2048  * bytes, and it contains a mirror copy of each security descriptor. When
2049  * writing a security descriptor at location X, another copy will be
2050  * written at location (X+256K).
2051  * When a security descriptor would cross the 256K boundary,
2052  * the pointer is advanced by 256K to skip
2053  * over the mirror portion.
2054  */
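/*
 * Illustrative sketch (not driver code), with hypothetical offsets and
 * SecurityDescriptorsBlockSize == 0x40000 (256K): a descriptor written
 * at next_off == 0x10040 gets its mirror at
 *
 *	mirr_off = 0x10040 + SecurityDescriptorsBlockSize;	// 0x50040
 *
 * and if the aligned descriptor does not fit into the "left" bytes
 * remaining in the current 256K block, next_off is first advanced by
 * SecurityDescriptorsBlockSize + left, skipping past the mirror region.
 */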
2055 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2056 			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2057 			 u32 size_sd, __le32 *security_id, bool *inserted)
2058 {
2059 	int err, diff;
2060 	struct ntfs_inode *ni = sbi->security.ni;
2061 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2062 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
2063 	struct NTFS_DE_SDH *e;
2064 	struct NTFS_DE_SDH sdh_e;
2065 	struct NTFS_DE_SII sii_e;
2066 	struct SECURITY_HDR *d_security;
2067 	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
2068 	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2069 	struct SECURITY_KEY hash_key;
2070 	struct ntfs_fnd *fnd_sdh = NULL;
2071 	const struct INDEX_ROOT *root_sdh;
2072 	const struct INDEX_ROOT *root_sii;
2073 	u64 mirr_off, new_sds_size;
2074 	u32 next, left;
2075 
2076 	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2077 		      SecurityDescriptorsBlockSize);
2078 
2079 	hash_key.hash = security_hash(sd, size_sd);
2080 	hash_key.sec_id = SECURITY_ID_INVALID;
2081 
2082 	if (inserted)
2083 		*inserted = false;
2084 	*security_id = SECURITY_ID_INVALID;
2085 
2086 	/* Allocate a temporary buffer. */
2087 	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2088 	if (!d_security)
2089 		return -ENOMEM;
2090 
2091 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2092 
2093 	fnd_sdh = fnd_get();
2094 	if (!fnd_sdh) {
2095 		err = -ENOMEM;
2096 		goto out;
2097 	}
2098 
2099 	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2100 	if (!root_sdh) {
2101 		err = -EINVAL;
2102 		goto out;
2103 	}
2104 
2105 	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2106 	if (!root_sii) {
2107 		err = -EINVAL;
2108 		goto out;
2109 	}
2110 
2111 	/*
2112 	 * Check if such a security descriptor already exists.
2113 	 * Use the "$SDH" index and the hash to get the offset in "$SDS".
2114 	 */
2115 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2116 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2117 			fnd_sdh);
2118 	if (err)
2119 		goto out;
2120 
2121 	while (e) {
2122 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2123 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2124 					       le64_to_cpu(e->sec_hdr.off),
2125 					       d_security, new_sec_size, NULL);
2126 			if (err)
2127 				goto out;
2128 
2129 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2130 			    d_security->key.hash == hash_key.hash &&
2131 			    !memcmp(d_security + 1, sd, size_sd)) {
2132 				*security_id = d_security->key.sec_id;
2133 				/* Such security already exists. */
2134 				err = 0;
2135 				goto out;
2136 			}
2137 		}
2138 
2139 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2140 				     (struct NTFS_DE **)&e, fnd_sdh);
2141 		if (err)
2142 			goto out;
2143 
2144 		if (!e || e->key.hash != hash_key.hash)
2145 			break;
2146 	}
2147 
2148 	/* Zero unused space. */
2149 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2150 	left = SecurityDescriptorsBlockSize - next;
2151 
2152 	/* Zero gap until SecurityDescriptorsBlockSize. */
2153 	if (left < new_sec_size) {
2154 		/* Zero "left" bytes from sbi->security.next_off. */
2155 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2156 	}
2157 
2158 	/* Zero tail of previous security. */
2159 	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2160 
2161 	/*
2162 	 * Example:
2163 	 * 0x40438 == ni->vfs_inode.i_size
2164 	 * 0x00440 == sbi->security.next_off
2165 	 * need to zero [0x438-0x440)
2166 	 * if (next > used)
2167 	 *	zero (next - used) bytes starting at
2168 	 *	sbi->security.next_off - (next - used)
2169 	 */
2170 
2171 	/* Format new security descriptor. */
2172 	d_security->key.hash = hash_key.hash;
2173 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2174 	d_security->off = cpu_to_le64(sbi->security.next_off);
2175 	d_security->size = cpu_to_le32(new_sec_size);
2176 	memcpy(d_security + 1, sd, size_sd);
2177 
2178 	/* Write main SDS bucket. */
2179 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2180 				d_security, aligned_sec_size, 0);
2181 
2182 	if (err)
2183 		goto out;
2184 
2185 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2186 	new_sds_size = mirr_off + aligned_sec_size;
2187 
2188 	if (new_sds_size > ni->vfs_inode.i_size) {
2189 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2190 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2191 				    new_sds_size, &new_sds_size, false, NULL);
2192 		if (err)
2193 			goto out;
2194 	}
2195 
2196 	/* Write copy SDS bucket. */
2197 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2198 				aligned_sec_size, 0);
2199 	if (err)
2200 		goto out;
2201 
2202 	/* Fill SII entry. */
2203 	sii_e.de.view.data_off =
2204 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2205 	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2206 	sii_e.de.view.res = 0;
2207 	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
2208 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2209 	sii_e.de.flags = 0;
2210 	sii_e.de.res = 0;
2211 	sii_e.sec_id = d_security->key.sec_id;
2212 	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2213 
2214 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2215 	if (err)
2216 		goto out;
2217 
2218 	/* Fill SDH entry. */
2219 	sdh_e.de.view.data_off =
2220 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2221 	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2222 	sdh_e.de.view.res = 0;
2223 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2224 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2225 	sdh_e.de.flags = 0;
2226 	sdh_e.de.res = 0;
2227 	sdh_e.key.hash = d_security->key.hash;
2228 	sdh_e.key.sec_id = d_security->key.sec_id;
2229 	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2230 	sdh_e.magic[0] = cpu_to_le16('I');
2231 	sdh_e.magic[1] = cpu_to_le16('I');
2232 
2233 	fnd_clear(fnd_sdh);
2234 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2235 				fnd_sdh, 0);
2236 	if (err)
2237 		goto out;
2238 
2239 	*security_id = d_security->key.sec_id;
2240 	if (inserted)
2241 		*inserted = true;
2242 
2243 	/* Update Id and offset for next descriptor. */
2244 	sbi->security.next_id += 1;
2245 	sbi->security.next_off += aligned_sec_size;
2246 
2247 out:
2248 	fnd_put(fnd_sdh);
2249 	mark_inode_dirty(&ni->vfs_inode);
2250 	ni_unlock(ni);
2251 	kfree(d_security);
2252 
2253 	return err;
2254 }
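
/*
 * Note on the machinery used by ntfs_insert_security(): $Secure keeps a
 * single data stream ($SDS) holding the descriptors themselves, plus two
 * indexes over it: $SII, keyed by security id, and $SDH, keyed by the
 * (hash, security id) pair.  Every SDS block is written twice (the "main"
 * and "copy" buckets above), which is why mirr_off sits exactly one
 * SecurityDescriptorsBlockSize past next_off.
 */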
2255 
2256 /*
2257  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2258  */
2259 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2260 {
2261 	int err;
2262 	struct ntfs_inode *ni = sbi->reparse.ni;
2263 	struct ntfs_index *indx = &sbi->reparse.index_r;
2264 	struct ATTRIB *attr;
2265 	struct ATTR_LIST_ENTRY *le;
2266 	const struct INDEX_ROOT *root_r;
2267 
2268 	if (!ni)
2269 		return 0;
2270 
2271 	le = NULL;
2272 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2273 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2274 	if (!attr) {
2275 		err = -EINVAL;
2276 		goto out;
2277 	}
2278 
2279 	root_r = resident_data(attr);
2280 	if (root_r->type != ATTR_ZERO ||
2281 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2282 		err = -EINVAL;
2283 		goto out;
2284 	}
2285 
2286 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2287 	if (err)
2288 		goto out;
2289 
2290 out:
2291 	return err;
2292 }
2293 
2294 /*
2295  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2296  */
2297 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2298 {
2299 	int err;
2300 	struct ntfs_inode *ni = sbi->objid.ni;
2301 	struct ntfs_index *indx = &sbi->objid.index_o;
2302 	struct ATTRIB *attr;
2303 	struct ATTR_LIST_ENTRY *le;
2304 	const struct INDEX_ROOT *root;
2305 
2306 	if (!ni)
2307 		return 0;
2308 
2309 	le = NULL;
2310 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2311 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2312 	if (!attr) {
2313 		err = -EINVAL;
2314 		goto out;
2315 	}
2316 
2317 	root = resident_data(attr);
2318 	if (root->type != ATTR_ZERO ||
2319 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2320 		err = -EINVAL;
2321 		goto out;
2322 	}
2323 
2324 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2325 	if (err)
2326 		goto out;
2327 
2328 out:
2329 	return err;
2330 }
2331 
2332 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2333 {
2334 	int err;
2335 	struct ntfs_inode *ni = sbi->objid.ni;
2336 	struct ntfs_index *indx = &sbi->objid.index_o;
2337 
2338 	if (!ni)
2339 		return -EINVAL;
2340 
2341 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2342 
2343 	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2344 
2345 	mark_inode_dirty(&ni->vfs_inode);
2346 	ni_unlock(ni);
2347 
2348 	return err;
2349 }
2350 
2351 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2352 			const struct MFT_REF *ref)
2353 {
2354 	int err;
2355 	struct ntfs_inode *ni = sbi->reparse.ni;
2356 	struct ntfs_index *indx = &sbi->reparse.index_r;
2357 	struct NTFS_DE_R re;
2358 
2359 	if (!ni)
2360 		return -EINVAL;
2361 
2362 	memset(&re, 0, sizeof(re));
2363 
2364 	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2365 	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2366 	re.de.key_size = cpu_to_le16(sizeof(re.key));
2367 
2368 	re.key.ReparseTag = rtag;
2369 	memcpy(&re.key.ref, ref, sizeof(*ref));
2370 
2371 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2372 
2373 	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2374 
2375 	mark_inode_dirty(&ni->vfs_inode);
2376 	ni_unlock(ni);
2377 
2378 	return err;
2379 }
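
/*
 * The $R index key built above is the (ReparseTag, MFT_REF) pair; it is
 * collated as a run of unsigned 32-bit values (see the
 * NTFS_COLLATION_TYPE_UINTS check in ntfs_reparse_init()), so entries
 * sort first by tag and then by file reference.
 */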
2380 
2381 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2382 			const struct MFT_REF *ref)
2383 {
2384 	int err, diff;
2385 	struct ntfs_inode *ni = sbi->reparse.ni;
2386 	struct ntfs_index *indx = &sbi->reparse.index_r;
2387 	struct ntfs_fnd *fnd = NULL;
2388 	struct REPARSE_KEY rkey;
2389 	struct NTFS_DE_R *re;
2390 	struct INDEX_ROOT *root_r;
2391 
2392 	if (!ni)
2393 		return -EINVAL;
2394 
2395 	rkey.ReparseTag = rtag;
2396 	rkey.ref = *ref;
2397 
2398 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2399 
2400 	if (rtag) {
2401 		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2402 		goto out1;
2403 	}
2404 
2405 	fnd = fnd_get();
2406 	if (!fnd) {
2407 		err = -ENOMEM;
2408 		goto out1;
2409 	}
2410 
2411 	root_r = indx_get_root(indx, ni, NULL, NULL);
2412 	if (!root_r) {
2413 		err = -EINVAL;
2414 		goto out;
2415 	}
2416 
2417 	/* The context value 1 makes the compare ignore rkey.ReparseTag. */
2418 	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2419 			(struct NTFS_DE **)&re, fnd);
2420 	if (err)
2421 		goto out;
2422 
2423 	if (!re || memcmp(&re->key.ref, ref, sizeof(*ref))) {
2424 		/* Should be impossible; the volume may be corrupt. */
2425 		goto out;
2426 	}
2427 
2428 	memcpy(&rkey, &re->key, sizeof(rkey));
2429 
2430 	fnd_put(fnd);
2431 	fnd = NULL;
2432 
2433 	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2434 	if (err)
2435 		goto out;
2436 
2437 out:
2438 	fnd_put(fnd);
2439 
2440 out1:
2441 	mark_inode_dirty(&ni->vfs_inode);
2442 	ni_unlock(ni);
2443 
2444 	return err;
2445 }
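
/*
 * ntfs_remove_reparse() above has two paths: with a non-zero tag the
 * (tag, ref) key is complete and the entry is deleted directly; with
 * rtag == 0 the tag half of the key is unknown, so the entry is first
 * located by MFT reference alone and its full key is copied out before
 * the actual delete.
 */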
2446 
2447 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2448 					  CLST len)
2449 {
2450 	ntfs_unmap_meta(sbi->sb, lcn, len);
2451 	ntfs_discard(sbi, lcn, len);
2452 }
2453 
2454 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2455 {
2456 	CLST end, i, zone_len, zlen;
2457 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2458 	bool dirty = false;
2459 
2460 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2461 	if (!wnd_is_used(wnd, lcn, len)) {
2462 		/* Mark volume as dirty outside of wnd->rw_lock. */
2463 		dirty = true;
2464 
2465 		end = lcn + len;
2466 		len = 0;
2467 		for (i = lcn; i < end; i++) {
2468 			if (wnd_is_used(wnd, i, 1)) {
2469 				if (!len)
2470 					lcn = i;
2471 				len += 1;
2472 				continue;
2473 			}
2474 
2475 			if (!len)
2476 				continue;
2477 
2478 			if (trim)
2479 				ntfs_unmap_and_discard(sbi, lcn, len);
2480 
2481 			wnd_set_free(wnd, lcn, len);
2482 			len = 0;
2483 		}
2484 
2485 		if (!len)
2486 			goto out;
2487 	}
2488 
2489 	if (trim)
2490 		ntfs_unmap_and_discard(sbi, lcn, len);
2491 	wnd_set_free(wnd, lcn, len);
2492 
2493 	/* Append to the MFT zone, if possible. */
2494 	zone_len = wnd_zone_len(wnd);
2495 	zlen = min(zone_len + len, sbi->zone_max);
2496 
2497 	if (zlen == zone_len) {
2498 		/* MFT zone already has maximum size. */
2499 	} else if (!zone_len) {
2500 		/* Create MFT zone only if 'zlen' is large enough. */
2501 		if (zlen == sbi->zone_max)
2502 			wnd_zone_set(wnd, lcn, zlen);
2503 	} else {
2504 		CLST zone_lcn = wnd_zone_bit(wnd);
2505 
2506 		if (lcn + len == zone_lcn) {
2507 			/* Append into head MFT zone. */
2508 			wnd_zone_set(wnd, lcn, zlen);
2509 		} else if (zone_lcn + zone_len == lcn) {
2510 			/* Append into tail MFT zone. */
2511 			wnd_zone_set(wnd, zone_lcn, zlen);
2512 		}
2513 	}
2514 
2515 out:
2516 	up_write(&wnd->rw_lock);
2517 	if (dirty)
2518 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2519 }
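
/*
 * For illustration only: the MFT-zone extension decision from
 * mark_as_free_ex(), reduced to pure arithmetic.  Given the freed run
 * [lcn, lcn + len) and the current zone [zone_lcn, zone_lcn + zone_len),
 * it returns the new zone start, or -1 when the zone must stay as it is.
 * The helper is a hypothetical sketch and is not used by the driver.
 */
static inline s64 mft_zone_extend_sketch(CLST lcn, CLST len, CLST zone_lcn,
					 CLST zone_len, CLST zone_max,
					 CLST *new_len)
{
	CLST zlen = min(zone_len + len, zone_max);

	*new_len = zlen;
	if (zlen == zone_len)
		return -1;	/* Zone already has maximum size. */
	if (!zone_len)
		return zlen == zone_max ? (s64)lcn : -1;
	if (lcn + len == zone_lcn)
		return lcn;	/* Freed run touches the zone head. */
	if (zone_lcn + zone_len == lcn)
		return zone_lcn; /* Freed run touches the zone tail. */
	return -1;
}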
2520 
2521 /*
2522  * run_deallocate - Deallocate clusters.
2523  */
2524 int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2525 		   bool trim)
2526 {
2527 	CLST lcn, len;
2528 	size_t idx = 0;
2529 
2530 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
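		/* A sparse fragment owns no clusters; nothing to free. */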
2531 		if (lcn == SPARSE_LCN)
2532 			continue;
2533 
2534 		mark_as_free_ex(sbi, lcn, len, trim);
2535 	}
2536 
2537 	return 0;
2538 }
2539 
2540 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2541 {
2542 	int i, ch;
2543 
2544 	/* Check for forbidden characters. */
2545 	for (i = 0; i < fname->len; ++i) {
2546 		ch = le16_to_cpu(fname->name[i]);
2547 
2548 		/* Control characters. */
2549 		if (ch < 0x20)
2550 			return true;
2551 
2552 		switch (ch) {
2553 		/* Disallowed by Windows. */
2554 		case '\\':
2555 		case '/':
2556 		case ':':
2557 		case '*':
2558 		case '?':
2559 		case '<':
2560 		case '>':
2561 		case '|':
2562 		case '\"':
2563 			return true;
2564 
2565 		default:
2566 			/* Allowed character. */
2567 			break;
2568 		}
2569 	}
2570 
2571 	/* File names cannot end with a space or a dot. */
2572 	if (fname->len > 0) {
2573 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2574 		if (ch == ' ' || ch == '.')
2575 			return true;
2576 	}
2577 
2578 	return false;
2579 }
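
/*
 * For illustration only: the same forbidden-character rules applied to a
 * plain ASCII string.  This hypothetical helper mirrors
 * name_has_forbidden_chars() above and is not used by the driver.
 */
static inline bool ascii_name_forbidden_sketch(const char *s)
{
	size_t i, n = strlen(s);

	for (i = 0; i < n; i++) {
		unsigned char c = s[i];

		/* Control characters and the nine reserved marks. */
		if (c < 0x20 || strchr("\\/:*?<>|\"", c))
			return true;
	}

	/* Names may not end with a space or a dot. */
	return n && (s[n - 1] == ' ' || s[n - 1] == '.');
}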
2580 
2581 static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2582 				    const struct le_str *fname)
2583 {
2584 	int port_digit;
2585 	const __le16 *name = fname->name;
2586 	int len = fname->len;
2587 	const u16 *upcase = sbi->upcase;
2588 
2589 	/* Check for 3-character reserved names (device names). */
2590 	/* The bare name or the name with any extension is forbidden. */
2591 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2592 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2593 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2594 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2595 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2596 			return true;
2597 
2598 	/* Check for 4-character reserved names (port name plus digit 1-9). */
2599 	/* The bare name or the name with any extension is forbidden. */
2600 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2601 		port_digit = le16_to_cpu(name[3]);
2602 		if (port_digit >= '1' && port_digit <= '9')
2603 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2604 					    false) ||
2605 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2606 					    false))
2607 				return true;
2608 	}
2609 
2610 	return false;
2611 }
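
/*
 * For illustration only: the reserved-name rules applied to a plain
 * ASCII string.  This hypothetical helper is not used by the driver,
 * which compares UTF-16 names through the volume upcase table instead.
 */
static inline bool ascii_name_reserved_sketch(const char *s)
{
	static const char *const dev3[] = { "CON", "NUL", "AUX", "PRN" };
	int i;

	/* Three-letter device names, bare or with any extension. */
	for (i = 0; i < 4; i++)
		if (!strncasecmp(s, dev3[i], 3) && (!s[3] || s[3] == '.'))
			return true;

	/* COM1..COM9 and LPT1..LPT9, bare or with any extension. */
	if ((!strncasecmp(s, "COM", 3) || !strncasecmp(s, "LPT", 3)) &&
	    s[3] >= '1' && s[3] <= '9' && (!s[4] || s[4] == '.'))
		return true;

	return false;
}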
2612 
2613 /*
2614  * valid_windows_name - Check if a file name is valid in Windows.
2615  */
2616 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2617 {
2618 	return !name_has_forbidden_chars(fname) &&
2619 	       !is_reserved_name(sbi, fname);
2620 }
2621 
2622 /*
2623  * ntfs_set_label - Update the current NTFS volume label.
2624  */
2625 int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2626 {
2627 	int err;
2628 	struct ATTRIB *attr;
2629 	struct ntfs_inode *ni = sbi->volume.ni;
2630 	const u8 max_ulen = 0x80; /* TODO: use attrdef to get maximum length */
2631 	/* Allocate PATH_MAX bytes. */
2632 	struct cpu_str *uni = __getname();
2633 
2634 	if (!uni)
2635 		return -ENOMEM;
2636 
2637 	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2638 				UTF16_LITTLE_ENDIAN);
2639 	if (err < 0)
2640 		goto out;
2641 
2642 	if (uni->len > max_ulen) {
2643 		ntfs_warn(sbi->sb, "new label is too long");
2644 		err = -EFBIG;
2645 		goto out;
2646 	}
2647 
2648 	ni_lock(ni);
2649 
2650 	/* Ignore any errors. */
2651 	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2652 
2653 	err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
2654 				 0, &attr, NULL, NULL);
2655 	if (err < 0)
2656 		goto unlock_out;
2657 
2658 	/* Write the new label into the on-disk attribute. */
2659 	memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
2660 
2661 	/* Update the cached copy of the current label. */
2662 	if (len >= ARRAY_SIZE(sbi->volume.label))
2663 		len = ARRAY_SIZE(sbi->volume.label) - 1;
2664 	memcpy(sbi->volume.label, label, len);
2665 	sbi->volume.label[len] = 0;
2666 	mark_inode_dirty_sync(&ni->vfs_inode);
2667 
2668 unlock_out:
2669 	ni_unlock(ni);
2670 
2671 	if (!err)
2672 		err = _ni_write_inode(&ni->vfs_inode, 0);
2673 
2674 out:
2675 	__putname(uni);
2676 	return err;
2677 }
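
/*
 * Note: the clamp above only limits the cached copy in sbi->volume.label;
 * the on-disk ATTR_LABEL attribute keeps the full UTF-16 name, up to
 * max_ulen code units.
 */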