xref: /openbmc/linux/fs/ntfs3/inode.c (revision e6e8c6c2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  */
7 
8 #include <linux/buffer_head.h>
9 #include <linux/fs.h>
10 #include <linux/mpage.h>
11 #include <linux/namei.h>
12 #include <linux/nls.h>
13 #include <linux/uio.h>
14 #include <linux/writeback.h>
15 
16 #include "debug.h"
17 #include "ntfs.h"
18 #include "ntfs_fs.h"
19 
20 /*
21  * ntfs_read_mft - Read an MFT record and parse it into an inode.
22  */
23 static struct inode *ntfs_read_mft(struct inode *inode,
24 				   const struct cpu_str *name,
25 				   const struct MFT_REF *ref)
26 {
27 	int err = 0;
28 	struct ntfs_inode *ni = ntfs_i(inode);
29 	struct super_block *sb = inode->i_sb;
30 	struct ntfs_sb_info *sbi = sb->s_fs_info;
31 	mode_t mode = 0;
32 	struct ATTR_STD_INFO5 *std5 = NULL;
33 	struct ATTR_LIST_ENTRY *le;
34 	struct ATTRIB *attr;
35 	bool is_match = false;
36 	bool is_root = false;
37 	bool is_dir;
38 	unsigned long ino = inode->i_ino;
39 	u32 rp_fa = 0, asize, t32;
40 	u16 roff, rsize, names = 0;
41 	const struct ATTR_FILE_NAME *fname = NULL;
42 	const struct INDEX_ROOT *root;
43 	struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
44 	u64 t64;
45 	struct MFT_REC *rec;
46 	struct runs_tree *run;
47 
48 	inode->i_op = NULL;
49 	/* Set up 'uid' and 'gid'. */
50 	inode->i_uid = sbi->options->fs_uid;
51 	inode->i_gid = sbi->options->fs_gid;
52 
53 	err = mi_init(&ni->mi, sbi, ino);
54 	if (err)
55 		goto out;
56 
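	/*
	 * Bootstrap: while the $MFT inode itself is being loaded, map its
	 * first clusters by hand so that record 0 can be read before the
	 * $MFT data attribute has been parsed.
	 */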
57 	if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
58 		t64 = sbi->mft.lbo >> sbi->cluster_bits;
59 		t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
60 		sbi->mft.ni = ni;
61 		init_rwsem(&ni->file.run_lock);
62 
63 		if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
64 			err = -ENOMEM;
65 			goto out;
66 		}
67 	}
68 
69 	err = mi_read(&ni->mi, ino == MFT_REC_MFT);
70 
71 	if (err)
72 		goto out;
73 
74 	rec = ni->mi.mrec;
75 
76 	if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
77 		;
78 	} else if (ref->seq != rec->seq) {
79 		err = -EINVAL;
80 		ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
81 			 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
82 		goto out;
83 	} else if (!is_rec_inuse(rec)) {
84 		err = -EINVAL;
85 		ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
86 		goto out;
87 	}
88 
89 	if (le32_to_cpu(rec->total) != sbi->record_size) {
90 		/* Bad inode? */
91 		err = -EINVAL;
92 		goto out;
93 	}
94 
95 	if (!is_rec_base(rec))
96 		goto Ok;
97 
98 	/* If the directory flag is set, the record should contain an $I30 index root. */
99 	is_dir = rec->flags & RECORD_FLAG_DIR;
100 
101 	inode->i_generation = le16_to_cpu(rec->seq);
102 
103 	/* Enumerate all attributes of the MFT record. */
104 	le = NULL;
105 	attr = NULL;
106 
107 	/*
108 	 * To reduce indentation depth, use goto instead of
109 	 * while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))).
110 	 */
111 next_attr:
112 	run = NULL;
113 	err = -EINVAL;
114 	attr = ni_enum_attr_ex(ni, attr, &le, NULL);
115 	if (!attr)
116 		goto end_enum;
117 
118 	if (le && le->vcn) {
119 		/* This is a non-primary attribute segment. Ignore it unless this is the MFT data attribute. */
120 		if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
121 			goto next_attr;
122 
123 		run = &ni->file.run;
124 		asize = le32_to_cpu(attr->size);
125 		goto attr_unpack_run;
126 	}
127 
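	/* For resident attributes, note the offset and size of the value inside the record. */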
128 	roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
129 	rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
130 	asize = le32_to_cpu(attr->size);
131 
132 	switch (attr->type) {
133 	case ATTR_STD:
134 		if (attr->non_res ||
135 		    asize < sizeof(struct ATTR_STD_INFO) + roff ||
136 		    rsize < sizeof(struct ATTR_STD_INFO))
137 			goto out;
138 
139 		if (std5)
140 			goto next_attr;
141 
142 		std5 = Add2Ptr(attr, roff);
143 
144 #ifdef STATX_BTIME
145 		nt2kernel(std5->cr_time, &ni->i_crtime);
146 #endif
147 		nt2kernel(std5->a_time, &inode->i_atime);
148 		nt2kernel(std5->c_time, &inode->i_ctime);
149 		nt2kernel(std5->m_time, &inode->i_mtime);
150 
151 		ni->std_fa = std5->fa;
152 
153 		if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
154 		    rsize >= sizeof(struct ATTR_STD_INFO5))
155 			ni->std_security_id = std5->security_id;
156 		goto next_attr;
157 
158 	case ATTR_LIST:
159 		if (attr->name_len || le || ino == MFT_REC_LOG)
160 			goto out;
161 
162 		err = ntfs_load_attr_list(ni, attr);
163 		if (err)
164 			goto out;
165 
166 		le = NULL;
167 		attr = NULL;
168 		goto next_attr;
169 
170 	case ATTR_NAME:
171 		if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
172 		    rsize < SIZEOF_ATTRIBUTE_FILENAME)
173 			goto out;
174 
175 		fname = Add2Ptr(attr, roff);
176 		if (fname->type == FILE_NAME_DOS)
177 			goto next_attr;
178 
179 		names += 1;
180 		if (name && name->len == fname->name_len &&
181 		    !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
182 					NULL, false))
183 			is_match = true;
184 
185 		goto next_attr;
186 
187 	case ATTR_DATA:
188 		if (is_dir) {
189 			/* Ignore data attribute in dir record. */
190 			goto next_attr;
191 		}
192 
193 		if (ino == MFT_REC_BADCLUST && !attr->non_res)
194 			goto next_attr;
195 
196 		if (attr->name_len &&
197 		    ((ino != MFT_REC_BADCLUST || !attr->non_res ||
198 		      attr->name_len != ARRAY_SIZE(BAD_NAME) ||
199 		      memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
200 		     (ino != MFT_REC_SECURE || !attr->non_res ||
201 		      attr->name_len != ARRAY_SIZE(SDS_NAME) ||
202 		      memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
203 			/* The file contains a named stream attribute. Ignore it. */
204 			goto next_attr;
205 		}
206 
207 		if (is_attr_sparsed(attr))
208 			ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
209 		else
210 			ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
211 
212 		if (is_attr_compressed(attr))
213 			ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
214 		else
215 			ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
216 
217 		if (is_attr_encrypted(attr))
218 			ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
219 		else
220 			ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
221 
222 		if (!attr->non_res) {
223 			ni->i_valid = inode->i_size = rsize;
224 			inode_set_bytes(inode, rsize);
225 		}
226 
227 		mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);
228 
229 		if (!attr->non_res) {
230 			ni->ni_flags |= NI_FLAG_RESIDENT;
231 			goto next_attr;
232 		}
233 
234 		inode_set_bytes(inode, attr_ondisk_size(attr));
235 
236 		ni->i_valid = le64_to_cpu(attr->nres.valid_size);
237 		inode->i_size = le64_to_cpu(attr->nres.data_size);
238 		if (!attr->nres.alloc_size)
239 			goto next_attr;
240 
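		/* The run of the volume bitmap ($Bitmap) is kept in the sbi, not in this inode. */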
241 		run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
242 					    : &ni->file.run;
243 		break;
244 
245 	case ATTR_ROOT:
246 		if (attr->non_res)
247 			goto out;
248 
249 		root = Add2Ptr(attr, roff);
250 		is_root = true;
251 
252 		if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
253 		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
254 			goto next_attr;
255 
256 		if (root->type != ATTR_NAME ||
257 		    root->rule != NTFS_COLLATION_TYPE_FILENAME)
258 			goto out;
259 
260 		if (!is_dir)
261 			goto next_attr;
262 
263 		ni->ni_flags |= NI_FLAG_DIR;
264 
265 		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
266 		if (err)
267 			goto out;
268 
269 		mode = sb->s_root
270 			       ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv))
271 			       : (S_IFDIR | 0777);
272 		goto next_attr;
273 
274 	case ATTR_ALLOC:
275 		if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
276 		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
277 			goto next_attr;
278 
279 		inode->i_size = le64_to_cpu(attr->nres.data_size);
280 		ni->i_valid = le64_to_cpu(attr->nres.valid_size);
281 		inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
282 
283 		run = &ni->dir.alloc_run;
284 		break;
285 
286 	case ATTR_BITMAP:
287 		if (ino == MFT_REC_MFT) {
288 			if (!attr->non_res)
289 				goto out;
290 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
291 			/* 0x20000000 = 2^32 / 8 */
292 			if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
293 				goto out;
294 #endif
295 			run = &sbi->mft.bitmap.run;
296 			break;
297 		} else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
298 			   !memcmp(attr_name(attr), I30_NAME,
299 				   sizeof(I30_NAME)) &&
300 			   attr->non_res) {
301 			run = &ni->dir.bitmap_run;
302 			break;
303 		}
304 		goto next_attr;
305 
306 	case ATTR_REPARSE:
307 		if (attr->name_len)
308 			goto next_attr;
309 
310 		rp_fa = ni_parse_reparse(ni, attr, &rp);
311 		switch (rp_fa) {
312 		case REPARSE_LINK:
313 			/*
314 			 * Normal symlink.
315 			 * Assume one Unicode symbol maps to one UTF-8 byte.
316 			 */
317 			inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
318 							    .PrintNameLength) /
319 					sizeof(u16);
320 
321 			ni->i_valid = inode->i_size;
322 
323 			/* Clear directory bit. */
324 			if (ni->ni_flags & NI_FLAG_DIR) {
325 				indx_clear(&ni->dir);
326 				memset(&ni->dir, 0, sizeof(ni->dir));
327 				ni->ni_flags &= ~NI_FLAG_DIR;
328 			} else {
329 				run_close(&ni->file.run);
330 			}
331 			mode = S_IFLNK | 0777;
332 			is_dir = false;
333 			if (attr->non_res) {
334 				run = &ni->file.run;
335 				goto attr_unpack_run; // Double break.
336 			}
337 			break;
338 
339 		case REPARSE_COMPRESSED:
340 			break;
341 
342 		case REPARSE_DEDUPLICATED:
343 			break;
344 		}
345 		goto next_attr;
346 
347 	case ATTR_EA_INFO:
348 		if (!attr->name_len &&
349 		    resident_data_ex(attr, sizeof(struct EA_INFO))) {
350 			ni->ni_flags |= NI_FLAG_EA;
351 			/*
352 			 * ntfs_get_wsl_perm() updates inode->i_uid, inode->i_gid and inode->i_mode.
353 			 */
354 			inode->i_mode = mode;
355 			ntfs_get_wsl_perm(inode);
356 			mode = inode->i_mode;
357 		}
358 		goto next_attr;
359 
360 	default:
361 		goto next_attr;
362 	}
363 
364 attr_unpack_run:
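	/* Decode the packed run list of a non-resident attribute into 'run'. */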
365 	roff = le16_to_cpu(attr->nres.run_off);
366 
367 	t64 = le64_to_cpu(attr->nres.svcn);
368 	err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
369 			    t64, Add2Ptr(attr, roff), asize - roff);
370 	if (err < 0)
371 		goto out;
372 	err = 0;
373 	goto next_attr;
374 
375 end_enum:
376 
377 	if (!std5)
378 		goto out;
379 
380 	if (!is_match && name) {
381 		/* Reuse rec as a buffer for the ASCII name. */
382 		err = -ENOENT;
383 		goto out;
384 	}
385 
386 	if (std5->fa & FILE_ATTRIBUTE_READONLY)
387 		mode &= ~0222;
388 
389 	if (!names) {
390 		err = -EINVAL;
391 		goto out;
392 	}
393 
394 	if (names != le16_to_cpu(rec->hard_links)) {
395 		/* Correct the hard-link count on the fly. Mark only the MFT record dirty, not the inode. */
396 		rec->hard_links = cpu_to_le16(names);
397 		ni->mi.dirty = true;
398 	}
399 
400 	set_nlink(inode, names);
401 
402 	if (S_ISDIR(mode)) {
403 		ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
404 
405 		/*
406 		 * Dot and dot-dot should be included in the count but were not
407 		 * included in the enumeration.
408 		 * Hard links to directories are usually disabled.
409 		 */
410 		inode->i_op = &ntfs_dir_inode_operations;
411 		inode->i_fop = &ntfs_dir_operations;
412 		ni->i_valid = 0;
413 	} else if (S_ISLNK(mode)) {
414 		ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
415 		inode->i_op = &ntfs_link_inode_operations;
416 		inode->i_fop = NULL;
417 		inode_nohighmem(inode);
418 	} else if (S_ISREG(mode)) {
419 		ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
420 		inode->i_op = &ntfs_file_inode_operations;
421 		inode->i_fop = &ntfs_file_operations;
422 		inode->i_mapping->a_ops =
423 			is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
424 		if (ino != MFT_REC_MFT)
425 			init_rwsem(&ni->file.run_lock);
426 	} else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
427 		   S_ISSOCK(mode)) {
428 		inode->i_op = &ntfs_special_inode_operations;
429 		init_special_inode(inode, mode, inode->i_rdev);
430 	} else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
431 		   fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
432 		/* Records in $Extend are neither regular files nor general directories. */
433 		inode->i_op = &ntfs_file_inode_operations;
434 	} else {
435 		err = -EINVAL;
436 		goto out;
437 	}
438 
439 	if ((sbi->options->sys_immutable &&
440 	     (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
441 	    !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
442 		inode->i_flags |= S_IMMUTABLE;
443 	} else {
444 		inode->i_flags &= ~S_IMMUTABLE;
445 	}
446 
447 	inode->i_mode = mode;
448 	if (!(ni->ni_flags & NI_FLAG_EA)) {
449 		/* If there are no xattrs then there is no security (it is stored in xattrs). */
450 		inode->i_flags |= S_NOSEC;
451 	}
452 
453 Ok:
454 	if (ino == MFT_REC_MFT && !sb->s_root)
455 		sbi->mft.ni = NULL;
456 
457 	unlock_new_inode(inode);
458 
459 	return inode;
460 
461 out:
462 	if (ino == MFT_REC_MFT && !sb->s_root)
463 		sbi->mft.ni = NULL;
464 
465 	iget_failed(inode);
466 	return ERR_PTR(err);
467 }
468 
469 /*
470  * ntfs_test_inode
471  *
472  * Return: 1 if match.
473  */
474 static int ntfs_test_inode(struct inode *inode, void *data)
475 {
476 	struct MFT_REF *ref = data;
477 
478 	return ino_get(ref) == inode->i_ino;
479 }
480 
481 static int ntfs_set_inode(struct inode *inode, void *data)
482 {
483 	const struct MFT_REF *ref = data;
484 
485 	inode->i_ino = ino_get(ref);
486 	return 0;
487 }
488 
489 struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
490 			 const struct cpu_str *name)
491 {
492 	struct inode *inode;
493 
494 	inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
495 			     (void *)ref);
496 	if (unlikely(!inode))
497 		return ERR_PTR(-ENOMEM);
498 
499 	/* If this is a freshly allocated inode, we need to read it now. */
500 	if (inode->i_state & I_NEW)
501 		inode = ntfs_read_mft(inode, name, ref);
502 	else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
503 		/* Inode overlaps? */
504 		_ntfs_bad_inode(inode);
505 	}
506 
507 	return inode;
508 }
509 
510 enum get_block_ctx {
511 	GET_BLOCK_GENERAL = 0,
512 	GET_BLOCK_WRITE_BEGIN = 1,
513 	GET_BLOCK_DIRECT_IO_R = 2,
514 	GET_BLOCK_DIRECT_IO_W = 3,
515 	GET_BLOCK_BMAP = 4,
516 };
517 
518 static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
519 				       struct buffer_head *bh, int create,
520 				       enum get_block_ctx ctx)
521 {
522 	struct super_block *sb = inode->i_sb;
523 	struct ntfs_sb_info *sbi = sb->s_fs_info;
524 	struct ntfs_inode *ni = ntfs_i(inode);
525 	struct page *page = bh->b_page;
526 	u8 cluster_bits = sbi->cluster_bits;
527 	u32 block_size = sb->s_blocksize;
528 	u64 bytes, lbo, valid;
529 	u32 off;
530 	int err;
531 	CLST vcn, lcn, len;
532 	bool new;
533 
534 	/* Clear previous state. */
535 	clear_buffer_new(bh);
536 	clear_buffer_uptodate(bh);
537 
538 	/* Direct write uses 'create=0'. */
539 	if (!create && vbo >= ni->i_valid) {
540 		/* Beyond the valid data size. */
541 		return 0;
542 	}
543 
544 	if (vbo >= inode->i_size) {
545 		/* Beyond the file size. */
546 		return 0;
547 	}
548 
549 	if (is_resident(ni)) {
550 		ni_lock(ni);
551 		err = attr_data_read_resident(ni, page);
552 		ni_unlock(ni);
553 
554 		if (!err)
555 			set_buffer_uptodate(bh);
556 		bh->b_size = block_size;
557 		return err;
558 	}
559 
560 	vcn = vbo >> cluster_bits;
561 	off = vbo & sbi->cluster_mask;
562 	new = false;
563 
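	/* Map the cluster containing 'vbo'; allocate it when 'create' is set. */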
564 	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
565 	if (err)
566 		goto out;
567 
568 	if (!len)
569 		return 0;
570 
571 	bytes = ((u64)len << cluster_bits) - off;
572 
573 	if (lcn == SPARSE_LCN) {
574 		if (!create) {
575 			if (bh->b_size > bytes)
576 				bh->b_size = bytes;
577 			return 0;
578 		}
579 		WARN_ON(1);
580 	}
581 
582 	if (new) {
583 		set_buffer_new(bh);
584 		if ((len << cluster_bits) > block_size)
585 			ntfs_sparse_cluster(inode, page, vcn, len);
586 	}
587 
588 	lbo = ((u64)lcn << cluster_bits) + off;
589 
590 	set_buffer_mapped(bh);
591 	bh->b_bdev = sb->s_bdev;
592 	bh->b_blocknr = lbo >> sb->s_blocksize_bits;
593 
594 	valid = ni->i_valid;
595 
596 	if (ctx == GET_BLOCK_DIRECT_IO_W) {
597 		/* ntfs_direct_IO will update ni->i_valid. */
598 		if (vbo >= valid)
599 			set_buffer_new(bh);
600 	} else if (create) {
601 		/* Normal write. */
602 		if (bytes > bh->b_size)
603 			bytes = bh->b_size;
604 
605 		if (vbo >= valid)
606 			set_buffer_new(bh);
607 
608 		if (vbo + bytes > valid) {
609 			ni->i_valid = vbo + bytes;
610 			mark_inode_dirty(inode);
611 		}
612 	} else if (vbo >= valid) {
613 		/* Read beyond valid data. */
614 		/* Should never get here because it was already checked above. */
615 		clear_buffer_mapped(bh);
616 	} else if (vbo + bytes <= valid) {
617 		/* Normal read. */
618 	} else if (vbo + block_size <= valid) {
619 		/* Normal short read. */
620 		bytes = block_size;
621 	} else {
622 		/*
623 		 * Read across valid size: vbo < valid && valid < vbo + block_size
624 		 */
625 		bytes = block_size;
626 
627 		if (page) {
628 			u32 voff = valid - vbo;
629 
630 			bh->b_size = block_size;
631 			off = vbo & (PAGE_SIZE - 1);
632 			set_bh_page(bh, page, off);
633 			ll_rw_block(REQ_OP_READ, 1, &bh);
634 			wait_on_buffer(bh);
635 			if (!buffer_uptodate(bh)) {
636 				err = -EIO;
637 				goto out;
638 			}
639 			zero_user_segment(page, off + voff, off + block_size);
640 		}
641 	}
642 
643 	if (bh->b_size > bytes)
644 		bh->b_size = bytes;
645 
646 #ifndef __LP64__
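	/*
	 * On 32-bit kernels cap a single direct I/O mapping so the length
	 * still fits in the (smaller) size_t used for b_size.
	 */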
647 	if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
648 		static_assert(sizeof(size_t) < sizeof(loff_t));
649 		if (bytes > 0x40000000u)
650 			bh->b_size = 0x40000000u;
651 	}
652 #endif
653 
654 	return 0;
655 
656 out:
657 	return err;
658 }
659 
660 int ntfs_get_block(struct inode *inode, sector_t vbn,
661 		   struct buffer_head *bh_result, int create)
662 {
663 	return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
664 				  bh_result, create, GET_BLOCK_GENERAL);
665 }
666 
667 static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
668 			       struct buffer_head *bh_result, int create)
669 {
670 	return ntfs_get_block_vbo(inode,
671 				  (u64)vsn << inode->i_sb->s_blocksize_bits,
672 				  bh_result, create, GET_BLOCK_BMAP);
673 }
674 
675 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
676 {
677 	return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
678 }
679 
680 static int ntfs_read_folio(struct file *file, struct folio *folio)
681 {
682 	struct page *page = &folio->page;
683 	int err;
684 	struct address_space *mapping = page->mapping;
685 	struct inode *inode = mapping->host;
686 	struct ntfs_inode *ni = ntfs_i(inode);
687 
688 	if (is_resident(ni)) {
689 		ni_lock(ni);
690 		err = attr_data_read_resident(ni, page);
691 		ni_unlock(ni);
692 		if (err != E_NTFS_NONRESIDENT) {
693 			unlock_page(page);
694 			return err;
695 		}
696 	}
697 
698 	if (is_compressed(ni)) {
699 		ni_lock(ni);
700 		err = ni_readpage_cmpr(ni, page);
701 		ni_unlock(ni);
702 		return err;
703 	}
704 
705 	/* Normal + sparse files. */
706 	return mpage_read_folio(folio, ntfs_get_block);
707 }
708 
709 static void ntfs_readahead(struct readahead_control *rac)
710 {
711 	struct address_space *mapping = rac->mapping;
712 	struct inode *inode = mapping->host;
713 	struct ntfs_inode *ni = ntfs_i(inode);
714 	u64 valid;
715 	loff_t pos;
716 
717 	if (is_resident(ni)) {
718 		/* No readahead for resident. */
719 		return;
720 	}
721 
722 	if (is_compressed(ni)) {
723 		/* No readahead for compressed. */
724 		return;
725 	}
726 
727 	valid = ni->i_valid;
728 	pos = readahead_pos(rac);
729 
730 	if (valid < i_size_read(inode) && pos <= valid &&
731 	    valid < pos + readahead_length(rac)) {
732 		/* The range crosses 'valid'. Read it page by page. */
733 		return;
734 	}
735 
736 	mpage_readahead(rac, ntfs_get_block);
737 }
738 
739 static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
740 				      struct buffer_head *bh_result, int create)
741 {
742 	return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
743 				  bh_result, create, GET_BLOCK_DIRECT_IO_R);
744 }
745 
746 static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
747 				      struct buffer_head *bh_result, int create)
748 {
749 	return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
750 				  bh_result, create, GET_BLOCK_DIRECT_IO_W);
751 }
752 
753 static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
754 {
755 	struct file *file = iocb->ki_filp;
756 	struct address_space *mapping = file->f_mapping;
757 	struct inode *inode = mapping->host;
758 	struct ntfs_inode *ni = ntfs_i(inode);
759 	loff_t vbo = iocb->ki_pos;
760 	loff_t end;
761 	int wr = iov_iter_rw(iter) & WRITE;
762 	size_t iter_count = iov_iter_count(iter);
763 	loff_t valid;
764 	ssize_t ret;
765 
766 	if (is_resident(ni)) {
767 		/* Switch to buffered write. */
768 		ret = 0;
769 		goto out;
770 	}
771 
772 	ret = blockdev_direct_IO(iocb, inode, iter,
773 				 wr ? ntfs_get_block_direct_IO_W
774 				    : ntfs_get_block_direct_IO_R);
775 
776 	if (ret > 0)
777 		end = vbo + ret;
778 	else if (wr && ret == -EIOCBQUEUED)
779 		end = vbo + iter_count;
780 	else
781 		goto out;
782 
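	/* For writes extend the valid size; for reads zero the tail beyond it. */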
783 	valid = ni->i_valid;
784 	if (wr) {
785 		if (end > valid && !S_ISBLK(inode->i_mode)) {
786 			ni->i_valid = end;
787 			mark_inode_dirty(inode);
788 		}
789 	} else if (vbo < valid && valid < end) {
790 		/* Zero the tail of the read beyond valid data. */
791 		iov_iter_revert(iter, end - valid);
792 		iov_iter_zero(end - valid, iter);
793 	}
794 
795 out:
796 	return ret;
797 }
798 
799 int ntfs_set_size(struct inode *inode, u64 new_size)
800 {
801 	struct super_block *sb = inode->i_sb;
802 	struct ntfs_sb_info *sbi = sb->s_fs_info;
803 	struct ntfs_inode *ni = ntfs_i(inode);
804 	int err;
805 
806 	/* Check for maximum file size. */
807 	if (is_sparsed(ni) || is_compressed(ni)) {
808 		if (new_size > sbi->maxbytes_sparse) {
809 			err = -EFBIG;
810 			goto out;
811 		}
812 	} else if (new_size > sbi->maxbytes) {
813 		err = -EFBIG;
814 		goto out;
815 	}
816 
817 	ni_lock(ni);
818 	down_write(&ni->file.run_lock);
819 
820 	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
821 			    &ni->i_valid, true, NULL);
822 
823 	up_write(&ni->file.run_lock);
824 	ni_unlock(ni);
825 
826 	mark_inode_dirty(inode);
827 
828 out:
829 	return err;
830 }
831 
832 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
833 {
834 	struct address_space *mapping = page->mapping;
835 	struct inode *inode = mapping->host;
836 	struct ntfs_inode *ni = ntfs_i(inode);
837 	int err;
838 
839 	if (is_resident(ni)) {
840 		ni_lock(ni);
841 		err = attr_data_write_resident(ni, page);
842 		ni_unlock(ni);
843 		if (err != E_NTFS_NONRESIDENT) {
844 			unlock_page(page);
845 			return err;
846 		}
847 	}
848 
849 	return block_write_full_page(page, ntfs_get_block, wbc);
850 }
851 
852 static int ntfs_writepages(struct address_space *mapping,
853 			   struct writeback_control *wbc)
854 {
855 	/* Redirect call to 'ntfs_writepage' for resident files. */
856 	if (is_resident(ntfs_i(mapping->host)))
857 		return generic_writepages(mapping, wbc);
858 	return mpage_writepages(mapping, wbc, ntfs_get_block);
859 }
860 
861 static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
862 				      struct buffer_head *bh_result, int create)
863 {
864 	return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
865 				  bh_result, create, GET_BLOCK_WRITE_BEGIN);
866 }
867 
868 int ntfs_write_begin(struct file *file, struct address_space *mapping,
869 		     loff_t pos, u32 len, struct page **pagep, void **fsdata)
870 {
871 	int err;
872 	struct inode *inode = mapping->host;
873 	struct ntfs_inode *ni = ntfs_i(inode);
874 
875 	*pagep = NULL;
876 	if (is_resident(ni)) {
877 		struct page *page = grab_cache_page_write_begin(
878 			mapping, pos >> PAGE_SHIFT);
879 
880 		if (!page) {
881 			err = -ENOMEM;
882 			goto out;
883 		}
884 
885 		ni_lock(ni);
886 		err = attr_data_read_resident(ni, page);
887 		ni_unlock(ni);
888 
889 		if (!err) {
890 			*pagep = page;
891 			goto out;
892 		}
893 		unlock_page(page);
894 		put_page(page);
895 
896 		if (err != E_NTFS_NONRESIDENT)
897 			goto out;
898 	}
899 
900 	err = block_write_begin(mapping, pos, len, pagep,
901 				ntfs_get_block_write_begin);
902 
903 out:
904 	return err;
905 }
906 
907 /*
908  * ntfs_write_end - address_space_operations::write_end.
909  */
910 int ntfs_write_end(struct file *file, struct address_space *mapping,
911 		   loff_t pos, u32 len, u32 copied, struct page *page,
912 		   void *fsdata)
913 {
914 	struct inode *inode = mapping->host;
915 	struct ntfs_inode *ni = ntfs_i(inode);
916 	u64 valid = ni->i_valid;
917 	bool dirty = false;
918 	int err;
919 
920 	if (is_resident(ni)) {
921 		ni_lock(ni);
922 		err = attr_data_write_resident(ni, page);
923 		ni_unlock(ni);
924 		if (!err) {
925 			dirty = true;
926 			/* Clear any buffers in page. */
927 			if (page_has_buffers(page)) {
928 				struct buffer_head *head, *bh;
929 
930 				bh = head = page_buffers(page);
931 				do {
932 					clear_buffer_dirty(bh);
933 					clear_buffer_mapped(bh);
934 					set_buffer_uptodate(bh);
935 				} while (head != (bh = bh->b_this_page));
936 			}
937 			SetPageUptodate(page);
938 			err = copied;
939 		}
940 		unlock_page(page);
941 		put_page(page);
942 	} else {
943 		err = generic_write_end(file, mapping, pos, len, copied, page,
944 					fsdata);
945 	}
946 
947 	if (err >= 0) {
948 		if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
949 			inode->i_ctime = inode->i_mtime = current_time(inode);
950 			ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
951 			dirty = true;
952 		}
953 
954 		if (valid != ni->i_valid) {
955 			/* ni->i_valid is changed in ntfs_get_block_vbo. */
956 			dirty = true;
957 		}
958 
959 		if (dirty)
960 			mark_inode_dirty(inode);
961 	}
962 
963 	return err;
964 }
965 
966 int reset_log_file(struct inode *inode)
967 {
968 	int err;
969 	loff_t pos = 0;
970 	u32 log_size = inode->i_size;
971 	struct address_space *mapping = inode->i_mapping;
972 
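	/* Fill the whole log with 0xff bytes, one page at a time. */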
973 	for (;;) {
974 		u32 len;
975 		void *kaddr;
976 		struct page *page;
977 
978 		len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
979 
980 		err = block_write_begin(mapping, pos, len, &page,
981 					ntfs_get_block_write_begin);
982 		if (err)
983 			goto out;
984 
985 		kaddr = kmap_atomic(page);
986 		memset(kaddr, -1, len);
987 		kunmap_atomic(kaddr);
988 		flush_dcache_page(page);
989 
990 		err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
991 		if (err < 0)
992 			goto out;
993 		pos += len;
994 
995 		if (pos >= log_size)
996 			break;
997 		balance_dirty_pages_ratelimited(mapping);
998 	}
999 out:
1000 	mark_inode_dirty_sync(inode);
1001 
1002 	return err;
1003 }
1004 
1005 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
1006 {
1007 	return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1008 }
1009 
1010 int ntfs_sync_inode(struct inode *inode)
1011 {
1012 	return _ni_write_inode(inode, 1);
1013 }
1014 
1015 /*
1016  * writeback_inode - Helper function for ntfs_flush_inodes().
1017  *
1018  * This writes both the inode and the file data blocks, waiting
1019  * for in-flight data blocks before the start of the call.  It
1020  * does not wait for any I/O started during the call.
1021  */
1022 static int writeback_inode(struct inode *inode)
1023 {
1024 	int ret = sync_inode_metadata(inode, 0);
1025 
1026 	if (!ret)
1027 		ret = filemap_fdatawrite(inode->i_mapping);
1028 	return ret;
1029 }
1030 
1031 /*
1032  * ntfs_flush_inodes
1033  *
1034  * Write data and metadata corresponding to i1 and i2.  The I/O is
1035  * started but we do not wait for any of it to finish.
1036  *
1037  * filemap_flush() is used for the block device, so if there is a dirty
1038  * page for a block already in flight, we will not wait and start the
1039  * io over again.
1040  */
1041 int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
1042 		      struct inode *i2)
1043 {
1044 	int ret = 0;
1045 
1046 	if (i1)
1047 		ret = writeback_inode(i1);
1048 	if (!ret && i2)
1049 		ret = writeback_inode(i2);
1050 	if (!ret)
1051 		ret = sync_blockdev_nowait(sb->s_bdev);
1052 	return ret;
1053 }
1054 
1055 int inode_write_data(struct inode *inode, const void *data, size_t bytes)
1056 {
1057 	pgoff_t idx;
1058 
1059 	/* Write non-resident data. */
1060 	for (idx = 0; bytes; idx++) {
1061 		size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
1062 		struct page *page = ntfs_map_page(inode->i_mapping, idx);
1063 
1064 		if (IS_ERR(page))
1065 			return PTR_ERR(page);
1066 
1067 		lock_page(page);
1068 		WARN_ON(!PageUptodate(page));
1069 		ClearPageUptodate(page);
1070 
1071 		memcpy(page_address(page), data, op);
1072 
1073 		flush_dcache_page(page);
1074 		SetPageUptodate(page);
1075 		unlock_page(page);
1076 
1077 		ntfs_unmap_page(page);
1078 
1079 		bytes -= op;
1080 		data = Add2Ptr(data, PAGE_SIZE);
1081 	}
1082 	return 0;
1083 }
1084 
1085 /*
1086  * ntfs_reparse_bytes
1087  *
1088  * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
1089  * for unicode string of @uni_len length.
1090  */
1091 static inline u32 ntfs_reparse_bytes(u32 uni_len)
1092 {
1093 	/* Header + Unicode string + decorated Unicode string. */
1094 	return sizeof(short) * (2 * uni_len + 4) +
1095 	       offsetof(struct REPARSE_DATA_BUFFER,
1096 			SymbolicLinkReparseBuffer.PathBuffer);
1097 }
1098 
1099 static struct REPARSE_DATA_BUFFER *
1100 ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
1101 			   u32 size, u16 *nsize)
1102 {
1103 	int i, err;
1104 	struct REPARSE_DATA_BUFFER *rp;
1105 	__le16 *rp_name;
1106 	typeof(rp->SymbolicLinkReparseBuffer) *rs;
1107 
1108 	rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
1109 	if (!rp)
1110 		return ERR_PTR(-ENOMEM);
1111 
1112 	rs = &rp->SymbolicLinkReparseBuffer;
1113 	rp_name = rs->PathBuffer;
1114 
1115 	/* Convert link name to UTF-16. */
1116 	err = ntfs_nls_to_utf16(sbi, symname, size,
1117 				(struct cpu_str *)(rp_name - 1), 2 * size,
1118 				UTF16_LITTLE_ENDIAN);
1119 	if (err < 0)
1120 		goto out;
1121 
1122 	/* err is now the length of the symlink's Unicode name. */
1123 	*nsize = ntfs_reparse_bytes(err);
1124 
1125 	if (*nsize > sbi->reparse.max_size) {
1126 		err = -EFBIG;
1127 		goto out;
1128 	}
1129 
1130 	/* Translate Linux '/' into Windows '\'. */
1131 	for (i = 0; i < err; i++) {
1132 		if (rp_name[i] == cpu_to_le16('/'))
1133 			rp_name[i] = cpu_to_le16('\\');
1134 	}
1135 
1136 	rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
1137 	rp->ReparseDataLength =
1138 		cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
1139 					      SymbolicLinkReparseBuffer));
1140 
1141 	/* PrintName + SubstituteName. */
1142 	rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
1143 	rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
1144 	rs->PrintNameLength = rs->SubstituteNameOffset;
1145 
1146 	/*
1147 	 * TODO: Use relative path if possible to allow Windows to
1148 	 * parse this path.
1149 	 * 0 - absolute path, 1 - relative path (SYMLINK_FLAG_RELATIVE).
1150 	 */
1151 	rs->Flags = 0;
1152 
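	/*
	 * Copy PrintName after a four-character gap that is then filled
	 * with the '\??\' prefix to form SubstituteName.
	 */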
1153 	memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
1154 
1155 	/* Decorate SubstituteName. */
1156 	rp_name += err;
1157 	rp_name[0] = cpu_to_le16('\\');
1158 	rp_name[1] = cpu_to_le16('?');
1159 	rp_name[2] = cpu_to_le16('?');
1160 	rp_name[3] = cpu_to_le16('\\');
1161 
1162 	return rp;
1163 out:
1164 	kfree(rp);
1165 	return ERR_PTR(err);
1166 }
1167 
1168 struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
1169 				struct inode *dir, struct dentry *dentry,
1170 				const struct cpu_str *uni, umode_t mode,
1171 				dev_t dev, const char *symname, u32 size,
1172 				struct ntfs_fnd *fnd)
1173 {
1174 	int err;
1175 	struct super_block *sb = dir->i_sb;
1176 	struct ntfs_sb_info *sbi = sb->s_fs_info;
1177 	const struct qstr *name = &dentry->d_name;
1178 	CLST ino = 0;
1179 	struct ntfs_inode *dir_ni = ntfs_i(dir);
1180 	struct ntfs_inode *ni = NULL;
1181 	struct inode *inode = NULL;
1182 	struct ATTRIB *attr;
1183 	struct ATTR_STD_INFO5 *std5;
1184 	struct ATTR_FILE_NAME *fname;
1185 	struct MFT_REC *rec;
1186 	u32 asize, dsize, sd_size;
1187 	enum FILE_ATTRIBUTE fa;
1188 	__le32 security_id = SECURITY_ID_INVALID;
1189 	CLST vcn;
1190 	const void *sd;
1191 	u16 t16, nsize = 0, aid = 0;
1192 	struct INDEX_ROOT *root, *dir_root;
1193 	struct NTFS_DE *e, *new_de = NULL;
1194 	struct REPARSE_DATA_BUFFER *rp = NULL;
1195 	bool rp_inserted = false;
1196 
1197 	ni_lock_dir(dir_ni);
1198 
1199 	dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
1200 	if (!dir_root) {
1201 		err = -EINVAL;
1202 		goto out1;
1203 	}
1204 
1205 	if (S_ISDIR(mode)) {
1206 		/* Use parent's directory attributes. */
1207 		fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
1208 		     FILE_ATTRIBUTE_ARCHIVE;
1209 		/*
1210 		 * By default a child directory inherits its parent's attributes.
1211 		 * The root directory is hidden + system.
1212 		 * Make an exception for children of the root.
1213 		 */
1214 		if (dir->i_ino == MFT_REC_ROOT)
1215 			fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
1216 	} else if (S_ISLNK(mode)) {
1217 		/* Ideally the link should be the same type (file/dir) as its target. */
1218 		fa = FILE_ATTRIBUTE_REPARSE_POINT;
1219 
1220 		/*
1221 		 * Linux has dirs, files, symlinks and so on.
1222 		 * NTFS symlinks are "dir + reparse" or "file + reparse".
1223 		 * It would be good to create:
1224 		 * dir + reparse if 'symname' points to a directory,
1225 		 * or
1226 		 * file + reparse if 'symname' points to a file.
1227 		 * Unfortunately kern_path hangs if symname contains 'dir'.
1228 		 */
1229 
1230 		/*
1231 		 *	struct path path;
1232 		 *
1233 		 *	if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
1234 		 *		struct inode *target = d_inode(path.dentry);
1235 		 *
1236 		 *		if (S_ISDIR(target->i_mode))
1237 		 *			fa |= FILE_ATTRIBUTE_DIRECTORY;
1238 		 *		// if ( target->i_sb == sb ){
1239 		 *		//	use relative path?
1240 		 *		// }
1241 		 *		path_put(&path);
1242 		 *	}
1243 		 */
1244 	} else if (S_ISREG(mode)) {
1245 		if (sbi->options->sparse) {
1246 			/* Sparse regular file, because of the 'sparse' mount option. */
1247 			fa = FILE_ATTRIBUTE_SPARSE_FILE |
1248 			     FILE_ATTRIBUTE_ARCHIVE;
1249 		} else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
1250 			/* Compressed regular file, if parent is compressed. */
1251 			fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
1252 		} else {
1253 			/* Regular file, default attributes. */
1254 			fa = FILE_ATTRIBUTE_ARCHIVE;
1255 		}
1256 	} else {
1257 		fa = FILE_ATTRIBUTE_ARCHIVE;
1258 	}
1259 
1260 	if (!(mode & 0222))
1261 		fa |= FILE_ATTRIBUTE_READONLY;
1262 
1263 	/* Allocate PATH_MAX bytes. */
1264 	new_de = __getname();
1265 	if (!new_de) {
1266 		err = -ENOMEM;
1267 		goto out1;
1268 	}
1269 
1270 	/* Mark rw ntfs as dirty. It will be cleared at umount. */
1271 	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1272 
1273 	/* Step 1: allocate and fill new mft record. */
1274 	err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
1275 	if (err)
1276 		goto out2;
1277 
1278 	ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
1279 	if (IS_ERR(ni)) {
1280 		err = PTR_ERR(ni);
1281 		ni = NULL;
1282 		goto out3;
1283 	}
1284 	inode = &ni->vfs_inode;
1285 	inode_init_owner(mnt_userns, inode, dir, mode);
1286 	mode = inode->i_mode;
1287 
1288 	inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
1289 		current_time(inode);
1290 
1291 	rec = ni->mi.mrec;
1292 	rec->hard_links = cpu_to_le16(1);
1293 	attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
1294 
1295 	/* Get default security id. */
1296 	sd = s_default_security;
1297 	sd_size = sizeof(s_default_security);
1298 
1299 	if (is_ntfs3(sbi)) {
1300 		security_id = dir_ni->std_security_id;
1301 		if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
1302 			security_id = sbi->security.def_security_id;
1303 
1304 			if (security_id == SECURITY_ID_INVALID &&
1305 			    !ntfs_insert_security(sbi, sd, sd_size,
1306 						  &security_id, NULL))
1307 				sbi->security.def_security_id = security_id;
1308 		}
1309 	}
1310 
1311 	/* Insert standard info. */
1312 	std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
1313 
1314 	if (security_id == SECURITY_ID_INVALID) {
1315 		dsize = sizeof(struct ATTR_STD_INFO);
1316 	} else {
1317 		dsize = sizeof(struct ATTR_STD_INFO5);
1318 		std5->security_id = security_id;
1319 		ni->std_security_id = security_id;
1320 	}
1321 	asize = SIZEOF_RESIDENT + dsize;
1322 
1323 	attr->type = ATTR_STD;
1324 	attr->size = cpu_to_le32(asize);
1325 	attr->id = cpu_to_le16(aid++);
1326 	attr->res.data_off = SIZEOF_RESIDENT_LE;
1327 	attr->res.data_size = cpu_to_le32(dsize);
1328 
1329 	std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
1330 		kernel2nt(&inode->i_atime);
1331 
1332 	ni->std_fa = fa;
1333 	std5->fa = fa;
1334 
1335 	attr = Add2Ptr(attr, asize);
1336 
1337 	/* Insert file name. */
1338 	err = fill_name_de(sbi, new_de, name, uni);
1339 	if (err)
1340 		goto out4;
1341 
1342 	mi_get_ref(&ni->mi, &new_de->ref);
1343 
1344 	fname = (struct ATTR_FILE_NAME *)(new_de + 1);
1345 	mi_get_ref(&dir_ni->mi, &fname->home);
1346 	fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
1347 		fname->dup.a_time = std5->cr_time;
1348 	fname->dup.alloc_size = fname->dup.data_size = 0;
1349 	fname->dup.fa = std5->fa;
1350 	fname->dup.ea_size = fname->dup.reparse = 0;
1351 
1352 	dsize = le16_to_cpu(new_de->key_size);
1353 	asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);
1354 
1355 	attr->type = ATTR_NAME;
1356 	attr->size = cpu_to_le32(asize);
1357 	attr->res.data_off = SIZEOF_RESIDENT_LE;
1358 	attr->res.flags = RESIDENT_FLAG_INDEXED;
1359 	attr->id = cpu_to_le16(aid++);
1360 	attr->res.data_size = cpu_to_le32(dsize);
1361 	memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
1362 
1363 	attr = Add2Ptr(attr, asize);
1364 
1365 	if (security_id == SECURITY_ID_INVALID) {
1366 		/* Insert security attribute. */
1367 		asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);
1368 
1369 		attr->type = ATTR_SECURE;
1370 		attr->size = cpu_to_le32(asize);
1371 		attr->id = cpu_to_le16(aid++);
1372 		attr->res.data_off = SIZEOF_RESIDENT_LE;
1373 		attr->res.data_size = cpu_to_le32(sd_size);
1374 		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
1375 
1376 		attr = Add2Ptr(attr, asize);
1377 	}
1378 
1379 	attr->id = cpu_to_le16(aid++);
1380 	if (fa & FILE_ATTRIBUTE_DIRECTORY) {
1381 		/*
1382 		 * Regular directory or symlink to directory.
1383 		 * Create root attribute.
1384 		 */
1385 		dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
1386 		asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
1387 
1388 		attr->type = ATTR_ROOT;
1389 		attr->size = cpu_to_le32(asize);
1390 
1391 		attr->name_len = ARRAY_SIZE(I30_NAME);
1392 		attr->name_off = SIZEOF_RESIDENT_LE;
1393 		attr->res.data_off =
1394 			cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
1395 		attr->res.data_size = cpu_to_le32(dsize);
1396 		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
1397 		       sizeof(I30_NAME));
1398 
1399 		root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
1400 		memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
1401 		root->ihdr.de_off =
1402 			cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
1403 		root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
1404 					      sizeof(struct NTFS_DE));
1405 		root->ihdr.total = root->ihdr.used;
1406 
1407 		e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
1408 		e->size = cpu_to_le16(sizeof(struct NTFS_DE));
1409 		e->flags = NTFS_IE_LAST;
1410 	} else if (S_ISLNK(mode)) {
1411 		/*
1412 		 * Symlink to file.
1413 		 * Create empty resident data attribute.
1414 		 */
1415 		asize = SIZEOF_RESIDENT;
1416 
1417 		/* Insert an empty ATTR_DATA. */
1418 		attr->type = ATTR_DATA;
1419 		attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1420 		attr->name_off = SIZEOF_RESIDENT_LE;
1421 		attr->res.data_off = SIZEOF_RESIDENT_LE;
1422 	} else if (S_ISREG(mode)) {
1423 		/*
1424 		 * Regular file. Create an empty non-resident data attribute.
1425 		 */
1426 		attr->type = ATTR_DATA;
1427 		attr->non_res = 1;
1428 		attr->nres.evcn = cpu_to_le64(-1ll);
1429 		if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
1430 			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1431 			attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1432 			attr->flags = ATTR_FLAG_SPARSED;
1433 			asize = SIZEOF_NONRESIDENT_EX + 8;
1434 		} else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
1435 			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1436 			attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1437 			attr->flags = ATTR_FLAG_COMPRESSED;
1438 			attr->nres.c_unit = COMPRESSION_UNIT;
1439 			asize = SIZEOF_NONRESIDENT_EX + 8;
1440 		} else {
1441 			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
1442 			attr->name_off = SIZEOF_NONRESIDENT_LE;
1443 			asize = SIZEOF_NONRESIDENT + 8;
1444 		}
1445 		attr->nres.run_off = attr->name_off;
1446 	} else {
1447 		/*
1448 		 * Special node (device/fifo/socket). Create an empty resident data attribute.
1449 		 */
1450 		attr->type = ATTR_DATA;
1451 		attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1452 		attr->name_off = SIZEOF_RESIDENT_LE;
1453 		if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
1454 			attr->flags = ATTR_FLAG_SPARSED;
1455 		else if (fa & FILE_ATTRIBUTE_COMPRESSED)
1456 			attr->flags = ATTR_FLAG_COMPRESSED;
1457 		attr->res.data_off = SIZEOF_RESIDENT_LE;
1458 		asize = SIZEOF_RESIDENT;
1459 		ni->ni_flags |= NI_FLAG_RESIDENT;
1460 	}
1461 
1462 	if (S_ISDIR(mode)) {
1463 		ni->ni_flags |= NI_FLAG_DIR;
1464 		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
1465 		if (err)
1466 			goto out4;
1467 	} else if (S_ISLNK(mode)) {
1468 		rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
1469 
1470 		if (IS_ERR(rp)) {
1471 			err = PTR_ERR(rp);
1472 			rp = NULL;
1473 			goto out4;
1474 		}
1475 
1476 		/*
1477 		 * Insert ATTR_REPARSE.
1478 		 */
1479 		attr = Add2Ptr(attr, asize);
1480 		attr->type = ATTR_REPARSE;
1481 		attr->id = cpu_to_le16(aid++);
1482 
1483 		/* Resident or non-resident? */
1484 		asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
1485 		t16 = PtrOffset(rec, attr);
1486 
1487 		/*
1488 		 * The function 'ntfs_save_wsl_perm' below requires 0x78 bytes.
1489 		 * It is a good idea to keep extended attributes resident.
1490 		 */
1491 		if (asize + t16 + 0x78 + 8 > sbi->record_size) {
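			/*
			 * Too big to stay resident: allocate clusters and
			 * store the reparse buffer as non-resident data.
			 */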
1492 			CLST alen;
1493 			CLST clst = bytes_to_cluster(sbi, nsize);
1494 
1495 			/* Bytes available for the packed run list. */
1496 			t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
1497 
1498 			attr->non_res = 1;
1499 			attr->nres.evcn = cpu_to_le64(clst - 1);
1500 			attr->name_off = SIZEOF_NONRESIDENT_LE;
1501 			attr->nres.run_off = attr->name_off;
1502 			attr->nres.data_size = cpu_to_le64(nsize);
1503 			attr->nres.valid_size = attr->nres.data_size;
1504 			attr->nres.alloc_size =
1505 				cpu_to_le64(ntfs_up_cluster(sbi, nsize));
1506 
1507 			err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
1508 						     clst, NULL, 0, &alen, 0,
1509 						     NULL);
1510 			if (err)
1511 				goto out5;
1512 
1513 			err = run_pack(&ni->file.run, 0, clst,
1514 				       Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
1515 				       &vcn);
1516 			if (err < 0)
1517 				goto out5;
1518 
1519 			if (vcn != clst) {
1520 				err = -EINVAL;
1521 				goto out5;
1522 			}
1523 
1524 			asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
1525 		} else {
1526 			attr->res.data_off = SIZEOF_RESIDENT_LE;
1527 			attr->res.data_size = cpu_to_le32(nsize);
1528 			memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
1529 			nsize = 0;
1530 		}
1531 		/* Size of symlink equals the length of input string. */
1532 		inode->i_size = size;
1533 
1534 		attr->size = cpu_to_le32(asize);
1535 
1536 		err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
1537 					  &new_de->ref);
1538 		if (err)
1539 			goto out5;
1540 
1541 		rp_inserted = true;
1542 	}
1543 
1544 	attr = Add2Ptr(attr, asize);
1545 	attr->type = ATTR_END;
1546 
1547 	rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
1548 	rec->next_attr_id = cpu_to_le16(aid);
1549 
1550 	/* Step 2: Add new name in index. */
1551 	err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0);
1552 	if (err)
1553 		goto out6;
1554 
1555 	/* Unlock parent directory before ntfs_init_acl. */
1556 	ni_unlock(dir_ni);
1557 
1558 	inode->i_generation = le16_to_cpu(rec->seq);
1559 
1560 	dir->i_mtime = dir->i_ctime = inode->i_atime;
1561 
1562 	if (S_ISDIR(mode)) {
1563 		inode->i_op = &ntfs_dir_inode_operations;
1564 		inode->i_fop = &ntfs_dir_operations;
1565 	} else if (S_ISLNK(mode)) {
1566 		inode->i_op = &ntfs_link_inode_operations;
1567 		inode->i_fop = NULL;
1568 		inode->i_mapping->a_ops = &ntfs_aops;
1569 		inode->i_size = size;
1570 		inode_nohighmem(inode);
1571 	} else if (S_ISREG(mode)) {
1572 		inode->i_op = &ntfs_file_inode_operations;
1573 		inode->i_fop = &ntfs_file_operations;
1574 		inode->i_mapping->a_ops =
1575 			is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
1576 		init_rwsem(&ni->file.run_lock);
1577 	} else {
1578 		inode->i_op = &ntfs_special_inode_operations;
1579 		init_special_inode(inode, mode, dev);
1580 	}
1581 
1582 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
1583 	if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
1584 		err = ntfs_init_acl(mnt_userns, inode, dir);
1585 		if (err)
1586 			goto out7;
1587 	} else
1588 #endif
1589 	{
1590 		inode->i_flags |= S_NOSEC;
1591 	}
1592 
1593 	/* Write non-resident data. */
1594 	if (nsize) {
1595 		err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0);
1596 		if (err)
1597 			goto out7;
1598 	}
1599 
1600 	/*
1601 	 * Call 'd_instantiate' after inode->i_op is set
1602 	 * but before finish_open.
1603 	 */
1604 	d_instantiate(dentry, inode);
1605 
1606 	ntfs_save_wsl_perm(inode);
1607 	mark_inode_dirty(dir);
1608 	mark_inode_dirty(inode);
1609 
1610 	/* Normal exit. */
1611 	goto out2;
1612 
1613 out7:
1614 
1615 	/* Undo 'indx_insert_entry'. */
1616 	ni_lock_dir(dir_ni);
1617 	indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
1618 			  le16_to_cpu(new_de->key_size), sbi);
1619 	/* ni_unlock(dir_ni); will be called later. */
1620 out6:
1621 	if (rp_inserted)
1622 		ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
1623 
1624 out5:
1625 	if (S_ISDIR(mode) || run_is_empty(&ni->file.run))
1626 		goto out4;
1627 
1628 	run_deallocate(sbi, &ni->file.run, false);
1629 
1630 out4:
1631 	clear_rec_inuse(rec);
1632 	clear_nlink(inode);
1633 	ni->mi.dirty = false;
1634 	discard_new_inode(inode);
1635 out3:
1636 	ntfs_mark_rec_free(sbi, ino, false);
1637 
1638 out2:
1639 	__putname(new_de);
1640 	kfree(rp);
1641 
1642 out1:
1643 	if (err) {
1644 		ni_unlock(dir_ni);
1645 		return ERR_PTR(err);
1646 	}
1647 
1648 	unlock_new_inode(inode);
1649 
1650 	return inode;
1651 }
1652 
1653 int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
1654 {
1655 	int err;
1656 	struct ntfs_inode *ni = ntfs_i(inode);
1657 	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
1658 	struct NTFS_DE *de;
1659 
1660 	/* Allocate PATH_MAX bytes. */
1661 	de = __getname();
1662 	if (!de)
1663 		return -ENOMEM;
1664 
1665 	/* Mark rw ntfs as dirty. It will be cleared at umount. */
1666 	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1667 
1668 	/* Construct 'de'. */
1669 	err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1670 	if (err)
1671 		goto out;
1672 
1673 	err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
1674 out:
1675 	__putname(de);
1676 	return err;
1677 }
1678 
1679 /*
1680  * ntfs_unlink_inode
1681  *
1682  * inode_operations::unlink
1683  * inode_operations::rmdir
1684  */
1685 int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
1686 {
1687 	int err;
1688 	struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info;
1689 	struct inode *inode = d_inode(dentry);
1690 	struct ntfs_inode *ni = ntfs_i(inode);
1691 	struct ntfs_inode *dir_ni = ntfs_i(dir);
1692 	struct NTFS_DE *de, *de2 = NULL;
1693 	int undo_remove;
1694 
1695 	if (ntfs_is_meta_file(sbi, ni->mi.rno))
1696 		return -EINVAL;
1697 
1698 	/* Allocate PATH_MAX bytes. */
1699 	de = __getname();
1700 	if (!de)
1701 		return -ENOMEM;
1702 
1703 	ni_lock(ni);
1704 
1705 	if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) {
1706 		err = -ENOTEMPTY;
1707 		goto out;
1708 	}
1709 
1710 	err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1711 	if (err < 0)
1712 		goto out;
1713 
1714 	undo_remove = 0;
1715 	err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove);
1716 
1717 	if (!err) {
1718 		drop_nlink(inode);
1719 		dir->i_mtime = dir->i_ctime = current_time(dir);
1720 		mark_inode_dirty(dir);
1721 		inode->i_ctime = dir->i_ctime;
1722 		if (inode->i_nlink)
1723 			mark_inode_dirty(inode);
1724 	} else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
1725 		_ntfs_bad_inode(inode);
1726 	} else {
1727 		if (ni_is_dirty(dir))
1728 			mark_inode_dirty(dir);
1729 		if (ni_is_dirty(inode))
1730 			mark_inode_dirty(inode);
1731 	}
1732 
1733 out:
1734 	ni_unlock(ni);
1735 	__putname(de);
1736 	return err;
1737 }
1738 
1739 void ntfs_evict_inode(struct inode *inode)
1740 {
1741 	truncate_inode_pages_final(&inode->i_data);
1742 
1743 	if (inode->i_nlink)
1744 		_ni_write_inode(inode, inode_needs_sync(inode));
1745 
1746 	invalidate_inode_buffers(inode);
1747 	clear_inode(inode);
1748 
1749 	ni_clear(ntfs_i(inode));
1750 }
1751 
1752 static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
1753 				      int buflen)
1754 {
1755 	int i, err = -EINVAL;
1756 	struct ntfs_inode *ni = ntfs_i(inode);
1757 	struct super_block *sb = inode->i_sb;
1758 	struct ntfs_sb_info *sbi = sb->s_fs_info;
1759 	u64 size;
1760 	u16 ulen = 0;
1761 	void *to_free = NULL;
1762 	struct REPARSE_DATA_BUFFER *rp;
1763 	const __le16 *uname;
1764 	struct ATTRIB *attr;
1765 
1766 	/* Reparse data present. Try to parse it. */
1767 	static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
1768 	static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
1769 
1770 	*buffer = 0;
1771 
1772 	attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
1773 	if (!attr)
1774 		goto out;
1775 
1776 	if (!attr->non_res) {
1777 		rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
1778 		if (!rp)
1779 			goto out;
1780 		size = le32_to_cpu(attr->res.data_size);
1781 	} else {
1782 		size = le64_to_cpu(attr->nres.data_size);
1783 		rp = NULL;
1784 	}
1785 
1786 	if (size > sbi->reparse.max_size || size <= sizeof(u32))
1787 		goto out;
1788 
1789 	if (!rp) {
1790 		rp = kmalloc(size, GFP_NOFS);
1791 		if (!rp) {
1792 			err = -ENOMEM;
1793 			goto out;
1794 		}
1795 		to_free = rp;
1796 		/* Read into a temporary buffer. */
1797 		err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
1798 		if (err)
1799 			goto out;
1800 	}
1801 
1802 	/* Microsoft Tag. */
1803 	switch (rp->ReparseTag) {
1804 	case IO_REPARSE_TAG_MOUNT_POINT:
1805 		/* Mount points and junctions. */
1806 		/* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
1807 		if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1808 				     MountPointReparseBuffer.PathBuffer))
1809 			goto out;
1810 		uname = Add2Ptr(rp,
1811 				offsetof(struct REPARSE_DATA_BUFFER,
1812 					 MountPointReparseBuffer.PathBuffer) +
1813 					le16_to_cpu(rp->MountPointReparseBuffer
1814 							    .PrintNameOffset));
1815 		ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
1816 		break;
1817 
1818 	case IO_REPARSE_TAG_SYMLINK:
1819 		/* FolderSymbolicLink */
1820 		/* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
1821 		if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1822 				     SymbolicLinkReparseBuffer.PathBuffer))
1823 			goto out;
1824 		uname = Add2Ptr(
1825 			rp, offsetof(struct REPARSE_DATA_BUFFER,
1826 				     SymbolicLinkReparseBuffer.PathBuffer) +
1827 				    le16_to_cpu(rp->SymbolicLinkReparseBuffer
1828 							.PrintNameOffset));
1829 		ulen = le16_to_cpu(
1830 			rp->SymbolicLinkReparseBuffer.PrintNameLength);
1831 		break;
1832 
1833 	case IO_REPARSE_TAG_CLOUD:
1834 	case IO_REPARSE_TAG_CLOUD_1:
1835 	case IO_REPARSE_TAG_CLOUD_2:
1836 	case IO_REPARSE_TAG_CLOUD_3:
1837 	case IO_REPARSE_TAG_CLOUD_4:
1838 	case IO_REPARSE_TAG_CLOUD_5:
1839 	case IO_REPARSE_TAG_CLOUD_6:
1840 	case IO_REPARSE_TAG_CLOUD_7:
1841 	case IO_REPARSE_TAG_CLOUD_8:
1842 	case IO_REPARSE_TAG_CLOUD_9:
1843 	case IO_REPARSE_TAG_CLOUD_A:
1844 	case IO_REPARSE_TAG_CLOUD_B:
1845 	case IO_REPARSE_TAG_CLOUD_C:
1846 	case IO_REPARSE_TAG_CLOUD_D:
1847 	case IO_REPARSE_TAG_CLOUD_E:
1848 	case IO_REPARSE_TAG_CLOUD_F:
1849 		err = sizeof("OneDrive") - 1;
1850 		if (err > buflen)
1851 			err = buflen;
1852 		memcpy(buffer, "OneDrive", err);
1853 		goto out;
1854 
1855 	default:
1856 		if (IsReparseTagMicrosoft(rp->ReparseTag)) {
1857 			/* Unknown Microsoft Tag. */
1858 			goto out;
1859 		}
1860 		if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
1861 		    size <= sizeof(struct REPARSE_POINT)) {
1862 			goto out;
1863 		}
1864 
1865 		/* User (non-Microsoft) tag. */
1866 		uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
1867 		ulen = le16_to_cpu(rp->ReparseDataLength) -
1868 		       sizeof(struct REPARSE_POINT);
1869 	}
1870 
1871 	/* Convert ulen from bytes to Unicode characters. */
1872 	ulen >>= 1;
1873 
1874 	/* Check that the name lies within the buffer. */
1875 	if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
1876 		goto out;
1877 
1878 	/* If the name is already zero-terminated, truncate it now. */
1879 	if (!uname[ulen - 1])
1880 		ulen -= 1;
1881 
1882 	err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);
1883 
1884 	if (err < 0)
1885 		goto out;
1886 
1887 	/* Translate Windows '\' into Linux '/'. */
1888 	for (i = 0; i < err; i++) {
1889 		if (buffer[i] == '\\')
1890 			buffer[i] = '/';
1891 	}
1892 
1893 	/* Always null-terminate the result. */
1894 	buffer[err] = 0;
1895 out:
1896 	kfree(to_free);
1897 	return err;
1898 }
1899 
1900 static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
1901 				 struct delayed_call *done)
1902 {
1903 	int err;
1904 	char *ret;
1905 
1906 	if (!de)
1907 		return ERR_PTR(-ECHILD);
1908 
1909 	ret = kmalloc(PAGE_SIZE, GFP_NOFS);
1910 	if (!ret)
1911 		return ERR_PTR(-ENOMEM);
1912 
1913 	err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
1914 	if (err < 0) {
1915 		kfree(ret);
1916 		return ERR_PTR(err);
1917 	}
1918 
1919 	set_delayed_call(done, kfree_link, ret);
1920 
1921 	return ret;
1922 }
1923 
1924 // clang-format off
1925 const struct inode_operations ntfs_link_inode_operations = {
1926 	.get_link	= ntfs_get_link,
1927 	.setattr	= ntfs3_setattr,
1928 	.listxattr	= ntfs_listxattr,
1929 	.permission	= ntfs_permission,
1930 	.get_acl	= ntfs_get_acl,
1931 	.set_acl	= ntfs_set_acl,
1932 };
1933 
1934 const struct address_space_operations ntfs_aops = {
1935 	.read_folio	= ntfs_read_folio,
1936 	.readahead	= ntfs_readahead,
1937 	.writepage	= ntfs_writepage,
1938 	.writepages	= ntfs_writepages,
1939 	.write_begin	= ntfs_write_begin,
1940 	.write_end	= ntfs_write_end,
1941 	.direct_IO	= ntfs_direct_IO,
1942 	.bmap		= ntfs_bmap,
1943 	.dirty_folio	= block_dirty_folio,
1944 	.invalidate_folio = block_invalidate_folio,
1945 };
1946 
1947 const struct address_space_operations ntfs_aops_cmpr = {
1948 	.read_folio	= ntfs_read_folio,
1949 	.readahead	= ntfs_readahead,
1950 };
1951 // clang-format on
1952