1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/nfs/dir.c
4  *
5  *  Copyright (C) 1992  Rick Sladkey
6  *
7  *  nfs directory handling functions
8  *
9  * 10 Apr 1996	Added silly rename for unlink	--okir
10  * 28 Sep 1996	Improved directory cache --okir
11  * 23 Aug 1997  Claus Heine claus@momo.math.rwth-aachen.de
12  *              Re-implemented silly rename for unlink, newly implemented
13  *              silly rename for nfs_rename() following the suggestions
14  *              of Olaf Kirch (okir) found in this file.
15  *              Following Linus comments on my original hack, this version
16  *              depends only on the dcache stuff and doesn't touch the inode
17  *              layer (iput() and friends).
18  *  6 Jun 1999	Cache readdir lookups in the page cache. -DaveM
19  */
20 
21 #include <linux/module.h>
22 #include <linux/time.h>
23 #include <linux/errno.h>
24 #include <linux/stat.h>
25 #include <linux/fcntl.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/mm.h>
30 #include <linux/sunrpc/clnt.h>
31 #include <linux/nfs_fs.h>
32 #include <linux/nfs_mount.h>
33 #include <linux/pagemap.h>
34 #include <linux/pagevec.h>
35 #include <linux/namei.h>
36 #include <linux/mount.h>
37 #include <linux/swap.h>
38 #include <linux/sched.h>
39 #include <linux/kmemleak.h>
40 #include <linux/xattr.h>
41 
42 #include "delegation.h"
43 #include "iostat.h"
44 #include "internal.h"
45 #include "fscache.h"
46 
47 #include "nfstrace.h"
48 
49 /* #define NFS_DEBUG_VERBOSE 1 */
50 
51 static int nfs_opendir(struct inode *, struct file *);
52 static int nfs_closedir(struct inode *, struct file *);
53 static int nfs_readdir(struct file *, struct dir_context *);
54 static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
55 static loff_t nfs_llseek_dir(struct file *, loff_t, int);
56 static void nfs_readdir_clear_array(struct page*);
57 
58 const struct file_operations nfs_dir_operations = {
59 	.llseek		= nfs_llseek_dir,
60 	.read		= generic_read_dir,
61 	.iterate_shared	= nfs_readdir,
62 	.open		= nfs_opendir,
63 	.release	= nfs_closedir,
64 	.fsync		= nfs_fsync_dir,
65 };
66 
67 const struct address_space_operations nfs_dir_aops = {
68 	.freepage = nfs_readdir_clear_array,
69 };
70 
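/*
 * Allocate a per-open directory context and link it into the inode's
 * open_files list. If this is the first opener and a data invalidation
 * was deferred, force revalidation of the cached directory contents.
 */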
71 static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir)
72 {
73 	struct nfs_inode *nfsi = NFS_I(dir);
74 	struct nfs_open_dir_context *ctx;
75 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
76 	if (ctx != NULL) {
77 		ctx->duped = 0;
78 		ctx->attr_gencount = nfsi->attr_gencount;
79 		ctx->dir_cookie = 0;
80 		ctx->dup_cookie = 0;
81 		spin_lock(&dir->i_lock);
82 		if (list_empty(&nfsi->open_files) &&
83 		    (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
84 			nfsi->cache_validity |= NFS_INO_INVALID_DATA |
85 				NFS_INO_REVAL_FORCED;
86 		list_add(&ctx->list, &nfsi->open_files);
87 		spin_unlock(&dir->i_lock);
88 		return ctx;
89 	}
90 	return ERR_PTR(-ENOMEM);
91 }
92 
93 static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_context *ctx)
94 {
95 	spin_lock(&dir->i_lock);
96 	list_del(&ctx->list);
97 	spin_unlock(&dir->i_lock);
98 	kfree(ctx);
99 }
100 
101 /*
102  * Open file
103  */
104 static int
105 nfs_opendir(struct inode *inode, struct file *filp)
106 {
107 	int res = 0;
108 	struct nfs_open_dir_context *ctx;
109 
110 	dfprintk(FILE, "NFS: open dir(%pD2)\n", filp);
111 
112 	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
113 
114 	ctx = alloc_nfs_open_dir_context(inode);
115 	if (IS_ERR(ctx)) {
116 		res = PTR_ERR(ctx);
117 		goto out;
118 	}
119 	filp->private_data = ctx;
120 out:
121 	return res;
122 }
123 
124 static int
125 nfs_closedir(struct inode *inode, struct file *filp)
126 {
127 	put_nfs_open_dir_context(file_inode(filp), filp->private_data);
128 	return 0;
129 }
130 
131 struct nfs_cache_array_entry {
132 	u64 cookie;
133 	u64 ino;
134 	const char *name;
135 	unsigned int name_len;
136 	unsigned char d_type;
137 };
138 
139 struct nfs_cache_array {
140 	u64 last_cookie;
141 	unsigned int size;
142 	unsigned char page_full : 1,
143 		      page_is_eof : 1;
144 	struct nfs_cache_array_entry array[];
145 };
146 
147 typedef struct nfs_readdir_descriptor {
148 	struct file	*file;
149 	struct page	*page;
150 	struct dir_context *ctx;
151 	pgoff_t		page_index;
152 	u64		dir_cookie;
153 	u64		last_cookie;
154 	u64		dup_cookie;
155 	loff_t		current_index;
156 	loff_t		prev_index;
157 
158 	unsigned long	dir_verifier;
159 	unsigned long	timestamp;
160 	unsigned long	gencount;
161 	unsigned long	attr_gencount;
162 	unsigned int	cache_entry_index;
163 	signed char duped;
164 	bool plus;
165 	bool eof;
166 } nfs_readdir_descriptor_t;
167 
168 static void nfs_readdir_array_init(struct nfs_cache_array *array)
169 {
170 	memset(array, 0, sizeof(struct nfs_cache_array));
171 }
172 
173 static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie)
174 {
175 	struct nfs_cache_array *array;
176 
177 	array = kmap_atomic(page);
178 	nfs_readdir_array_init(array);
179 	array->last_cookie = last_cookie;
180 	kunmap_atomic(array);
181 }
182 
183 /*
184  * we are freeing strings created by nfs_add_to_readdir_array()
185  */
186 static
187 void nfs_readdir_clear_array(struct page *page)
188 {
189 	struct nfs_cache_array *array;
190 	int i;
191 
192 	array = kmap_atomic(page);
193 	for (i = 0; i < array->size; i++)
194 		kfree(array->array[i].name);
195 	nfs_readdir_array_init(array);
196 	kunmap_atomic(array);
197 }
198 
199 static void nfs_readdir_array_set_eof(struct nfs_cache_array *array)
200 {
201 	array->page_is_eof = 1;
202 	array->page_full = 1;
203 }
204 
205 static bool nfs_readdir_array_is_full(struct nfs_cache_array *array)
206 {
207 	return array->page_full;
208 }
209 
210 /*
211  * The caller is responsible for freeing the returned name string.
212  * When called via nfs_readdir_add_to_array(), the strings will be freed
213  * in nfs_readdir_clear_array().
214  */
215 static const char *nfs_readdir_copy_name(const char *name, unsigned int len)
216 {
217 	const char *ret = kmemdup_nul(name, len, GFP_KERNEL);
218 
219 	/*
220 	 * Avoid a kmemleak false positive. The pointer to the name is stored
221 	 * in a page cache page which kmemleak does not scan.
222 	 */
223 	if (ret != NULL)
224 		kmemleak_not_leak(ret);
225 	return ret;
226 }
227 
228 /*
229  * Check that the next array entry lies entirely within the page bounds
230  */
231 static int nfs_readdir_array_can_expand(struct nfs_cache_array *array)
232 {
233 	struct nfs_cache_array_entry *cache_entry;
234 
235 	if (array->page_full)
236 		return -ENOSPC;
237 	cache_entry = &array->array[array->size + 1];
238 	if ((char *)cache_entry - (char *)array > PAGE_SIZE) {
239 		array->page_full = 1;
240 		return -ENOSPC;
241 	}
242 	return 0;
243 }
244 
245 static
246 int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
247 {
248 	struct nfs_cache_array *array;
249 	struct nfs_cache_array_entry *cache_entry;
250 	const char *name;
251 	int ret;
252 
253 	name = nfs_readdir_copy_name(entry->name, entry->len);
254 	if (!name)
255 		return -ENOMEM;
256 
257 	array = kmap_atomic(page);
258 	ret = nfs_readdir_array_can_expand(array);
259 	if (ret) {
260 		kfree(name);
261 		goto out;
262 	}
263 
264 	cache_entry = &array->array[array->size];
265 	cache_entry->cookie = entry->prev_cookie;
266 	cache_entry->ino = entry->ino;
267 	cache_entry->d_type = entry->d_type;
268 	cache_entry->name_len = entry->len;
269 	cache_entry->name = name;
270 	array->last_cookie = entry->cookie;
271 	array->size++;
272 	if (entry->eof != 0)
273 		nfs_readdir_array_set_eof(array);
274 out:
275 	kunmap_atomic(array);
276 	return ret;
277 }
278 
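/*
 * Return a locked page from the directory's page cache. A page that is
 * not yet up to date is initialized as an empty entry array starting at
 * @last_cookie, and any stale pages beyond it are invalidated.
 */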
279 static struct page *nfs_readdir_page_get_locked(struct address_space *mapping,
280 						pgoff_t index, u64 last_cookie)
281 {
282 	struct page *page;
283 
284 	page = grab_cache_page(mapping, index);
285 	if (page && !PageUptodate(page)) {
286 		nfs_readdir_page_init_array(page, last_cookie);
287 		if (invalidate_inode_pages2_range(mapping, index + 1, -1) < 0)
288 			nfs_zap_mapping(mapping->host, mapping);
289 		SetPageUptodate(page);
290 	}
291 
292 	return page;
293 }
294 
295 static u64 nfs_readdir_page_last_cookie(struct page *page)
296 {
297 	struct nfs_cache_array *array;
298 	u64 ret;
299 
300 	array = kmap_atomic(page);
301 	ret = array->last_cookie;
302 	kunmap_atomic(array);
303 	return ret;
304 }
305 
306 static bool nfs_readdir_page_needs_filling(struct page *page)
307 {
308 	struct nfs_cache_array *array;
309 	bool ret;
310 
311 	array = kmap_atomic(page);
312 	ret = !nfs_readdir_array_is_full(array);
313 	kunmap_atomic(array);
314 	return ret;
315 }
316 
317 static void nfs_readdir_page_set_eof(struct page *page)
318 {
319 	struct nfs_cache_array *array;
320 
321 	array = kmap_atomic(page);
322 	nfs_readdir_array_set_eof(array);
323 	kunmap_atomic(array);
324 }
325 
326 static void nfs_readdir_page_unlock_and_put(struct page *page)
327 {
328 	unlock_page(page);
329 	put_page(page);
330 }
331 
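/*
 * Get the next cache page, but only if its starting cookie matches the
 * one the caller expects; otherwise release the page and return NULL.
 */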
332 static struct page *nfs_readdir_page_get_next(struct address_space *mapping,
333 					      pgoff_t index, u64 cookie)
334 {
335 	struct page *page;
336 
337 	page = nfs_readdir_page_get_locked(mapping, index, cookie);
338 	if (page) {
339 		if (nfs_readdir_page_last_cookie(page) == cookie)
340 			return page;
341 		nfs_readdir_page_unlock_and_put(page);
342 	}
343 	return NULL;
344 }
345 
346 static inline
347 int is_32bit_api(void)
348 {
349 #ifdef CONFIG_COMPAT
350 	return in_compat_syscall();
351 #else
352 	return (BITS_PER_LONG == 32);
353 #endif
354 }
355 
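/*
 * Decide whether f_pos carries the raw 64-bit readdir cookie or a plain
 * entry index, as required when userspace expects 32-bit offsets.
 */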
356 static
357 bool nfs_readdir_use_cookie(const struct file *filp)
358 {
359 	if ((filp->f_mode & FMODE_32BITHASH) ||
360 	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
361 		return false;
362 	return true;
363 }
364 
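/*
 * Position-based search: map ctx->pos (an entry index) onto this page's
 * entry array and pick up the corresponding cookie. Returns -EAGAIN if
 * the position lies beyond this page, or -EBADCOOKIE at end of directory.
 */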
365 static
366 int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
367 {
368 	loff_t diff = desc->ctx->pos - desc->current_index;
369 	unsigned int index;
370 
371 	if (diff < 0)
372 		goto out_eof;
373 	if (diff >= array->size) {
374 		if (array->page_is_eof)
375 			goto out_eof;
376 		return -EAGAIN;
377 	}
378 
379 	index = (unsigned int)diff;
380 	desc->dir_cookie = array->array[index].cookie;
381 	desc->cache_entry_index = index;
382 	return 0;
383 out_eof:
384 	desc->eof = true;
385 	return -EBADCOOKIE;
386 }
387 
388 static bool
389 nfs_readdir_inode_mapping_valid(struct nfs_inode *nfsi)
390 {
391 	if (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
392 		return false;
393 	smp_rmb();
394 	return !test_bit(NFS_INO_INVALIDATING, &nfsi->flags);
395 }
396 
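/*
 * Cookie-based search: locate desc->dir_cookie in this page's entry
 * array and update ctx->pos accordingly. Returns -EAGAIN to continue
 * with the next page, -ELOOP if a duplicate-cookie loop is detected,
 * or -EBADCOOKIE once the end of the directory has been reached.
 */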
397 static
398 int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
399 {
400 	int i;
401 	loff_t new_pos;
402 	int status = -EAGAIN;
403 
404 	for (i = 0; i < array->size; i++) {
405 		if (array->array[i].cookie == desc->dir_cookie) {
406 			struct nfs_inode *nfsi = NFS_I(file_inode(desc->file));
407 
408 			new_pos = desc->current_index + i;
409 			if (desc->attr_gencount != nfsi->attr_gencount ||
410 			    !nfs_readdir_inode_mapping_valid(nfsi)) {
411 				desc->duped = 0;
412 				desc->attr_gencount = nfsi->attr_gencount;
413 			} else if (new_pos < desc->prev_index) {
414 				if (desc->duped > 0
415 				    && desc->dup_cookie == desc->dir_cookie) {
416 					if (printk_ratelimit()) {
417 						pr_notice("NFS: directory %pD2 contains a readdir loop. "
418 								"Please contact your server vendor.  "
419 								"The file: %s has duplicate cookie %llu\n",
420 								desc->file, array->array[i].name, desc->dir_cookie);
421 					}
422 					status = -ELOOP;
423 					goto out;
424 				}
425 				desc->dup_cookie = desc->dir_cookie;
426 				desc->duped = -1;
427 			}
428 			if (nfs_readdir_use_cookie(desc->file))
429 				desc->ctx->pos = desc->dir_cookie;
430 			else
431 				desc->ctx->pos = new_pos;
432 			desc->prev_index = new_pos;
433 			desc->cache_entry_index = i;
434 			return 0;
435 		}
436 	}
437 	if (array->page_is_eof) {
438 		status = -EBADCOOKIE;
439 		if (desc->dir_cookie == array->last_cookie)
440 			desc->eof = true;
441 	}
442 out:
443 	return status;
444 }
445 
446 static
447 int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc)
448 {
449 	struct nfs_cache_array *array;
450 	int status;
451 
452 	array = kmap_atomic(desc->page);
453 
454 	if (desc->dir_cookie == 0)
455 		status = nfs_readdir_search_for_pos(array, desc);
456 	else
457 		status = nfs_readdir_search_for_cookie(array, desc);
458 
459 	if (status == -EAGAIN) {
460 		desc->last_cookie = array->last_cookie;
461 		desc->current_index += array->size;
462 		desc->page_index++;
463 	}
464 	kunmap_atomic(array);
465 	return status;
466 }
467 
468 /* Fill a page with xdr information before transferring to the cache page */
469 static int nfs_readdir_xdr_filler(struct nfs_readdir_descriptor *desc,
470 				  u64 cookie, struct page **pages,
471 				  size_t bufsize)
472 {
473 	struct file *file = desc->file;
474 	struct inode *inode = file_inode(file);
475 	unsigned long	timestamp, gencount;
476 	int		error;
477 
478  again:
479 	timestamp = jiffies;
480 	gencount = nfs_inc_attr_generation_counter();
481 	desc->dir_verifier = nfs_save_change_attribute(inode);
482 	error = NFS_PROTO(inode)->readdir(file_dentry(file), file->f_cred,
483 					  cookie, pages, bufsize, desc->plus);
484 	if (error < 0) {
485 		/* We requested READDIRPLUS, but the server doesn't grok it */
486 		if (error == -ENOTSUPP && desc->plus) {
487 			NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
488 			clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
489 			desc->plus = false;
490 			goto again;
491 		}
492 		goto error;
493 	}
494 	desc->timestamp = timestamp;
495 	desc->gencount = gencount;
496 error:
497 	return error;
498 }
499 
500 static int xdr_decode(nfs_readdir_descriptor_t *desc,
501 		      struct nfs_entry *entry, struct xdr_stream *xdr)
502 {
503 	struct inode *inode = file_inode(desc->file);
504 	int error;
505 
506 	error = NFS_PROTO(inode)->decode_dirent(xdr, entry, desc->plus);
507 	if (error)
508 		return error;
509 	entry->fattr->time_start = desc->timestamp;
510 	entry->fattr->gencount = desc->gencount;
511 	return 0;
512 }
513 
514 /* Match file and dirent using either filehandle or fileid
515  * Note: caller is responsible for checking the fsid
516  */
517 static
518 int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
519 {
520 	struct inode *inode;
521 	struct nfs_inode *nfsi;
522 
523 	if (d_really_is_negative(dentry))
524 		return 0;
525 
526 	inode = d_inode(dentry);
527 	if (is_bad_inode(inode) || NFS_STALE(inode))
528 		return 0;
529 
530 	nfsi = NFS_I(inode);
531 	if (entry->fattr->fileid != nfsi->fileid)
532 		return 0;
533 	if (entry->fh->size && nfs_compare_fh(entry->fh, &nfsi->fh) != 0)
534 		return 0;
535 	return 1;
536 }
537 
538 static
539 bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx)
540 {
541 	if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
542 		return false;
543 	if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
544 		return true;
545 	if (ctx->pos == 0)
546 		return true;
547 	return false;
548 }
549 
550 /*
551  * This function is called by the lookup and getattr code to request the
552  * use of readdirplus to accelerate any future lookups in the same
553  * directory.
554  */
555 void nfs_advise_use_readdirplus(struct inode *dir)
556 {
557 	struct nfs_inode *nfsi = NFS_I(dir);
558 
559 	if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
560 	    !list_empty(&nfsi->open_files))
561 		set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
562 }
563 
564 /*
565  * This function is mainly for use by nfs_getattr().
566  *
567  * If this is an 'ls -l', we want to force use of readdirplus.
568  * Do this by checking if there is an active file descriptor
569  * and calling nfs_advise_use_readdirplus, then forcing a
570  * cache flush.
571  */
572 void nfs_force_use_readdirplus(struct inode *dir)
573 {
574 	struct nfs_inode *nfsi = NFS_I(dir);
575 
576 	if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
577 	    !list_empty(&nfsi->open_files)) {
578 		set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
579 		invalidate_mapping_pages(dir->i_mapping,
580 			nfsi->page_index + 1, -1);
581 	}
582 }
583 
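/*
 * Use a READDIRPLUS entry to instantiate or refresh the matching dentry
 * and inode in the dcache. Entries for ".", "..", or names containing
 * '\0' or '/' are ignored.
 */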
584 static
585 void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry,
586 		unsigned long dir_verifier)
587 {
588 	struct qstr filename = QSTR_INIT(entry->name, entry->len);
589 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
590 	struct dentry *dentry;
591 	struct dentry *alias;
592 	struct inode *inode;
593 	int status;
594 
595 	if (!(entry->fattr->valid & NFS_ATTR_FATTR_FILEID))
596 		return;
597 	if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
598 		return;
599 	if (filename.len == 0)
600 		return;
601 	/* Validate that the name doesn't contain any illegal '\0' */
602 	if (strnlen(filename.name, filename.len) != filename.len)
603 		return;
604 	/* ...or '/' */
605 	if (strnchr(filename.name, filename.len, '/'))
606 		return;
607 	if (filename.name[0] == '.') {
608 		if (filename.len == 1)
609 			return;
610 		if (filename.len == 2 && filename.name[1] == '.')
611 			return;
612 	}
613 	filename.hash = full_name_hash(parent, filename.name, filename.len);
614 
615 	dentry = d_lookup(parent, &filename);
616 again:
617 	if (!dentry) {
618 		dentry = d_alloc_parallel(parent, &filename, &wq);
619 		if (IS_ERR(dentry))
620 			return;
621 	}
622 	if (!d_in_lookup(dentry)) {
623 		/* Is there a mountpoint here? If so, just exit */
624 		if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
625 					&entry->fattr->fsid))
626 			goto out;
627 		if (nfs_same_file(dentry, entry)) {
628 			if (!entry->fh->size)
629 				goto out;
630 			nfs_set_verifier(dentry, dir_verifier);
631 			status = nfs_refresh_inode(d_inode(dentry), entry->fattr);
632 			if (!status)
633 				nfs_setsecurity(d_inode(dentry), entry->fattr, entry->label);
634 			goto out;
635 		} else {
636 			d_invalidate(dentry);
637 			dput(dentry);
638 			dentry = NULL;
639 			goto again;
640 		}
641 	}
642 	if (!entry->fh->size) {
643 		d_lookup_done(dentry);
644 		goto out;
645 	}
646 
647 	inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr, entry->label);
648 	alias = d_splice_alias(inode, dentry);
649 	d_lookup_done(dentry);
650 	if (alias) {
651 		if (IS_ERR(alias))
652 			goto out;
653 		dput(dentry);
654 		dentry = alias;
655 	}
656 	nfs_set_verifier(dentry, dir_verifier);
657 out:
658 	dput(dentry);
659 }
660 
661 /* Perform conversion from xdr to cache array */
662 static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc,
663 				   struct nfs_entry *entry,
664 				   struct page **xdr_pages,
665 				   struct page *fillme, unsigned int buflen)
666 {
667 	struct address_space *mapping = desc->file->f_mapping;
668 	struct xdr_stream stream;
669 	struct xdr_buf buf;
670 	struct page *scratch, *new, *page = fillme;
671 	int status;
672 
673 	scratch = alloc_page(GFP_KERNEL);
674 	if (scratch == NULL)
675 		return -ENOMEM;
676 
677 	xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen);
678 	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
679 
680 	do {
681 		if (entry->label)
682 			entry->label->len = NFS4_MAXLABELLEN;
683 
684 		status = xdr_decode(desc, entry, &stream);
685 		if (status != 0)
686 			break;
687 
688 		if (desc->plus)
689 			nfs_prime_dcache(file_dentry(desc->file), entry,
690 					desc->dir_verifier);
691 
692 		status = nfs_readdir_add_to_array(entry, page);
693 		if (status != -ENOSPC)
694 			continue;
695 
696 		if (page->mapping != mapping)
697 			break;
698 		new = nfs_readdir_page_get_next(mapping, page->index + 1,
699 						entry->prev_cookie);
700 		if (!new)
701 			break;
702 		if (page != fillme)
703 			nfs_readdir_page_unlock_and_put(page);
704 		page = new;
705 		status = nfs_readdir_add_to_array(entry, page);
706 	} while (!status && !entry->eof);
707 
708 	switch (status) {
709 	case -EBADCOOKIE:
710 		if (entry->eof) {
711 			nfs_readdir_page_set_eof(page);
712 			status = 0;
713 		}
714 		break;
715 	case -ENOSPC:
716 	case -EAGAIN:
717 		status = 0;
718 		break;
719 	}
720 
721 	if (page != fillme)
722 		nfs_readdir_page_unlock_and_put(page);
723 
724 	put_page(scratch);
725 	return status;
726 }
727 
728 static void nfs_readdir_free_pages(struct page **pages, size_t npages)
729 {
730 	while (npages--)
731 		put_page(pages[npages]);
732 	kfree(pages);
733 }
734 
735 /*
736  * nfs_readdir_alloc_pages() will allocate pages that must be freed with a call
737  * to nfs_readdir_free_pages()
738  */
739 static struct page **nfs_readdir_alloc_pages(size_t npages)
740 {
741 	struct page **pages;
742 	size_t i;
743 
744 	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
745 	if (!pages)
746 		return NULL;
747 	for (i = 0; i < npages; i++) {
748 		struct page *page = alloc_page(GFP_KERNEL);
749 		if (page == NULL)
750 			goto out_freepages;
751 		pages[i] = page;
752 	}
753 	return pages;
754 
755 out_freepages:
756 	nfs_readdir_free_pages(pages, i);
757 	return NULL;
758 }
759 
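/*
 * Fill @page with entries: issue READDIR(PLUS) calls starting at the
 * page's last cookie and decode the replies into the cache array until
 * the page is full, the end of the directory is reached, or an error
 * occurs.
 */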
760 static
761 int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
762 {
763 	struct page **pages;
764 	struct nfs_entry entry;
765 	size_t array_size;
766 	size_t dtsize = NFS_SERVER(inode)->dtsize;
767 	int status = -ENOMEM;
768 
769 	entry.prev_cookie = 0;
770 	entry.cookie = nfs_readdir_page_last_cookie(page);
771 	entry.eof = 0;
772 	entry.fh = nfs_alloc_fhandle();
773 	entry.fattr = nfs_alloc_fattr();
774 	entry.server = NFS_SERVER(inode);
775 	if (entry.fh == NULL || entry.fattr == NULL)
776 		goto out;
777 
778 	entry.label = nfs4_label_alloc(NFS_SERVER(inode), GFP_NOWAIT);
779 	if (IS_ERR(entry.label)) {
780 		status = PTR_ERR(entry.label);
781 		goto out;
782 	}
783 
784 	array_size = (dtsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
785 	pages = nfs_readdir_alloc_pages(array_size);
786 	if (!pages)
787 		goto out_release_label;
788 
789 	do {
790 		unsigned int pglen;
791 		status = nfs_readdir_xdr_filler(desc, entry.cookie,
792 						pages, dtsize);
793 		if (status < 0)
794 			break;
795 
796 		pglen = status;
797 		if (pglen == 0) {
798 			nfs_readdir_page_set_eof(page);
799 			break;
800 		}
801 
802 		status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
803 	} while (!status && nfs_readdir_page_needs_filling(page));
804 
805 	nfs_readdir_free_pages(pages, array_size);
806 out_release_label:
807 	nfs4_label_free(entry.label);
808 out:
809 	nfs_free_fattr(entry.fattr);
810 	nfs_free_fhandle(entry.fh);
811 	return status;
812 }
813 
814 static void nfs_readdir_page_put(struct nfs_readdir_descriptor *desc)
815 {
816 	put_page(desc->page);
817 	desc->page = NULL;
818 }
819 
820 static void
821 nfs_readdir_page_unlock_and_put_cached(struct nfs_readdir_descriptor *desc)
822 {
823 	unlock_page(desc->page);
824 	nfs_readdir_page_put(desc);
825 }
826 
827 static struct page *
828 nfs_readdir_page_get_cached(struct nfs_readdir_descriptor *desc)
829 {
830 	return nfs_readdir_page_get_locked(desc->file->f_mapping,
831 					   desc->page_index,
832 					   desc->last_cookie);
833 }
834 
835 /*
836  * Returns 0 if desc->dir_cookie was found on page desc->page_index
837  * and locks the page to prevent removal from the page cache.
838  */
839 static
840 int find_and_lock_cache_page(nfs_readdir_descriptor_t *desc)
841 {
842 	struct inode *inode = file_inode(desc->file);
843 	struct nfs_inode *nfsi = NFS_I(inode);
844 	int res;
845 
846 	desc->page = nfs_readdir_page_get_cached(desc);
847 	if (!desc->page)
848 		return -ENOMEM;
849 	if (nfs_readdir_page_needs_filling(desc->page)) {
850 		res = nfs_readdir_xdr_to_array(desc, desc->page, inode);
851 		if (res < 0)
852 			goto error;
853 	}
854 	res = nfs_readdir_search_array(desc);
855 	if (res == 0) {
856 		nfsi->page_index = desc->page_index;
857 		return 0;
858 	}
859 error:
860 	nfs_readdir_page_unlock_and_put_cached(desc);
861 	return res;
862 }
863 
864 /* Search for desc->dir_cookie from the beginning of the page cache */
865 static inline
866 int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
867 {
868 	int res;
869 
870 	if (desc->page_index == 0) {
871 		desc->current_index = 0;
872 		desc->prev_index = 0;
873 		desc->last_cookie = 0;
874 	}
875 	do {
876 		res = find_and_lock_cache_page(desc);
877 	} while (res == -EAGAIN);
878 	return res;
879 }
880 
881 /*
882  * Once we've found the start of the dirent within a page: fill 'er up...
883  */
884 static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
885 {
886 	struct file	*file = desc->file;
887 	struct nfs_cache_array *array;
888 	unsigned int i = 0;
889 
890 	array = kmap(desc->page);
891 	for (i = desc->cache_entry_index; i < array->size; i++) {
892 		struct nfs_cache_array_entry *ent;
893 
894 		ent = &array->array[i];
895 		if (!dir_emit(desc->ctx, ent->name, ent->name_len,
896 		    nfs_compat_user_ino64(ent->ino), ent->d_type)) {
897 			desc->eof = true;
898 			break;
899 		}
900 		if (i < (array->size-1))
901 			desc->dir_cookie = array->array[i+1].cookie;
902 		else
903 			desc->dir_cookie = array->last_cookie;
904 		if (nfs_readdir_use_cookie(file))
905 			desc->ctx->pos = desc->dir_cookie;
906 		else
907 			desc->ctx->pos++;
908 		if (desc->duped != 0)
909 			desc->duped = 1;
910 	}
911 	if (array->page_is_eof)
912 		desc->eof = true;
913 
914 	kunmap(desc->page);
915 	dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n",
916 			(unsigned long long)desc->dir_cookie);
917 }
918 
919 /*
920  * If we cannot find a cookie in our cache, we suspect that this is
921  * because it points to a deleted file, so we ask the server to return
922  * whatever it thinks is the next entry. We then feed this to filldir.
923  * If all goes well, we should then be able to find our way round the
924  * cache on the next call to readdir_search_pagecache();
925  *
926  * NOTE: we cannot add the anonymous page to the pagecache because
927  *	 the data it contains might not be page aligned. Besides,
928  *	 we should already have a complete representation of the
929  *	 directory in the page cache by the time we get here.
930  */
931 static inline
932 int uncached_readdir(nfs_readdir_descriptor_t *desc)
933 {
934 	struct page	*page = NULL;
935 	int		status;
936 	struct inode *inode = file_inode(desc->file);
937 
938 	dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n",
939 			(unsigned long long)desc->dir_cookie);
940 
941 	page = alloc_page(GFP_HIGHUSER);
942 	if (!page) {
943 		status = -ENOMEM;
944 		goto out;
945 	}
946 
947 	desc->page_index = 0;
948 	desc->last_cookie = desc->dir_cookie;
949 	desc->page = page;
950 	desc->duped = 0;
951 
952 	nfs_readdir_page_init_array(page, desc->dir_cookie);
953 	status = nfs_readdir_xdr_to_array(desc, page, inode);
954 	if (status < 0)
955 		goto out_release;
956 
957 	nfs_do_filldir(desc);
958 
959  out_release:
960 	nfs_readdir_clear_array(desc->page);
961 	nfs_readdir_page_put(desc);
962  out:
963 	dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
964 			__func__, status);
965 	return status;
966 }
967 
968 /* The file offset position represents the dirent entry number. A last
969  * cookie cache takes care of the common case of reading the whole
970  * directory.
971  */
972 static int nfs_readdir(struct file *file, struct dir_context *ctx)
973 {
974 	struct dentry	*dentry = file_dentry(file);
975 	struct inode	*inode = d_inode(dentry);
976 	struct nfs_open_dir_context *dir_ctx = file->private_data;
977 	nfs_readdir_descriptor_t my_desc = {
978 		.file = file,
979 		.ctx = ctx,
980 		.plus = nfs_use_readdirplus(inode, ctx),
981 	},
982 			*desc = &my_desc;
983 	int res = 0;
984 
985 	dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
986 			file, (long long)ctx->pos);
987 	nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);
988 
989 	/*
990 	 * ctx->pos points to the dirent entry number.
991 	 * desc->dir_cookie holds the cookie for the next entry. We have
992 	 * to either find the entry with the appropriate number or
993 	 * revalidate the cookie.
994 	 */
995 	if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
996 		res = nfs_revalidate_mapping(inode, file->f_mapping);
997 	if (res < 0)
998 		goto out;
999 
1000 	spin_lock(&file->f_lock);
1001 	desc->dir_cookie = dir_ctx->dir_cookie;
1002 	desc->dup_cookie = dir_ctx->dup_cookie;
1003 	desc->duped = dir_ctx->duped;
1004 	desc->attr_gencount = dir_ctx->attr_gencount;
1005 	spin_unlock(&file->f_lock);
1006 
1007 	do {
1008 		res = readdir_search_pagecache(desc);
1009 
1010 		if (res == -EBADCOOKIE) {
1011 			res = 0;
1012 			/* This means either end of directory */
1013 			if (desc->dir_cookie && !desc->eof) {
1014 				/* Or that the server has 'lost' a cookie */
1015 				res = uncached_readdir(desc);
1016 				if (res == 0)
1017 					continue;
1018 			}
1019 			break;
1020 		}
1021 		if (res == -ETOOSMALL && desc->plus) {
1022 			clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
1023 			nfs_zap_caches(inode);
1024 			desc->page_index = 0;
1025 			desc->plus = false;
1026 			desc->eof = false;
1027 			continue;
1028 		}
1029 		if (res < 0)
1030 			break;
1031 
1032 		nfs_do_filldir(desc);
1033 		nfs_readdir_page_unlock_and_put_cached(desc);
1034 	} while (!desc->eof);
1035 
1036 	spin_lock(&file->f_lock);
1037 	dir_ctx->dir_cookie = desc->dir_cookie;
1038 	dir_ctx->dup_cookie = desc->dup_cookie;
1039 	dir_ctx->duped = desc->duped;
1040 	dir_ctx->attr_gencount = desc->attr_gencount;
1041 	spin_unlock(&file->f_lock);
1042 
1043 out:
1044 	dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
1045 	return res;
1046 }
1047 
1048 static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
1049 {
1050 	struct nfs_open_dir_context *dir_ctx = filp->private_data;
1051 
1052 	dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
1053 			filp, offset, whence);
1054 
1055 	switch (whence) {
1056 	default:
1057 		return -EINVAL;
1058 	case SEEK_SET:
1059 		if (offset < 0)
1060 			return -EINVAL;
1061 		spin_lock(&filp->f_lock);
1062 		break;
1063 	case SEEK_CUR:
1064 		if (offset == 0)
1065 			return filp->f_pos;
1066 		spin_lock(&filp->f_lock);
1067 		offset += filp->f_pos;
1068 		if (offset < 0) {
1069 			spin_unlock(&filp->f_lock);
1070 			return -EINVAL;
1071 		}
1072 	}
1073 	if (offset != filp->f_pos) {
1074 		filp->f_pos = offset;
1075 		if (nfs_readdir_use_cookie(filp))
1076 			dir_ctx->dir_cookie = offset;
1077 		else
1078 			dir_ctx->dir_cookie = 0;
1079 		dir_ctx->duped = 0;
1080 	}
1081 	spin_unlock(&filp->f_lock);
1082 	return offset;
1083 }
1084 
1085 /*
1086  * All directory operations under NFS are synchronous, so fsync()
1087  * is a dummy operation.
1088  */
1089 static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
1090 			 int datasync)
1091 {
1092 	dfprintk(FILE, "NFS: fsync dir(%pD2) datasync %d\n", filp, datasync);
1093 
1094 	nfs_inc_stats(file_inode(filp), NFSIOS_VFSFSYNC);
1095 	return 0;
1096 }
1097 
1098 /**
1099  * nfs_force_lookup_revalidate - Mark the directory as having changed
1100  * @dir: pointer to directory inode
1101  *
1102  * This forces the revalidation code in nfs_lookup_revalidate() to do a
1103  * full lookup on all child dentries of 'dir' whenever a change occurs
1104  * on the server that might have invalidated our dcache.
1105  *
1106  * Note that we reserve bit '0' as a tag to let us know when a dentry
1107  * was revalidated while holding a delegation on its inode.
1108  *
1109  * The caller should be holding dir->i_lock
1110  */
1111 void nfs_force_lookup_revalidate(struct inode *dir)
1112 {
1113 	NFS_I(dir)->cache_change_attribute += 2;
1114 }
1115 EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate);
1116 
1117 /**
1118  * nfs_verify_change_attribute - Detects NFS remote directory changes
1119  * @dir: pointer to parent directory inode
1120  * @verf: previously saved change attribute
1121  *
1122  * Return "false" if the verifier doesn't match the change attribute.
1123  * This would usually indicate that the directory contents have changed on
1124  * the server, and that any dentries need revalidating.
1125  */
1126 static bool nfs_verify_change_attribute(struct inode *dir, unsigned long verf)
1127 {
1128 	return (verf & ~1UL) == nfs_save_change_attribute(dir);
1129 }
1130 
1131 static void nfs_set_verifier_delegated(unsigned long *verf)
1132 {
1133 	*verf |= 1UL;
1134 }
1135 
1136 #if IS_ENABLED(CONFIG_NFS_V4)
1137 static void nfs_unset_verifier_delegated(unsigned long *verf)
1138 {
1139 	*verf &= ~1UL;
1140 }
1141 #endif /* IS_ENABLED(CONFIG_NFS_V4) */
1142 
1143 static bool nfs_test_verifier_delegated(unsigned long verf)
1144 {
1145 	return verf & 1;
1146 }
1147 
1148 static bool nfs_verifier_is_delegated(struct dentry *dentry)
1149 {
1150 	return nfs_test_verifier_delegated(dentry->d_time);
1151 }
1152 
1153 static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
1154 {
1155 	struct inode *inode = d_inode(dentry);
1156 
1157 	if (!nfs_verifier_is_delegated(dentry) &&
1158 	    !nfs_verify_change_attribute(d_inode(dentry->d_parent), verf))
1159 		goto out;
1160 	if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
1161 		nfs_set_verifier_delegated(&verf);
1162 out:
1163 	dentry->d_time = verf;
1164 }
1165 
1166 /**
1167  * nfs_set_verifier - save a parent directory verifier in the dentry
1168  * @dentry: pointer to dentry
1169  * @verf: verifier to save
1170  *
1171  * Saves the parent directory verifier in @dentry. If the inode has
1172  * a delegation, we also tag the dentry as having been revalidated
1173  * while holding a delegation so that we know we don't have to
1174  * look it up again after a directory change.
1175  */
1176 void nfs_set_verifier(struct dentry *dentry, unsigned long verf)
1177 {
1178 
1179 	spin_lock(&dentry->d_lock);
1180 	nfs_set_verifier_locked(dentry, verf);
1181 	spin_unlock(&dentry->d_lock);
1182 }
1183 EXPORT_SYMBOL_GPL(nfs_set_verifier);
1184 
1185 #if IS_ENABLED(CONFIG_NFS_V4)
1186 /**
1187  * nfs_clear_verifier_delegated - clear the dir verifier delegation tag
1188  * @inode: pointer to inode
1189  *
1190  * Iterates through the dentries in the inode alias list and clears
1191  * the tag used to indicate that the dentry has been revalidated
1192  * while holding a delegation.
1193  * This function is intended for use when the delegation is being
1194  * returned or revoked.
1195  */
1196 void nfs_clear_verifier_delegated(struct inode *inode)
1197 {
1198 	struct dentry *alias;
1199 
1200 	if (!inode)
1201 		return;
1202 	spin_lock(&inode->i_lock);
1203 	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
1204 		spin_lock(&alias->d_lock);
1205 		nfs_unset_verifier_delegated(&alias->d_time);
1206 		spin_unlock(&alias->d_lock);
1207 	}
1208 	spin_unlock(&inode->i_lock);
1209 }
1210 EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated);
1211 #endif /* IS_ENABLED(CONFIG_NFS_V4) */
1212 
1213 /*
1214  * A check for whether or not the parent directory has changed.
1215  * In the case it has, we assume that the dentries are untrustworthy
1216  * and may need to be looked up again.
1217  * If rcu_walk prevents us from performing a full check, return 0.
1218  */
1219 static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
1220 			      int rcu_walk)
1221 {
1222 	if (IS_ROOT(dentry))
1223 		return 1;
1224 	if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
1225 		return 0;
1226 	if (!nfs_verify_change_attribute(dir, dentry->d_time))
1227 		return 0;
1228 	/* Revalidate nfsi->cache_change_attribute before we declare a match */
1229 	if (nfs_mapping_need_revalidate_inode(dir)) {
1230 		if (rcu_walk)
1231 			return 0;
1232 		if (__nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
1233 			return 0;
1234 	}
1235 	if (!nfs_verify_change_attribute(dir, dentry->d_time))
1236 		return 0;
1237 	return 1;
1238 }
1239 
1240 /*
1241  * Use intent information to check whether or not we're going to do
1242  * an O_EXCL create using this path component.
1243  */
1244 static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags)
1245 {
1246 	if (NFS_PROTO(dir)->version == 2)
1247 		return 0;
1248 	return flags & LOOKUP_EXCL;
1249 }
1250 
1251 /*
1252  * Inode and filehandle revalidation for lookups.
1253  *
1254  * We force revalidation in the cases where the VFS sets LOOKUP_REVAL,
1255  * or if the intent information indicates that we're about to open this
1256  * particular file and the "nocto" mount flag is not set.
1257  *
1258  */
1259 static
1260 int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags)
1261 {
1262 	struct nfs_server *server = NFS_SERVER(inode);
1263 	int ret;
1264 
1265 	if (IS_AUTOMOUNT(inode))
1266 		return 0;
1267 
1268 	if (flags & LOOKUP_OPEN) {
1269 		switch (inode->i_mode & S_IFMT) {
1270 		case S_IFREG:
1271 			/* An NFSv4 OPEN will revalidate later */
1272 			if (server->caps & NFS_CAP_ATOMIC_OPEN)
1273 				goto out;
1274 			fallthrough;
1275 		case S_IFDIR:
1276 			if (server->flags & NFS_MOUNT_NOCTO)
1277 				break;
1278 			/* NFS close-to-open cache consistency validation */
1279 			goto out_force;
1280 		}
1281 	}
1282 
1283 	/* VFS wants an on-the-wire revalidation */
1284 	if (flags & LOOKUP_REVAL)
1285 		goto out_force;
1286 out:
1287 	return (inode->i_nlink == 0) ? -ESTALE : 0;
1288 out_force:
1289 	if (flags & LOOKUP_RCU)
1290 		return -ECHILD;
1291 	ret = __nfs_revalidate_inode(server, inode);
1292 	if (ret != 0)
1293 		return ret;
1294 	goto out;
1295 }
1296 
1297 /*
1298  * We judge how long we want to trust negative
1299  * dentries by looking at the parent inode mtime.
1300  *
1301  * If parent mtime has changed, we revalidate, else we wait for a
1302  * period corresponding to the parent's attribute cache timeout value.
1303  *
1304  * If LOOKUP_RCU prevents us from performing a full check, return 1
1305  * suggesting a reval is needed.
1306  *
1307  * Note that when creating a new file, or looking up a rename target,
1308  * then it shouldn't be necessary to revalidate a negative dentry.
1309  */
1310 static inline
1311 int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
1312 		       unsigned int flags)
1313 {
1314 	if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
1315 		return 0;
1316 	if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG)
1317 		return 1;
1318 	return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
1319 }
1320 
1321 static int
1322 nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
1323 			   struct inode *inode, int error)
1324 {
1325 	switch (error) {
1326 	case 1:
1327 		dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
1328 			__func__, dentry);
1329 		return 1;
1330 	case 0:
1331 		nfs_mark_for_revalidate(dir);
1332 		if (inode && S_ISDIR(inode->i_mode)) {
1333 			/* Purge readdir caches. */
1334 			nfs_zap_caches(inode);
1335 			/*
1336 			 * We can't d_drop the root of a disconnected tree:
1337 			 * its d_hash is on the s_anon list and d_drop() would hide
1338 			 * it from shrink_dcache_for_unmount(), leading to busy
1339 			 * inodes on unmount and further oopses.
1340 			 */
1341 			if (IS_ROOT(dentry))
1342 				return 1;
1343 		}
1344 		dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
1345 				__func__, dentry);
1346 		return 0;
1347 	}
1348 	dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
1349 				__func__, dentry, error);
1350 	return error;
1351 }
1352 
1353 static int
1354 nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry,
1355 			       unsigned int flags)
1356 {
1357 	int ret = 1;
1358 	if (nfs_neg_need_reval(dir, dentry, flags)) {
1359 		if (flags & LOOKUP_RCU)
1360 			return -ECHILD;
1361 		ret = 0;
1362 	}
1363 	return nfs_lookup_revalidate_done(dir, dentry, NULL, ret);
1364 }
1365 
1366 static int
1367 nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
1368 				struct inode *inode)
1369 {
1370 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
1371 	return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
1372 }
1373 
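/*
 * Revalidate a positive dentry with an on-the-wire LOOKUP: check that
 * the filehandle is unchanged and refresh the cached attributes and
 * security label. Returns 1 if the dentry can be trusted, 0 if it must
 * be invalidated, or a negative errno.
 */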
1374 static int
1375 nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
1376 			     struct inode *inode)
1377 {
1378 	struct nfs_fh *fhandle;
1379 	struct nfs_fattr *fattr;
1380 	struct nfs4_label *label;
1381 	unsigned long dir_verifier;
1382 	int ret;
1383 
1384 	ret = -ENOMEM;
1385 	fhandle = nfs_alloc_fhandle();
1386 	fattr = nfs_alloc_fattr();
1387 	label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
1388 	if (fhandle == NULL || fattr == NULL || IS_ERR(label))
1389 		goto out;
1390 
1391 	dir_verifier = nfs_save_change_attribute(dir);
1392 	ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
1393 	if (ret < 0) {
1394 		switch (ret) {
1395 		case -ESTALE:
1396 		case -ENOENT:
1397 			ret = 0;
1398 			break;
1399 		case -ETIMEDOUT:
1400 			if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
1401 				ret = 1;
1402 		}
1403 		goto out;
1404 	}
1405 	ret = 0;
1406 	if (nfs_compare_fh(NFS_FH(inode), fhandle))
1407 		goto out;
1408 	if (nfs_refresh_inode(inode, fattr) < 0)
1409 		goto out;
1410 
1411 	nfs_setsecurity(inode, fattr, label);
1412 	nfs_set_verifier(dentry, dir_verifier);
1413 
1414 	/* set a readdirplus hint that we had a cache miss */
1415 	nfs_force_use_readdirplus(dir);
1416 	ret = 1;
1417 out:
1418 	nfs_free_fattr(fattr);
1419 	nfs_free_fhandle(fhandle);
1420 	nfs4_label_free(label);
1421 	return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
1422 }
1423 
1424 /*
1425  * This is called every time the dcache has a lookup hit,
1426  * and we should check whether we can really trust that
1427  * lookup.
1428  *
1429  * NOTE! The hit can be a negative hit too, don't assume
1430  * we have an inode!
1431  *
1432  * If the parent directory is seen to have changed, we throw out the
1433  * cached dentry and do a new lookup.
1434  */
1435 static int
1436 nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
1437 			 unsigned int flags)
1438 {
1439 	struct inode *inode;
1440 	int error;
1441 
1442 	nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
1443 	inode = d_inode(dentry);
1444 
1445 	if (!inode)
1446 		return nfs_lookup_revalidate_negative(dir, dentry, flags);
1447 
1448 	if (is_bad_inode(inode)) {
1449 		dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
1450 				__func__, dentry);
1451 		goto out_bad;
1452 	}
1453 
1454 	if (nfs_verifier_is_delegated(dentry))
1455 		return nfs_lookup_revalidate_delegated(dir, dentry, inode);
1456 
1457 	/* Force a full look up iff the parent directory has changed */
1458 	if (!(flags & (LOOKUP_EXCL | LOOKUP_REVAL)) &&
1459 	    nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
1460 		error = nfs_lookup_verify_inode(inode, flags);
1461 		if (error) {
1462 			if (error == -ESTALE)
1463 				nfs_zap_caches(dir);
1464 			goto out_bad;
1465 		}
1466 		nfs_advise_use_readdirplus(dir);
1467 		goto out_valid;
1468 	}
1469 
1470 	if (flags & LOOKUP_RCU)
1471 		return -ECHILD;
1472 
1473 	if (NFS_STALE(inode))
1474 		goto out_bad;
1475 
1476 	trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
1477 	error = nfs_lookup_revalidate_dentry(dir, dentry, inode);
1478 	trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
1479 	return error;
1480 out_valid:
1481 	return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
1482 out_bad:
1483 	if (flags & LOOKUP_RCU)
1484 		return -ECHILD;
1485 	return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
1486 }
1487 
1488 static int
1489 __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
1490 			int (*reval)(struct inode *, struct dentry *, unsigned int))
1491 {
1492 	struct dentry *parent;
1493 	struct inode *dir;
1494 	int ret;
1495 
1496 	if (flags & LOOKUP_RCU) {
1497 		parent = READ_ONCE(dentry->d_parent);
1498 		dir = d_inode_rcu(parent);
1499 		if (!dir)
1500 			return -ECHILD;
1501 		ret = reval(dir, dentry, flags);
1502 		if (parent != READ_ONCE(dentry->d_parent))
1503 			return -ECHILD;
1504 	} else {
1505 		parent = dget_parent(dentry);
1506 		ret = reval(d_inode(parent), dentry, flags);
1507 		dput(parent);
1508 	}
1509 	return ret;
1510 }
1511 
1512 static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
1513 {
1514 	return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
1515 }
1516 
1517 /*
1518  * A weaker form of d_revalidate for revalidating just the d_inode(dentry)
1519  * when we don't really care about the dentry name. This is called when a
1520  * pathwalk ends on a dentry that was not found via a normal lookup in the
1521  * parent dir (e.g.: ".", "..", procfs symlinks or mountpoint traversals).
1522  *
1523  * In this situation, we just want to verify that the inode itself is OK
1524  * since the dentry might have changed on the server.
1525  */
1526 static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
1527 {
1528 	struct inode *inode = d_inode(dentry);
1529 	int error = 0;
1530 
1531 	/*
1532 	 * I believe we can only get a negative dentry here in the case of a
1533 	 * procfs-style symlink. Just assume it's correct for now, but we may
1534 	 * eventually need to do something more here.
1535 	 */
1536 	if (!inode) {
1537 		dfprintk(LOOKUPCACHE, "%s: %pd2 has negative inode\n",
1538 				__func__, dentry);
1539 		return 1;
1540 	}
1541 
1542 	if (is_bad_inode(inode)) {
1543 		dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
1544 				__func__, dentry);
1545 		return 0;
1546 	}
1547 
1548 	error = nfs_lookup_verify_inode(inode, flags);
1549 	dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
1550 			__func__, inode->i_ino, error ? "invalid" : "valid");
1551 	return !error;
1552 }
1553 
1554 /*
1555  * This is called from dput() when d_count is going to 0.
1556  */
1557 static int nfs_dentry_delete(const struct dentry *dentry)
1558 {
1559 	dfprintk(VFS, "NFS: dentry_delete(%pd2, %x)\n",
1560 		dentry, dentry->d_flags);
1561 
1562 	/* Unhash any dentry with a stale inode */
1563 	if (d_really_is_positive(dentry) && NFS_STALE(d_inode(dentry)))
1564 		return 1;
1565 
1566 	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
1567 		/* Unhash it, so that ->d_iput() would be called */
1568 		return 1;
1569 	}
1570 	if (!(dentry->d_sb->s_flags & SB_ACTIVE)) {
1571 		/* Unhash it, so that ancestors of killed async unlink
1572 		 * files will be cleaned up during umount */
1573 		return 1;
1574 	}
1575 	return 0;
1576 
1577 }
1578 
1579 /* Ensure that we revalidate inode->i_nlink */
1580 static void nfs_drop_nlink(struct inode *inode)
1581 {
1582 	spin_lock(&inode->i_lock);
1583 	/* drop the inode if we're reasonably sure this is the last link */
1584 	if (inode->i_nlink > 0)
1585 		drop_nlink(inode);
1586 	NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter();
1587 	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
1588 		| NFS_INO_INVALID_CTIME
1589 		| NFS_INO_INVALID_OTHER
1590 		| NFS_INO_REVAL_FORCED;
1591 	spin_unlock(&inode->i_lock);
1592 }
1593 
1594 /*
1595  * Called when the dentry loses inode.
1596  * We use it to clean up silly-renamed files.
1597  */
1598 static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
1599 {
1600 	if (S_ISDIR(inode->i_mode))
1601 		/* drop any readdir cache as it could easily be old */
1602 		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
1603 
1604 	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
1605 		nfs_complete_unlink(dentry, inode);
1606 		nfs_drop_nlink(inode);
1607 	}
1608 	iput(inode);
1609 }
1610 
1611 static void nfs_d_release(struct dentry *dentry)
1612 {
1613 	/* free cached devname value, if it survived that far */
1614 	if (unlikely(dentry->d_fsdata)) {
1615 		if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
1616 			WARN_ON(1);
1617 		else
1618 			kfree(dentry->d_fsdata);
1619 	}
1620 }
1621 
1622 const struct dentry_operations nfs_dentry_operations = {
1623 	.d_revalidate	= nfs_lookup_revalidate,
1624 	.d_weak_revalidate	= nfs_weak_revalidate,
1625 	.d_delete	= nfs_dentry_delete,
1626 	.d_iput		= nfs_dentry_iput,
1627 	.d_automount	= nfs_d_automount,
1628 	.d_release	= nfs_d_release,
1629 };
1630 EXPORT_SYMBOL_GPL(nfs_dentry_operations);
1631 
1632 struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
1633 {
1634 	struct dentry *res;
1635 	struct inode *inode = NULL;
1636 	struct nfs_fh *fhandle = NULL;
1637 	struct nfs_fattr *fattr = NULL;
1638 	struct nfs4_label *label = NULL;
1639 	unsigned long dir_verifier;
1640 	int error;
1641 
1642 	dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
1643 	nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);
1644 
1645 	if (unlikely(dentry->d_name.len > NFS_SERVER(dir)->namelen))
1646 		return ERR_PTR(-ENAMETOOLONG);
1647 
1648 	/*
1649 	 * If we're doing an exclusive create, optimize away the lookup
1650 	 * but don't hash the dentry.
1651 	 */
1652 	if (nfs_is_exclusive_create(dir, flags) || flags & LOOKUP_RENAME_TARGET)
1653 		return NULL;
1654 
1655 	res = ERR_PTR(-ENOMEM);
1656 	fhandle = nfs_alloc_fhandle();
1657 	fattr = nfs_alloc_fattr();
1658 	if (fhandle == NULL || fattr == NULL)
1659 		goto out;
1660 
1661 	label = nfs4_label_alloc(NFS_SERVER(dir), GFP_NOWAIT);
1662 	if (IS_ERR(label))
1663 		goto out;
1664 
1665 	dir_verifier = nfs_save_change_attribute(dir);
1666 	trace_nfs_lookup_enter(dir, dentry, flags);
1667 	error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
1668 	if (error == -ENOENT)
1669 		goto no_entry;
1670 	if (error < 0) {
1671 		res = ERR_PTR(error);
1672 		goto out_label;
1673 	}
1674 	inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
1675 	res = ERR_CAST(inode);
1676 	if (IS_ERR(res))
1677 		goto out_label;
1678 
1679 	/* Notify readdir to use READDIRPLUS */
1680 	nfs_force_use_readdirplus(dir);
1681 
1682 no_entry:
1683 	res = d_splice_alias(inode, dentry);
1684 	if (res != NULL) {
1685 		if (IS_ERR(res))
1686 			goto out_label;
1687 		dentry = res;
1688 	}
1689 	nfs_set_verifier(dentry, dir_verifier);
1690 out_label:
1691 	trace_nfs_lookup_exit(dir, dentry, flags, error);
1692 	nfs4_label_free(label);
1693 out:
1694 	nfs_free_fattr(fattr);
1695 	nfs_free_fhandle(fhandle);
1696 	return res;
1697 }
1698 EXPORT_SYMBOL_GPL(nfs_lookup);
1699 
1700 #if IS_ENABLED(CONFIG_NFS_V4)
1701 static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
1702 
1703 const struct dentry_operations nfs4_dentry_operations = {
1704 	.d_revalidate	= nfs4_lookup_revalidate,
1705 	.d_weak_revalidate	= nfs_weak_revalidate,
1706 	.d_delete	= nfs_dentry_delete,
1707 	.d_iput		= nfs_dentry_iput,
1708 	.d_automount	= nfs_d_automount,
1709 	.d_release	= nfs_d_release,
1710 };
1711 EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
1712 
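/*
 * Translate open(2) flags into an fmode_t: preserve FMODE_EXEC and map
 * O_ACCMODE onto FMODE_READ and/or FMODE_WRITE.
 */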
1713 static fmode_t flags_to_mode(int flags)
1714 {
1715 	fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
1716 	if ((flags & O_ACCMODE) != O_WRONLY)
1717 		res |= FMODE_READ;
1718 	if ((flags & O_ACCMODE) != O_RDONLY)
1719 		res |= FMODE_WRITE;
1720 	return res;
1721 }
1722 
1723 static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
1724 {
1725 	return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
1726 }
1727 
1728 static int do_open(struct inode *inode, struct file *filp)
1729 {
1730 	nfs_fscache_open_file(inode, filp);
1731 	return 0;
1732 }
1733 
1734 static int nfs_finish_open(struct nfs_open_context *ctx,
1735 			   struct dentry *dentry,
1736 			   struct file *file, unsigned open_flags)
1737 {
1738 	int err;
1739 
1740 	err = finish_open(file, dentry, do_open);
1741 	if (err)
1742 		goto out;
1743 	if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
1744 		nfs_file_set_open_context(file, ctx);
1745 	else
1746 		err = -EOPENSTALE;
1747 out:
1748 	return err;
1749 }
1750 
1751 int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
1752 		    struct file *file, unsigned open_flags,
1753 		    umode_t mode)
1754 {
1755 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
1756 	struct nfs_open_context *ctx;
1757 	struct dentry *res;
1758 	struct iattr attr = { .ia_valid = ATTR_OPEN };
1759 	struct inode *inode;
1760 	unsigned int lookup_flags = 0;
1761 	bool switched = false;
1762 	int created = 0;
1763 	int err;
1764 
1765 	/* Expect a negative dentry */
1766 	BUG_ON(d_inode(dentry));
1767 
1768 	dfprintk(VFS, "NFS: atomic_open(%s/%lu), %pd\n",
1769 			dir->i_sb->s_id, dir->i_ino, dentry);
1770 
1771 	err = nfs_check_flags(open_flags);
1772 	if (err)
1773 		return err;
1774 
1775 	/* NFS only supports OPEN on regular files */
1776 	if ((open_flags & O_DIRECTORY)) {
1777 		if (!d_in_lookup(dentry)) {
1778 			/*
1779 			 * Hashed negative dentry with O_DIRECTORY: dentry was
1780 			 * revalidated and is fine, no need to perform lookup
1781 			 * again
1782 			 */
1783 			return -ENOENT;
1784 		}
1785 		lookup_flags = LOOKUP_OPEN|LOOKUP_DIRECTORY;
1786 		goto no_open;
1787 	}
1788 
1789 	if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
1790 		return -ENAMETOOLONG;
1791 
1792 	if (open_flags & O_CREAT) {
1793 		struct nfs_server *server = NFS_SERVER(dir);
1794 
1795 		if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
1796 			mode &= ~current_umask();
1797 
1798 		attr.ia_valid |= ATTR_MODE;
1799 		attr.ia_mode = mode;
1800 	}
1801 	if (open_flags & O_TRUNC) {
1802 		attr.ia_valid |= ATTR_SIZE;
1803 		attr.ia_size = 0;
1804 	}
1805 
1806 	if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
1807 		d_drop(dentry);
1808 		switched = true;
1809 		dentry = d_alloc_parallel(dentry->d_parent,
1810 					  &dentry->d_name, &wq);
1811 		if (IS_ERR(dentry))
1812 			return PTR_ERR(dentry);
1813 		if (unlikely(!d_in_lookup(dentry)))
1814 			return finish_no_open(file, dentry);
1815 	}
1816 
1817 	ctx = create_nfs_open_context(dentry, open_flags, file);
1818 	err = PTR_ERR(ctx);
1819 	if (IS_ERR(ctx))
1820 		goto out;
1821 
1822 	trace_nfs_atomic_open_enter(dir, ctx, open_flags);
1823 	inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, &created);
1824 	if (created)
1825 		file->f_mode |= FMODE_CREATED;
1826 	if (IS_ERR(inode)) {
1827 		err = PTR_ERR(inode);
1828 		trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
1829 		put_nfs_open_context(ctx);
1830 		d_drop(dentry);
1831 		switch (err) {
1832 		case -ENOENT:
1833 			d_splice_alias(NULL, dentry);
1834 			nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
1835 			break;
1836 		case -EISDIR:
1837 		case -ENOTDIR:
1838 			goto no_open;
1839 		case -ELOOP:
1840 			if (!(open_flags & O_NOFOLLOW))
1841 				goto no_open;
1842 			break;
1843 			/* case -EINVAL: */
1844 		default:
1845 			break;
1846 		}
1847 		goto out;
1848 	}
1849 
1850 	err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
1851 	trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
1852 	put_nfs_open_context(ctx);
1853 out:
1854 	if (unlikely(switched)) {
1855 		d_lookup_done(dentry);
1856 		dput(dentry);
1857 	}
1858 	return err;
1859 
1860 no_open:
1861 	res = nfs_lookup(dir, dentry, lookup_flags);
1862 	if (switched) {
1863 		d_lookup_done(dentry);
1864 		if (!res)
1865 			res = dentry;
1866 		else
1867 			dput(dentry);
1868 	}
1869 	if (IS_ERR(res))
1870 		return PTR_ERR(res);
1871 	return finish_no_open(file, res);
1872 }
1873 EXPORT_SYMBOL_GPL(nfs_atomic_open);
1874 
1875 static int
1876 nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
1877 			  unsigned int flags)
1878 {
1879 	struct inode *inode;
1880 
1881 	if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
1882 		goto full_reval;
1883 	if (d_mountpoint(dentry))
1884 		goto full_reval;
1885 
1886 	inode = d_inode(dentry);
1887 
1888 	/* We can't create new files in nfs4_lookup_revalidate(), so we
1889 	 * optimize away revalidation of negative dentries.
1890 	 */
1891 	if (inode == NULL)
1892 		goto full_reval;
1893 
1894 	if (nfs_verifier_is_delegated(dentry))
1895 		return nfs_lookup_revalidate_delegated(dir, dentry, inode);
1896 
1897 	/* NFS only supports OPEN on regular files */
1898 	if (!S_ISREG(inode->i_mode))
1899 		goto full_reval;
1900 
1901 	/* We cannot do exclusive creation on a positive dentry */
1902 	if (flags & (LOOKUP_EXCL | LOOKUP_REVAL))
1903 		goto reval_dentry;
1904 
1905 	/* Check if the directory changed */
1906 	if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU))
1907 		goto reval_dentry;
1908 
1909 	/* Let f_op->open() actually open (and revalidate) the file */
1910 	return 1;
1911 reval_dentry:
1912 	if (flags & LOOKUP_RCU)
1913 		return -ECHILD;
1914 	return nfs_lookup_revalidate_dentry(dir, dentry, inode);
1915 
1916 full_reval:
1917 	return nfs_do_lookup_revalidate(dir, dentry, flags);
1918 }
1919 
1920 static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
1921 {
1922 	return __nfs_lookup_revalidate(dentry, flags,
1923 			nfs4_do_lookup_revalidate);
1924 }
1925 
1926 #endif /* CONFIG_NFSV4 */
1927 
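/*
 * Instantiate a dentry from the file handle and attributes returned by a
 * create-style operation.  If the server did not return a file handle,
 * look the name up; if it did not return attributes, fetch them with a
 * GETATTR.  The result is spliced into the dcache via d_splice_alias().
 */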
1928 struct dentry *
1929 nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
1930 				struct nfs_fattr *fattr,
1931 				struct nfs4_label *label)
1932 {
1933 	struct dentry *parent = dget_parent(dentry);
1934 	struct inode *dir = d_inode(parent);
1935 	struct inode *inode;
1936 	struct dentry *d;
1937 	int error;
1938 
1939 	d_drop(dentry);
1940 
1941 	if (fhandle->size == 0) {
1942 		error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, NULL);
1943 		if (error)
1944 			goto out_error;
1945 	}
1946 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
1947 	if (!(fattr->valid & NFS_ATTR_FATTR)) {
1948 		struct nfs_server *server = NFS_SB(dentry->d_sb);
1949 		error = server->nfs_client->rpc_ops->getattr(server, fhandle,
1950 				fattr, NULL, NULL);
1951 		if (error < 0)
1952 			goto out_error;
1953 	}
1954 	inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
1955 	d = d_splice_alias(inode, dentry);
1956 out:
1957 	dput(parent);
1958 	return d;
1959 out_error:
1960 	nfs_mark_for_revalidate(dir);
1961 	d = ERR_PTR(error);
1962 	goto out;
1963 }
1964 EXPORT_SYMBOL_GPL(nfs_add_or_obtain);
1965 
1966 /*
1967  * Code common to create, mkdir, and mknod.
1968  */
1969 int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
1970 				struct nfs_fattr *fattr,
1971 				struct nfs4_label *label)
1972 {
1973 	struct dentry *d;
1974 
1975 	d = nfs_add_or_obtain(dentry, fhandle, fattr, label);
1976 	if (IS_ERR(d))
1977 		return PTR_ERR(d);
1978 
1979 	/* Callers don't care about the returned dentry, so drop the reference */
1980 	dput(d);
1981 	return 0;
1982 }
1983 EXPORT_SYMBOL_GPL(nfs_instantiate);
1984 
1985 /*
1986  * Following a failed create operation, we drop the dentry rather
1987  * than retain a negative dentry. This avoids a problem in the event
1988  * that the operation succeeded on the server, but an error in the
1989  * reply path made it appear to have failed.
1990  */
1991 int nfs_create(struct inode *dir, struct dentry *dentry,
1992 		umode_t mode, bool excl)
1993 {
1994 	struct iattr attr;
1995 	int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
1996 	int error;
1997 
1998 	dfprintk(VFS, "NFS: create(%s/%lu), %pd\n",
1999 			dir->i_sb->s_id, dir->i_ino, dentry);
2000 
2001 	attr.ia_mode = mode;
2002 	attr.ia_valid = ATTR_MODE;
2003 
2004 	trace_nfs_create_enter(dir, dentry, open_flags);
2005 	error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags);
2006 	trace_nfs_create_exit(dir, dentry, open_flags, error);
2007 	if (error != 0)
2008 		goto out_err;
2009 	return 0;
2010 out_err:
2011 	d_drop(dentry);
2012 	return error;
2013 }
2014 EXPORT_SYMBOL_GPL(nfs_create);
2015 
2016 /*
2017  * See comments for nfs_proc_create regarding failed operations.
2018  */
2019 int
2020 nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
2021 {
2022 	struct iattr attr;
2023 	int status;
2024 
2025 	dfprintk(VFS, "NFS: mknod(%s/%lu), %pd\n",
2026 			dir->i_sb->s_id, dir->i_ino, dentry);
2027 
2028 	attr.ia_mode = mode;
2029 	attr.ia_valid = ATTR_MODE;
2030 
2031 	trace_nfs_mknod_enter(dir, dentry);
2032 	status = NFS_PROTO(dir)->mknod(dir, dentry, &attr, rdev);
2033 	trace_nfs_mknod_exit(dir, dentry, status);
2034 	if (status != 0)
2035 		goto out_err;
2036 	return 0;
2037 out_err:
2038 	d_drop(dentry);
2039 	return status;
2040 }
2041 EXPORT_SYMBOL_GPL(nfs_mknod);
2042 
2043 /*
2044  * See comments for nfs_proc_create regarding failed operations.
2045  */
2046 int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2047 {
2048 	struct iattr attr;
2049 	int error;
2050 
2051 	dfprintk(VFS, "NFS: mkdir(%s/%lu), %pd\n",
2052 			dir->i_sb->s_id, dir->i_ino, dentry);
2053 
2054 	attr.ia_valid = ATTR_MODE;
2055 	attr.ia_mode = mode | S_IFDIR;
2056 
2057 	trace_nfs_mkdir_enter(dir, dentry);
2058 	error = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
2059 	trace_nfs_mkdir_exit(dir, dentry, error);
2060 	if (error != 0)
2061 		goto out_err;
2062 	return 0;
2063 out_err:
2064 	d_drop(dentry);
2065 	return error;
2066 }
2067 EXPORT_SYMBOL_GPL(nfs_mkdir);
2068 
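/*
 * The server says the file is gone: if the dentry is still positive,
 * d_delete() it so the VFS stops using the stale inode.
 */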
2069 static void nfs_dentry_handle_enoent(struct dentry *dentry)
2070 {
2071 	if (simple_positive(dentry))
2072 		d_delete(dentry);
2073 }
2074 
2075 int nfs_rmdir(struct inode *dir, struct dentry *dentry)
2076 {
2077 	int error;
2078 
2079 	dfprintk(VFS, "NFS: rmdir(%s/%lu), %pd\n",
2080 			dir->i_sb->s_id, dir->i_ino, dentry);
2081 
2082 	trace_nfs_rmdir_enter(dir, dentry);
2083 	if (d_really_is_positive(dentry)) {
2084 		down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
2085 		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
2086 		/* Ensure the VFS deletes this inode */
2087 		switch (error) {
2088 		case 0:
2089 			clear_nlink(d_inode(dentry));
2090 			break;
2091 		case -ENOENT:
2092 			nfs_dentry_handle_enoent(dentry);
2093 		}
2094 		up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
2095 	} else
2096 		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
2097 	trace_nfs_rmdir_exit(dir, dentry, error);
2098 
2099 	return error;
2100 }
2101 EXPORT_SYMBOL_GPL(nfs_rmdir);
2102 
2103 /*
2104  * Remove a file after making sure there are no pending writes,
2105  * and after checking that the file has only one user.
2106  *
2107  * We invalidate the attribute cache and free the inode prior to the operation
2108  * to avoid possible races if the server reuses the inode.
2109  */
2110 static int nfs_safe_remove(struct dentry *dentry)
2111 {
2112 	struct inode *dir = d_inode(dentry->d_parent);
2113 	struct inode *inode = d_inode(dentry);
2114 	int error = -EBUSY;
2115 
2116 	dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry);
2117 
2118 	/* If the dentry was sillyrenamed, we simply call d_delete() */
2119 	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
2120 		error = 0;
2121 		goto out;
2122 	}
2123 
2124 	trace_nfs_remove_enter(dir, dentry);
2125 	if (inode != NULL) {
2126 		error = NFS_PROTO(dir)->remove(dir, dentry);
2127 		if (error == 0)
2128 			nfs_drop_nlink(inode);
2129 	} else
2130 		error = NFS_PROTO(dir)->remove(dir, dentry);
2131 	if (error == -ENOENT)
2132 		nfs_dentry_handle_enoent(dentry);
2133 	trace_nfs_remove_exit(dir, dentry, error);
2134 out:
2135 	return error;
2136 }
2137 
2138 /*  If the dentry is still in use, we do a silly rename; if sillyrename()
2139  *  returns -EBUSY, the inode belongs to an active ".nfs..." file and
2140  *  we return -EBUSY to the caller.
2141  *  If the dentry is not busy, we simply unlink the file on the server.
2142  */
2143 int nfs_unlink(struct inode *dir, struct dentry *dentry)
2144 {
2145 	int error;
2146 	int need_rehash = 0;
2147 
2148 	dfprintk(VFS, "NFS: unlink(%s/%lu, %pd)\n", dir->i_sb->s_id,
2149 		dir->i_ino, dentry);
2150 
2151 	trace_nfs_unlink_enter(dir, dentry);
2152 	spin_lock(&dentry->d_lock);
2153 	if (d_count(dentry) > 1) {
2154 		spin_unlock(&dentry->d_lock);
2155 		/* Start asynchronous writeout of the inode */
2156 		write_inode_now(d_inode(dentry), 0);
2157 		error = nfs_sillyrename(dir, dentry);
2158 		goto out;
2159 	}
2160 	if (!d_unhashed(dentry)) {
2161 		__d_drop(dentry);
2162 		need_rehash = 1;
2163 	}
2164 	spin_unlock(&dentry->d_lock);
2165 	error = nfs_safe_remove(dentry);
2166 	if (!error || error == -ENOENT) {
2167 		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2168 	} else if (need_rehash)
2169 		d_rehash(dentry);
2170 out:
2171 	trace_nfs_unlink_exit(dir, dentry, error);
2172 	return error;
2173 }
2174 EXPORT_SYMBOL_GPL(nfs_unlink);
2175 
2176 /*
2177  * To create a symbolic link, most file systems instantiate a new inode,
2178  * add a page to it containing the path, then write it out to the disk
2179  * using prepare_write/commit_write.
2180  *
2181  * Unfortunately the NFS client can't create the in-core inode first
2182  * because it needs a file handle to create an in-core inode (see
2183  * fs/nfs/inode.c:nfs_fhget).  We only have a file handle *after* the
2184  * symlink request has completed on the server.
2185  *
2186  * So instead we allocate a raw page, copy the symname into it, then do
2187  * the SYMLINK request with the page as the buffer.  If it succeeds, we
2188  * now have a new file handle and can instantiate an in-core NFS inode
2189  * and move the raw page into its mapping.
2190  */
2191 int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2192 {
2193 	struct page *page;
2194 	char *kaddr;
2195 	struct iattr attr;
2196 	unsigned int pathlen = strlen(symname);
2197 	int error;
2198 
2199 	dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s)\n", dir->i_sb->s_id,
2200 		dir->i_ino, dentry, symname);
2201 
2202 	if (pathlen > PAGE_SIZE)
2203 		return -ENAMETOOLONG;
2204 
2205 	attr.ia_mode = S_IFLNK | S_IRWXUGO;
2206 	attr.ia_valid = ATTR_MODE;
2207 
2208 	page = alloc_page(GFP_USER);
2209 	if (!page)
2210 		return -ENOMEM;
2211 
2212 	kaddr = page_address(page);
2213 	memcpy(kaddr, symname, pathlen);
2214 	if (pathlen < PAGE_SIZE)
2215 		memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
2216 
2217 	trace_nfs_symlink_enter(dir, dentry);
2218 	error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
2219 	trace_nfs_symlink_exit(dir, dentry, error);
2220 	if (error != 0) {
2221 		dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s) error %d\n",
2222 			dir->i_sb->s_id, dir->i_ino,
2223 			dentry, symname, error);
2224 		d_drop(dentry);
2225 		__free_page(page);
2226 		return error;
2227 	}
2228 
2229 	/*
2230 	 * No big deal if we can't add this page to the page cache here.
2231 	 * READLINK will get the missing page from the server if needed.
2232 	 */
2233 	if (!add_to_page_cache_lru(page, d_inode(dentry)->i_mapping, 0,
2234 							GFP_KERNEL)) {
2235 		SetPageUptodate(page);
2236 		unlock_page(page);
2237 		/*
2238 		 * add_to_page_cache_lru() grabs an extra page refcount.
2239 		 * Drop it here to avoid leaking this page later.
2240 		 */
2241 		put_page(page);
2242 	} else
2243 		__free_page(page);
2244 
2245 	return 0;
2246 }
2247 EXPORT_SYMBOL_GPL(nfs_symlink);
2248 
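/*
 * Hard link: drop the dentry before issuing the LINK call, then, on
 * success, rebind it to the existing inode (taking an extra reference
 * with ihold()).
 */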
2249 int
2250 nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2251 {
2252 	struct inode *inode = d_inode(old_dentry);
2253 	int error;
2254 
2255 	dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n",
2256 		old_dentry, dentry);
2257 
2258 	trace_nfs_link_enter(inode, dir, dentry);
2259 	d_drop(dentry);
2260 	error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
2261 	if (error == 0) {
2262 		ihold(inode);
2263 		d_add(dentry, inode);
2264 	}
2265 	trace_nfs_link_exit(inode, dir, dentry, error);
2266 	return error;
2267 }
2268 EXPORT_SYMBOL_GPL(nfs_link);
2269 
2270 /*
2271  * RENAME
2272  * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
2273  * different file handle for the same inode after a rename (e.g. when
2274  * moving to a different directory). A fail-safe way to handle this would
2275  * be to look up old_dir/old_name, create a link to new_dir/new_name and
2276  * rename the old file using the sillyrename stuff. This way, the original
2277  * file in old_dir will go away when the last process iput()s the inode.
2278  *
2279  * FIXED.
2280  *
2281  * It actually works quite well. One needs to allow for at least
2282  * one ".nfs..." file in each directory the file ever gets moved
2283  * or linked to, which happens automagically with the new
2284  * implementation that depends only on the dcache stuff instead
2285  * of using the inode layer.
2286  *
2287  * Unfortunately, things are a little more complicated than indicated
2288  * above. For a cross-directory move, we want to make sure we can get
2289  * rid of the old inode after the operation.  This means there must be
2290  * no pending writes (if it's a file), and the use count must be 1.
2291  * If these conditions are met, we can drop the dentries before doing
2292  * the rename.
2293  */
2294 int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2295 	       struct inode *new_dir, struct dentry *new_dentry,
2296 	       unsigned int flags)
2297 {
2298 	struct inode *old_inode = d_inode(old_dentry);
2299 	struct inode *new_inode = d_inode(new_dentry);
2300 	struct dentry *dentry = NULL, *rehash = NULL;
2301 	struct rpc_task *task;
2302 	int error = -EBUSY;
2303 
2304 	if (flags)
2305 		return -EINVAL;
2306 
2307 	dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",
2308 		 old_dentry, new_dentry,
2309 		 d_count(new_dentry));
2310 
2311 	trace_nfs_rename_enter(old_dir, old_dentry, new_dir, new_dentry);
2312 	/*
2313 	 * For non-directories, check whether the target is busy and if so,
2314 	 * make a copy of the dentry and then do a silly-rename. If the
2315 	 * silly-rename succeeds, the copied dentry is hashed and becomes
2316 	 * the new target.
2317 	 */
2318 	if (new_inode && !S_ISDIR(new_inode->i_mode)) {
2319 		/*
2320 		 * To prevent any new references to the target during the
2321 		 * rename, we unhash the dentry in advance.
2322 		 */
2323 		if (!d_unhashed(new_dentry)) {
2324 			d_drop(new_dentry);
2325 			rehash = new_dentry;
2326 		}
2327 
2328 		if (d_count(new_dentry) > 2) {
2329 			int err;
2330 
2331 			/* copy the target dentry's name */
2332 			dentry = d_alloc(new_dentry->d_parent,
2333 					 &new_dentry->d_name);
2334 			if (!dentry)
2335 				goto out;
2336 
2337 			/* silly-rename the existing target ... */
2338 			err = nfs_sillyrename(new_dir, new_dentry);
2339 			if (err)
2340 				goto out;
2341 
2342 			new_dentry = dentry;
2343 			rehash = NULL;
2344 			new_inode = NULL;
2345 		}
2346 	}
2347 
2348 	task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
2349 	if (IS_ERR(task)) {
2350 		error = PTR_ERR(task);
2351 		goto out;
2352 	}
2353 
2354 	error = rpc_wait_for_completion_task(task);
2355 	if (error != 0) {
2356 		((struct nfs_renamedata *)task->tk_calldata)->cancelled = 1;
2357 		/* Paired with the atomic_dec_and_test() barrier in rpc_do_put_task() */
2358 		smp_wmb();
2359 	} else
2360 		error = task->tk_status;
2361 	rpc_put_task(task);
2362 	/* Ensure the inode attributes are revalidated */
2363 	if (error == 0) {
2364 		spin_lock(&old_inode->i_lock);
2365 		NFS_I(old_inode)->attr_gencount = nfs_inc_attr_generation_counter();
2366 		NFS_I(old_inode)->cache_validity |= NFS_INO_INVALID_CHANGE
2367 			| NFS_INO_INVALID_CTIME
2368 			| NFS_INO_REVAL_FORCED;
2369 		spin_unlock(&old_inode->i_lock);
2370 	}
2371 out:
2372 	if (rehash)
2373 		d_rehash(rehash);
2374 	trace_nfs_rename_exit(old_dir, old_dentry,
2375 			new_dir, new_dentry, error);
2376 	if (!error) {
2377 		if (new_inode != NULL)
2378 			nfs_drop_nlink(new_inode);
2379 		/*
2380 		 * The d_move() should be here instead of in an async RPC completion
2381 		 * handler because we need the proper locks to move the dentry.  If
2382 		 * we're interrupted by a signal, the async RPC completion handler
2383 		 * should mark the directories for revalidation.
2384 		 */
2385 		d_move(old_dentry, new_dentry);
2386 		nfs_set_verifier(old_dentry,
2387 					nfs_save_change_attribute(new_dir));
2388 	} else if (error == -ENOENT)
2389 		nfs_dentry_handle_enoent(old_dentry);
2390 
2391 	/* new dentry created? */
2392 	if (dentry)
2393 		dput(dentry);
2394 	return error;
2395 }
2396 EXPORT_SYMBOL_GPL(nfs_rename);
2397 
2398 static DEFINE_SPINLOCK(nfs_access_lru_lock);
2399 static LIST_HEAD(nfs_access_lru_list);
2400 static atomic_long_t nfs_access_nr_entries;
2401 
2402 static unsigned long nfs_access_max_cachesize = 4*1024*1024;
2403 module_param(nfs_access_max_cachesize, ulong, 0644);
2404 MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache length");
2405 
2406 static void nfs_access_free_entry(struct nfs_access_entry *entry)
2407 {
2408 	put_cred(entry->cred);
2409 	kfree_rcu(entry, rcu_head);
2410 	smp_mb__before_atomic();
2411 	atomic_long_dec(&nfs_access_nr_entries);
2412 	smp_mb__after_atomic();
2413 }
2414 
2415 static void nfs_access_free_list(struct list_head *head)
2416 {
2417 	struct nfs_access_entry *cache;
2418 
2419 	while (!list_empty(head)) {
2420 		cache = list_entry(head->next, struct nfs_access_entry, lru);
2421 		list_del(&cache->lru);
2422 		nfs_access_free_entry(cache);
2423 	}
2424 }
2425 
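/*
 * Walk the global LRU of inodes with cached ACCESS results, stealing the
 * oldest entry from each inode onto a private list, then free the reaped
 * entries once the locks have been dropped.
 */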
2426 static unsigned long
2427 nfs_do_access_cache_scan(unsigned int nr_to_scan)
2428 {
2429 	LIST_HEAD(head);
2430 	struct nfs_inode *nfsi, *next;
2431 	struct nfs_access_entry *cache;
2432 	long freed = 0;
2433 
2434 	spin_lock(&nfs_access_lru_lock);
2435 	list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
2436 		struct inode *inode;
2437 
2438 		if (nr_to_scan-- == 0)
2439 			break;
2440 		inode = &nfsi->vfs_inode;
2441 		spin_lock(&inode->i_lock);
2442 		if (list_empty(&nfsi->access_cache_entry_lru))
2443 			goto remove_lru_entry;
2444 		cache = list_entry(nfsi->access_cache_entry_lru.next,
2445 				struct nfs_access_entry, lru);
2446 		list_move(&cache->lru, &head);
2447 		rb_erase(&cache->rb_node, &nfsi->access_cache);
2448 		freed++;
2449 		if (!list_empty(&nfsi->access_cache_entry_lru))
2450 			list_move_tail(&nfsi->access_cache_inode_lru,
2451 					&nfs_access_lru_list);
2452 		else {
2453 remove_lru_entry:
2454 			list_del_init(&nfsi->access_cache_inode_lru);
2455 			smp_mb__before_atomic();
2456 			clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
2457 			smp_mb__after_atomic();
2458 		}
2459 		spin_unlock(&inode->i_lock);
2460 	}
2461 	spin_unlock(&nfs_access_lru_lock);
2462 	nfs_access_free_list(&head);
2463 	return freed;
2464 }
2465 
2466 unsigned long
2467 nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
2468 {
2469 	int nr_to_scan = sc->nr_to_scan;
2470 	gfp_t gfp_mask = sc->gfp_mask;
2471 
2472 	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
2473 		return SHRINK_STOP;
2474 	return nfs_do_access_cache_scan(nr_to_scan);
2475 }
2476 
2477 
2478 unsigned long
2479 nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc)
2480 {
2481 	return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
2482 }
2483 
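/*
 * Keep the total number of cached ACCESS entries below
 * nfs_access_max_cachesize, reclaiming at most 100 entries per call.
 */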
2484 static void
2485 nfs_access_cache_enforce_limit(void)
2486 {
2487 	long nr_entries = atomic_long_read(&nfs_access_nr_entries);
2488 	unsigned long diff;
2489 	unsigned int nr_to_scan;
2490 
2491 	if (nr_entries < 0 || nr_entries <= nfs_access_max_cachesize)
2492 		return;
2493 	nr_to_scan = 100;
2494 	diff = nr_entries - nfs_access_max_cachesize;
2495 	if (diff < nr_to_scan)
2496 		nr_to_scan = diff;
2497 	nfs_do_access_cache_scan(nr_to_scan);
2498 }
2499 
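/*
 * Detach every entry from the per-inode access cache rbtree onto @head so
 * that the caller can free them after dropping inode->i_lock.
 */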
2500 static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
2501 {
2502 	struct rb_root *root_node = &nfsi->access_cache;
2503 	struct rb_node *n;
2504 	struct nfs_access_entry *entry;
2505 
2506 	/* Unhook entries from the cache */
2507 	while ((n = rb_first(root_node)) != NULL) {
2508 		entry = rb_entry(n, struct nfs_access_entry, rb_node);
2509 		rb_erase(n, root_node);
2510 		list_move(&entry->lru, head);
2511 	}
2512 	nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS;
2513 }
2514 
2515 void nfs_access_zap_cache(struct inode *inode)
2516 {
2517 	LIST_HEAD(head);
2518 
2519 	if (test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags) == 0)
2520 		return;
2521 	/* Remove from the global LRU list */
2522 	spin_lock(&nfs_access_lru_lock);
2523 	if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
2524 		list_del_init(&NFS_I(inode)->access_cache_inode_lru);
2525 
2526 	spin_lock(&inode->i_lock);
2527 	__nfs_access_zap_cache(NFS_I(inode), &head);
2528 	spin_unlock(&inode->i_lock);
2529 	spin_unlock(&nfs_access_lru_lock);
2530 	nfs_access_free_list(&head);
2531 }
2532 EXPORT_SYMBOL_GPL(nfs_access_zap_cache);
2533 
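/* Find the cached ACCESS entry for @cred; caller holds inode->i_lock. */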
2534 static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, const struct cred *cred)
2535 {
2536 	struct rb_node *n = NFS_I(inode)->access_cache.rb_node;
2537 
2538 	while (n != NULL) {
2539 		struct nfs_access_entry *entry =
2540 			rb_entry(n, struct nfs_access_entry, rb_node);
2541 		int cmp = cred_fscmp(cred, entry->cred);
2542 
2543 		if (cmp < 0)
2544 			n = n->rb_left;
2545 		else if (cmp > 0)
2546 			n = n->rb_right;
2547 		else
2548 			return entry;
2549 	}
2550 	return NULL;
2551 }
2552 
2553 static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block)
2554 {
2555 	struct nfs_inode *nfsi = NFS_I(inode);
2556 	struct nfs_access_entry *cache;
2557 	bool retry = true;
2558 	int err;
2559 
2560 	spin_lock(&inode->i_lock);
2561 	for (;;) {
2562 		if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
2563 			goto out_zap;
2564 		cache = nfs_access_search_rbtree(inode, cred);
2565 		err = -ENOENT;
2566 		if (cache == NULL)
2567 			goto out;
2568 		/* Found an entry, is our attribute cache valid? */
2569 		if (!nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
2570 			break;
2571 		if (!retry)
2572 			break;
2573 		err = -ECHILD;
2574 		if (!may_block)
2575 			goto out;
2576 		spin_unlock(&inode->i_lock);
2577 		err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
2578 		if (err)
2579 			return err;
2580 		spin_lock(&inode->i_lock);
2581 		retry = false;
2582 	}
2583 	res->cred = cache->cred;
2584 	res->mask = cache->mask;
2585 	list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
2586 	err = 0;
2587 out:
2588 	spin_unlock(&inode->i_lock);
2589 	return err;
2590 out_zap:
2591 	spin_unlock(&inode->i_lock);
2592 	nfs_access_zap_cache(inode);
2593 	return -ENOENT;
2594 }
2595 
2596 static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res)
2597 {
2598 	/* Only check the most recently returned cache entry,
2599 	 * but do it without locking.
2600 	 */
2601 	struct nfs_inode *nfsi = NFS_I(inode);
2602 	struct nfs_access_entry *cache;
2603 	int err = -ECHILD;
2604 	struct list_head *lh;
2605 
2606 	rcu_read_lock();
2607 	if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
2608 		goto out;
2609 	lh = rcu_dereference(list_tail_rcu(&nfsi->access_cache_entry_lru));
2610 	cache = list_entry(lh, struct nfs_access_entry, lru);
2611 	if (lh == &nfsi->access_cache_entry_lru ||
2612 	    cred_fscmp(cred, cache->cred) != 0)
2613 		cache = NULL;
2614 	if (cache == NULL)
2615 		goto out;
2616 	if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
2617 		goto out;
2618 	res->cred = cache->cred;
2619 	res->mask = cache->mask;
2620 	err = 0;
2621 out:
2622 	rcu_read_unlock();
2623 	return err;
2624 }
2625 
2626 int nfs_access_get_cached(struct inode *inode, const struct cred *cred,
2627 			  struct nfs_access_entry *res, bool may_block)
2628 {
2629 	int status;
2630 
2631 	status = nfs_access_get_cached_rcu(inode, cred, res);
2632 	if (status != 0)
2633 		status = nfs_access_get_cached_locked(inode, cred, res,
2634 		    may_block);
2635 
2636 	return status;
2637 }
2638 EXPORT_SYMBOL_GPL(nfs_access_get_cached);
2639 
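/*
 * Insert @set into the per-inode access cache, keyed by credential.  If an
 * entry for the same credential already exists it is replaced and freed.
 */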
2640 static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
2641 {
2642 	struct nfs_inode *nfsi = NFS_I(inode);
2643 	struct rb_root *root_node = &nfsi->access_cache;
2644 	struct rb_node **p = &root_node->rb_node;
2645 	struct rb_node *parent = NULL;
2646 	struct nfs_access_entry *entry;
2647 	int cmp;
2648 
2649 	spin_lock(&inode->i_lock);
2650 	while (*p != NULL) {
2651 		parent = *p;
2652 		entry = rb_entry(parent, struct nfs_access_entry, rb_node);
2653 		cmp = cred_fscmp(set->cred, entry->cred);
2654 
2655 		if (cmp < 0)
2656 			p = &parent->rb_left;
2657 		else if (cmp > 0)
2658 			p = &parent->rb_right;
2659 		else
2660 			goto found;
2661 	}
2662 	rb_link_node(&set->rb_node, parent, p);
2663 	rb_insert_color(&set->rb_node, root_node);
2664 	list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
2665 	spin_unlock(&inode->i_lock);
2666 	return;
2667 found:
2668 	rb_replace_node(parent, &set->rb_node, root_node);
2669 	list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
2670 	list_del(&entry->lru);
2671 	spin_unlock(&inode->i_lock);
2672 	nfs_access_free_entry(entry);
2673 }
2674 
2675 void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
2676 {
2677 	struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL);
2678 	if (cache == NULL)
2679 		return;
2680 	RB_CLEAR_NODE(&cache->rb_node);
2681 	cache->cred = get_cred(set->cred);
2682 	cache->mask = set->mask;
2683 
2684 	/* The above field assignments must be visible
2685 	 * before this item appears on the lru.  We cannot easily
2686 	 * use rcu_assign_pointer, so just force the memory barrier.
2687 	 */
2688 	smp_wmb();
2689 	nfs_access_add_rbtree(inode, cache);
2690 
2691 	/* Update accounting */
2692 	smp_mb__before_atomic();
2693 	atomic_long_inc(&nfs_access_nr_entries);
2694 	smp_mb__after_atomic();
2695 
2696 	/* Add inode to global LRU list */
2697 	if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
2698 		spin_lock(&nfs_access_lru_lock);
2699 		if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
2700 			list_add_tail(&NFS_I(inode)->access_cache_inode_lru,
2701 					&nfs_access_lru_list);
2702 		spin_unlock(&nfs_access_lru_lock);
2703 	}
2704 	nfs_access_cache_enforce_limit();
2705 }
2706 EXPORT_SYMBOL_GPL(nfs_access_add_cache);
2707 
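/*
 * Translate the ACCESS bits returned by the server into generic MAY_*
 * permission bits.  Note that MAY_WRITE on a directory requires the
 * DELETE bit as well, while regular files only need MODIFY and EXTEND.
 */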
2708 #define NFS_MAY_READ (NFS_ACCESS_READ)
2709 #define NFS_MAY_WRITE (NFS_ACCESS_MODIFY | \
2710 		NFS_ACCESS_EXTEND | \
2711 		NFS_ACCESS_DELETE)
2712 #define NFS_FILE_MAY_WRITE (NFS_ACCESS_MODIFY | \
2713 		NFS_ACCESS_EXTEND)
2714 #define NFS_DIR_MAY_WRITE NFS_MAY_WRITE
2715 #define NFS_MAY_LOOKUP (NFS_ACCESS_LOOKUP)
2716 #define NFS_MAY_EXECUTE (NFS_ACCESS_EXECUTE)
2717 static int
2718 nfs_access_calc_mask(u32 access_result, umode_t umode)
2719 {
2720 	int mask = 0;
2721 
2722 	if (access_result & NFS_MAY_READ)
2723 		mask |= MAY_READ;
2724 	if (S_ISDIR(umode)) {
2725 		if ((access_result & NFS_DIR_MAY_WRITE) == NFS_DIR_MAY_WRITE)
2726 			mask |= MAY_WRITE;
2727 		if ((access_result & NFS_MAY_LOOKUP) == NFS_MAY_LOOKUP)
2728 			mask |= MAY_EXEC;
2729 	} else if (S_ISREG(umode)) {
2730 		if ((access_result & NFS_FILE_MAY_WRITE) == NFS_FILE_MAY_WRITE)
2731 			mask |= MAY_WRITE;
2732 		if ((access_result & NFS_MAY_EXECUTE) == NFS_MAY_EXECUTE)
2733 			mask |= MAY_EXEC;
2734 	} else if (access_result & NFS_MAY_WRITE)
2735 		mask |= MAY_WRITE;
2736 	return mask;
2737 }
2738 
2739 void nfs_access_set_mask(struct nfs_access_entry *entry, u32 access_result)
2740 {
2741 	entry->mask = access_result;
2742 }
2743 EXPORT_SYMBOL_GPL(nfs_access_set_mask);
2744 
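/*
 * Check the requested MAY_* bits against the access cache.  On a cache
 * miss (and if we may block), issue an ACCESS RPC asking for every bit we
 * could conceivably need so that the reply can be cached for later calls.
 */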
2745 static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
2746 {
2747 	struct nfs_access_entry cache;
2748 	bool may_block = (mask & MAY_NOT_BLOCK) == 0;
2749 	int cache_mask = -1;
2750 	int status;
2751 
2752 	trace_nfs_access_enter(inode);
2753 
2754 	status = nfs_access_get_cached(inode, cred, &cache, may_block);
2755 	if (status == 0)
2756 		goto out_cached;
2757 
2758 	status = -ECHILD;
2759 	if (!may_block)
2760 		goto out;
2761 
2762 	/*
2763 	 * Determine which access bits we want to ask for...
2764 	 */
2765 	cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
2766 	if (nfs_server_capable(inode, NFS_CAP_XATTR)) {
2767 		cache.mask |= NFS_ACCESS_XAREAD | NFS_ACCESS_XAWRITE |
2768 		    NFS_ACCESS_XALIST;
2769 	}
2770 	if (S_ISDIR(inode->i_mode))
2771 		cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
2772 	else
2773 		cache.mask |= NFS_ACCESS_EXECUTE;
2774 	cache.cred = cred;
2775 	status = NFS_PROTO(inode)->access(inode, &cache);
2776 	if (status != 0) {
2777 		if (status == -ESTALE) {
2778 			if (!S_ISDIR(inode->i_mode))
2779 				nfs_set_inode_stale(inode);
2780 			else
2781 				nfs_zap_caches(inode);
2782 		}
2783 		goto out;
2784 	}
2785 	nfs_access_add_cache(inode, &cache);
2786 out_cached:
2787 	cache_mask = nfs_access_calc_mask(cache.mask, inode->i_mode);
2788 	if ((mask & ~cache_mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) != 0)
2789 		status = -EACCES;
2790 out:
2791 	trace_nfs_access_exit(inode, mask, cache_mask, status);
2792 	return status;
2793 }
2794 
2795 static int nfs_open_permission_mask(int openflags)
2796 {
2797 	int mask = 0;
2798 
2799 	if (openflags & __FMODE_EXEC) {
2800 		/* ONLY check exec rights */
2801 		mask = MAY_EXEC;
2802 	} else {
2803 		if ((openflags & O_ACCMODE) != O_WRONLY)
2804 			mask |= MAY_READ;
2805 		if ((openflags & O_ACCMODE) != O_RDONLY)
2806 			mask |= MAY_WRITE;
2807 	}
2808 
2809 	return mask;
2810 }
2811 
2812 int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags)
2813 {
2814 	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
2815 }
2816 EXPORT_SYMBOL_GPL(nfs_may_open);
2817 
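/*
 * For regular files, make sure the cached mode bits are trustworthy before
 * relying on execute_ok(); directories never fail the execute check here.
 */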
2818 static int nfs_execute_ok(struct inode *inode, int mask)
2819 {
2820 	struct nfs_server *server = NFS_SERVER(inode);
2821 	int ret = 0;
2822 
2823 	if (S_ISDIR(inode->i_mode))
2824 		return 0;
2825 	if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_OTHER)) {
2826 		if (mask & MAY_NOT_BLOCK)
2827 			return -ECHILD;
2828 		ret = __nfs_revalidate_inode(server, inode);
2829 	}
2830 	if (ret == 0 && !execute_ok(inode))
2831 		ret = -EACCES;
2832 	return ret;
2833 }
2834 
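/*
 * ->permission() entry point.  Accesses that the server will check anyway
 * are optimized away; everything else goes through the ACCESS cache via
 * nfs_do_access(), falling back to generic_permission() when the protocol
 * has no ->access method.
 */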
2835 int nfs_permission(struct inode *inode, int mask)
2836 {
2837 	const struct cred *cred = current_cred();
2838 	int res = 0;
2839 
2840 	nfs_inc_stats(inode, NFSIOS_VFSACCESS);
2841 
2842 	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
2843 		goto out;
2844 	/* Is this sys_access() ? */
2845 	if (mask & (MAY_ACCESS | MAY_CHDIR))
2846 		goto force_lookup;
2847 
2848 	switch (inode->i_mode & S_IFMT) {
2849 		case S_IFLNK:
2850 			goto out;
2851 		case S_IFREG:
2852 			if ((mask & MAY_OPEN) &&
2853 			   nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN))
2854 				return 0;
2855 			break;
2856 		case S_IFDIR:
2857 			/*
2858 			 * Optimize away all write operations, since the server
2859 			 * will check permissions when we perform the op.
2860 			 */
2861 			if ((mask & MAY_WRITE) && !(mask & MAY_READ))
2862 				goto out;
2863 	}
2864 
2865 force_lookup:
2866 	if (!NFS_PROTO(inode)->access)
2867 		goto out_notsup;
2868 
2869 	res = nfs_do_access(inode, cred, mask);
2870 out:
2871 	if (!res && (mask & MAY_EXEC))
2872 		res = nfs_execute_ok(inode, mask);
2873 
2874 	dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n",
2875 		inode->i_sb->s_id, inode->i_ino, mask, res);
2876 	return res;
2877 out_notsup:
2878 	if (mask & MAY_NOT_BLOCK)
2879 		return -ECHILD;
2880 
2881 	res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
2882 	if (res == 0)
2883 		res = generic_permission(inode, mask);
2884 	goto out;
2885 }
2886 EXPORT_SYMBOL_GPL(nfs_permission);
2887 
2888 /*
2889  * Local variables:
2890  *  version-control: t
2891  *  kept-new-versions: 5
2892  * End:
2893  */
2894