// SPDX-License-Identifier: GPL-2.0
/*
 *  Functions to handle the cached directory entries
 *
 *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

struct cached_dir_dentry {
	struct list_head entry;
	struct dentry *dentry;
};

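/*
 * Look up a cached directory handle for @path under @cfids->cfid_list_lock.
 * On a hit, take a reference and return the entry; on a miss (unless
 * @lookup_only or the cache is already at @max_cached_dirs entries),
 * allocate a new entry, put it on the list and return it with an extra
 * reference held for the lease.
 */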
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				spin_unlock(&cfids->cfid_list_lock);
				return NULL;
			}
			kref_get(&cfid->refcount);
			spin_unlock(&cfids->cfid_list_lock);
			return cfid;
		}
	}
	if (lookup_only) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	/*
	 * Set @cfid->has_lease to true during construction so that the lease
	 * reference can be put in cached_dir_lease_break() due to a potential
	 * lease break right after the request is sent or while @cfid is still
	 * being cached, or if a reconnection is triggered during construction.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	spin_unlock(&cfids->cfid_list_lock);
	return cfid;
}

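/*
 * Walk @path one component at a time from the superblock root and return
 * the dentry for the final component, or an ERR_PTR on failure.
 */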
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

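/*
 * Return @path with any mount prefix path stripped, or an ERR_PTR if the
 * prefix is longer than @path itself.
 */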
static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

/*
 * Open and cache a directory handle.
 * On error, *cfid is left uninitialized.
 */
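/*
 * Typical usage (illustrative sketch, not lifted from a specific caller):
 * a successful open must be balanced by close_cached_dir() to drop the
 * caller's reference, e.g.:
 *
 *	rc = open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid);
 *	if (!rc) {
 *		... use cfid->fid for compounded SMB2 operations ...
 *		close_cached_dir(cfid);
 *	}
 */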
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;

	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
	    is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cifs_sb->root == NULL)
		return -ENOENT;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or laundromat worker removed it
	 * from @cfids->entries.  Caller will put last reference if the latter.
	 */
	spin_lock(&cfids->cfid_list_lock);
	if (cfid->has_lease && cfid->time) {
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	/*
	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry().  Also, do it early because we might
	 * reconnect below while sending the compounded request, and end up
	 * with a different prefix path afterwards (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
	}
	cfid->dentry = dentry;
	cfid->tcon = tcon;

	/*
	 * We do not hold the lock for the open because SMB2_open may need
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CONFIG_CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point. One for the caller and one for a potential
			 * lease. Release one here, and the second below.
			 */
			cfid->has_lease = false;
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		spin_unlock(&cfids->cfid_list_lock);

		kref_put(&cfid->refcount, smb2_close_cached_fid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

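/*
 * Look up an already-cached directory handle by its dentry.  Takes a
 * reference on success; the caller must drop it with close_cached_dir().
 */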
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -ENOENT;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

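/*
 * Final kref release: unlink the cfid from the list, drop its dentry,
 * close the handle on the server if it is still open, and free it.
 */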
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
			   cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

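/*
 * Drop the cached directory handle for @name (e.g. when the directory is
 * being removed), putting the lease reference if one is still held.
 */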
void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

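/* Drop the caller's reference to @cfid. */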
void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;
	struct cached_dir_dentry *tmp_list, *q;
	LIST_HEAD(entry);

	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry(cfid, &cfids->entries, entry) {
			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
			if (tmp_list == NULL)
				break;
			spin_lock(&cfid->fid_lock);
			tmp_list->dentry = cfid->dentry;
			cfid->dentry = NULL;
			spin_unlock(&cfid->fid_lock);

			list_add_tail(&tmp_list->entry, &entry);
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
		list_del(&tmp_list->entry);
		dput(tmp_list->dentry);
		kfree(tmp_list);
	}

	/* Flush any pending work that will drop dentries */
	flush_workqueue(cfid_put_wq);
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying list.
	 * They'll be cleaned up later by cfids_invalidation_worker. Take
	 * a reference to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference.
			 */
			cfid->has_lease = false;
		} else
			kref_get(&cfid->refcount);
	}
	/*
	 * Queue dropping of the dentries once locks have been dropped
	 */
	if (!list_empty(&cfids->dying))
		queue_work(cfid_put_wq, &cfids->invalidation_work);
	spin_unlock(&cfids->cfid_list_lock);
}

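/*
 * Worker that drops the deferred reference to a cfid (closing the handle
 * on the server via smb2_close_cached_fid) and puts the tcon reference
 * taken when the work was queued.
 */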
static void
cached_dir_offload_close(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, close_work);
	struct cifs_tcon *tcon = cfid->tcon;

	WARN_ON(cfid->on_list);

	kref_put(&cfid->refcount, smb2_close_cached_fid);
	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}

/*
 * Release the cached directory's dentry, and then queue work to drop the
 * cached directory itself (closing it on the server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 */
static void cached_dir_put_work(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	struct dentry *dentry;

	spin_lock(&cfid->fid_lock);
	dentry = cfid->dentry;
	cfid->dentry = NULL;
	spin_unlock(&cfid->fid_lock);

	dput(dentry);
	queue_work(serverclose_wq, &cfid->close_work);
}

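/*
 * Handle a lease break for a cached directory handle: if @lease_key matches
 * a cfid that still holds its lease, unhook it from the list, grab a tcon
 * reference and queue work to drop the lease reference.  Returns true if a
 * matching cached handle was found.
 */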
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list
			 * so no other threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			++tcon->tc_count;
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_lease_break);
			queue_work(cfid_put_wq, &cfid->put_work);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

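/*
 * Allocate and initialize a cached_fid for @path.  GFP_ATOMIC because this
 * is called with @cfids->cfid_list_lock held.
 */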
static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
	INIT_WORK(&cfid->put_work, cached_dir_put_work);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

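/*
 * Free a cfid together with any cached dirents.  The entry must already be
 * off all lists and must have no work pending (see the WARN_ONs below).
 */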
static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	WARN_ON(work_pending(&cfid->close_work));
	WARN_ON(work_pending(&cfid->put_work));

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

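/*
 * Worker that drains @cfids->dying and drops the references taken in
 * invalidate_all_cached_dirs().
 */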
static void cfids_invalidation_worker(struct work_struct *work)
{
	struct cached_fids *cfids = container_of(work, struct cached_fids,
						 invalidation_work);
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	/* move cfids->dying to the local list */
	list_cut_before(&entry, &cfids->dying, &cfids->dying);
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

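/*
 * Periodic worker that expires cached handles older than dir_cache_timeout
 * seconds, dropping each expired entry's lease reference (or, if the lease
 * is already gone, an extra reference taken while scanning), and then
 * rearms itself.
 */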
static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	struct dentry *dentry;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->time &&
		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			if (cfid->has_lease) {
				/*
				 * Our lease has not yet been cancelled from the
				 * server. Steal that reference.
				 */
				cfid->has_lease = false;
			} else
				kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);

		spin_lock(&cfid->fid_lock);
		dentry = cfid->dentry;
		cfid->dentry = NULL;
		spin_unlock(&cfid->fid_lock);

		dput(dentry);
		if (cfid->is_open) {
			spin_lock(&cifs_tcp_ses_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cifs_tcp_ses_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else
			/*
			 * Drop the ref-count from above, either the lease-ref (if there
			 * was one) or the extra one acquired.
			 */
			kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

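/*
 * Allocate the per-tcon cached_fids structure and kick off the laundromat
 * work that periodically expires stale entries.
 */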
struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);
	INIT_LIST_HEAD(&cfids->dying);

	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);
	cancel_work_sync(&cfids->invalidation_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}
802