xref: /openbmc/linux/fs/smb/server/vfs_cache.c (revision 93696d8f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
4  * Copyright (C) 2019 Samsung Electronics Co., Ltd.
5  */
6 
7 #include <linux/fs.h>
8 #include <linux/filelock.h>
9 #include <linux/slab.h>
10 #include <linux/vmalloc.h>
11 
12 #include "glob.h"
13 #include "vfs_cache.h"
14 #include "oplock.h"
15 #include "vfs.h"
16 #include "connection.h"
17 #include "mgmt/tree_connect.h"
18 #include "mgmt/user_session.h"
19 #include "smb_common.h"
20 
21 #define S_DEL_PENDING			1
22 #define S_DEL_ON_CLS			2
23 #define S_DEL_ON_CLS_STREAM		8
24 
25 static unsigned int inode_hash_mask __read_mostly;
26 static unsigned int inode_hash_shift __read_mostly;
27 static struct hlist_head *inode_hashtable __read_mostly;
28 static DEFINE_RWLOCK(inode_hash_lock);
29 
30 static struct ksmbd_file_table global_ft;
31 static atomic_long_t fd_limit;
32 static struct kmem_cache *filp_cache;
33 
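/*
 * Open-file accounting: fd_limit counts the volatile-id slots that are
 * still available, capped by the system-wide maximum number of open
 * files.  fd_limit_depleted() reserves a slot (and undoes the reservation
 * when the limit is exhausted); fd_limit_close() returns a slot.
 */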
34 void ksmbd_set_fd_limit(unsigned long limit)
35 {
36 	limit = min(limit, get_max_files());
37 	atomic_long_set(&fd_limit, limit);
38 }
39 
40 static bool fd_limit_depleted(void)
41 {
42 	long v = atomic_long_dec_return(&fd_limit);
43 
44 	if (v >= 0)
45 		return false;
46 	atomic_long_inc(&fd_limit);
47 	return true;
48 }
49 
50 static void fd_limit_close(void)
51 {
52 	atomic_long_inc(&fd_limit);
53 }
54 
55 /*
56  * INODE hash
57  */
58 
59 static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
60 {
61 	unsigned long tmp;
62 
63 	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
64 		L1_CACHE_BYTES;
65 	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
66 	return tmp & inode_hash_mask;
67 }
68 
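/*
 * Look up the ksmbd_inode tracking the dentry @de.  The caller must hold
 * inode_hash_lock.  On success a reference is taken with
 * atomic_inc_not_zero(), so an entry whose count has already dropped to
 * zero is treated as not found.
 */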
69 static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
70 {
71 	struct hlist_head *head = inode_hashtable +
72 		inode_hash(d_inode(de)->i_sb, (unsigned long)de);
73 	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
74 
75 	hlist_for_each_entry(ci, head, m_hash) {
76 		if (ci->m_de == de) {
77 			if (atomic_inc_not_zero(&ci->m_count))
78 				ret_ci = ci;
79 			break;
80 		}
81 	}
82 	return ret_ci;
83 }
84 
85 static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
86 {
87 	return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
88 }
89 
90 struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
91 {
92 	struct ksmbd_inode *ci;
93 
94 	read_lock(&inode_hash_lock);
95 	ci = __ksmbd_inode_lookup(d);
96 	read_unlock(&inode_hash_lock);
97 
98 	return ci;
99 }
100 
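/*
 * Report whether @dentry is tracked by an open handle and, if so, whether
 * a delete is pending or scheduled for close.  The reference taken by the
 * lookup is dropped again before returning.
 */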
101 int ksmbd_query_inode_status(struct dentry *dentry)
102 {
103 	struct ksmbd_inode *ci;
104 	int ret = KSMBD_INODE_STATUS_UNKNOWN;
105 
106 	read_lock(&inode_hash_lock);
107 	ci = __ksmbd_inode_lookup(dentry);
108 	if (ci) {
109 		ret = KSMBD_INODE_STATUS_OK;
110 		if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
111 			ret = KSMBD_INODE_STATUS_PENDING_DELETE;
112 		atomic_dec(&ci->m_count);
113 	}
114 	read_unlock(&inode_hash_lock);
115 	return ret;
116 }
117 
118 bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
119 {
120 	return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
121 }
122 
123 void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
124 {
125 	fp->f_ci->m_flags |= S_DEL_PENDING;
126 }
127 
128 void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
129 {
130 	fp->f_ci->m_flags &= ~S_DEL_PENDING;
131 }
132 
133 void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
134 				  int file_info)
135 {
136 	if (ksmbd_stream_fd(fp)) {
137 		fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
138 		return;
139 	}
140 
141 	fp->f_ci->m_flags |= S_DEL_ON_CLS;
142 }
143 
144 static void ksmbd_inode_hash(struct ksmbd_inode *ci)
145 {
146 	struct hlist_head *b = inode_hashtable +
147 		inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);
148 
149 	hlist_add_head(&ci->m_hash, b);
150 }
151 
152 static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
153 {
154 	write_lock(&inode_hash_lock);
155 	hlist_del_init(&ci->m_hash);
156 	write_unlock(&inode_hash_lock);
157 }
158 
159 static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
160 {
161 	atomic_set(&ci->m_count, 1);
162 	atomic_set(&ci->op_count, 0);
163 	atomic_set(&ci->sop_count, 0);
164 	ci->m_flags = 0;
165 	ci->m_fattr = 0;
166 	INIT_LIST_HEAD(&ci->m_fp_list);
167 	INIT_LIST_HEAD(&ci->m_op_list);
168 	init_rwsem(&ci->m_lock);
169 	ci->m_de = fp->filp->f_path.dentry;
170 	return 0;
171 }
172 
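/*
 * Return the ksmbd_inode for @fp, allocating a new one if necessary.  The
 * second lookup under the write lock handles the race where another
 * thread inserted an entry after the read-locked lookup failed.
 */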
173 static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
174 {
175 	struct ksmbd_inode *ci, *tmpci;
176 	int rc;
177 
178 	read_lock(&inode_hash_lock);
179 	ci = ksmbd_inode_lookup(fp);
180 	read_unlock(&inode_hash_lock);
181 	if (ci)
182 		return ci;
183 
184 	ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
185 	if (!ci)
186 		return NULL;
187 
188 	rc = ksmbd_inode_init(ci, fp);
189 	if (rc) {
190 		pr_err("inode initialization failed\n");
191 		kfree(ci);
192 		return NULL;
193 	}
194 
195 	write_lock(&inode_hash_lock);
196 	tmpci = ksmbd_inode_lookup(fp);
197 	if (!tmpci) {
198 		ksmbd_inode_hash(ci);
199 	} else {
200 		kfree(ci);
201 		ci = tmpci;
202 	}
203 	write_unlock(&inode_hash_lock);
204 	return ci;
205 }
206 
207 static void ksmbd_inode_free(struct ksmbd_inode *ci)
208 {
209 	ksmbd_inode_unhash(ci);
210 	kfree(ci);
211 }
212 
213 void ksmbd_inode_put(struct ksmbd_inode *ci)
214 {
215 	if (atomic_dec_and_test(&ci->m_count))
216 		ksmbd_inode_free(ci);
217 }
218 
219 int __init ksmbd_inode_hash_init(void)
220 {
221 	unsigned int loop;
222 	unsigned long numentries = 16384;
223 	unsigned long bucketsize = sizeof(struct hlist_head);
224 	unsigned long size;
225 
226 	inode_hash_shift = ilog2(numentries);
227 	inode_hash_mask = (1 << inode_hash_shift) - 1;
228 
229 	size = bucketsize << inode_hash_shift;
230 
231 	/* init the inode hash table */
232 	inode_hashtable = vmalloc(size);
233 	if (!inode_hashtable)
234 		return -ENOMEM;
235 
236 	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
237 		INIT_HLIST_HEAD(&inode_hashtable[loop]);
238 	return 0;
239 }
240 
241 void ksmbd_release_inode_hash(void)
242 {
243 	vfree(inode_hashtable);
244 }
245 
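/*
 * Drop @fp's reference on its ksmbd_inode.  A stream marked for delete on
 * close is removed by deleting its xattr.  When the last reference goes
 * away, the file is unlinked if delete-on-close or a pending delete was
 * requested, and the ksmbd_inode is freed.
 */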
246 static void __ksmbd_inode_close(struct ksmbd_file *fp)
247 {
248 	struct ksmbd_inode *ci = fp->f_ci;
249 	int err;
250 	struct file *filp;
251 
252 	filp = fp->filp;
253 	if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
254 		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
255 		err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
256 					     &filp->f_path,
257 					     fp->stream.name,
258 					     true);
259 		if (err)
260 			pr_err("remove xattr failed: %s\n",
261 			       fp->stream.name);
262 	}
263 
264 	if (atomic_dec_and_test(&ci->m_count)) {
265 		down_write(&ci->m_lock);
266 		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
267 			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
268 			up_write(&ci->m_lock);
269 			ksmbd_vfs_unlink(filp);
270 			down_write(&ci->m_lock);
271 		}
272 		up_write(&ci->m_lock);
273 
274 		ksmbd_inode_free(ci);
275 	}
276 }
277 
278 static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
279 {
280 	if (!has_file_id(fp->persistent_id))
281 		return;
282 
283 	write_lock(&global_ft.lock);
284 	idr_remove(global_ft.idr, fp->persistent_id);
285 	write_unlock(&global_ft.lock);
286 }
287 
288 static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
289 {
290 	if (!has_file_id(fp->volatile_id))
291 		return;
292 
293 	down_write(&fp->f_ci->m_lock);
294 	list_del_init(&fp->node);
295 	up_write(&fp->f_ci->m_lock);
296 
297 	write_lock(&ft->lock);
298 	idr_remove(ft->idr, fp->volatile_id);
299 	write_unlock(&ft->lock);
300 }
301 
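/*
 * Final teardown of @fp: return its fd-limit slot, remove the persistent
 * and volatile ids, release any oplock, drop the inode reference, put the
 * VFS file and free the remaining byte-range locks before freeing @fp.
 */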
302 static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
303 {
304 	struct file *filp;
305 	struct ksmbd_lock *smb_lock, *tmp_lock;
306 
307 	fd_limit_close();
308 	__ksmbd_remove_durable_fd(fp);
309 	if (ft)
310 		__ksmbd_remove_fd(ft, fp);
311 
312 	close_id_del_oplock(fp);
313 	filp = fp->filp;
314 
315 	__ksmbd_inode_close(fp);
316 	if (!IS_ERR_OR_NULL(filp))
317 		fput(filp);
318 
319 	/* Because the reference count of fp is 0, it is guaranteed that
320 	 * there are no further accesses to fp->lock_list.
321 	 */
322 	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
323 		spin_lock(&fp->conn->llist_lock);
324 		list_del(&smb_lock->clist);
325 		spin_unlock(&fp->conn->llist_lock);
326 
327 		list_del(&smb_lock->flist);
328 		locks_free_lock(smb_lock->fl);
329 		kfree(smb_lock);
330 	}
331 
332 	if (ksmbd_stream_fd(fp))
333 		kfree(fp->stream.name);
334 	kmem_cache_free(filp_cache, fp);
335 }
336 
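/*
 * Take a lookup reference on @fp.  Returns NULL if the file has not been
 * fully initialized yet or is already being torn down.
 */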
337 static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
338 {
339 	if (fp->f_state != FP_INITED)
340 		return NULL;
341 
342 	if (!atomic_inc_not_zero(&fp->refcount))
343 		return NULL;
344 	return fp;
345 }
346 
347 static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
348 					    u64 id)
349 {
350 	struct ksmbd_file *fp;
351 
352 	if (!has_file_id(id))
353 		return NULL;
354 
355 	read_lock(&ft->lock);
356 	fp = idr_find(ft->idr, id);
357 	if (fp)
358 		fp = ksmbd_fp_get(fp);
359 	read_unlock(&ft->lock);
360 	return fp;
361 }
362 
363 static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
364 {
365 	__ksmbd_close_fd(&work->sess->file_table, fp);
366 	atomic_dec(&work->conn->stats.open_files_count);
367 }
368 
369 static void set_close_state_blocked_works(struct ksmbd_file *fp)
370 {
371 	struct ksmbd_work *cancel_work;
372 
373 	spin_lock(&fp->f_lock);
374 	list_for_each_entry(cancel_work, &fp->blocked_works,
375 				 fp_entry) {
376 		cancel_work->state = KSMBD_WORK_CLOSED;
377 		cancel_work->cancel_fn(cancel_work->cancel_argv);
378 	}
379 	spin_unlock(&fp->f_lock);
380 }
381 
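/*
 * Close the volatile id @id in the session's file table.  Works blocked
 * on the file are cancelled, the state moves to FP_CLOSED under the table
 * lock and the table's reference is dropped; whoever drops the last
 * reference performs the actual close.
 */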
382 int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
383 {
384 	struct ksmbd_file	*fp;
385 	struct ksmbd_file_table	*ft;
386 
387 	if (!has_file_id(id))
388 		return 0;
389 
390 	ft = &work->sess->file_table;
391 	write_lock(&ft->lock);
392 	fp = idr_find(ft->idr, id);
393 	if (fp) {
394 		set_close_state_blocked_works(fp);
395 
396 		if (fp->f_state != FP_INITED)
397 			fp = NULL;
398 		else {
399 			fp->f_state = FP_CLOSED;
400 			if (!atomic_dec_and_test(&fp->refcount))
401 				fp = NULL;
402 		}
403 	}
404 	write_unlock(&ft->lock);
405 
406 	if (!fp)
407 		return -EINVAL;
408 
409 	__put_fd_final(work, fp);
410 	return 0;
411 }
412 
413 void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
414 {
415 	if (!fp)
416 		return;
417 
418 	if (!atomic_dec_and_test(&fp->refcount))
419 		return;
420 	__put_fd_final(work, fp);
421 }
422 
423 static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
424 {
425 	if (!fp)
426 		return false;
427 	if (fp->tcon != tcon)
428 		return false;
429 	return true;
430 }
431 
432 struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
433 {
434 	return __ksmbd_lookup_fd(&work->sess->file_table, id);
435 }
436 
437 struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
438 {
439 	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
440 
441 	if (__sanity_check(work->tcon, fp))
442 		return fp;
443 
444 	ksmbd_fd_put(work, fp);
445 	return NULL;
446 }
447 
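/*
 * Look up a file by volatile and persistent id, falling back to the ids
 * cached for the current compound request when no id is supplied.  The
 * file must belong to the work's tree connect and match @pid.
 */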
448 struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
449 					u64 pid)
450 {
451 	struct ksmbd_file *fp;
452 
453 	if (!has_file_id(id)) {
454 		id = work->compound_fid;
455 		pid = work->compound_pfid;
456 	}
457 
458 	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
459 	if (!__sanity_check(work->tcon, fp)) {
460 		ksmbd_fd_put(work, fp);
461 		return NULL;
462 	}
463 	if (fp->persistent_id != pid) {
464 		ksmbd_fd_put(work, fp);
465 		return NULL;
466 	}
467 	return fp;
468 }
469 
470 struct ksmbd_file *ksmbd_lookup_global_fd(unsigned long long id)
471 {
472 	return __ksmbd_lookup_fd(&global_ft, id);
473 }
474 
475 struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
476 {
477 	struct ksmbd_file *fp;
478 
479 	fp = __ksmbd_lookup_fd(&global_ft, id);
480 	if (fp && fp->conn) {
481 		ksmbd_put_durable_fd(fp);
482 		fp = NULL;
483 	}
484 
485 	return fp;
486 }
487 
488 void ksmbd_put_durable_fd(struct ksmbd_file *fp)
489 {
490 	if (!atomic_dec_and_test(&fp->refcount))
491 		return;
492 
493 	__ksmbd_close_fd(NULL, fp);
494 }
495 
496 struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
497 {
498 	struct ksmbd_file	*fp = NULL;
499 	unsigned int		id;
500 
501 	read_lock(&global_ft.lock);
502 	idr_for_each_entry(global_ft.idr, fp, id) {
503 		if (!memcmp(fp->create_guid,
504 			    cguid,
505 			    SMB2_CREATE_GUID_SIZE)) {
506 			fp = ksmbd_fp_get(fp);
507 			break;
508 		}
509 	}
510 	read_unlock(&global_ft.lock);
511 
512 	return fp;
513 }
514 
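/*
 * Find an open ksmbd_file whose underlying inode matches @dentry's inode.
 * The ksmbd_inode reference taken by the hash lookup is dropped; a
 * reference is taken on the returned file instead.
 */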
515 struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
516 {
517 	struct ksmbd_file	*lfp;
518 	struct ksmbd_inode	*ci;
519 	struct inode		*inode = d_inode(dentry);
520 
521 	read_lock(&inode_hash_lock);
522 	ci = __ksmbd_inode_lookup(dentry);
523 	read_unlock(&inode_hash_lock);
524 	if (!ci)
525 		return NULL;
526 
527 	down_read(&ci->m_lock);
528 	list_for_each_entry(lfp, &ci->m_fp_list, node) {
529 		if (inode == file_inode(lfp->filp)) {
530 			atomic_dec(&ci->m_count);
531 			lfp = ksmbd_fp_get(lfp);
532 			up_read(&ci->m_lock);
533 			return lfp;
534 		}
535 	}
536 	atomic_dec(&ci->m_count);
537 	up_read(&ci->m_lock);
538 	return NULL;
539 }
540 
541 #define OPEN_ID_TYPE_VOLATILE_ID	(0)
542 #define OPEN_ID_TYPE_PERSISTENT_ID	(1)
543 
544 static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
545 {
546 	if (type == OPEN_ID_TYPE_VOLATILE_ID)
547 		fp->volatile_id = id;
548 	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
549 		fp->persistent_id = id;
550 }
551 
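/*
 * Allocate a volatile or persistent id for @fp from @ft's IDR using
 * cyclic allocation.  Volatile ids are subject to the global fd limit;
 * on failure the id is set to KSMBD_NO_FID and an error is returned.
 */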
552 static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
553 		     int type)
554 {
555 	u64			id = 0;
556 	int			ret;
557 
558 	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
559 		__open_id_set(fp, KSMBD_NO_FID, type);
560 		return -EMFILE;
561 	}
562 
563 	idr_preload(GFP_KERNEL);
564 	write_lock(&ft->lock);
565 	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
566 	if (ret >= 0) {
567 		id = ret;
568 		ret = 0;
569 	} else {
570 		id = KSMBD_NO_FID;
571 		fd_limit_close();
572 	}
573 
574 	__open_id_set(fp, id, type);
575 	write_unlock(&ft->lock);
576 	idr_preload_end();
577 	return ret;
578 }
579 
580 unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
581 {
582 	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
583 	return fp->persistent_id;
584 }
585 
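/*
 * Create a ksmbd_file for the opened VFS file @filp: bind it to the
 * current connection and tree connect, attach (or create) the shared
 * ksmbd_inode and allocate a volatile id in the session's file table.
 * Returns an ERR_PTR on failure.
 */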
586 struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
587 {
588 	struct ksmbd_file *fp;
589 	int ret;
590 
591 	fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
592 	if (!fp) {
593 		pr_err("Failed to allocate memory\n");
594 		return ERR_PTR(-ENOMEM);
595 	}
596 
597 	INIT_LIST_HEAD(&fp->blocked_works);
598 	INIT_LIST_HEAD(&fp->node);
599 	INIT_LIST_HEAD(&fp->lock_list);
600 	spin_lock_init(&fp->f_lock);
601 	atomic_set(&fp->refcount, 1);
602 
603 	fp->filp		= filp;
604 	fp->conn		= work->conn;
605 	fp->tcon		= work->tcon;
606 	fp->volatile_id		= KSMBD_NO_FID;
607 	fp->persistent_id	= KSMBD_NO_FID;
608 	fp->f_state		= FP_NEW;
609 	fp->f_ci		= ksmbd_inode_get(fp);
610 
611 	if (!fp->f_ci) {
612 		ret = -ENOMEM;
613 		goto err_out;
614 	}
615 
616 	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
617 	if (ret) {
618 		ksmbd_inode_put(fp->f_ci);
619 		goto err_out;
620 	}
621 
622 	atomic_inc(&work->conn->stats.open_files_count);
623 	return fp;
624 
625 err_out:
626 	kmem_cache_free(filp_cache, fp);
627 	return ERR_PTR(ret);
628 }
629 
630 void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
631 			 unsigned int state)
632 {
633 	if (!fp)
634 		return;
635 
636 	write_lock(&ft->lock);
637 	fp->f_state = state;
638 	write_unlock(&ft->lock);
639 }
640 
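/*
 * Walk @ft and close every file that the @skip callback does not exempt.
 * Returns the number of files closed so the caller can adjust its
 * open-file statistics.
 */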
641 static int
642 __close_file_table_ids(struct ksmbd_file_table *ft,
643 		       struct ksmbd_tree_connect *tcon,
644 		       bool (*skip)(struct ksmbd_tree_connect *tcon,
645 				    struct ksmbd_file *fp))
646 {
647 	unsigned int			id;
648 	struct ksmbd_file		*fp;
649 	int				num = 0;
650 
651 	idr_for_each_entry(ft->idr, fp, id) {
652 		if (skip(tcon, fp))
653 			continue;
654 
655 		set_close_state_blocked_works(fp);
656 
657 		if (!atomic_dec_and_test(&fp->refcount))
658 			continue;
659 		__ksmbd_close_fd(ft, fp);
660 		num++;
661 	}
662 	return num;
663 }
664 
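/*
 * A handle survives a disconnect if it is resilient or persistent, or if
 * it is durable with a handle-caching lease or a batch oplock, and no
 * oplock break is currently in progress.
 */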
665 static inline bool is_reconnectable(struct ksmbd_file *fp)
666 {
667 	struct oplock_info *opinfo = opinfo_get(fp);
668 	bool reconn = false;
669 
670 	if (!opinfo)
671 		return false;
672 
673 	if (opinfo->op_state != OPLOCK_STATE_NONE) {
674 		opinfo_put(opinfo);
675 		return false;
676 	}
677 
678 	if (fp->is_resilient || fp->is_persistent)
679 		reconn = true;
680 	else if (fp->is_durable && opinfo->is_lease &&
681 		 opinfo->o_lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
682 		reconn = true;
683 
684 	else if (fp->is_durable && opinfo->level == SMB2_OPLOCK_LEVEL_BATCH)
685 		reconn = true;
686 
687 	opinfo_put(opinfo);
688 	return reconn;
689 }
690 
691 static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
692 			       struct ksmbd_file *fp)
693 {
694 	return fp->tcon != tcon;
695 }
696 
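/*
 * Skip callback used when a session's file table is torn down:
 * reconnectable files are detached from the dying connection (oplock
 * owners cleared, conn/tcon reset, volatile id dropped) and preserved for
 * a later durable reconnect, so returning true keeps them open.
 */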
697 static bool session_fd_check(struct ksmbd_tree_connect *tcon,
698 			     struct ksmbd_file *fp)
699 {
700 	struct ksmbd_inode *ci;
701 	struct oplock_info *op;
702 	struct ksmbd_conn *conn;
703 
704 	if (!is_reconnectable(fp))
705 		return false;
706 
707 	conn = fp->conn;
708 	ci = fp->f_ci;
709 	down_write(&ci->m_lock);
710 	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
711 		if (op->conn != conn)
712 			continue;
713 		op->conn = NULL;
714 	}
715 	up_write(&ci->m_lock);
716 
717 	fp->conn = NULL;
718 	fp->tcon = NULL;
719 	fp->volatile_id = KSMBD_NO_FID;
720 
721 	return true;
722 }
723 
724 void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
725 {
726 	int num = __close_file_table_ids(&work->sess->file_table,
727 					 work->tcon,
728 					 tree_conn_fd_check);
729 
730 	atomic_sub(num, &work->conn->stats.open_files_count);
731 }
732 
733 void ksmbd_close_session_fds(struct ksmbd_work *work)
734 {
735 	int num = __close_file_table_ids(&work->sess->file_table,
736 					 work->tcon,
737 					 session_fd_check);
738 
739 	atomic_sub(num, &work->conn->stats.open_files_count);
740 }
741 
742 int ksmbd_init_global_file_table(void)
743 {
744 	return ksmbd_init_file_table(&global_ft);
745 }
746 
747 void ksmbd_free_global_file_table(void)
748 {
749 	struct ksmbd_file	*fp = NULL;
750 	unsigned int		id;
751 
752 	idr_for_each_entry(global_ft.idr, fp, id) {
753 		__ksmbd_remove_durable_fd(fp);
754 		kmem_cache_free(filp_cache, fp);
755 	}
756 
757 	ksmbd_destroy_file_table(&global_ft);
758 }
759 
760 int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
761 				  struct ksmbd_file *fp, char *name)
762 {
763 	char *pathname, *ab_pathname;
764 	int ret = 0;
765 
766 	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
767 	if (!pathname)
768 		return -EACCES;
769 
770 	ab_pathname = d_path(&fp->filp->f_path, pathname, PATH_MAX);
771 	if (IS_ERR(ab_pathname)) {
772 		kfree(pathname);
773 		return -EACCES;
774 	}
775 
776 	if (name && strcmp(&ab_pathname[share->path_sz + 1], name)) {
777 		ksmbd_debug(SMB, "invalid name reconnect %s\n", name);
778 		ret = -EINVAL;
779 	}
780 
781 	kfree(pathname);
782 
783 	return ret;
784 }
785 
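/*
 * Reclaim a disconnected durable handle: bind @fp to the reconnecting
 * connection and tree connect, reattach its orphaned oplocks and allocate
 * a fresh volatile id in the new session's file table.
 */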
786 int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
787 {
788 	struct ksmbd_inode *ci;
789 	struct oplock_info *op;
790 
791 	if (!fp->is_durable || fp->conn || fp->tcon) {
792 		pr_err("Invalid durable fd [%p:%p]\n", fp->conn, fp->tcon);
793 		return -EBADF;
794 	}
795 
796 	if (has_file_id(fp->volatile_id)) {
797 		pr_err("Durable fd still in use: %llu\n", fp->volatile_id);
798 		return -EBADF;
799 	}
800 
801 	fp->conn = work->conn;
802 	fp->tcon = work->tcon;
803 
804 	ci = fp->f_ci;
805 	down_write(&ci->m_lock);
806 	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
807 		if (op->conn)
808 			continue;
809 		op->conn = fp->conn;
810 	}
811 	up_write(&ci->m_lock);
812 
813 	__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
814 	if (!has_file_id(fp->volatile_id)) {
815 		fp->conn = NULL;
816 		fp->tcon = NULL;
817 		return -EBADF;
818 	}
819 	return 0;
820 }
821 
822 int ksmbd_init_file_table(struct ksmbd_file_table *ft)
823 {
824 	ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
825 	if (!ft->idr)
826 		return -ENOMEM;
827 
828 	idr_init(ft->idr);
829 	rwlock_init(&ft->lock);
830 	return 0;
831 }
832 
833 void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
834 {
835 	if (!ft->idr)
836 		return;
837 
838 	__close_file_table_ids(ft, NULL, session_fd_check);
839 	idr_destroy(ft->idr);
840 	kfree(ft->idr);
841 	ft->idr = NULL;
842 }
843 
844 int ksmbd_init_file_cache(void)
845 {
846 	filp_cache = kmem_cache_create("ksmbd_file_cache",
847 				       sizeof(struct ksmbd_file), 0,
848 				       SLAB_HWCACHE_ALIGN, NULL);
849 	if (!filp_cache)
850 		goto out;
851 
852 	return 0;
853 
854 out:
855 	pr_err("failed to allocate file cache\n");
856 	return -ENOMEM;
857 }
858 
859 void ksmbd_exit_file_cache(void)
860 {
861 	kmem_cache_destroy(filp_cache);
862 }
863