xref: /openbmc/linux/fs/smb/client/cifsfs.c (revision 8defb1d2)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/uuid.h>
29 #include <linux/xattr.h>
30 #include <uapi/linux/magic.h>
31 #include <net/ipv6.h>
32 #include "cifsfs.h"
33 #include "cifspdu.h"
34 #define DECLARE_GLOBALS_HERE
35 #include "cifsglob.h"
36 #include "cifsproto.h"
37 #include "cifs_debug.h"
38 #include "cifs_fs_sb.h"
39 #include <linux/mm.h>
40 #include <linux/key-type.h>
41 #include "cifs_spnego.h"
42 #include "fscache.h"
43 #ifdef CONFIG_CIFS_DFS_UPCALL
44 #include "dfs_cache.h"
45 #endif
46 #ifdef CONFIG_CIFS_SWN_UPCALL
47 #include "netlink.h"
48 #endif
49 #include "fs_context.h"
50 #include "cached_dir.h"
51 
52 /*
53  * DOS dates from 1980/1/1 through 2107/12/31
54  * Protocol specifications indicate the range should be to 119, which
55  * limits maximum year to 2099. But this range has not been checked.
56  */
/* DOS date bitfield: (years since 1980) << 9 | month << 5 | day */
57 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
58 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
/* DOS time bitfield: hours << 11 | minutes << 5 | seconds/2 (2s granularity) */
59 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
60 
/*
 * Module-wide behavior tunables; several are also exposed as writable
 * module parameters further down in this file.
 */
61 int cifsFYI = 0;
62 bool traceSMB;
63 bool enable_oplocks = true;
64 bool linuxExtEnabled = true;
65 bool lookupCacheEnabled = true;
66 bool disable_legacy_dialects; /* false by default */
67 bool enable_gcm_256 = true;
68 bool require_gcm_256; /* false by default */
69 bool enable_negotiate_signing; /* false by default */
70 unsigned int global_secflags = CIFSSEC_DEF;
71 /* unsigned int ntlmv2_support = 0; */
72 unsigned int sign_CIFS_PDUs = 1;
73 
74 /*
75  * Global transaction id (XID) information
76  */
77 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
79 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
80 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
81 
82 /*
83  *  Global counters, updated atomically
84  */
85 atomic_t sesInfoAllocCount;
86 atomic_t tconInfoAllocCount;
87 atomic_t tcpSesNextId;
88 atomic_t tcpSesAllocCount;
89 atomic_t tcpSesReconnectCount;
90 atomic_t tconInfoReconnectCount;
91 
/* outstanding mids and request/response buffer accounting */
92 atomic_t mid_count;
93 atomic_t buf_alloc_count;
94 atomic_t small_buf_alloc_count;
95 #ifdef CONFIG_CIFS_STATS2
96 atomic_t total_buf_alloc_count;
97 atomic_t total_small_buf_alloc_count;
98 #endif/* STATS2 */
/* global list of TCP sessions plus the lock guarding it */
99 struct list_head	cifs_tcp_ses_list;
100 spinlock_t		cifs_tcp_ses_lock;
/* forward declaration; the table itself is defined later in this file */
101 static const struct super_operations cifs_super_ops;
/*
 * Buffer/queue sizing module parameters. The first four are read-only
 * (0444) after load; dir_cache_timeout and slow_rsp_threshold are
 * runtime-writable (0644).
 */
102 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
103 module_param(CIFSMaxBufSize, uint, 0444);
104 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
105 				 "for CIFS requests. "
106 				 "Default: 16384 Range: 8192 to 130048");
107 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
108 module_param(cifs_min_rcv, uint, 0444);
109 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
110 				"1 to 64");
111 unsigned int cifs_min_small = 30;
112 module_param(cifs_min_small, uint, 0444);
113 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
114 				 "Range: 2 to 256");
115 unsigned int cifs_max_pending = CIFS_MAX_REQ;
116 module_param(cifs_max_pending, uint, 0444);
117 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
118 				   "CIFS/SMB1 dialect (N/A for SMB3) "
119 				   "Default: 32767 Range: 2 to 32767.");
120 unsigned int dir_cache_timeout = 30;
121 module_param(dir_cache_timeout, uint, 0644);
122 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
123 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
124 #ifdef CONFIG_CIFS_STATS2
125 unsigned int slow_rsp_threshold = 1;
126 module_param(slow_rsp_threshold, uint, 0644);
127 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
128 				   "before logging that a response is delayed. "
129 				   "Default: 1 (if set to 0 disables msg).");
130 #endif /* STATS2 */
131 
/* Boolean behavior toggles, all runtime-writable (0644). */
132 module_param(enable_oplocks, bool, 0644);
133 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
134 
135 module_param(enable_gcm_256, bool, 0644);
/*
 * Fix: enable_gcm_256 is defined "= true" near the top of this file, so the
 * description must not claim the default is off.
 */
136 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
137 
138 module_param(require_gcm_256, bool, 0644);
139 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
140 
141 module_param(enable_negotiate_signing, bool, 0644);
142 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
143 
144 module_param(disable_legacy_dialects, bool, 0644);
145 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
146 				  "helpful to restrict the ability to "
147 				  "override the default dialects (SMB2.1, "
148 				  "SMB3 and SMB3.02) on mount with old "
149 				  "dialects (CIFS/SMB1 and SMB2) since "
150 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
151 				  " and less secure. Default: n/N/0");
152 
/*
 * Request-buffer mempools. NOTE(review): these externs look redundant —
 * the pools are defined later in this very file; confirm before removing.
 */
153 extern mempool_t *cifs_sm_req_poolp;
154 extern mempool_t *cifs_req_poolp;
155 extern mempool_t *cifs_mid_poolp;
156 
/* Workqueues used by the client for deferred and asynchronous work */
157 struct workqueue_struct	*cifsiod_wq;
158 struct workqueue_struct	*decrypt_wq;
159 struct workqueue_struct	*fileinfo_put_wq;
160 struct workqueue_struct	*cifsoplockd_wq;
161 struct workqueue_struct	*deferredclose_wq;
162 struct workqueue_struct	*serverclose_wq;
/* NOTE(review): initialized elsewhere; presumably a random salt used when
 * hashing lock owners — confirm at the site that sets it */
163 __u32 cifs_lock_secret;
164 
165 /*
166  * Bumps refcount for cifs super block.
167  * Note that it should be only called if a reference to VFS super block is
168  * already held, e.g. in open-type syscalls context. Otherwise it can race with
169  * atomic_dec_and_test in deactivate_locked_super.
170  */
171 void
172 cifs_sb_active(struct super_block *sb)
173 {
174 	struct cifs_sb_info *server = CIFS_SB(sb);
175 
176 	if (atomic_inc_return(&server->active) == 1)
177 		atomic_inc(&sb->s_active);
178 }
179 
180 void
181 cifs_sb_deactive(struct super_block *sb)
182 {
183 	struct cifs_sb_info *server = CIFS_SB(sb);
184 
185 	if (atomic_dec_and_test(&server->active))
186 		deactivate_super(sb);
187 }
188 
/*
 * cifs_read_super - initialize a newly created superblock
 * @sb: VFS super block whose s_fs_info already points at our cifs_sb_info
 *
 * Sets superblock flags, file size limits, timestamp granularity/range,
 * bdi readahead and block size, then fetches the root inode from the
 * server and creates the root dentry.
 * Returns 0 on success or a negative errno.
 */
189 static int
190 cifs_read_super(struct super_block *sb)
191 {
192 	struct inode *inode;
193 	struct cifs_sb_info *cifs_sb;
194 	struct cifs_tcon *tcon;
195 	struct timespec64 ts;
196 	int rc = 0;
197 
198 	cifs_sb = CIFS_SB(sb);
199 	tcon = cifs_sb_master_tcon(cifs_sb);
200 
201 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
202 		sb->s_flags |= SB_POSIXACL;
203 
	/* snapshot (previous version) mounts are forced read-only */
204 	if (tcon->snapshot_time)
205 		sb->s_flags |= SB_RDONLY;
206 
207 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
208 		sb->s_maxbytes = MAX_LFS_FILESIZE;
209 	else
210 		sb->s_maxbytes = MAX_NON_LFS;
211 
212 	/*
213 	 * Some very old servers like DOS and OS/2 used 2 second granularity
214 	 * (while all current servers use 100ns granularity - see MS-DTYP)
215 	 * but 1 second is the maximum allowed granularity for the VFS
216 	 * so for old servers set time granularity to 1 second while for
217 	 * everything else (current servers) set it to 100ns.
218 	 */
219 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
220 	    ((tcon->ses->capabilities &
221 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
222 	    !tcon->unix_ext) {
223 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
224 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
225 		sb->s_time_min = ts.tv_sec;
226 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
227 				    cpu_to_le16(SMB_TIME_MAX), 0);
228 		sb->s_time_max = ts.tv_sec;
229 	} else {
230 		/*
231 		 * Almost every server, including all SMB2+, uses DCE TIME
232 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
233 		 */
234 		sb->s_time_gran = 100;
235 		ts = cifs_NTtimeToUnix(0);
236 		sb->s_time_min = ts.tv_sec;
237 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
238 		sb->s_time_max = ts.tv_sec;
239 	}
240 
241 	sb->s_magic = CIFS_SUPER_MAGIC;
242 	sb->s_op = &cifs_super_ops;
243 	sb->s_xattr = cifs_xattr_handlers;
244 	rc = super_setup_bdi(sb);
245 	if (rc)
246 		goto out_no_root;
247 	/* tune readahead according to rsize if readahead size not set on mount */
248 	if (cifs_sb->ctx->rsize == 0)
249 		cifs_sb->ctx->rsize =
250 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
251 	if (cifs_sb->ctx->rasize)
252 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
253 	else
254 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
255 
256 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
257 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
258 	inode = cifs_root_iget(sb);
259 
260 	if (IS_ERR(inode)) {
261 		rc = PTR_ERR(inode);
262 		goto out_no_root;
263 	}
264 
	/* pick dentry ops before d_make_root so they are inherited */
265 	if (tcon->nocase)
266 		sb->s_d_op = &cifs_ci_dentry_ops;
267 	else
268 		sb->s_d_op = &cifs_dentry_ops;
269 
270 	sb->s_root = d_make_root(inode);
271 	if (!sb->s_root) {
272 		rc = -ENOMEM;
273 		goto out_no_root;
274 	}
275 
276 #ifdef CONFIG_CIFS_NFSD_EXPORT
277 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
278 		cifs_dbg(FYI, "export ops supported\n");
279 		sb->s_export_op = &cifs_export_ops;
280 	}
281 #endif /* CONFIG_CIFS_NFSD_EXPORT */
282 
283 	return 0;
284 
285 out_no_root:
286 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
287 	return rc;
288 }
289 
/*
 * cifs_kill_sb - tear down the super block at umount time
 * @sb: super block being destroyed
 *
 * Drops cached-directory dentries and the root dentry before the
 * generic teardown, then releases the cifs-level mount state.
 */
290 static void cifs_kill_sb(struct super_block *sb)
291 {
292 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
293 
294 	/*
295 	 * We need to release all dentries for the cached directories
296 	 * before we kill the sb.
297 	 */
298 	if (cifs_sb->root) {
299 		close_all_cached_dirs(cifs_sb);
300 
301 		/* finally release root dentry */
302 		dput(cifs_sb->root);
303 		cifs_sb->root = NULL;
304 	}
305 
306 	kill_anon_super(sb);
307 	cifs_umount(cifs_sb);
308 }
309 
/*
 * cifs_statfs - fill in filesystem statistics for statfs(2)
 * @dentry: any dentry on the mount
 * @buf: kstatfs to populate
 *
 * Fills namelen/fsid locally, then lets the dialect-specific queryfs
 * handler (if any) fill in block counts. Returns 0 or a negative errno.
 */
310 static int
311 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
312 {
313 	struct super_block *sb = dentry->d_sb;
314 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
315 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
316 	struct TCP_Server_Info *server = tcon->ses->server;
317 	unsigned int xid;
318 	int rc = 0;
319 
320 	xid = get_xid();
321 
	/* prefer the server-reported max path component length */
322 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
323 		buf->f_namelen =
324 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
325 	else
326 		buf->f_namelen = PATH_MAX;
327 
328 	buf->f_fsid.val[0] = tcon->vol_serial_number;
329 	/* are using part of create time for more randomness, see man statfs */
330 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
331 
332 	buf->f_files = 0;	/* undefined */
333 	buf->f_ffree = 0;	/* unlimited */
334 
335 	if (server->ops->queryfs)
336 		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
337 
338 	free_xid(xid);
339 	return rc;
340 }
341 
342 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
343 {
344 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
345 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
346 	struct TCP_Server_Info *server = tcon->ses->server;
347 
348 	if (server->ops->fallocate)
349 		return server->ops->fallocate(file, tcon, mode, off, len);
350 
351 	return -EOPNOTSUPP;
352 }
353 
354 static int cifs_permission(struct mnt_idmap *idmap,
355 			   struct inode *inode, int mask)
356 {
357 	struct cifs_sb_info *cifs_sb;
358 
359 	cifs_sb = CIFS_SB(inode->i_sb);
360 
361 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
362 		if ((mask & MAY_EXEC) && !execute_ok(inode))
363 			return -EACCES;
364 		else
365 			return 0;
366 	} else /* file mode might have been restricted at mount time
367 		on the client (above and beyond ACL on servers) for
368 		servers which do not support setting and viewing mode bits,
369 		so allowing client to check permissions is useful */
370 		return generic_permission(&nop_mnt_idmap, inode, mask);
371 }
372 
/* slab caches and mempools backing inode, request and mid allocations */
373 static struct kmem_cache *cifs_inode_cachep;
374 static struct kmem_cache *cifs_req_cachep;
375 static struct kmem_cache *cifs_mid_cachep;
376 static struct kmem_cache *cifs_sm_req_cachep;
377 mempool_t *cifs_sm_req_poolp;
378 mempool_t *cifs_req_poolp;
379 mempool_t *cifs_mid_poolp;
380 
/*
 * cifs_alloc_inode - alloc_inode super op
 * @sb: owning super block
 *
 * Allocates a cifsInodeInfo from the inode slab cache and initializes
 * its locks, lists and cached attributes. Returns the embedded VFS
 * inode, or NULL on allocation failure.
 */
381 static struct inode *
382 cifs_alloc_inode(struct super_block *sb)
383 {
384 	struct cifsInodeInfo *cifs_inode;
385 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
386 	if (!cifs_inode)
387 		return NULL;
	/* 0x20 is the DOS/SMB ARCHIVE attribute bit */
388 	cifs_inode->cifsAttrs = 0x20;	/* default */
389 	cifs_inode->time = 0;
390 	/*
391 	 * Until the file is open and we have gotten oplock info back from the
392 	 * server, can not assume caching of file data or metadata.
393 	 */
394 	cifs_set_oplock_level(cifs_inode, 0);
395 	cifs_inode->flags = 0;
396 	spin_lock_init(&cifs_inode->writers_lock);
397 	cifs_inode->writers = 0;
398 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
399 	cifs_inode->server_eof = 0;
400 	cifs_inode->uniqueid = 0;
401 	cifs_inode->createtime = 0;
402 	cifs_inode->epoch = 0;
403 	spin_lock_init(&cifs_inode->open_file_lock);
404 	generate_random_uuid(cifs_inode->lease_key);
405 	cifs_inode->symlink_target = NULL;
406 
407 	/*
408 	 * Can not set i_flags here - they get immediately overwritten to zero
409 	 * by the VFS.
410 	 */
411 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
412 	INIT_LIST_HEAD(&cifs_inode->openFileList);
413 	INIT_LIST_HEAD(&cifs_inode->llist);
414 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
415 	spin_lock_init(&cifs_inode->deferred_lock);
416 	return &cifs_inode->netfs.inode;
417 }
418 
419 static void
420 cifs_free_inode(struct inode *inode)
421 {
422 	struct cifsInodeInfo *cinode = CIFS_I(inode);
423 
424 	if (S_ISLNK(inode->i_mode))
425 		kfree(cinode->symlink_target);
426 	kmem_cache_free(cifs_inode_cachep, cinode);
427 }
428 
/*
 * evict_inode super op: drop the page cache, release any fscache cookie
 * (unusing it first if it was pinning writeback), then clear the inode.
 * The ordering here is deliberate.
 */
429 static void
430 cifs_evict_inode(struct inode *inode)
431 {
432 	truncate_inode_pages_final(&inode->i_data);
433 	if (inode->i_state & I_PINNING_FSCACHE_WB)
434 		cifs_fscache_unuse_inode_cookie(inode, true);
435 	cifs_fscache_release_inode_cookie(inode);
436 	clear_inode(inode);
437 }
438 
/*
 * Emit the ",addr=" (and possibly ",rdma") mount options describing the
 * server's destination address into a /proc/mounts seq_file.
 */
439 static void
440 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
441 {
	/* both views alias server->dstaddr; ss_family selects which is valid */
442 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
443 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
444 
445 	seq_puts(s, ",addr=");
446 
447 	switch (server->dstaddr.ss_family) {
448 	case AF_INET:
449 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
450 		break;
451 	case AF_INET6:
452 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
453 		if (sa6->sin6_scope_id)
454 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
455 		break;
456 	default:
457 		seq_puts(s, "(unknown)");
458 	}
459 	if (server->rdma)
460 		seq_puts(s, ",rdma");
461 }
462 
/*
 * Emit the ",sec=" mount option (plus "i" suffix when signing is on and
 * ",cruid=" for Kerberos) for /proc/mounts.
 */
463 static void
464 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
465 {
	/* anonymous (null user) sessions with no explicit type show sec=none */
466 	if (ses->sectype == Unspecified) {
467 		if (ses->user_name == NULL)
468 			seq_puts(s, ",sec=none");
469 		return;
470 	}
471 
472 	seq_puts(s, ",sec=");
473 
474 	switch (ses->sectype) {
475 	case NTLMv2:
476 		seq_puts(s, "ntlmv2");
477 		break;
478 	case Kerberos:
479 		seq_puts(s, "krb5");
480 		break;
481 	case RawNTLMSSP:
482 		seq_puts(s, "ntlmssp");
483 		break;
484 	default:
485 		/* shouldn't ever happen */
486 		seq_puts(s, "unknown");
487 		break;
488 	}
489 
	/* signing variants are spelled with a trailing "i" (e.g. krb5i) */
490 	if (ses->sign)
491 		seq_puts(s, "i");
492 
493 	if (ses->sectype == Kerberos)
494 		seq_printf(s, ",cruid=%u",
495 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
496 }
497 
/*
 * Emit the ",cache=" mount option. The if/else chain encodes flag
 * precedence: strict > none (direct I/O) > singleclient > ro > loose.
 */
498 static void
499 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
500 {
501 	seq_puts(s, ",cache=");
502 
503 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
504 		seq_puts(s, "strict");
505 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
506 		seq_puts(s, "none");
507 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
508 		seq_puts(s, "singleclient"); /* assume only one client access */
509 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
510 		seq_puts(s, "ro"); /* read only caching assumed */
511 	else
512 		seq_puts(s, "loose");
513 }
514 
515 /*
516  * cifs_show_devname() is used so we show the mount device name with correct
517  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
518  */
519 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
520 {
521 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
522 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
523 
524 	if (devname == NULL)
525 		seq_puts(m, "none");
526 	else {
527 		convert_delimiter(devname, '/');
528 		/* escape all spaces in share names */
529 		seq_escape(m, devname, " \t");
530 		kfree(devname);
531 	}
532 	return 0;
533 }
534 
535 /*
536  * cifs_show_options() is for displaying mount options in /proc/mounts.
537  * Not all settable options are displayed but most of the important
538  * ones are.
539  */
/*
 * show_options super op: emit the mount options for /proc/mounts.
 * Output order is part of the user-visible format — do not reorder.
 */
540 static int
541 cifs_show_options(struct seq_file *s, struct dentry *root)
542 {
543 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
544 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
545 	struct sockaddr *srcaddr;
546 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
547 
548 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
549 	cifs_show_security(s, tcon->ses);
550 	cifs_show_cache_flavor(s, cifs_sb);
551 
	/* per-tree-connect and per-session identity options */
552 	if (tcon->no_lease)
553 		seq_puts(s, ",nolease");
554 	if (cifs_sb->ctx->multiuser)
555 		seq_puts(s, ",multiuser");
556 	else if (tcon->ses->user_name)
557 		seq_show_option(s, "username", tcon->ses->user_name);
558 
559 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
560 		seq_show_option(s, "domain", tcon->ses->domainName);
561 
	/* client-side bind address, if one was requested */
562 	if (srcaddr->sa_family != AF_UNSPEC) {
563 		struct sockaddr_in *saddr4;
564 		struct sockaddr_in6 *saddr6;
565 		saddr4 = (struct sockaddr_in *)srcaddr;
566 		saddr6 = (struct sockaddr_in6 *)srcaddr;
567 		if (srcaddr->sa_family == AF_INET6)
568 			seq_printf(s, ",srcaddr=%pI6c",
569 				   &saddr6->sin6_addr);
570 		else if (srcaddr->sa_family == AF_INET)
571 			seq_printf(s, ",srcaddr=%pI4",
572 				   &saddr4->sin_addr.s_addr);
573 		else
574 			seq_printf(s, ",srcaddr=BAD-AF:%i",
575 				   (int)(srcaddr->sa_family));
576 	}
577 
578 	seq_printf(s, ",uid=%u",
579 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
580 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
581 		seq_puts(s, ",forceuid");
582 	else
583 		seq_puts(s, ",noforceuid");
584 
585 	seq_printf(s, ",gid=%u",
586 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
587 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
588 		seq_puts(s, ",forcegid");
589 	else
590 		seq_puts(s, ",noforcegid");
591 
592 	cifs_show_address(s, tcon->ses->server);
593 
594 	if (!tcon->unix_ext)
595 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
596 					   cifs_sb->ctx->file_mode,
597 					   cifs_sb->ctx->dir_mode);
598 	if (cifs_sb->ctx->iocharset)
599 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
600 	if (tcon->seal)
601 		seq_puts(s, ",seal");
602 	else if (tcon->ses->server->ignore_signature)
603 		seq_puts(s, ",signloosely");
604 	if (tcon->nocase)
605 		seq_puts(s, ",nocase");
606 	if (tcon->nodelete)
607 		seq_puts(s, ",nodelete");
608 	if (cifs_sb->ctx->no_sparse)
609 		seq_puts(s, ",nosparse");
610 	if (tcon->local_lease)
611 		seq_puts(s, ",locallease");
612 	if (tcon->retry)
613 		seq_puts(s, ",hard");
614 	else
615 		seq_puts(s, ",soft");
616 	if (tcon->use_persistent)
617 		seq_puts(s, ",persistenthandles");
618 	else if (tcon->use_resilient)
619 		seq_puts(s, ",resilienthandles");
620 	if (tcon->posix_extensions)
621 		seq_puts(s, ",posix");
622 	else if (tcon->unix_ext)
623 		seq_puts(s, ",unix");
624 	else
625 		seq_puts(s, ",nounix");
	/* boolean options driven by mnt_cifs_flags */
626 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
627 		seq_puts(s, ",nodfs");
628 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
629 		seq_puts(s, ",posixpaths");
630 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
631 		seq_puts(s, ",setuids");
632 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
633 		seq_puts(s, ",idsfromsid");
634 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
635 		seq_puts(s, ",serverino");
636 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
637 		seq_puts(s, ",rwpidforward");
638 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
639 		seq_puts(s, ",forcemand");
640 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
641 		seq_puts(s, ",nouser_xattr");
642 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
643 		seq_puts(s, ",mapchars");
644 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
645 		seq_puts(s, ",mapposix");
646 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
647 		seq_puts(s, ",sfu");
648 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
649 		seq_puts(s, ",nobrl");
650 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
651 		seq_puts(s, ",nohandlecache");
652 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
653 		seq_puts(s, ",modefromsid");
654 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
655 		seq_puts(s, ",cifsacl");
656 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
657 		seq_puts(s, ",dynperm");
658 	if (root->d_sb->s_flags & SB_POSIXACL)
659 		seq_puts(s, ",acl");
660 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
661 		seq_puts(s, ",mfsymlinks");
662 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
663 		seq_puts(s, ",fsc");
664 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
665 		seq_puts(s, ",nostrictsync");
666 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
667 		seq_puts(s, ",noperm");
668 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
669 		seq_printf(s, ",backupuid=%u",
670 			   from_kuid_munged(&init_user_ns,
671 					    cifs_sb->ctx->backupuid));
672 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
673 		seq_printf(s, ",backupgid=%u",
674 			   from_kgid_munged(&init_user_ns,
675 					    cifs_sb->ctx->backupgid));
676 
	/* I/O sizing options */
677 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
678 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
679 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
680 	if (cifs_sb->ctx->rasize)
681 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
682 	if (tcon->ses->server->min_offload)
683 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
684 	seq_printf(s, ",echo_interval=%lu",
685 			tcon->ses->server->echo_interval / HZ);
686 
687 	/* Only display the following if overridden on mount */
688 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
689 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
690 	if (tcon->ses->server->tcp_nodelay)
691 		seq_puts(s, ",tcpnodelay");
692 	if (tcon->ses->server->noautotune)
693 		seq_puts(s, ",noautotune");
694 	if (tcon->ses->server->noblocksnd)
695 		seq_puts(s, ",noblocksend");
696 	if (tcon->ses->server->nosharesock)
697 		seq_puts(s, ",nosharesock");
698 
699 	if (tcon->snapshot_time)
700 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
701 	if (tcon->handle_timeout)
702 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
703 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
704 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
705 
706 	/*
707 	 * Display file and directory attribute timeout in seconds.
708 	 * If file and directory attribute timeout the same then actimeo
709 	 * was likely specified on mount
710 	 */
711 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
712 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
713 	else {
714 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
715 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
716 	}
717 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
718 
719 	if (tcon->ses->chan_max > 1)
720 		seq_printf(s, ",multichannel,max_channels=%zu",
721 			   tcon->ses->chan_max);
722 
723 	if (tcon->use_witness)
724 		seq_puts(s, ",witness");
725 
726 	return 0;
727 }
728 
/*
 * umount_begin super op (umount -f): if this is the last mount of the
 * share, close deferred files and wake all threads blocked on the
 * server queues so they can notice the forced unmount.
 * Lock order: cifs_tcp_ses_lock, then tcon->tc_lock.
 */
729 static void cifs_umount_begin(struct super_block *sb)
730 {
731 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
732 	struct cifs_tcon *tcon;
733 
734 	if (cifs_sb == NULL)
735 		return;
736 
737 	tcon = cifs_sb_master_tcon(cifs_sb);
738 
739 	spin_lock(&cifs_tcp_ses_lock);
740 	spin_lock(&tcon->tc_lock);
741 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
742 		/* we have other mounts to same share or we have
743 		   already tried to umount this and woken up
744 		   all waiting network requests, nothing to do */
745 		spin_unlock(&tcon->tc_lock);
746 		spin_unlock(&cifs_tcp_ses_lock);
747 		return;
748 	}
749 	/*
750 	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
751 	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
752 	 */
753 	spin_unlock(&tcon->tc_lock);
754 	spin_unlock(&cifs_tcp_ses_lock);
755 
756 	cifs_close_all_deferred_files(tcon);
757 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
758 	/* cancel_notify_requests(tcon); */
759 	if (tcon->ses && tcon->ses->server) {
760 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
761 		wake_up_all(&tcon->ses->server->request_q);
762 		wake_up_all(&tcon->ses->server->response_q);
763 		msleep(1); /* yield */
764 		/* we have to kick the requests once more */
765 		wake_up_all(&tcon->ses->server->response_q);
766 		msleep(1);
767 	}
768 
769 	return;
770 }
771 
/*
 * freeze_fs super op: flush out deferred file closes before the
 * filesystem is frozen. Always succeeds.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (!cifs_sb)
		return 0;

	cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));
	return 0;
}
785 
786 #ifdef CONFIG_CIFS_STATS2
/* show_stats super op: per-superblock stats not implemented yet (BB). */
787 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
788 {
789 	/* BB FIXME */
790 	return 0;
791 }
792 #endif
793 
/*
 * write_inode super op: no dirty inode metadata to push to the server
 * here — just unpin the fscache cookie that was pinning writeback.
 */
794 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
795 {
796 	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
797 	return 0;
798 }
799 
800 static int cifs_drop_inode(struct inode *inode)
801 {
802 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
803 
804 	/* no serverino => unconditional eviction */
805 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
806 		generic_drop_inode(inode);
807 }
808 
/* super_operations table wired into every cifs superblock by cifs_read_super */
809 static const struct super_operations cifs_super_ops = {
810 	.statfs = cifs_statfs,
811 	.alloc_inode = cifs_alloc_inode,
812 	.write_inode	= cifs_write_inode,
813 	.free_inode = cifs_free_inode,
814 	.drop_inode	= cifs_drop_inode,
815 	.evict_inode	= cifs_evict_inode,
816 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
817 	.show_devname   = cifs_show_devname,
818 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
819 	function unless later we add lazy close of inodes or unless the
820 	kernel forgets to call us with the same number of releases (closes)
821 	as opens */
822 	.show_options = cifs_show_options,
823 	.umount_begin   = cifs_umount_begin,
824 	.freeze_fs      = cifs_freeze,
825 #ifdef CONFIG_CIFS_STATS2
826 	.show_stats = cifs_show_stats,
827 #endif
828 };
829 
830 /*
831  * Get root dentry from superblock according to prefix path mount option.
832  * Return dentry with refcount + 1 on success and NULL otherwise.
833  */
/*
 * cifs_get_root - walk the "prepath" mount option to the effective root
 * @ctx: mount context holding the path components
 * @sb: superblock whose s_root is the share root
 *
 * Splits the prefix path on the dialect's directory separator and does a
 * component-by-component lookup from sb->s_root, transferring the dentry
 * reference at each step. Returns a referenced dentry or an ERR_PTR.
 */
834 static struct dentry *
835 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
836 {
837 	struct dentry *dentry;
838 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
839 	char *full_path = NULL;
840 	char *s, *p;
841 	char sep;
842 
	/* prefix path already handled elsewhere for this mount flavor */
843 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
844 		return dget(sb->s_root);
845 
846 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
847 				cifs_sb_master_tcon(cifs_sb), 0);
848 	if (full_path == NULL)
849 		return ERR_PTR(-ENOMEM);
850 
851 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
852 
853 	sep = CIFS_DIR_SEP(cifs_sb);
854 	dentry = dget(sb->s_root);
855 	s = full_path;
856 
857 	do {
858 		struct inode *dir = d_inode(dentry);
859 		struct dentry *child;
860 
		/* can only descend through directories */
861 		if (!S_ISDIR(dir->i_mode)) {
862 			dput(dentry);
863 			dentry = ERR_PTR(-ENOTDIR);
864 			break;
865 		}
866 
867 		/* skip separators */
868 		while (*s == sep)
869 			s++;
870 		if (!*s)
871 			break;
872 		p = s++;
873 		/* next separator */
874 		while (*s && *s != sep)
875 			s++;
876 
		/* [p, s) is one path component; child takes over the ref */
877 		child = lookup_positive_unlocked(p, dentry, s - p);
878 		dput(dentry);
879 		dentry = child;
880 	} while (!IS_ERR(dentry));
881 	kfree(full_path);
882 	return dentry;
883 }
884 
/* sget() "set" callback: attach our cifs_sb_info to a brand-new superblock. */
885 static int cifs_set_super(struct super_block *sb, void *data)
886 {
887 	struct cifs_mnt_data *mnt_data = data;
888 	sb->s_fs_info = mnt_data->cifs_sb;
889 	return set_anon_super(sb, NULL);
890 }
891 
/*
 * cifs_smb3_do_mount - mount entry point shared by the cifs/smb3 fs types
 * @fs_type: filesystem type being mounted
 * @flags: mount flags from the VFS
 * @old_ctx: parsed mount options (duplicated, not owned, by this function)
 *
 * Builds a cifs_sb_info, connects to the server, then finds or creates a
 * matching superblock via sget(). If an existing superblock is reused,
 * the freshly built cifs_sb is torn down again (cifs_sb set to NULL
 * below marks that hand-off). Returns the root dentry or an ERR_PTR.
 */
892 struct dentry *
893 cifs_smb3_do_mount(struct file_system_type *fs_type,
894 	      int flags, struct smb3_fs_context *old_ctx)
895 {
896 	struct cifs_mnt_data mnt_data;
897 	struct cifs_sb_info *cifs_sb;
898 	struct super_block *sb;
899 	struct dentry *root;
900 	int rc;
901 
902 	if (cifsFYI) {
903 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
904 			 old_ctx->source, flags);
905 	} else {
906 		cifs_info("Attempting to mount %s\n", old_ctx->source);
907 	}
908 
909 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
910 	if (!cifs_sb)
911 		return ERR_PTR(-ENOMEM);
912 
913 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
914 	if (!cifs_sb->ctx) {
915 		root = ERR_PTR(-ENOMEM);
916 		goto out;
917 	}
918 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
919 	if (rc) {
920 		root = ERR_PTR(rc);
921 		goto out;
922 	}
923 
924 	rc = cifs_setup_cifs_sb(cifs_sb);
925 	if (rc) {
926 		root = ERR_PTR(rc);
927 		goto out;
928 	}
929 
	/* establishes the session/tree connection to the server */
930 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
931 	if (rc) {
932 		if (!(flags & SB_SILENT))
933 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
934 				 rc);
935 		root = ERR_PTR(rc);
936 		goto out;
937 	}
938 
939 	mnt_data.ctx = cifs_sb->ctx;
940 	mnt_data.cifs_sb = cifs_sb;
941 	mnt_data.flags = flags;
942 
943 	/* BB should we make this contingent on mount parm? */
944 	flags |= SB_NODIRATIME | SB_NOATIME;
945 
946 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
947 	if (IS_ERR(sb)) {
948 		cifs_umount(cifs_sb);
949 		return ERR_CAST(sb);
950 	}
951 
952 	if (sb->s_root) {
953 		cifs_dbg(FYI, "Use existing superblock\n");
954 		cifs_umount(cifs_sb);
		/* NULL marks that the new cifs_sb was discarded in favor of
		 * the existing superblock's */
955 		cifs_sb = NULL;
956 	} else {
957 		rc = cifs_read_super(sb);
958 		if (rc) {
959 			root = ERR_PTR(rc);
960 			goto out_super;
961 		}
962 
963 		sb->s_flags |= SB_ACTIVE;
964 	}
965 
966 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
967 	if (IS_ERR(root))
968 		goto out_super;
969 
970 	if (cifs_sb)
971 		cifs_sb->root = dget(root);
972 
973 	cifs_dbg(FYI, "dentry root is: %p\n", root);
974 	return root;
975 
976 out_super:
977 	deactivate_locked_super(sb);
978 	return root;
979 out:
	/* error path before sget(): free everything built so far */
980 	kfree(cifs_sb->prepath);
981 	smb3_cleanup_fs_context(cifs_sb->ctx);
982 	kfree(cifs_sb);
983 	return root;
984 }
985 
986 
987 static ssize_t
988 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
989 {
990 	ssize_t rc;
991 	struct inode *inode = file_inode(iocb->ki_filp);
992 
993 	if (iocb->ki_flags & IOCB_DIRECT)
994 		return cifs_user_readv(iocb, iter);
995 
996 	rc = cifs_revalidate_mapping(inode);
997 	if (rc)
998 		return rc;
999 
1000 	return generic_file_read_iter(iocb, iter);
1001 }
1002 
/*
 * Write for "loose" cached I/O.  O_DIRECT writes go straight to the server;
 * cached writes go through the generic path and are then pushed out unless
 * we hold a write-caching oplock.
 */
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		/*
		 * A direct write makes any read-cached pages stale: zap the
		 * mapping and drop the oplock so reads go back to the server.
		 */
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	/* Register as a writer; a non-zero return means bail out (paired
	 * with cifs_put_writer() below). */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	/* With a write-caching oplock the dirty data may stay local */
	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	/* No write oplock: start writeback now; failure is only logged and
	 * does not override the byte count already written to the cache. */
	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}
1040 
/*
 * llseek for cifs files.  SEEK_SET/SEEK_CUR are purely local; anything that
 * depends on the file size (SEEK_END/SEEK_DATA/SEEK_HOLE) first revalidates
 * the cached attributes against the server.  If the dialect provides its own
 * llseek op it is used, otherwise fall back to generic_file_llseek().
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* Prefer a dialect-specific llseek implementation when one exists */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1085 
1086 static int
1087 cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
1088 {
1089 	/*
1090 	 * Note that this is called by vfs setlease with i_lock held to
1091 	 * protect *lease from going away.
1092 	 */
1093 	struct inode *inode = file_inode(file);
1094 	struct cifsFileInfo *cfile = file->private_data;
1095 
1096 	if (!(S_ISREG(inode->i_mode)))
1097 		return -EINVAL;
1098 
1099 	/* Check if file is oplocked if this is request for new lease */
1100 	if (arg == F_UNLCK ||
1101 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1102 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1103 		return generic_setlease(file, arg, lease, priv);
1104 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1105 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1106 		/*
1107 		 * If the server claims to support oplock on this file, then we
1108 		 * still need to check oplock even if the local_lease mount
1109 		 * option is set, but there are servers which do not support
1110 		 * oplock for which this mount option may be useful if the user
1111 		 * knows that the file won't be changed on the server by anyone
1112 		 * else.
1113 		 */
1114 		return generic_setlease(file, arg, lease, priv);
1115 	else
1116 		return -EAGAIN;
1117 }
1118 
/* The "cifs" filesystem type; mount-context parsing is shared with smb3 */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1128 
/* The "smb3" filesystem type: same implementation as cifs_fs_type,
 * registered under a second name */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1139 
/* Inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1158 
/* Inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1168 
1169 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1170 			    struct delayed_call *done)
1171 {
1172 	char *target_path;
1173 
1174 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1175 	if (!target_path)
1176 		return ERR_PTR(-ENOMEM);
1177 
1178 	spin_lock(&inode->i_lock);
1179 	if (likely(CIFS_I(inode)->symlink_target)) {
1180 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1181 	} else {
1182 		kfree(target_path);
1183 		target_path = ERR_PTR(-EOPNOTSUPP);
1184 	}
1185 	spin_unlock(&inode->i_lock);
1186 
1187 	if (!IS_ERR(target_path))
1188 		set_delayed_call(done, kfree_link, target_path);
1189 
1190 	return target_path;
1191 }
1192 
/* Inode operations for symbolic links */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1199 
1200 /*
1201  * Advance the EOF marker to after the source range.
1202  */
1203 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1204 				struct cifs_tcon *src_tcon,
1205 				unsigned int xid, loff_t src_end)
1206 {
1207 	struct cifsFileInfo *writeable_srcfile;
1208 	int rc = -EINVAL;
1209 
1210 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1211 	if (writeable_srcfile) {
1212 		if (src_tcon->ses->server->ops->set_file_size)
1213 			rc = src_tcon->ses->server->ops->set_file_size(
1214 				xid, src_tcon, writeable_srcfile,
1215 				src_inode->i_size, true /* no need to set sparse */);
1216 		else
1217 			rc = -ENOSYS;
1218 		cifsFileInfo_put(writeable_srcfile);
1219 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1220 	}
1221 
1222 	if (rc < 0)
1223 		goto set_failed;
1224 
1225 	netfs_resize_file(&src_cifsi->netfs, src_end);
1226 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1227 	return 0;
1228 
1229 set_failed:
1230 	return filemap_write_and_wait(src_inode->i_mapping);
1231 }
1232 
1233 /*
1234  * Flush out either the folio that overlaps the beginning of a range in which
1235  * pos resides or the folio that overlaps the end of a range unless that folio
1236  * is entirely within the range we're going to invalidate.  We extend the flush
1237  * bounds to encompass the folio.
1238  */
1239 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1240 			    bool first)
1241 {
1242 	struct folio *folio;
1243 	unsigned long long fpos, fend;
1244 	pgoff_t index = pos / PAGE_SIZE;
1245 	size_t size;
1246 	int rc = 0;
1247 
1248 	folio = filemap_get_folio(inode->i_mapping, index);
1249 	if (IS_ERR(folio))
1250 		return 0;
1251 
1252 	size = folio_size(folio);
1253 	fpos = folio_pos(folio);
1254 	fend = fpos + size - 1;
1255 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1256 	*_fend   = max_t(unsigned long long, *_fend, fend);
1257 	if ((first && pos == fpos) || (!first && pos == fend))
1258 		goto out;
1259 
1260 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1261 out:
1262 	folio_put(folio);
1263 	return rc;
1264 }
1265 
1266 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1267 		struct file *dst_file, loff_t destoff, loff_t len,
1268 		unsigned int remap_flags)
1269 {
1270 	struct inode *src_inode = file_inode(src_file);
1271 	struct inode *target_inode = file_inode(dst_file);
1272 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1273 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1274 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1275 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1276 	struct cifs_tcon *target_tcon, *src_tcon;
1277 	unsigned long long destend, fstart, fend, new_size;
1278 	unsigned int xid;
1279 	int rc;
1280 
1281 	if (remap_flags & REMAP_FILE_DEDUP)
1282 		return -EOPNOTSUPP;
1283 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1284 		return -EINVAL;
1285 
1286 	cifs_dbg(FYI, "clone range\n");
1287 
1288 	xid = get_xid();
1289 
1290 	if (!smb_file_src || !smb_file_target) {
1291 		rc = -EBADF;
1292 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1293 		goto out;
1294 	}
1295 
1296 	src_tcon = tlink_tcon(smb_file_src->tlink);
1297 	target_tcon = tlink_tcon(smb_file_target->tlink);
1298 
1299 	/*
1300 	 * Note: cifs case is easier than btrfs since server responsible for
1301 	 * checks for proper open modes and file type and if it wants
1302 	 * server could even support copy of range where source = target
1303 	 */
1304 	lock_two_nondirectories(target_inode, src_inode);
1305 
1306 	if (len == 0)
1307 		len = src_inode->i_size - off;
1308 
1309 	cifs_dbg(FYI, "clone range\n");
1310 
1311 	/* Flush the source buffer */
1312 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1313 					  off + len - 1);
1314 	if (rc)
1315 		goto unlock;
1316 
1317 	/* The server-side copy will fail if the source crosses the EOF marker.
1318 	 * Advance the EOF marker after the flush above to the end of the range
1319 	 * if it's short of that.
1320 	 */
1321 	if (src_cifsi->netfs.remote_i_size < off + len) {
1322 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1323 		if (rc < 0)
1324 			goto unlock;
1325 	}
1326 
1327 	new_size = destoff + len;
1328 	destend = destoff + len - 1;
1329 
1330 	/* Flush the folios at either end of the destination range to prevent
1331 	 * accidental loss of dirty data outside of the range.
1332 	 */
1333 	fstart = destoff;
1334 	fend = destend;
1335 
1336 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1337 	if (rc)
1338 		goto unlock;
1339 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1340 	if (rc)
1341 		goto unlock;
1342 
1343 	/* Discard all the folios that overlap the destination region. */
1344 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1345 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1346 
1347 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1348 			   i_size_read(target_inode), 0);
1349 
1350 	rc = -EOPNOTSUPP;
1351 	if (target_tcon->ses->server->ops->duplicate_extents) {
1352 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1353 			smb_file_src, smb_file_target, off, len, destoff);
1354 		if (rc == 0 && new_size > i_size_read(target_inode)) {
1355 			truncate_setsize(target_inode, new_size);
1356 			netfs_resize_file(&target_cifsi->netfs, new_size);
1357 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1358 					      new_size);
1359 		}
1360 	}
1361 
1362 	/* force revalidate of size and timestamps of target file now
1363 	   that target is updated on the server */
1364 	CIFS_I(target_inode)->time = 0;
1365 unlock:
1366 	/* although unlocking in the reverse order from locking is not
1367 	   strictly necessary here it is a little cleaner to be consistent */
1368 	unlock_two_nondirectories(src_inode, target_inode);
1369 out:
1370 	free_xid(xid);
1371 	return rc < 0 ? rc : len;
1372 }
1373 
/*
 * cifs_file_copychunk_range - server-side copy of a byte range
 * @xid: transaction id for this operation
 * @src_file, @off: source file and offset
 * @dst_file, @destoff: target file and offset
 * @len: number of bytes to copy
 * @flags: currently unused here
 *
 * Both files must be open on the same SMB session.  The copy itself is
 * performed by the server (->copychunk_range), so the source is flushed
 * (and its server EOF advanced if needed) first, and the overlapped
 * destination pagecache is discarded afterwards-to-be-refetched.  Returns
 * the number of bytes copied or a negative error.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	unsigned long long destend, fstart, fend;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* A server-side copy only works within a single session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->server_eof < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* A copy past the old EOF grows the target file */
		if (rc > 0 && destoff + rc > i_size_read(target_inode))
			truncate_setsize(target_inode, destoff + rc);
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1478 
1479 /*
1480  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1481  * is a dummy operation.
1482  */
1483 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1484 {
1485 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1486 		 file, datasync);
1487 
1488 	return 0;
1489 }
1490 
1491 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1492 				struct file *dst_file, loff_t destoff,
1493 				size_t len, unsigned int flags)
1494 {
1495 	unsigned int xid = get_xid();
1496 	ssize_t rc;
1497 	struct cifsFileInfo *cfile = dst_file->private_data;
1498 
1499 	if (cfile->swapfile) {
1500 		rc = -EOPNOTSUPP;
1501 		free_xid(xid);
1502 		return rc;
1503 	}
1504 
1505 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1506 					len, flags);
1507 	free_xid(xid);
1508 
1509 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1510 		rc = generic_copy_file_range(src_file, off, dst_file,
1511 					     destoff, len, flags);
1512 	return rc;
1513 }
1514 
/* Regular files, cached ("loose") I/O, with byte-range lock support
 * (.lock/.flock) */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1534 
/* As cifs_file_ops but using the strict-cache read/write/fsync/mmap paths */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1554 
/* Uncached (direct) read/write paths; note splice_read uses
 * copy_splice_read instead of the pagecache-based filemap_splice_read */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1574 
/* As cifs_file_ops but without .lock/.flock, for mounts with byte-range
 * locks disabled ("nobrl") */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1592 
/* Strict-cache variant without byte-range lock support ("nobrl") */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1610 
/* Direct-I/O variant without byte-range lock support ("nobrl") */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1628 
/* File operations for directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1639 
/*
 * Slab constructor for cifsInodeInfo objects: runs once when a slab object
 * is first created, not on every allocation from the cache.
 */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1648 
1649 static int __init
1650 cifs_init_inodecache(void)
1651 {
1652 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1653 					      sizeof(struct cifsInodeInfo),
1654 					      0, (SLAB_RECLAIM_ACCOUNT|
1655 						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1656 					      cifs_init_once);
1657 	if (cifs_inode_cachep == NULL)
1658 		return -ENOMEM;
1659 
1660 	return 0;
1661 }
1662 
/* Tear down the cifs inode slab cache (module unload / init failure). */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1673 
/*
 * Create the slab caches and mempools for large (full-frame) and small SMB
 * request buffers, clamping the module parameters CIFSMaxBufSize,
 * cifs_min_rcv and cifs_min_small to sane ranges first.  Returns 0 or
 * -ENOMEM (with everything already-created torn down again on failure).
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	/* cifs_min_rcv = minimum number of large buffers kept in reserve */
	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	/* cifs_min_small = minimum number of small buffers kept in reserve */
	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1754 
/* Destroy the request-buffer mempools and then their backing slab caches
 * (pools must go first since they hold objects from the caches). */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1763 
1764 static int init_mids(void)
1765 {
1766 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1767 					    sizeof(struct mid_q_entry), 0,
1768 					    SLAB_HWCACHE_ALIGN, NULL);
1769 	if (cifs_mid_cachep == NULL)
1770 		return -ENOMEM;
1771 
1772 	/* 3 is a reasonable minimum number of simultaneous operations */
1773 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1774 	if (cifs_mid_poolp == NULL) {
1775 		kmem_cache_destroy(cifs_mid_cachep);
1776 		return -ENOMEM;
1777 	}
1778 
1779 	return 0;
1780 }
1781 
/* Destroy the mid mempool and then its backing slab cache. */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1787 
1788 static int __init
1789 init_cifs(void)
1790 {
1791 	int rc = 0;
1792 	cifs_proc_init();
1793 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1794 /*
1795  *  Initialize Global counters
1796  */
1797 	atomic_set(&sesInfoAllocCount, 0);
1798 	atomic_set(&tconInfoAllocCount, 0);
1799 	atomic_set(&tcpSesNextId, 0);
1800 	atomic_set(&tcpSesAllocCount, 0);
1801 	atomic_set(&tcpSesReconnectCount, 0);
1802 	atomic_set(&tconInfoReconnectCount, 0);
1803 
1804 	atomic_set(&buf_alloc_count, 0);
1805 	atomic_set(&small_buf_alloc_count, 0);
1806 #ifdef CONFIG_CIFS_STATS2
1807 	atomic_set(&total_buf_alloc_count, 0);
1808 	atomic_set(&total_small_buf_alloc_count, 0);
1809 	if (slow_rsp_threshold < 1)
1810 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1811 	else if (slow_rsp_threshold > 32767)
1812 		cifs_dbg(VFS,
1813 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1814 #endif /* CONFIG_CIFS_STATS2 */
1815 
1816 	atomic_set(&mid_count, 0);
1817 	GlobalCurrentXid = 0;
1818 	GlobalTotalActiveXid = 0;
1819 	GlobalMaxActiveXid = 0;
1820 	spin_lock_init(&cifs_tcp_ses_lock);
1821 	spin_lock_init(&GlobalMid_Lock);
1822 
1823 	cifs_lock_secret = get_random_u32();
1824 
1825 	if (cifs_max_pending < 2) {
1826 		cifs_max_pending = 2;
1827 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1828 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1829 		cifs_max_pending = CIFS_MAX_REQ;
1830 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1831 			 CIFS_MAX_REQ);
1832 	}
1833 
1834 	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1835 	if (dir_cache_timeout > 65000) {
1836 		dir_cache_timeout = 65000;
1837 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1838 	}
1839 
1840 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1841 	if (!cifsiod_wq) {
1842 		rc = -ENOMEM;
1843 		goto out_clean_proc;
1844 	}
1845 
1846 	/*
1847 	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1848 	 * so that we don't launch too many worker threads but
1849 	 * Documentation/core-api/workqueue.rst recommends setting it to 0
1850 	 */
1851 
1852 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1853 	decrypt_wq = alloc_workqueue("smb3decryptd",
1854 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1855 	if (!decrypt_wq) {
1856 		rc = -ENOMEM;
1857 		goto out_destroy_cifsiod_wq;
1858 	}
1859 
1860 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1861 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1862 	if (!fileinfo_put_wq) {
1863 		rc = -ENOMEM;
1864 		goto out_destroy_decrypt_wq;
1865 	}
1866 
1867 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1868 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1869 	if (!cifsoplockd_wq) {
1870 		rc = -ENOMEM;
1871 		goto out_destroy_fileinfo_put_wq;
1872 	}
1873 
1874 	deferredclose_wq = alloc_workqueue("deferredclose",
1875 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1876 	if (!deferredclose_wq) {
1877 		rc = -ENOMEM;
1878 		goto out_destroy_cifsoplockd_wq;
1879 	}
1880 
1881 	serverclose_wq = alloc_workqueue("serverclose",
1882 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1883 	if (!serverclose_wq) {
1884 		rc = -ENOMEM;
1885 		goto out_destroy_serverclose_wq;
1886 	}
1887 
1888 	rc = cifs_init_inodecache();
1889 	if (rc)
1890 		goto out_destroy_deferredclose_wq;
1891 
1892 	rc = init_mids();
1893 	if (rc)
1894 		goto out_destroy_inodecache;
1895 
1896 	rc = cifs_init_request_bufs();
1897 	if (rc)
1898 		goto out_destroy_mids;
1899 
1900 #ifdef CONFIG_CIFS_DFS_UPCALL
1901 	rc = dfs_cache_init();
1902 	if (rc)
1903 		goto out_destroy_request_bufs;
1904 #endif /* CONFIG_CIFS_DFS_UPCALL */
1905 #ifdef CONFIG_CIFS_UPCALL
1906 	rc = init_cifs_spnego();
1907 	if (rc)
1908 		goto out_destroy_dfs_cache;
1909 #endif /* CONFIG_CIFS_UPCALL */
1910 #ifdef CONFIG_CIFS_SWN_UPCALL
1911 	rc = cifs_genl_init();
1912 	if (rc)
1913 		goto out_register_key_type;
1914 #endif /* CONFIG_CIFS_SWN_UPCALL */
1915 
1916 	rc = init_cifs_idmap();
1917 	if (rc)
1918 		goto out_cifs_swn_init;
1919 
1920 	rc = register_filesystem(&cifs_fs_type);
1921 	if (rc)
1922 		goto out_init_cifs_idmap;
1923 
1924 	rc = register_filesystem(&smb3_fs_type);
1925 	if (rc) {
1926 		unregister_filesystem(&cifs_fs_type);
1927 		goto out_init_cifs_idmap;
1928 	}
1929 
1930 	return 0;
1931 
1932 out_init_cifs_idmap:
1933 	exit_cifs_idmap();
1934 out_cifs_swn_init:
1935 #ifdef CONFIG_CIFS_SWN_UPCALL
1936 	cifs_genl_exit();
1937 out_register_key_type:
1938 #endif
1939 #ifdef CONFIG_CIFS_UPCALL
1940 	exit_cifs_spnego();
1941 out_destroy_dfs_cache:
1942 #endif
1943 #ifdef CONFIG_CIFS_DFS_UPCALL
1944 	dfs_cache_destroy();
1945 out_destroy_request_bufs:
1946 #endif
1947 	cifs_destroy_request_bufs();
1948 out_destroy_mids:
1949 	destroy_mids();
1950 out_destroy_inodecache:
1951 	cifs_destroy_inodecache();
1952 out_destroy_deferredclose_wq:
1953 	destroy_workqueue(deferredclose_wq);
1954 out_destroy_cifsoplockd_wq:
1955 	destroy_workqueue(cifsoplockd_wq);
1956 out_destroy_fileinfo_put_wq:
1957 	destroy_workqueue(fileinfo_put_wq);
1958 out_destroy_decrypt_wq:
1959 	destroy_workqueue(decrypt_wq);
1960 out_destroy_cifsiod_wq:
1961 	destroy_workqueue(cifsiod_wq);
1962 out_destroy_serverclose_wq:
1963 	destroy_workqueue(serverclose_wq);
1964 out_clean_proc:
1965 	cifs_proc_clean();
1966 	return rc;
1967 }
1968 
/*
 * Module unload: unregister both filesystem types, then free all global
 * state (upcalls, caches, workqueues, procfs) set up by init_cifs().
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
1997 
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies: hint to userspace module loading that these crypto
 * and nls modules may be needed at runtime (signing, encryption, charset
 * conversion), without creating hard symbol dependencies.
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2017