xref: /openbmc/linux/fs/smb/client/cifsfs.c (revision 2c733bb7)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/uuid.h>
29 #include <linux/xattr.h>
30 #include <uapi/linux/magic.h>
31 #include <net/ipv6.h>
32 #include "cifsfs.h"
33 #include "cifspdu.h"
34 #define DECLARE_GLOBALS_HERE
35 #include "cifsglob.h"
36 #include "cifsproto.h"
37 #include "cifs_debug.h"
38 #include "cifs_fs_sb.h"
39 #include <linux/mm.h>
40 #include <linux/key-type.h>
41 #include "cifs_spnego.h"
42 #include "fscache.h"
43 #ifdef CONFIG_CIFS_DFS_UPCALL
44 #include "dfs_cache.h"
45 #endif
46 #ifdef CONFIG_CIFS_SWN_UPCALL
47 #include "netlink.h"
48 #endif
49 #include "fs_context.h"
50 #include "cached_dir.h"
51 
52 /*
53  * DOS dates from 1980/1/1 through 2107/12/31
54  * Protocol specifications indicate the range should be to 119, which
55  * limits maximum year to 2099. But this range has not been checked.
56  */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
/* DOS time stores seconds in 2-second units, so max field value is 29 */
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
60 
/* Module-wide debug and feature flags (several tunable via module params below) */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 *  Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
/* List of all TCP sessions, protected by cifs_tcp_ses_lock */
struct list_head	cifs_tcp_ses_list;
spinlock_t		cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				   "before logging that a response is delayed. "
				   "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* mempools defined later in this file (see the static definitions below) */
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

/* workqueues shared by the whole client */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
struct workqueue_struct	*serverclose_wq;
__u32 cifs_lock_secret;
164 
165 /*
166  * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
168  * already held, e.g. in open-type syscalls context. Otherwise it can race with
169  * atomic_dec_and_test in deactivate_locked_super.
170  */
171 void
172 cifs_sb_active(struct super_block *sb)
173 {
174 	struct cifs_sb_info *server = CIFS_SB(sb);
175 
176 	if (atomic_inc_return(&server->active) == 1)
177 		atomic_inc(&sb->s_active);
178 }
179 
180 void
181 cifs_sb_deactive(struct super_block *sb)
182 {
183 	struct cifs_sb_info *server = CIFS_SB(sb);
184 
185 	if (atomic_dec_and_test(&server->active))
186 		deactivate_super(sb);
187 }
188 
/*
 * Fill in a freshly-created superblock for a cifs mount: mount flags,
 * timestamp granularity/range, bdi readahead tuning, blocksize, and the
 * root inode and dentry.  Returns 0 on success or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a snapshot (previous version) are forced read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* case-insensitive shares need the matching dentry comparison ops */
	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
289 
/*
 * Tear down a cifs superblock: drop cached-directory dentries and the
 * root dentry first, then let the VFS kill the sb, then release the
 * cifs-side mount state.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	/* cifs_umount() runs after the generic teardown has completed */
	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
309 
/*
 * statfs(2) handler: fill in name length, fsid and (via the dialect's
 * queryfs op, if any) the block/space counters for this mount.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* we use part of the volume create time for more randomness, see man statfs */
	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	/* older dialects may not implement queryfs; counters stay zero then */
	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}
341 
342 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
343 {
344 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
345 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
346 	struct TCP_Server_Info *server = tcon->ses->server;
347 
348 	if (server->ops->fallocate)
349 		return server->ops->fallocate(file, tcon, mode, off, len);
350 
351 	return -EOPNOTSUPP;
352 }
353 
354 static int cifs_permission(struct mnt_idmap *idmap,
355 			   struct inode *inode, int mask)
356 {
357 	struct cifs_sb_info *cifs_sb;
358 
359 	cifs_sb = CIFS_SB(inode->i_sb);
360 
361 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
362 		if ((mask & MAY_EXEC) && !execute_ok(inode))
363 			return -EACCES;
364 		else
365 			return 0;
366 	} else /* file mode might have been restricted at mount time
367 		on the client (above and beyond ACL on servers) for
368 		servers which do not support setting and viewing mode bits,
369 		so allowing client to check permissions is useful */
370 		return generic_permission(&nop_mnt_idmap, inode, mask);
371 }
372 
/* slab caches and mempools backing inode, request and mid allocations */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
380 
/*
 * Allocate and initialize a cifsInodeInfo for a new VFS inode.
 * Returns the embedded netfs inode, or NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;	/* attributes not yet cached from server */
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* each inode gets its own lease key for directory/file leases */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
419 
/*
 * Free the cifsInodeInfo backing an inode.
 * NOTE(review): symlink_target is only freed for S_ISLNK inodes; this
 * assumes it is never set on non-symlink inodes - confirm against the
 * code paths that populate symlink_target.
 */
static void
cifs_free_inode(struct inode *inode)
{
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(cinode->symlink_target);
	kmem_cache_free(cifs_inode_cachep, cinode);
}
429 
/*
 * Evict an inode: drop its page cache, then release the fscache cookie
 * (unpinning it first if it was pinning writeback) before clearing.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_FSCACHE_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
439 
/*
 * Emit ",addr=..." (and ",rdma" when applicable) for /proc/mounts,
 * formatting the server's destination address per its family.
 */
static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		/* link-local addresses need the scope id appended */
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}
463 
/*
 * Emit the ",sec=..." mount option (plus trailing "i" when signing is on,
 * and ",cruid=" for Kerberos) for /proc/mounts.
 */
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		/* anonymous session: no user name means sec=none */
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	/* "i" suffix (e.g. "krb5i") indicates packet signing is enabled */
	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}
498 
/*
 * Emit the ",cache=..." mount option.  The if/else order defines the
 * precedence when several cache-mode flags are set; do not reorder.
 */
static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
		seq_puts(s, "singleclient"); /* assume only one client access */
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
		seq_puts(s, "ro"); /* read only caching assumed */
	else
		seq_puts(s, "loose");
}
515 
516 /*
517  * cifs_show_devname() is used so we show the mount device name with correct
518  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
519  */
520 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
521 {
522 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
523 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
524 
525 	if (devname == NULL)
526 		seq_puts(m, "none");
527 	else {
528 		convert_delimiter(devname, '/');
529 		/* escape all spaces in share names */
530 		seq_escape(m, devname, " \t");
531 		kfree(devname);
532 	}
533 	return 0;
534 }
535 
536 /*
537  * cifs_show_options() is for displaying mount options in /proc/mounts.
538  * Not all settable options are displayed but most of the important
539  * ones are.
540  */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* show srcaddr only if one was explicitly bound (family set) */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
729 
/*
 * Called on umount -f: close deferred files and wake any tasks blocked
 * on this tcon's server queues so a forced unmount can make progress.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	/* lock order: cifs_tcp_ses_lock before tc_lock */
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
772 
773 static int cifs_freeze(struct super_block *sb)
774 {
775 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
776 	struct cifs_tcon *tcon;
777 
778 	if (cifs_sb == NULL)
779 		return 0;
780 
781 	tcon = cifs_sb_master_tcon(cifs_sb);
782 
783 	cifs_close_all_deferred_files(tcon);
784 	return 0;
785 }
786 
#ifdef CONFIG_CIFS_STATS2
/* show_stats super op: placeholder, statistics output not yet implemented */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
794 
/* write_inode super op: only needs to unpin the fscache cookie for writeback */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
	return 0;
}
800 
801 static int cifs_drop_inode(struct inode *inode)
802 {
803 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
804 
805 	/* no serverino => unconditional eviction */
806 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
807 		generic_drop_inode(inode);
808 }
809 
/* super_operations for cifs mounts, wired up in cifs_read_super() */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
830 
831 /*
832  * Get root dentry from superblock according to prefix path mount option.
833  * Return dentry with refcount + 1 on success and NULL otherwise.
834  */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with a prefix path the server root IS the mount root */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/*
	 * Walk full_path one component at a time, looking up each child
	 * and dropping the parent's reference as we descend.
	 */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;	/* end of path: dentry is the answer */
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/* [p, s) is the current component */
		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
885 
886 static int cifs_set_super(struct super_block *sb, void *data)
887 {
888 	struct cifs_mnt_data *mnt_data = data;
889 	sb->s_fs_info = mnt_data->cifs_sb;
890 	return set_anon_super(sb, NULL);
891 }
892 
/*
 * Common mount entry point for the cifs/smb3 filesystem types.  Builds a
 * cifs_sb from the fs context, performs the network mount, then finds or
 * creates a matching superblock via sget() and returns the root dentry.
 *
 * Ownership notes: before sget() succeeds, cifs_sb (and its ctx) belong
 * to us and are freed on the "out:" path; once an sb owns them they are
 * released through deactivate_locked_super()/cifs_kill_sb() instead.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	/* take a private copy of the fs context for this superblock */
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		/* sget matched an existing sb; drop our duplicate mount */
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* cifs_sb is NULL when we reused an existing sb; use caller's ctx */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
986 
987 
988 static ssize_t
989 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
990 {
991 	ssize_t rc;
992 	struct inode *inode = file_inode(iocb->ki_filp);
993 
994 	if (iocb->ki_flags & IOCB_DIRECT)
995 		return cifs_user_readv(iocb, iter);
996 
997 	rc = cifs_revalidate_mapping(inode);
998 	if (rc)
999 		return rc;
1000 
1001 	return generic_file_read_iter(iocb, iter);
1002 }
1003 
/*
 * Write method for mounts without strict cache semantics.  O_DIRECT writes
 * go straight to the server; cached writes go through the page cache and
 * are written back immediately unless a write-caching oplock/lease is held.
 */
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			/*
			 * The direct write bypassed the page cache, so any
			 * cached read data is now stale: zap the mapping and
			 * drop the oplock.
			 */
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	/* Register as a writer; a nonzero return is propagated to the caller. */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	/* With a write-caching oplock/lease the data may stay local. */
	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	/* No write cache permission: kick off writeback right away. */
	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	/* Always pair with cifs_get_writer() above. */
	cifs_put_writer(cinode);
	return written;
}
1041 
/*
 * llseek for cifs files.  SEEK_END/SEEK_DATA/SEEK_HOLE depend on the file
 * length, which may be stale, so attributes are revalidated first (and dirty
 * pages flushed when no read cache is held).  SEEK_SET/SEEK_CUR need no
 * server round trip.  A protocol-specific llseek op is used when available.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		/* Let the protocol handle the seek if it implements one. */
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1086 
/*
 * Handle fcntl(F_SETLEASE) on a cifs file.  A local lease is only granted
 * when the matching read/write cache state (oplock/lease) is already held
 * from the server, or when the local_lease mount option overrides that.
 * Only regular files may carry leases.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1119 
/* Filesystem type registered for "mount -t cifs". */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1129 
/* Filesystem type registered for "mount -t smb3" — same ops as "cifs". */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1140 
/* Inode operations for directories. */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1159 
/* Inode operations for regular files. */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1169 
1170 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1171 			    struct delayed_call *done)
1172 {
1173 	char *target_path;
1174 
1175 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1176 	if (!target_path)
1177 		return ERR_PTR(-ENOMEM);
1178 
1179 	spin_lock(&inode->i_lock);
1180 	if (likely(CIFS_I(inode)->symlink_target)) {
1181 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1182 	} else {
1183 		kfree(target_path);
1184 		target_path = ERR_PTR(-EOPNOTSUPP);
1185 	}
1186 	spin_unlock(&inode->i_lock);
1187 
1188 	if (!IS_ERR(target_path))
1189 		set_delayed_call(done, kfree_link, target_path);
1190 
1191 	return target_path;
1192 }
1193 
/* Inode operations for symbolic links. */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1200 
1201 /*
1202  * Advance the EOF marker to after the source range.
1203  */
1204 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1205 				struct cifs_tcon *src_tcon,
1206 				unsigned int xid, loff_t src_end)
1207 {
1208 	struct cifsFileInfo *writeable_srcfile;
1209 	int rc = -EINVAL;
1210 
1211 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1212 	if (writeable_srcfile) {
1213 		if (src_tcon->ses->server->ops->set_file_size)
1214 			rc = src_tcon->ses->server->ops->set_file_size(
1215 				xid, src_tcon, writeable_srcfile,
1216 				src_inode->i_size, true /* no need to set sparse */);
1217 		else
1218 			rc = -ENOSYS;
1219 		cifsFileInfo_put(writeable_srcfile);
1220 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1221 	}
1222 
1223 	if (rc < 0)
1224 		goto set_failed;
1225 
1226 	netfs_resize_file(&src_cifsi->netfs, src_end);
1227 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1228 	return 0;
1229 
1230 set_failed:
1231 	return filemap_write_and_wait(src_inode->i_mapping);
1232 }
1233 
1234 /*
1235  * Flush out either the folio that overlaps the beginning of a range in which
1236  * pos resides or the folio that overlaps the end of a range unless that folio
1237  * is entirely within the range we're going to invalidate.  We extend the flush
1238  * bounds to encompass the folio.
1239  */
1240 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1241 			    bool first)
1242 {
1243 	struct folio *folio;
1244 	unsigned long long fpos, fend;
1245 	pgoff_t index = pos / PAGE_SIZE;
1246 	size_t size;
1247 	int rc = 0;
1248 
1249 	folio = filemap_get_folio(inode->i_mapping, index);
1250 	if (IS_ERR(folio))
1251 		return 0;
1252 
1253 	size = folio_size(folio);
1254 	fpos = folio_pos(folio);
1255 	fend = fpos + size - 1;
1256 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1257 	*_fend   = max_t(unsigned long long, *_fend, fend);
1258 	if ((first && pos == fpos) || (!first && pos == fend))
1259 		goto out;
1260 
1261 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1262 out:
1263 	folio_put(folio);
1264 	return rc;
1265 }
1266 
/*
 * ->remap_file_range() (FICLONE/FICLONERANGE): server-side clone of a byte
 * range from src_file to dst_file via the protocol's duplicate_extents op.
 * Dedup is not supported.  Returns len on success, negative errno otherwise.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 requests a clone up to the source's EOF */
	if (len == 0)
		len = src_inode->i_size - off;

	/* NOTE(review): duplicate of the debug message logged above */
	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Extend the target if the clone grew it past the current EOF */
		if (rc == 0 && new_size > i_size_read(target_inode)) {
			truncate_setsize(target_inode, new_size);
			netfs_resize_file(&target_cifsi->netfs, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1374 
/*
 * Server-side copy of len bytes from src_file at off to dst_file at destoff
 * using the protocol's copychunk_range op.  Both files must be on the same
 * session (-EXDEV otherwise).  Returns bytes copied or a negative errno.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	unsigned long long destend, fstart, fend;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* copychunk only works within one SMB session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->server_eof < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Grow the target if the copy extended it past the old EOF */
		if (rc > 0 && destoff + rc > i_size_read(target_inode))
			truncate_setsize(target_inode, destoff + rc);
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1479 
1480 /*
1481  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1482  * is a dummy operation.
1483  */
1484 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1485 {
1486 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1487 		 file, datasync);
1488 
1489 	return 0;
1490 }
1491 
1492 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1493 				struct file *dst_file, loff_t destoff,
1494 				size_t len, unsigned int flags)
1495 {
1496 	unsigned int xid = get_xid();
1497 	ssize_t rc;
1498 	struct cifsFileInfo *cfile = dst_file->private_data;
1499 
1500 	if (cfile->swapfile) {
1501 		rc = -EOPNOTSUPP;
1502 		free_xid(xid);
1503 		return rc;
1504 	}
1505 
1506 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1507 					len, flags);
1508 	free_xid(xid);
1509 
1510 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1511 		rc = generic_copy_file_range(src_file, off, dst_file,
1512 					     destoff, len, flags);
1513 	return rc;
1514 }
1515 
/* File ops: loose caching, byte-range lock support. */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1535 
/* File ops: strict cache semantics, byte-range lock support. */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1555 
/* File ops: direct (uncached) I/O, byte-range lock support. */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1575 
/* File ops: loose caching, no byte-range locks (nobrl mount option). */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1593 
/* File ops: strict cache semantics, no byte-range locks. */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1611 
/* File ops: direct (uncached) I/O, no byte-range locks. */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1629 
/* File ops for directories. */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1640 
/*
 * Slab constructor for cifsInodeInfo objects; runs once per object when its
 * slab page is populated, not on every allocation.
 */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1649 
1650 static int __init
1651 cifs_init_inodecache(void)
1652 {
1653 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1654 					      sizeof(struct cifsInodeInfo),
1655 					      0, (SLAB_RECLAIM_ACCOUNT|
1656 						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1657 					      cifs_init_once);
1658 	if (cifs_inode_cachep == NULL)
1659 		return -ENOMEM;
1660 
1661 	return 0;
1662 }
1663 
/* Tear down the cifsInodeInfo slab cache (module unload / init failure). */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1674 
/*
 * Create the slab caches and mempools used for SMB request/response
 * buffers: a large-buffer pool (cifs_request) and a small-buffer pool
 * (cifs_small_rq).  On any failure everything created so far is torn
 * down before returning -ENOMEM.
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	/* Clamp the module parameter into [8192, 127K], 512-byte aligned. */
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	/* usercopy whitelist covers the whole buffer (headers + payload) */
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	/* Clamp the minimum number of preallocated receive buffers. */
	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	/* Clamp the minimum number of preallocated small buffers. */
	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1755 
/* Destroy each mempool before the slab cache that backs it. */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1764 
1765 static int init_mids(void)
1766 {
1767 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1768 					    sizeof(struct mid_q_entry), 0,
1769 					    SLAB_HWCACHE_ALIGN, NULL);
1770 	if (cifs_mid_cachep == NULL)
1771 		return -ENOMEM;
1772 
1773 	/* 3 is a reasonable minimum number of simultaneous operations */
1774 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1775 	if (cifs_mid_poolp == NULL) {
1776 		kmem_cache_destroy(cifs_mid_cachep);
1777 		return -ENOMEM;
1778 	}
1779 
1780 	return 0;
1781 }
1782 
/* Destroy the mid mempool before the slab cache that backs it. */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1788 
1789 static int __init
1790 init_cifs(void)
1791 {
1792 	int rc = 0;
1793 	cifs_proc_init();
1794 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1795 /*
1796  *  Initialize Global counters
1797  */
1798 	atomic_set(&sesInfoAllocCount, 0);
1799 	atomic_set(&tconInfoAllocCount, 0);
1800 	atomic_set(&tcpSesNextId, 0);
1801 	atomic_set(&tcpSesAllocCount, 0);
1802 	atomic_set(&tcpSesReconnectCount, 0);
1803 	atomic_set(&tconInfoReconnectCount, 0);
1804 
1805 	atomic_set(&buf_alloc_count, 0);
1806 	atomic_set(&small_buf_alloc_count, 0);
1807 #ifdef CONFIG_CIFS_STATS2
1808 	atomic_set(&total_buf_alloc_count, 0);
1809 	atomic_set(&total_small_buf_alloc_count, 0);
1810 	if (slow_rsp_threshold < 1)
1811 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1812 	else if (slow_rsp_threshold > 32767)
1813 		cifs_dbg(VFS,
1814 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1815 #endif /* CONFIG_CIFS_STATS2 */
1816 
1817 	atomic_set(&mid_count, 0);
1818 	GlobalCurrentXid = 0;
1819 	GlobalTotalActiveXid = 0;
1820 	GlobalMaxActiveXid = 0;
1821 	spin_lock_init(&cifs_tcp_ses_lock);
1822 	spin_lock_init(&GlobalMid_Lock);
1823 
1824 	cifs_lock_secret = get_random_u32();
1825 
1826 	if (cifs_max_pending < 2) {
1827 		cifs_max_pending = 2;
1828 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1829 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1830 		cifs_max_pending = CIFS_MAX_REQ;
1831 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1832 			 CIFS_MAX_REQ);
1833 	}
1834 
1835 	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1836 	if (dir_cache_timeout > 65000) {
1837 		dir_cache_timeout = 65000;
1838 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1839 	}
1840 
1841 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1842 	if (!cifsiod_wq) {
1843 		rc = -ENOMEM;
1844 		goto out_clean_proc;
1845 	}
1846 
1847 	/*
1848 	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1849 	 * so that we don't launch too many worker threads but
1850 	 * Documentation/core-api/workqueue.rst recommends setting it to 0
1851 	 */
1852 
1853 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1854 	decrypt_wq = alloc_workqueue("smb3decryptd",
1855 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1856 	if (!decrypt_wq) {
1857 		rc = -ENOMEM;
1858 		goto out_destroy_cifsiod_wq;
1859 	}
1860 
1861 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1862 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1863 	if (!fileinfo_put_wq) {
1864 		rc = -ENOMEM;
1865 		goto out_destroy_decrypt_wq;
1866 	}
1867 
1868 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1869 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1870 	if (!cifsoplockd_wq) {
1871 		rc = -ENOMEM;
1872 		goto out_destroy_fileinfo_put_wq;
1873 	}
1874 
1875 	deferredclose_wq = alloc_workqueue("deferredclose",
1876 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1877 	if (!deferredclose_wq) {
1878 		rc = -ENOMEM;
1879 		goto out_destroy_cifsoplockd_wq;
1880 	}
1881 
1882 	serverclose_wq = alloc_workqueue("serverclose",
1883 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1884 	if (!serverclose_wq) {
1885 		rc = -ENOMEM;
1886 		goto out_destroy_serverclose_wq;
1887 	}
1888 
1889 	rc = cifs_init_inodecache();
1890 	if (rc)
1891 		goto out_destroy_deferredclose_wq;
1892 
1893 	rc = init_mids();
1894 	if (rc)
1895 		goto out_destroy_inodecache;
1896 
1897 	rc = cifs_init_request_bufs();
1898 	if (rc)
1899 		goto out_destroy_mids;
1900 
1901 #ifdef CONFIG_CIFS_DFS_UPCALL
1902 	rc = dfs_cache_init();
1903 	if (rc)
1904 		goto out_destroy_request_bufs;
1905 #endif /* CONFIG_CIFS_DFS_UPCALL */
1906 #ifdef CONFIG_CIFS_UPCALL
1907 	rc = init_cifs_spnego();
1908 	if (rc)
1909 		goto out_destroy_dfs_cache;
1910 #endif /* CONFIG_CIFS_UPCALL */
1911 #ifdef CONFIG_CIFS_SWN_UPCALL
1912 	rc = cifs_genl_init();
1913 	if (rc)
1914 		goto out_register_key_type;
1915 #endif /* CONFIG_CIFS_SWN_UPCALL */
1916 
1917 	rc = init_cifs_idmap();
1918 	if (rc)
1919 		goto out_cifs_swn_init;
1920 
1921 	rc = register_filesystem(&cifs_fs_type);
1922 	if (rc)
1923 		goto out_init_cifs_idmap;
1924 
1925 	rc = register_filesystem(&smb3_fs_type);
1926 	if (rc) {
1927 		unregister_filesystem(&cifs_fs_type);
1928 		goto out_init_cifs_idmap;
1929 	}
1930 
1931 	return 0;
1932 
1933 out_init_cifs_idmap:
1934 	exit_cifs_idmap();
1935 out_cifs_swn_init:
1936 #ifdef CONFIG_CIFS_SWN_UPCALL
1937 	cifs_genl_exit();
1938 out_register_key_type:
1939 #endif
1940 #ifdef CONFIG_CIFS_UPCALL
1941 	exit_cifs_spnego();
1942 out_destroy_dfs_cache:
1943 #endif
1944 #ifdef CONFIG_CIFS_DFS_UPCALL
1945 	dfs_cache_destroy();
1946 out_destroy_request_bufs:
1947 #endif
1948 	cifs_destroy_request_bufs();
1949 out_destroy_mids:
1950 	destroy_mids();
1951 out_destroy_inodecache:
1952 	cifs_destroy_inodecache();
1953 out_destroy_deferredclose_wq:
1954 	destroy_workqueue(deferredclose_wq);
1955 out_destroy_cifsoplockd_wq:
1956 	destroy_workqueue(cifsoplockd_wq);
1957 out_destroy_fileinfo_put_wq:
1958 	destroy_workqueue(fileinfo_put_wq);
1959 out_destroy_decrypt_wq:
1960 	destroy_workqueue(decrypt_wq);
1961 out_destroy_cifsiod_wq:
1962 	destroy_workqueue(cifsiod_wq);
1963 out_destroy_serverclose_wq:
1964 	destroy_workqueue(serverclose_wq);
1965 out_clean_proc:
1966 	cifs_proc_clean();
1967 	return rc;
1968 }
1969 
/*
 * Module unload: unregister both filesystem types, then tear down upcalls,
 * caches and workqueues set up by init_cifs().
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
1998 
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies: crypto and nls helper modules this module may need at
 * runtime; the list is a hint for module/initramfs tooling, not a hard
 * load-time requirement.
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2018