xref: /openbmc/linux/fs/smb/client/cifsfs.c (revision 47297322)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/uuid.h>
29 #include <linux/xattr.h>
30 #include <uapi/linux/magic.h>
31 #include <net/ipv6.h>
32 #include "cifsfs.h"
33 #include "cifspdu.h"
34 #define DECLARE_GLOBALS_HERE
35 #include "cifsglob.h"
36 #include "cifsproto.h"
37 #include "cifs_debug.h"
38 #include "cifs_fs_sb.h"
39 #include <linux/mm.h>
40 #include <linux/key-type.h>
41 #include "cifs_spnego.h"
42 #include "fscache.h"
43 #ifdef CONFIG_CIFS_DFS_UPCALL
44 #include "dfs_cache.h"
45 #endif
46 #ifdef CONFIG_CIFS_SWN_UPCALL
47 #include "netlink.h"
48 #endif
49 #include "fs_context.h"
50 #include "cached_dir.h"
51 
52 /*
53  * DOS dates from 1980/1/1 through 2107/12/31
54  * Protocol specifications indicate the year range should only extend to 119,
55  * which limits the maximum year to 2099, but this range has not been checked.
56  */
57 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
58 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
59 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
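/*
 * Illustrative sketch (not part of this file): how the 16-bit DOS date and
 * time values bounded by the macros above are packed.  The year is stored
 * relative to 1980 and the seconds field holds two-second units, so the 29
 * in SMB_TIME_MAX corresponds to 58 seconds.
 */
static inline __u16 example_dos_date_pack(unsigned int year, unsigned int month,
					   unsigned int day)
{
	return (__u16)(((year - 1980) << 9) | (month << 5) | day);
}

static inline __u16 example_dos_time_pack(unsigned int hour, unsigned int min,
					   unsigned int sec)
{
	return (__u16)((hour << 11) | (min << 5) | (sec / 2));
}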
60 
61 int cifsFYI = 0;
62 bool traceSMB;
63 bool enable_oplocks = true;
64 bool linuxExtEnabled = true;
65 bool lookupCacheEnabled = true;
66 bool disable_legacy_dialects; /* false by default */
67 bool enable_gcm_256 = true;
68 bool require_gcm_256; /* false by default */
69 bool enable_negotiate_signing; /* false by default */
70 unsigned int global_secflags = CIFSSEC_DEF;
71 /* unsigned int ntlmv2_support = 0; */
72 unsigned int sign_CIFS_PDUs = 1;
73 
74 /*
75  * Global transaction id (XID) information
76  */
77 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
79 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
80 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
81 
82 /*
83  *  Global counters, updated atomically
84  */
85 atomic_t sesInfoAllocCount;
86 atomic_t tconInfoAllocCount;
87 atomic_t tcpSesNextId;
88 atomic_t tcpSesAllocCount;
89 atomic_t tcpSesReconnectCount;
90 atomic_t tconInfoReconnectCount;
91 
92 atomic_t mid_count;
93 atomic_t buf_alloc_count;
94 atomic_t small_buf_alloc_count;
95 #ifdef CONFIG_CIFS_STATS2
96 atomic_t total_buf_alloc_count;
97 atomic_t total_small_buf_alloc_count;
98 #endif /* STATS2 */
99 struct list_head	cifs_tcp_ses_list;
100 spinlock_t		cifs_tcp_ses_lock;
101 static const struct super_operations cifs_super_ops;
102 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
103 module_param(CIFSMaxBufSize, uint, 0444);
104 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
105 				 "for CIFS requests. "
106 				 "Default: 16384 Range: 8192 to 130048");
107 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
108 module_param(cifs_min_rcv, uint, 0444);
109 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
110 				"1 to 64");
111 unsigned int cifs_min_small = 30;
112 module_param(cifs_min_small, uint, 0444);
113 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
114 				 "Range: 2 to 256");
115 unsigned int cifs_max_pending = CIFS_MAX_REQ;
116 module_param(cifs_max_pending, uint, 0444);
117 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
118 				   "CIFS/SMB1 dialect (N/A for SMB3) "
119 				   "Default: 32767 Range: 2 to 32767.");
120 unsigned int dir_cache_timeout = 30;
121 module_param(dir_cache_timeout, uint, 0644);
122 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
123 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
124 #ifdef CONFIG_CIFS_STATS2
125 unsigned int slow_rsp_threshold = 1;
126 module_param(slow_rsp_threshold, uint, 0644);
127 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
128 				   "before logging that a response is delayed. "
129 				   "Default: 1 (if set to 0 disables msg).");
130 #endif /* STATS2 */
131 
132 module_param(enable_oplocks, bool, 0644);
133 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
134 
135 module_param(enable_gcm_256, bool, 0644);
136 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
137 
138 module_param(require_gcm_256, bool, 0644);
139 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
140 
141 module_param(enable_negotiate_signing, bool, 0644);
142 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
143 
144 module_param(disable_legacy_dialects, bool, 0644);
145 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
146 				  "helpful to restrict the ability to "
147 				  "override the default dialects (SMB2.1, "
148 				  "SMB3 and SMB3.02) on mount with old "
149 				  "dialects (CIFS/SMB1 and SMB2) since "
150 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
151 				  " and less secure. Default: n/N/0");
152 
153 struct workqueue_struct	*cifsiod_wq;
154 struct workqueue_struct	*decrypt_wq;
155 struct workqueue_struct	*fileinfo_put_wq;
156 struct workqueue_struct	*cifsoplockd_wq;
157 struct workqueue_struct	*deferredclose_wq;
158 struct workqueue_struct	*serverclose_wq;
159 __u32 cifs_lock_secret;
160 
161 /*
162  * Bumps refcount for cifs super block.
163  * Note that it should only be called if a reference to the VFS super block is
164  * already held, e.g. in open-type syscalls context. Otherwise it can race with
165  * atomic_dec_and_test in deactivate_locked_super.
166  */
167 void
168 cifs_sb_active(struct super_block *sb)
169 {
170 	struct cifs_sb_info *server = CIFS_SB(sb);
171 
172 	if (atomic_inc_return(&server->active) == 1)
173 		atomic_inc(&sb->s_active);
174 }
175 
176 void
177 cifs_sb_deactive(struct super_block *sb)
178 {
179 	struct cifs_sb_info *server = CIFS_SB(sb);
180 
181 	if (atomic_dec_and_test(&server->active))
182 		deactivate_super(sb);
183 }
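/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the two helpers above are meant to be paired, and cifs_sb_active() may
 * only be called while a VFS reference to the super block is already held
 * (e.g. through an open file), per the comment above.
 */
static inline void example_pin_cifs_sb(struct file *file)
{
	struct super_block *sb = file_inode(file)->i_sb;

	cifs_sb_active(sb);	/* bump cifs_sb_info->active, maybe sb->s_active */
	/* ... do work that may outlive the original file reference ... */
	cifs_sb_deactive(sb);	/* drop it; the final put deactivates the sb */
}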
184 
185 static int
186 cifs_read_super(struct super_block *sb)
187 {
188 	struct inode *inode;
189 	struct cifs_sb_info *cifs_sb;
190 	struct cifs_tcon *tcon;
191 	struct timespec64 ts;
192 	int rc = 0;
193 
194 	cifs_sb = CIFS_SB(sb);
195 	tcon = cifs_sb_master_tcon(cifs_sb);
196 
197 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
198 		sb->s_flags |= SB_POSIXACL;
199 
200 	if (tcon->snapshot_time)
201 		sb->s_flags |= SB_RDONLY;
202 
203 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
204 		sb->s_maxbytes = MAX_LFS_FILESIZE;
205 	else
206 		sb->s_maxbytes = MAX_NON_LFS;
207 
208 	/*
209 	 * Some very old servers like DOS and OS/2 used 2 second granularity
210 	 * (while all current servers use 100ns granularity - see MS-DTYP)
211 	 * but 1 second is the maximum allowed granularity for the VFS
212 	 * so for old servers set time granularity to 1 second while for
213 	 * everything else (current servers) set it to 100ns.
214 	 */
215 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
216 	    ((tcon->ses->capabilities &
217 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
218 	    !tcon->unix_ext) {
219 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
220 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
221 		sb->s_time_min = ts.tv_sec;
222 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
223 				    cpu_to_le16(SMB_TIME_MAX), 0);
224 		sb->s_time_max = ts.tv_sec;
225 	} else {
226 		/*
227 		 * Almost every server, including all SMB2+, uses DCE TIME
228 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
229 		 */
230 		sb->s_time_gran = 100;
231 		ts = cifs_NTtimeToUnix(0);
232 		sb->s_time_min = ts.tv_sec;
233 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
234 		sb->s_time_max = ts.tv_sec;
235 	}
236 
237 	sb->s_magic = CIFS_SUPER_MAGIC;
238 	sb->s_op = &cifs_super_ops;
239 	sb->s_xattr = cifs_xattr_handlers;
240 	rc = super_setup_bdi(sb);
241 	if (rc)
242 		goto out_no_root;
243 	/* tune readahead according to rsize if readahead size not set on mount */
244 	if (cifs_sb->ctx->rsize == 0)
245 		cifs_sb->ctx->rsize =
246 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
247 	if (cifs_sb->ctx->rasize)
248 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
249 	else
250 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
251 
252 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
253 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
254 	inode = cifs_root_iget(sb);
255 
256 	if (IS_ERR(inode)) {
257 		rc = PTR_ERR(inode);
258 		goto out_no_root;
259 	}
260 
261 	if (tcon->nocase)
262 		sb->s_d_op = &cifs_ci_dentry_ops;
263 	else
264 		sb->s_d_op = &cifs_dentry_ops;
265 
266 	sb->s_root = d_make_root(inode);
267 	if (!sb->s_root) {
268 		rc = -ENOMEM;
269 		goto out_no_root;
270 	}
271 
272 #ifdef CONFIG_CIFS_NFSD_EXPORT
273 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
274 		cifs_dbg(FYI, "export ops supported\n");
275 		sb->s_export_op = &cifs_export_ops;
276 	}
277 #endif /* CONFIG_CIFS_NFSD_EXPORT */
278 
279 	return 0;
280 
281 out_no_root:
282 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
283 	return rc;
284 }
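/*
 * Illustrative sketch (assumed helper, not part of this file): the DCE/NT
 * timestamps referred to above count 100-nanosecond intervals since
 * 1601-01-01, so a rough host-endian conversion to Unix time (approximately
 * what cifs_NTtimeToUnix() provides after le64_to_cpu()) looks like this.
 * The 1601->1970 offset is 11644473600 seconds.
 */
#define EXAMPLE_NT_EPOCH_DELTA_SECS 11644473600LL

static inline struct timespec64 example_nt_time_to_unix(u64 nt_100ns)
{
	struct timespec64 ts;
	u32 rem;

	ts.tv_sec  = (time64_t)div_u64_rem(nt_100ns, 10000000, &rem) -
		     EXAMPLE_NT_EPOCH_DELTA_SECS;
	ts.tv_nsec = rem * 100;
	return ts;
}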
285 
286 static void cifs_kill_sb(struct super_block *sb)
287 {
288 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
289 
290 	/*
291 	 * We need to release all dentries for the cached directories
292 	 * before we kill the sb.
293 	 */
294 	if (cifs_sb->root) {
295 		close_all_cached_dirs(cifs_sb);
296 
297 		/* finally release root dentry */
298 		dput(cifs_sb->root);
299 		cifs_sb->root = NULL;
300 	}
301 
302 	kill_anon_super(sb);
303 	cifs_umount(cifs_sb);
304 }
305 
306 static int
307 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
308 {
309 	struct super_block *sb = dentry->d_sb;
310 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
311 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
312 	struct TCP_Server_Info *server = tcon->ses->server;
313 	unsigned int xid;
314 	int rc = 0;
315 	const char *full_path;
316 	void *page;
317 
318 	xid = get_xid();
319 	page = alloc_dentry_path();
320 
321 	full_path = build_path_from_dentry(dentry, page);
322 	if (IS_ERR(full_path)) {
323 		rc = PTR_ERR(full_path);
324 		goto statfs_out;
325 	}
326 
327 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
328 		buf->f_namelen =
329 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
330 	else
331 		buf->f_namelen = PATH_MAX;
332 
333 	buf->f_fsid.val[0] = tcon->vol_serial_number;
334 	/* we are using part of the create time for more randomness, see man statfs */
335 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
336 
337 	buf->f_files = 0;	/* undefined */
338 	buf->f_ffree = 0;	/* unlimited */
339 
340 	if (server->ops->queryfs)
341 		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
342 
343 statfs_out:
344 	free_dentry_path(page);
345 	free_xid(xid);
346 	return rc;
347 }
348 
349 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
350 {
351 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
352 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
353 	struct TCP_Server_Info *server = tcon->ses->server;
354 
355 	if (server->ops->fallocate)
356 		return server->ops->fallocate(file, tcon, mode, off, len);
357 
358 	return -EOPNOTSUPP;
359 }
360 
361 static int cifs_permission(struct mnt_idmap *idmap,
362 			   struct inode *inode, int mask)
363 {
364 	struct cifs_sb_info *cifs_sb;
365 
366 	cifs_sb = CIFS_SB(inode->i_sb);
367 
368 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
369 		if ((mask & MAY_EXEC) && !execute_ok(inode))
370 			return -EACCES;
371 		else
372 			return 0;
373 	} else /* file mode might have been restricted at mount time
374 		on the client (above and beyond ACL on servers) for
375 		servers which do not support setting and viewing mode bits,
376 		so allowing client to check permissions is useful */
377 		return generic_permission(&nop_mnt_idmap, inode, mask);
378 }
379 
380 static struct kmem_cache *cifs_inode_cachep;
381 static struct kmem_cache *cifs_req_cachep;
382 static struct kmem_cache *cifs_mid_cachep;
383 static struct kmem_cache *cifs_sm_req_cachep;
384 mempool_t *cifs_sm_req_poolp;
385 mempool_t *cifs_req_poolp;
386 mempool_t *cifs_mid_poolp;
387 
388 static struct inode *
389 cifs_alloc_inode(struct super_block *sb)
390 {
391 	struct cifsInodeInfo *cifs_inode;
392 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
393 	if (!cifs_inode)
394 		return NULL;
395 	cifs_inode->cifsAttrs = 0x20;	/* default */
396 	cifs_inode->time = 0;
397 	/*
398 	 * Until the file is open and we have gotten oplock info back from the
399 	 * server, can not assume caching of file data or metadata.
400 	 */
401 	cifs_set_oplock_level(cifs_inode, 0);
402 	cifs_inode->lease_granted = false;
403 	cifs_inode->flags = 0;
404 	spin_lock_init(&cifs_inode->writers_lock);
405 	cifs_inode->writers = 0;
406 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
407 	cifs_inode->server_eof = 0;
408 	cifs_inode->uniqueid = 0;
409 	cifs_inode->createtime = 0;
410 	cifs_inode->epoch = 0;
411 	spin_lock_init(&cifs_inode->open_file_lock);
412 	generate_random_uuid(cifs_inode->lease_key);
413 	cifs_inode->symlink_target = NULL;
414 
415 	/*
416 	 * Can not set i_flags here - they get immediately overwritten to zero
417 	 * by the VFS.
418 	 */
419 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
420 	INIT_LIST_HEAD(&cifs_inode->openFileList);
421 	INIT_LIST_HEAD(&cifs_inode->llist);
422 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
423 	spin_lock_init(&cifs_inode->deferred_lock);
424 	return &cifs_inode->netfs.inode;
425 }
426 
427 static void
428 cifs_free_inode(struct inode *inode)
429 {
430 	struct cifsInodeInfo *cinode = CIFS_I(inode);
431 
432 	if (S_ISLNK(inode->i_mode))
433 		kfree(cinode->symlink_target);
434 	kmem_cache_free(cifs_inode_cachep, cinode);
435 }
436 
437 static void
438 cifs_evict_inode(struct inode *inode)
439 {
440 	truncate_inode_pages_final(&inode->i_data);
441 	if (inode->i_state & I_PINNING_FSCACHE_WB)
442 		cifs_fscache_unuse_inode_cookie(inode, true);
443 	cifs_fscache_release_inode_cookie(inode);
444 	clear_inode(inode);
445 }
446 
447 static void
448 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
449 {
450 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
451 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
452 
453 	seq_puts(s, ",addr=");
454 
455 	switch (server->dstaddr.ss_family) {
456 	case AF_INET:
457 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
458 		break;
459 	case AF_INET6:
460 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
461 		if (sa6->sin6_scope_id)
462 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
463 		break;
464 	default:
465 		seq_puts(s, "(unknown)");
466 	}
467 	if (server->rdma)
468 		seq_puts(s, ",rdma");
469 }
470 
471 static void
472 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
473 {
474 	if (ses->sectype == Unspecified) {
475 		if (ses->user_name == NULL)
476 			seq_puts(s, ",sec=none");
477 		return;
478 	}
479 
480 	seq_puts(s, ",sec=");
481 
482 	switch (ses->sectype) {
483 	case NTLMv2:
484 		seq_puts(s, "ntlmv2");
485 		break;
486 	case Kerberos:
487 		seq_puts(s, "krb5");
488 		break;
489 	case RawNTLMSSP:
490 		seq_puts(s, "ntlmssp");
491 		break;
492 	default:
493 		/* shouldn't ever happen */
494 		seq_puts(s, "unknown");
495 		break;
496 	}
497 
498 	if (ses->sign)
499 		seq_puts(s, "i");
500 
501 	if (ses->sectype == Kerberos)
502 		seq_printf(s, ",cruid=%u",
503 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
504 }
505 
506 static void
507 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
508 {
509 	seq_puts(s, ",cache=");
510 
511 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
512 		seq_puts(s, "strict");
513 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
514 		seq_puts(s, "none");
515 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
516 		seq_puts(s, "singleclient"); /* assume only one client access */
517 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
518 		seq_puts(s, "ro"); /* read only caching assumed */
519 	else
520 		seq_puts(s, "loose");
521 }
522 
523 /*
524  * cifs_show_devname() is used so we show the mount device name with correct
525  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
526  */
527 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
528 {
529 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
530 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
531 
532 	if (devname == NULL)
533 		seq_puts(m, "none");
534 	else {
535 		convert_delimiter(devname, '/');
536 		/* escape all spaces in share names */
537 		seq_escape(m, devname, " \t");
538 		kfree(devname);
539 	}
540 	return 0;
541 }
542 
543 /*
544  * cifs_show_options() is for displaying mount options in /proc/mounts.
545  * Not all settable options are displayed but most of the important
546  * ones are.
547  */
548 static int
549 cifs_show_options(struct seq_file *s, struct dentry *root)
550 {
551 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
552 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
553 	struct sockaddr *srcaddr;
554 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
555 
556 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
557 	cifs_show_security(s, tcon->ses);
558 	cifs_show_cache_flavor(s, cifs_sb);
559 
560 	if (tcon->no_lease)
561 		seq_puts(s, ",nolease");
562 	if (cifs_sb->ctx->multiuser)
563 		seq_puts(s, ",multiuser");
564 	else if (tcon->ses->user_name)
565 		seq_show_option(s, "username", tcon->ses->user_name);
566 
567 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
568 		seq_show_option(s, "domain", tcon->ses->domainName);
569 
570 	if (srcaddr->sa_family != AF_UNSPEC) {
571 		struct sockaddr_in *saddr4;
572 		struct sockaddr_in6 *saddr6;
573 		saddr4 = (struct sockaddr_in *)srcaddr;
574 		saddr6 = (struct sockaddr_in6 *)srcaddr;
575 		if (srcaddr->sa_family == AF_INET6)
576 			seq_printf(s, ",srcaddr=%pI6c",
577 				   &saddr6->sin6_addr);
578 		else if (srcaddr->sa_family == AF_INET)
579 			seq_printf(s, ",srcaddr=%pI4",
580 				   &saddr4->sin_addr.s_addr);
581 		else
582 			seq_printf(s, ",srcaddr=BAD-AF:%i",
583 				   (int)(srcaddr->sa_family));
584 	}
585 
586 	seq_printf(s, ",uid=%u",
587 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
588 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
589 		seq_puts(s, ",forceuid");
590 	else
591 		seq_puts(s, ",noforceuid");
592 
593 	seq_printf(s, ",gid=%u",
594 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
595 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
596 		seq_puts(s, ",forcegid");
597 	else
598 		seq_puts(s, ",noforcegid");
599 
600 	cifs_show_address(s, tcon->ses->server);
601 
602 	if (!tcon->unix_ext)
603 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
604 					   cifs_sb->ctx->file_mode,
605 					   cifs_sb->ctx->dir_mode);
606 	if (cifs_sb->ctx->iocharset)
607 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
608 	if (tcon->seal)
609 		seq_puts(s, ",seal");
610 	else if (tcon->ses->server->ignore_signature)
611 		seq_puts(s, ",signloosely");
612 	if (tcon->nocase)
613 		seq_puts(s, ",nocase");
614 	if (tcon->nodelete)
615 		seq_puts(s, ",nodelete");
616 	if (cifs_sb->ctx->no_sparse)
617 		seq_puts(s, ",nosparse");
618 	if (tcon->local_lease)
619 		seq_puts(s, ",locallease");
620 	if (tcon->retry)
621 		seq_puts(s, ",hard");
622 	else
623 		seq_puts(s, ",soft");
624 	if (tcon->use_persistent)
625 		seq_puts(s, ",persistenthandles");
626 	else if (tcon->use_resilient)
627 		seq_puts(s, ",resilienthandles");
628 	if (tcon->posix_extensions)
629 		seq_puts(s, ",posix");
630 	else if (tcon->unix_ext)
631 		seq_puts(s, ",unix");
632 	else
633 		seq_puts(s, ",nounix");
634 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
635 		seq_puts(s, ",nodfs");
636 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
637 		seq_puts(s, ",posixpaths");
638 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
639 		seq_puts(s, ",setuids");
640 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
641 		seq_puts(s, ",idsfromsid");
642 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
643 		seq_puts(s, ",serverino");
644 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
645 		seq_puts(s, ",rwpidforward");
646 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
647 		seq_puts(s, ",forcemand");
648 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
649 		seq_puts(s, ",nouser_xattr");
650 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
651 		seq_puts(s, ",mapchars");
652 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
653 		seq_puts(s, ",mapposix");
654 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
655 		seq_puts(s, ",sfu");
656 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
657 		seq_puts(s, ",nobrl");
658 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
659 		seq_puts(s, ",nohandlecache");
660 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
661 		seq_puts(s, ",modefromsid");
662 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
663 		seq_puts(s, ",cifsacl");
664 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
665 		seq_puts(s, ",dynperm");
666 	if (root->d_sb->s_flags & SB_POSIXACL)
667 		seq_puts(s, ",acl");
668 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
669 		seq_puts(s, ",mfsymlinks");
670 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
671 		seq_puts(s, ",fsc");
672 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
673 		seq_puts(s, ",nostrictsync");
674 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
675 		seq_puts(s, ",noperm");
676 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
677 		seq_printf(s, ",backupuid=%u",
678 			   from_kuid_munged(&init_user_ns,
679 					    cifs_sb->ctx->backupuid));
680 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
681 		seq_printf(s, ",backupgid=%u",
682 			   from_kgid_munged(&init_user_ns,
683 					    cifs_sb->ctx->backupgid));
684 	seq_show_option(s, "reparse",
685 			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
686 
687 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
688 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
689 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
690 	if (cifs_sb->ctx->rasize)
691 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
692 	if (tcon->ses->server->min_offload)
693 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
694 	if (tcon->ses->server->retrans)
695 		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
696 	seq_printf(s, ",echo_interval=%lu",
697 			tcon->ses->server->echo_interval / HZ);
698 
699 	/* Only display the following if overridden on mount */
700 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
701 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
702 	if (tcon->ses->server->tcp_nodelay)
703 		seq_puts(s, ",tcpnodelay");
704 	if (tcon->ses->server->noautotune)
705 		seq_puts(s, ",noautotune");
706 	if (tcon->ses->server->noblocksnd)
707 		seq_puts(s, ",noblocksend");
708 	if (tcon->ses->server->nosharesock)
709 		seq_puts(s, ",nosharesock");
710 
711 	if (tcon->snapshot_time)
712 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
713 	if (tcon->handle_timeout)
714 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
715 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
716 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
717 
718 	/*
719 	 * Display file and directory attribute timeout in seconds.
720 	 * If file and directory attribute timeout the same then actimeo
721 	 * was likely specified on mount
722 	 */
723 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
724 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
725 	else {
726 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
727 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
728 	}
729 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
730 
731 	if (tcon->ses->chan_max > 1)
732 		seq_printf(s, ",multichannel,max_channels=%zu",
733 			   tcon->ses->chan_max);
734 
735 	if (tcon->use_witness)
736 		seq_puts(s, ",witness");
737 
738 	return 0;
739 }
740 
741 static void cifs_umount_begin(struct super_block *sb)
742 {
743 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
744 	struct cifs_tcon *tcon;
745 
746 	if (cifs_sb == NULL)
747 		return;
748 
749 	tcon = cifs_sb_master_tcon(cifs_sb);
750 
751 	spin_lock(&cifs_tcp_ses_lock);
752 	spin_lock(&tcon->tc_lock);
753 	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
754 			    netfs_trace_tcon_ref_see_umount);
755 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
756 		/* we have other mounts to same share or we have
757 		   already tried to umount this and woken up
758 		   all waiting network requests, nothing to do */
759 		spin_unlock(&tcon->tc_lock);
760 		spin_unlock(&cifs_tcp_ses_lock);
761 		return;
762 	}
763 	/*
764 	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
765 	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
766 	 */
767 	spin_unlock(&tcon->tc_lock);
768 	spin_unlock(&cifs_tcp_ses_lock);
769 
770 	cifs_close_all_deferred_files(tcon);
771 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
772 	/* cancel_notify_requests(tcon); */
773 	if (tcon->ses && tcon->ses->server) {
774 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
775 		wake_up_all(&tcon->ses->server->request_q);
776 		wake_up_all(&tcon->ses->server->response_q);
777 		msleep(1); /* yield */
778 		/* we have to kick the requests once more */
779 		wake_up_all(&tcon->ses->server->response_q);
780 		msleep(1);
781 	}
782 
783 	return;
784 }
785 
786 static int cifs_freeze(struct super_block *sb)
787 {
788 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
789 	struct cifs_tcon *tcon;
790 
791 	if (cifs_sb == NULL)
792 		return 0;
793 
794 	tcon = cifs_sb_master_tcon(cifs_sb);
795 
796 	cifs_close_all_deferred_files(tcon);
797 	return 0;
798 }
799 
800 #ifdef CONFIG_CIFS_STATS2
801 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
802 {
803 	/* BB FIXME */
804 	return 0;
805 }
806 #endif
807 
808 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
809 {
810 	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
811 	return 0;
812 }
813 
814 static int cifs_drop_inode(struct inode *inode)
815 {
816 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
817 
818 	/* no serverino => unconditional eviction */
819 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
820 		generic_drop_inode(inode);
821 }
822 
823 static const struct super_operations cifs_super_ops = {
824 	.statfs = cifs_statfs,
825 	.alloc_inode = cifs_alloc_inode,
826 	.write_inode	= cifs_write_inode,
827 	.free_inode = cifs_free_inode,
828 	.drop_inode	= cifs_drop_inode,
829 	.evict_inode	= cifs_evict_inode,
830 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
831 	.show_devname   = cifs_show_devname,
832 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
833 	function unless later we add lazy close of inodes or unless the
834 	kernel forgets to call us with the same number of releases (closes)
835 	as opens */
836 	.show_options = cifs_show_options,
837 	.umount_begin   = cifs_umount_begin,
838 	.freeze_fs      = cifs_freeze,
839 #ifdef CONFIG_CIFS_STATS2
840 	.show_stats = cifs_show_stats,
841 #endif
842 };
843 
844 /*
845  * Get root dentry from superblock according to prefix path mount option.
846  * Return dentry with refcount + 1 on success and NULL otherwise.
847  */
848 static struct dentry *
849 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
850 {
851 	struct dentry *dentry;
852 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
853 	char *full_path = NULL;
854 	char *s, *p;
855 	char sep;
856 
857 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
858 		return dget(sb->s_root);
859 
860 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
861 				cifs_sb_master_tcon(cifs_sb), 0);
862 	if (full_path == NULL)
863 		return ERR_PTR(-ENOMEM);
864 
865 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
866 
867 	sep = CIFS_DIR_SEP(cifs_sb);
868 	dentry = dget(sb->s_root);
869 	s = full_path;
870 
871 	do {
872 		struct inode *dir = d_inode(dentry);
873 		struct dentry *child;
874 
875 		if (!S_ISDIR(dir->i_mode)) {
876 			dput(dentry);
877 			dentry = ERR_PTR(-ENOTDIR);
878 			break;
879 		}
880 
881 		/* skip separators */
882 		while (*s == sep)
883 			s++;
884 		if (!*s)
885 			break;
886 		p = s++;
887 		/* next separator */
888 		while (*s && *s != sep)
889 			s++;
890 
891 		child = lookup_positive_unlocked(p, dentry, s - p);
892 		dput(dentry);
893 		dentry = child;
894 	} while (!IS_ERR(dentry));
895 	kfree(full_path);
896 	return dentry;
897 }
898 
899 static int cifs_set_super(struct super_block *sb, void *data)
900 {
901 	struct cifs_mnt_data *mnt_data = data;
902 	sb->s_fs_info = mnt_data->cifs_sb;
903 	return set_anon_super(sb, NULL);
904 }
905 
906 struct dentry *
907 cifs_smb3_do_mount(struct file_system_type *fs_type,
908 	      int flags, struct smb3_fs_context *old_ctx)
909 {
910 	struct cifs_mnt_data mnt_data;
911 	struct cifs_sb_info *cifs_sb;
912 	struct super_block *sb;
913 	struct dentry *root;
914 	int rc;
915 
916 	if (cifsFYI) {
917 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
918 			 old_ctx->source, flags);
919 	} else {
920 		cifs_info("Attempting to mount %s\n", old_ctx->source);
921 	}
922 
923 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
924 	if (!cifs_sb)
925 		return ERR_PTR(-ENOMEM);
926 
927 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
928 	if (!cifs_sb->ctx) {
929 		root = ERR_PTR(-ENOMEM);
930 		goto out;
931 	}
932 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
933 	if (rc) {
934 		root = ERR_PTR(rc);
935 		goto out;
936 	}
937 
938 	rc = cifs_setup_cifs_sb(cifs_sb);
939 	if (rc) {
940 		root = ERR_PTR(rc);
941 		goto out;
942 	}
943 
944 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
945 	if (rc) {
946 		if (!(flags & SB_SILENT))
947 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
948 				 rc);
949 		root = ERR_PTR(rc);
950 		goto out;
951 	}
952 
953 	mnt_data.ctx = cifs_sb->ctx;
954 	mnt_data.cifs_sb = cifs_sb;
955 	mnt_data.flags = flags;
956 
957 	/* BB should we make this contingent on mount parm? */
958 	flags |= SB_NODIRATIME | SB_NOATIME;
959 
960 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
961 	if (IS_ERR(sb)) {
962 		cifs_umount(cifs_sb);
963 		return ERR_CAST(sb);
964 	}
965 
966 	if (sb->s_root) {
967 		cifs_dbg(FYI, "Use existing superblock\n");
968 		cifs_umount(cifs_sb);
969 		cifs_sb = NULL;
970 	} else {
971 		rc = cifs_read_super(sb);
972 		if (rc) {
973 			root = ERR_PTR(rc);
974 			goto out_super;
975 		}
976 
977 		sb->s_flags |= SB_ACTIVE;
978 	}
979 
980 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
981 	if (IS_ERR(root))
982 		goto out_super;
983 
984 	if (cifs_sb)
985 		cifs_sb->root = dget(root);
986 
987 	cifs_dbg(FYI, "dentry root is: %p\n", root);
988 	return root;
989 
990 out_super:
991 	deactivate_locked_super(sb);
992 	return root;
993 out:
994 	kfree(cifs_sb->prepath);
995 	smb3_cleanup_fs_context(cifs_sb->ctx);
996 	kfree(cifs_sb);
997 	return root;
998 }
999 
1000 
1001 static ssize_t
1002 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1003 {
1004 	ssize_t rc;
1005 	struct inode *inode = file_inode(iocb->ki_filp);
1006 
1007 	if (iocb->ki_flags & IOCB_DIRECT)
1008 		return cifs_user_readv(iocb, iter);
1009 
1010 	rc = cifs_revalidate_mapping(inode);
1011 	if (rc)
1012 		return rc;
1013 
1014 	return generic_file_read_iter(iocb, iter);
1015 }
1016 
1017 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1018 {
1019 	struct inode *inode = file_inode(iocb->ki_filp);
1020 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1021 	ssize_t written;
1022 	int rc;
1023 
1024 	if (iocb->ki_filp->f_flags & O_DIRECT) {
1025 		written = cifs_user_writev(iocb, from);
1026 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
1027 			cifs_zap_mapping(inode);
1028 			cifs_dbg(FYI,
1029 				 "Set no oplock for inode=%p after a write operation\n",
1030 				 inode);
1031 			cinode->oplock = 0;
1032 		}
1033 		return written;
1034 	}
1035 
1036 	written = cifs_get_writer(cinode);
1037 	if (written)
1038 		return written;
1039 
1040 	written = generic_file_write_iter(iocb, from);
1041 
1042 	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1043 		goto out;
1044 
1045 	rc = filemap_fdatawrite(inode->i_mapping);
1046 	if (rc)
1047 		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1048 			 rc, inode);
1049 
1050 out:
1051 	cifs_put_writer(cinode);
1052 	return written;
1053 }
1054 
1055 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1056 {
1057 	struct cifsFileInfo *cfile = file->private_data;
1058 	struct cifs_tcon *tcon;
1059 
1060 	/*
1061 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1062 	 * the cached file length
1063 	 */
1064 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1065 		int rc;
1066 		struct inode *inode = file_inode(file);
1067 
1068 		/*
1069 		 * We need to be sure that all dirty pages are written and the
1070 		 * server has the newest file length.
1071 		 */
1072 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1073 		    inode->i_mapping->nrpages != 0) {
1074 			rc = filemap_fdatawait(inode->i_mapping);
1075 			if (rc) {
1076 				mapping_set_error(inode->i_mapping, rc);
1077 				return rc;
1078 			}
1079 		}
1080 		/*
1081 		 * Some applications poll for the file length in this strange
1082 		 * way so we must seek to end on non-oplocked files by
1083 		 * setting the revalidate time to zero.
1084 		 */
1085 		CIFS_I(inode)->time = 0;
1086 
1087 		rc = cifs_revalidate_file_attr(file);
1088 		if (rc < 0)
1089 			return (loff_t)rc;
1090 	}
1091 	if (cfile && cfile->tlink) {
1092 		tcon = tlink_tcon(cfile->tlink);
1093 		if (tcon->ses->server->ops->llseek)
1094 			return tcon->ses->server->ops->llseek(file, tcon,
1095 							      offset, whence);
1096 	}
1097 	return generic_file_llseek(file, offset, whence);
1098 }
1099 
1100 static int
1101 cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
1102 {
1103 	/*
1104 	 * Note that this is called by vfs setlease with i_lock held to
1105 	 * protect *lease from going away.
1106 	 */
1107 	struct inode *inode = file_inode(file);
1108 	struct cifsFileInfo *cfile = file->private_data;
1109 
1110 	if (!(S_ISREG(inode->i_mode)))
1111 		return -EINVAL;
1112 
1113 	/* Check if file is oplocked if this is request for new lease */
1114 	if (arg == F_UNLCK ||
1115 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1116 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1117 		return generic_setlease(file, arg, lease, priv);
1118 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1119 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1120 		/*
1121 		 * If the server claims to support oplock on this file, then we
1122 		 * still need to check oplock even if the local_lease mount
1123 		 * option is set, but there are servers which do not support
1124 		 * oplock for which this mount option may be useful if the user
1125 		 * knows that the file won't be changed on the server by anyone
1126 		 * else.
1127 		 */
1128 		return generic_setlease(file, arg, lease, priv);
1129 	else
1130 		return -EAGAIN;
1131 }
1132 
1133 struct file_system_type cifs_fs_type = {
1134 	.owner = THIS_MODULE,
1135 	.name = "cifs",
1136 	.init_fs_context = smb3_init_fs_context,
1137 	.parameters = smb3_fs_parameters,
1138 	.kill_sb = cifs_kill_sb,
1139 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1140 };
1141 MODULE_ALIAS_FS("cifs");
1142 
1143 struct file_system_type smb3_fs_type = {
1144 	.owner = THIS_MODULE,
1145 	.name = "smb3",
1146 	.init_fs_context = smb3_init_fs_context,
1147 	.parameters = smb3_fs_parameters,
1148 	.kill_sb = cifs_kill_sb,
1149 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1150 };
1151 MODULE_ALIAS_FS("smb3");
1152 MODULE_ALIAS("smb3");
1153 
1154 const struct inode_operations cifs_dir_inode_ops = {
1155 	.create = cifs_create,
1156 	.atomic_open = cifs_atomic_open,
1157 	.lookup = cifs_lookup,
1158 	.getattr = cifs_getattr,
1159 	.unlink = cifs_unlink,
1160 	.link = cifs_hardlink,
1161 	.mkdir = cifs_mkdir,
1162 	.rmdir = cifs_rmdir,
1163 	.rename = cifs_rename2,
1164 	.permission = cifs_permission,
1165 	.setattr = cifs_setattr,
1166 	.symlink = cifs_symlink,
1167 	.mknod   = cifs_mknod,
1168 	.listxattr = cifs_listxattr,
1169 	.get_acl = cifs_get_acl,
1170 	.set_acl = cifs_set_acl,
1171 };
1172 
1173 const struct inode_operations cifs_file_inode_ops = {
1174 	.setattr = cifs_setattr,
1175 	.getattr = cifs_getattr,
1176 	.permission = cifs_permission,
1177 	.listxattr = cifs_listxattr,
1178 	.fiemap = cifs_fiemap,
1179 	.get_acl = cifs_get_acl,
1180 	.set_acl = cifs_set_acl,
1181 };
1182 
1183 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1184 			    struct delayed_call *done)
1185 {
1186 	char *target_path;
1187 
1188 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1189 	if (!target_path)
1190 		return ERR_PTR(-ENOMEM);
1191 
1192 	spin_lock(&inode->i_lock);
1193 	if (likely(CIFS_I(inode)->symlink_target)) {
1194 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1195 	} else {
1196 		kfree(target_path);
1197 		target_path = ERR_PTR(-EOPNOTSUPP);
1198 	}
1199 	spin_unlock(&inode->i_lock);
1200 
1201 	if (!IS_ERR(target_path))
1202 		set_delayed_call(done, kfree_link, target_path);
1203 
1204 	return target_path;
1205 }
1206 
1207 const struct inode_operations cifs_symlink_inode_ops = {
1208 	.get_link = cifs_get_link,
1209 	.setattr = cifs_setattr,
1210 	.permission = cifs_permission,
1211 	.listxattr = cifs_listxattr,
1212 };
1213 
1214 /*
1215  * Advance the EOF marker to after the source range.
1216  */
1217 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1218 				struct cifs_tcon *src_tcon,
1219 				unsigned int xid, loff_t src_end)
1220 {
1221 	struct cifsFileInfo *writeable_srcfile;
1222 	int rc = -EINVAL;
1223 
1224 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1225 	if (writeable_srcfile) {
1226 		if (src_tcon->ses->server->ops->set_file_size)
1227 			rc = src_tcon->ses->server->ops->set_file_size(
1228 				xid, src_tcon, writeable_srcfile,
1229 				src_inode->i_size, true /* no need to set sparse */);
1230 		else
1231 			rc = -ENOSYS;
1232 		cifsFileInfo_put(writeable_srcfile);
1233 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1234 	}
1235 
1236 	if (rc < 0)
1237 		goto set_failed;
1238 
1239 	netfs_resize_file(&src_cifsi->netfs, src_end);
1240 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1241 	return 0;
1242 
1243 set_failed:
1244 	return filemap_write_and_wait(src_inode->i_mapping);
1245 }
1246 
1247 /*
1248  * Flush out either the folio that overlaps the beginning of a range in which
1249  * pos resides or the folio that overlaps the end of a range unless that folio
1250  * is entirely within the range we're going to invalidate.  We extend the flush
1251  * bounds to encompass the folio.
1252  */
1253 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1254 			    bool first)
1255 {
1256 	struct folio *folio;
1257 	unsigned long long fpos, fend;
1258 	pgoff_t index = pos / PAGE_SIZE;
1259 	size_t size;
1260 	int rc = 0;
1261 
1262 	folio = filemap_get_folio(inode->i_mapping, index);
1263 	if (IS_ERR(folio))
1264 		return 0;
1265 
1266 	size = folio_size(folio);
1267 	fpos = folio_pos(folio);
1268 	fend = fpos + size - 1;
1269 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1270 	*_fend   = max_t(unsigned long long, *_fend, fend);
1271 	if ((first && pos == fpos) || (!first && pos == fend))
1272 		goto out;
1273 
1274 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1275 out:
1276 	folio_put(folio);
1277 	return rc;
1278 }
1279 
1280 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1281 		struct file *dst_file, loff_t destoff, loff_t len,
1282 		unsigned int remap_flags)
1283 {
1284 	struct inode *src_inode = file_inode(src_file);
1285 	struct inode *target_inode = file_inode(dst_file);
1286 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1287 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1288 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1289 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1290 	struct cifs_tcon *target_tcon, *src_tcon;
1291 	unsigned long long destend, fstart, fend, new_size;
1292 	unsigned int xid;
1293 	int rc;
1294 
1295 	if (remap_flags & REMAP_FILE_DEDUP)
1296 		return -EOPNOTSUPP;
1297 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1298 		return -EINVAL;
1299 
1300 	cifs_dbg(FYI, "clone range\n");
1301 
1302 	xid = get_xid();
1303 
1304 	if (!smb_file_src || !smb_file_target) {
1305 		rc = -EBADF;
1306 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1307 		goto out;
1308 	}
1309 
1310 	src_tcon = tlink_tcon(smb_file_src->tlink);
1311 	target_tcon = tlink_tcon(smb_file_target->tlink);
1312 
1313 	/*
1314 	 * Note: the cifs case is easier than btrfs since the server is responsible
1315 	 * for checking proper open modes and file type, and if it wants, the server
1316 	 * could even support copying a range where source = target
1317 	 */
1318 	lock_two_nondirectories(target_inode, src_inode);
1319 
1320 	if (len == 0)
1321 		len = src_inode->i_size - off;
1322 
1323 	cifs_dbg(FYI, "clone range\n");
1324 
1325 	/* Flush the source buffer */
1326 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1327 					  off + len - 1);
1328 	if (rc)
1329 		goto unlock;
1330 
1331 	/* The server-side copy will fail if the source crosses the EOF marker.
1332 	 * Advance the EOF marker after the flush above to the end of the range
1333 	 * if it's short of that.
1334 	 */
1335 	if (src_cifsi->netfs.remote_i_size < off + len) {
1336 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1337 		if (rc < 0)
1338 			goto unlock;
1339 	}
1340 
1341 	new_size = destoff + len;
1342 	destend = destoff + len - 1;
1343 
1344 	/* Flush the folios at either end of the destination range to prevent
1345 	 * accidental loss of dirty data outside of the range.
1346 	 */
1347 	fstart = destoff;
1348 	fend = destend;
1349 
1350 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1351 	if (rc)
1352 		goto unlock;
1353 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1354 	if (rc)
1355 		goto unlock;
1356 
1357 	/* Discard all the folios that overlap the destination region. */
1358 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1359 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1360 
1361 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1362 			   i_size_read(target_inode), 0);
1363 
1364 	rc = -EOPNOTSUPP;
1365 	if (target_tcon->ses->server->ops->duplicate_extents) {
1366 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1367 			smb_file_src, smb_file_target, off, len, destoff);
1368 		if (rc == 0 && new_size > i_size_read(target_inode)) {
1369 			truncate_setsize(target_inode, new_size);
1370 			netfs_resize_file(&target_cifsi->netfs, new_size);
1371 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1372 					      new_size);
1373 		}
1374 	}
1375 
1376 	/* force revalidate of size and timestamps of target file now
1377 	   that target is updated on the server */
1378 	CIFS_I(target_inode)->time = 0;
1379 unlock:
1380 	/* although unlocking in the reverse order from locking is not
1381 	   strictly necessary here it is a little cleaner to be consistent */
1382 	unlock_two_nondirectories(src_inode, target_inode);
1383 out:
1384 	free_xid(xid);
1385 	return rc < 0 ? rc : len;
1386 }
1387 
1388 ssize_t cifs_file_copychunk_range(unsigned int xid,
1389 				struct file *src_file, loff_t off,
1390 				struct file *dst_file, loff_t destoff,
1391 				size_t len, unsigned int flags)
1392 {
1393 	struct inode *src_inode = file_inode(src_file);
1394 	struct inode *target_inode = file_inode(dst_file);
1395 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1396 	struct cifsFileInfo *smb_file_src;
1397 	struct cifsFileInfo *smb_file_target;
1398 	struct cifs_tcon *src_tcon;
1399 	struct cifs_tcon *target_tcon;
1400 	unsigned long long destend, fstart, fend;
1401 	ssize_t rc;
1402 
1403 	cifs_dbg(FYI, "copychunk range\n");
1404 
1405 	if (!src_file->private_data || !dst_file->private_data) {
1406 		rc = -EBADF;
1407 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1408 		goto out;
1409 	}
1410 
1411 	rc = -EXDEV;
1412 	smb_file_target = dst_file->private_data;
1413 	smb_file_src = src_file->private_data;
1414 	src_tcon = tlink_tcon(smb_file_src->tlink);
1415 	target_tcon = tlink_tcon(smb_file_target->tlink);
1416 
1417 	if (src_tcon->ses != target_tcon->ses) {
1418 		cifs_dbg(FYI, "source and target of copy not on same server\n");
1419 		goto out;
1420 	}
1421 
1422 	rc = -EOPNOTSUPP;
1423 	if (!target_tcon->ses->server->ops->copychunk_range)
1424 		goto out;
1425 
1426 	/*
1427 	 * Note: the cifs case is easier than btrfs since the server is responsible
1428 	 * for checking proper open modes and file type, and if it wants, the server
1429 	 * could even support copying a range where source = target
1430 	 */
1431 	lock_two_nondirectories(target_inode, src_inode);
1432 
1433 	cifs_dbg(FYI, "about to flush pages\n");
1434 
1435 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1436 					  off + len - 1);
1437 	if (rc)
1438 		goto unlock;
1439 
1440 	/* The server-side copy will fail if the source crosses the EOF marker.
1441 	 * Advance the EOF marker after the flush above to the end of the range
1442 	 * if it's short of that.
1443 	 */
1444 	if (src_cifsi->server_eof < off + len) {
1445 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1446 		if (rc < 0)
1447 			goto unlock;
1448 	}
1449 
1450 	destend = destoff + len - 1;
1451 
1452 	/* Flush the folios at either end of the destination range to prevent
1453 	 * accidental loss of dirty data outside of the range.
1454 	 */
1455 	fstart = destoff;
1456 	fend = destend;
1457 
1458 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1459 	if (rc)
1460 		goto unlock;
1461 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1462 	if (rc)
1463 		goto unlock;
1464 
1465 	/* Discard all the folios that overlap the destination region. */
1466 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1467 
1468 	rc = file_modified(dst_file);
1469 	if (!rc) {
1470 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1471 			smb_file_src, smb_file_target, off, len, destoff);
1472 		if (rc > 0 && destoff + rc > i_size_read(target_inode))
1473 			truncate_setsize(target_inode, destoff + rc);
1474 	}
1475 
1476 	file_accessed(src_file);
1477 
1478 	/* force revalidate of size and timestamps of target file now
1479 	 * that target is updated on the server
1480 	 */
1481 	CIFS_I(target_inode)->time = 0;
1482 
1483 unlock:
1484 	/* although unlocking in the reverse order from locking is not
1485 	 * strictly necessary here it is a little cleaner to be consistent
1486 	 */
1487 	unlock_two_nondirectories(src_inode, target_inode);
1488 
1489 out:
1490 	return rc;
1491 }
1492 
1493 /*
1494  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1495  * is a dummy operation.
1496  */
1497 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1498 {
1499 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1500 		 file, datasync);
1501 
1502 	return 0;
1503 }
1504 
1505 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1506 				struct file *dst_file, loff_t destoff,
1507 				size_t len, unsigned int flags)
1508 {
1509 	unsigned int xid = get_xid();
1510 	ssize_t rc;
1511 	struct cifsFileInfo *cfile = dst_file->private_data;
1512 
1513 	if (cfile->swapfile) {
1514 		rc = -EOPNOTSUPP;
1515 		free_xid(xid);
1516 		return rc;
1517 	}
1518 
1519 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1520 					len, flags);
1521 	free_xid(xid);
1522 
1523 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1524 		rc = generic_copy_file_range(src_file, off, dst_file,
1525 					     destoff, len, flags);
1526 	return rc;
1527 }
1528 
1529 const struct file_operations cifs_file_ops = {
1530 	.read_iter = cifs_loose_read_iter,
1531 	.write_iter = cifs_file_write_iter,
1532 	.open = cifs_open,
1533 	.release = cifs_close,
1534 	.lock = cifs_lock,
1535 	.flock = cifs_flock,
1536 	.fsync = cifs_fsync,
1537 	.flush = cifs_flush,
1538 	.mmap  = cifs_file_mmap,
1539 	.splice_read = filemap_splice_read,
1540 	.splice_write = iter_file_splice_write,
1541 	.llseek = cifs_llseek,
1542 	.unlocked_ioctl	= cifs_ioctl,
1543 	.copy_file_range = cifs_copy_file_range,
1544 	.remap_file_range = cifs_remap_file_range,
1545 	.setlease = cifs_setlease,
1546 	.fallocate = cifs_fallocate,
1547 };
1548 
1549 const struct file_operations cifs_file_strict_ops = {
1550 	.read_iter = cifs_strict_readv,
1551 	.write_iter = cifs_strict_writev,
1552 	.open = cifs_open,
1553 	.release = cifs_close,
1554 	.lock = cifs_lock,
1555 	.flock = cifs_flock,
1556 	.fsync = cifs_strict_fsync,
1557 	.flush = cifs_flush,
1558 	.mmap = cifs_file_strict_mmap,
1559 	.splice_read = filemap_splice_read,
1560 	.splice_write = iter_file_splice_write,
1561 	.llseek = cifs_llseek,
1562 	.unlocked_ioctl	= cifs_ioctl,
1563 	.copy_file_range = cifs_copy_file_range,
1564 	.remap_file_range = cifs_remap_file_range,
1565 	.setlease = cifs_setlease,
1566 	.fallocate = cifs_fallocate,
1567 };
1568 
1569 const struct file_operations cifs_file_direct_ops = {
1570 	.read_iter = cifs_direct_readv,
1571 	.write_iter = cifs_direct_writev,
1572 	.open = cifs_open,
1573 	.release = cifs_close,
1574 	.lock = cifs_lock,
1575 	.flock = cifs_flock,
1576 	.fsync = cifs_fsync,
1577 	.flush = cifs_flush,
1578 	.mmap = cifs_file_mmap,
1579 	.splice_read = copy_splice_read,
1580 	.splice_write = iter_file_splice_write,
1581 	.unlocked_ioctl  = cifs_ioctl,
1582 	.copy_file_range = cifs_copy_file_range,
1583 	.remap_file_range = cifs_remap_file_range,
1584 	.llseek = cifs_llseek,
1585 	.setlease = cifs_setlease,
1586 	.fallocate = cifs_fallocate,
1587 };
1588 
1589 const struct file_operations cifs_file_nobrl_ops = {
1590 	.read_iter = cifs_loose_read_iter,
1591 	.write_iter = cifs_file_write_iter,
1592 	.open = cifs_open,
1593 	.release = cifs_close,
1594 	.fsync = cifs_fsync,
1595 	.flush = cifs_flush,
1596 	.mmap  = cifs_file_mmap,
1597 	.splice_read = filemap_splice_read,
1598 	.splice_write = iter_file_splice_write,
1599 	.llseek = cifs_llseek,
1600 	.unlocked_ioctl	= cifs_ioctl,
1601 	.copy_file_range = cifs_copy_file_range,
1602 	.remap_file_range = cifs_remap_file_range,
1603 	.setlease = cifs_setlease,
1604 	.fallocate = cifs_fallocate,
1605 };
1606 
1607 const struct file_operations cifs_file_strict_nobrl_ops = {
1608 	.read_iter = cifs_strict_readv,
1609 	.write_iter = cifs_strict_writev,
1610 	.open = cifs_open,
1611 	.release = cifs_close,
1612 	.fsync = cifs_strict_fsync,
1613 	.flush = cifs_flush,
1614 	.mmap = cifs_file_strict_mmap,
1615 	.splice_read = filemap_splice_read,
1616 	.splice_write = iter_file_splice_write,
1617 	.llseek = cifs_llseek,
1618 	.unlocked_ioctl	= cifs_ioctl,
1619 	.copy_file_range = cifs_copy_file_range,
1620 	.remap_file_range = cifs_remap_file_range,
1621 	.setlease = cifs_setlease,
1622 	.fallocate = cifs_fallocate,
1623 };
1624 
1625 const struct file_operations cifs_file_direct_nobrl_ops = {
1626 	.read_iter = cifs_direct_readv,
1627 	.write_iter = cifs_direct_writev,
1628 	.open = cifs_open,
1629 	.release = cifs_close,
1630 	.fsync = cifs_fsync,
1631 	.flush = cifs_flush,
1632 	.mmap = cifs_file_mmap,
1633 	.splice_read = copy_splice_read,
1634 	.splice_write = iter_file_splice_write,
1635 	.unlocked_ioctl  = cifs_ioctl,
1636 	.copy_file_range = cifs_copy_file_range,
1637 	.remap_file_range = cifs_remap_file_range,
1638 	.llseek = cifs_llseek,
1639 	.setlease = cifs_setlease,
1640 	.fallocate = cifs_fallocate,
1641 };
1642 
1643 const struct file_operations cifs_dir_ops = {
1644 	.iterate_shared = cifs_readdir,
1645 	.release = cifs_closedir,
1646 	.read    = generic_read_dir,
1647 	.unlocked_ioctl  = cifs_ioctl,
1648 	.copy_file_range = cifs_copy_file_range,
1649 	.remap_file_range = cifs_remap_file_range,
1650 	.llseek = generic_file_llseek,
1651 	.fsync = cifs_dir_fsync,
1652 };
1653 
1654 static void
1655 cifs_init_once(void *inode)
1656 {
1657 	struct cifsInodeInfo *cifsi = inode;
1658 
1659 	inode_init_once(&cifsi->netfs.inode);
1660 	init_rwsem(&cifsi->lock_sem);
1661 }
1662 
1663 static int __init
1664 cifs_init_inodecache(void)
1665 {
1666 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1667 					      sizeof(struct cifsInodeInfo),
1668 					      0, (SLAB_RECLAIM_ACCOUNT|
1669 						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1670 					      cifs_init_once);
1671 	if (cifs_inode_cachep == NULL)
1672 		return -ENOMEM;
1673 
1674 	return 0;
1675 }
1676 
1677 static void
1678 cifs_destroy_inodecache(void)
1679 {
1680 	/*
1681 	 * Make sure all delayed rcu free inodes are flushed before we
1682 	 * destroy cache.
1683 	 */
1684 	rcu_barrier();
1685 	kmem_cache_destroy(cifs_inode_cachep);
1686 }
1687 
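/*
 * Set up the large and small SMB request buffer caches plus the mempools
 * that keep a minimum number of buffers available even when the slab
 * allocator is under memory pressure.
 */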
1688 static int
1689 cifs_init_request_bufs(void)
1690 {
1691 	/*
1692 	 * The SMB2 maximum header size is bigger than the CIFS one, so it is
1693 	 * no problem to allocate a few more bytes for CIFS.
1694 	 */
1695 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1696 
1697 	if (CIFSMaxBufSize < 8192) {
1698 	/* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1699 	   Unicode path name has to fit in any SMB/CIFS path-based frame */
1700 		CIFSMaxBufSize = 8192;
1701 	} else if (CIFSMaxBufSize > 1024*127) {
1702 		CIFSMaxBufSize = 1024 * 127;
1703 	} else {
1704 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1705 	}
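	/*
	 * For example (illustrative): loading the module with
	 *	modprobe cifs CIFSMaxBufSize=130048 cifs_min_rcv=16
	 * reaches this point with out-of-range sizes clamped to the
	 * [8192, 130048] range and in-range sizes rounded down to a
	 * multiple of 512.
	 */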
1706 /*
1707 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1708 		 CIFSMaxBufSize, CIFSMaxBufSize);
1709 */
1710 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1711 					    CIFSMaxBufSize + max_hdr_size, 0,
1712 					    SLAB_HWCACHE_ALIGN, 0,
1713 					    CIFSMaxBufSize + max_hdr_size,
1714 					    NULL);
1715 	if (cifs_req_cachep == NULL)
1716 		return -ENOMEM;
1717 
1718 	if (cifs_min_rcv < 1)
1719 		cifs_min_rcv = 1;
1720 	else if (cifs_min_rcv > 64) {
1721 		cifs_min_rcv = 64;
1722 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1723 	}
1724 
1725 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1726 						  cifs_req_cachep);
1727 
1728 	if (cifs_req_poolp == NULL) {
1729 		kmem_cache_destroy(cifs_req_cachep);
1730 		return -ENOMEM;
1731 	}
1732 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1733 	   almost all handle-based requests (but not for write responses, nor is
1734 	   it sufficient for path-based requests).  A smaller size would have
1735 	   been more efficient (packing multiple slab items onto one 4k page)
1736 	   when debugging is on, but this larger size lets more SMBs use the
1737 	   small buffer allocation and is still much more efficient: one
1738 	   allocation per page off the slab versus a 17K (5 page) allocation
1739 	   of a large cifs buffer, even when page debugging is on */
1740 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1741 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1742 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1743 	if (cifs_sm_req_cachep == NULL) {
1744 		mempool_destroy(cifs_req_poolp);
1745 		kmem_cache_destroy(cifs_req_cachep);
1746 		return -ENOMEM;
1747 	}
1748 
1749 	if (cifs_min_small < 2)
1750 		cifs_min_small = 2;
1751 	else if (cifs_min_small > 256) {
1752 		cifs_min_small = 256;
1753 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1754 	}
1755 
1756 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1757 						     cifs_sm_req_cachep);
1758 
1759 	if (cifs_sm_req_poolp == NULL) {
1760 		mempool_destroy(cifs_req_poolp);
1761 		kmem_cache_destroy(cifs_req_cachep);
1762 		kmem_cache_destroy(cifs_sm_req_cachep);
1763 		return -ENOMEM;
1764 	}
1765 
1766 	return 0;
1767 }
1768 
1769 static void
1770 cifs_destroy_request_bufs(void)
1771 {
1772 	mempool_destroy(cifs_req_poolp);
1773 	kmem_cache_destroy(cifs_req_cachep);
1774 	mempool_destroy(cifs_sm_req_poolp);
1775 	kmem_cache_destroy(cifs_sm_req_cachep);
1776 }
1777 
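/*
 * Create the cache and mempool for mid_q_entry structures, one of which
 * tracks each in-flight request (multiplex id) awaiting a server response.
 */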
1778 static int init_mids(void)
1779 {
1780 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1781 					    sizeof(struct mid_q_entry), 0,
1782 					    SLAB_HWCACHE_ALIGN, NULL);
1783 	if (cifs_mid_cachep == NULL)
1784 		return -ENOMEM;
1785 
1786 	/* 3 is a reasonable minimum number of simultaneous operations */
1787 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1788 	if (cifs_mid_poolp == NULL) {
1789 		kmem_cache_destroy(cifs_mid_cachep);
1790 		return -ENOMEM;
1791 	}
1792 
1793 	return 0;
1794 }
1795 
1796 static void destroy_mids(void)
1797 {
1798 	mempool_destroy(cifs_mid_poolp);
1799 	kmem_cache_destroy(cifs_mid_cachep);
1800 }
1801 
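/*
 * Module initialization: set up /proc entries, global counters, the
 * workqueues, slab caches and upcall infrastructure, then register the
 * cifs_fs_type and smb3_fs_type filesystems.  Failures unwind in reverse
 * order through the labels below.
 */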
1802 static int __init
1803 init_cifs(void)
1804 {
1805 	int rc = 0;
1806 	cifs_proc_init();
1807 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1808 /*
1809  *  Initialize Global counters
1810  */
1811 	atomic_set(&sesInfoAllocCount, 0);
1812 	atomic_set(&tconInfoAllocCount, 0);
1813 	atomic_set(&tcpSesNextId, 0);
1814 	atomic_set(&tcpSesAllocCount, 0);
1815 	atomic_set(&tcpSesReconnectCount, 0);
1816 	atomic_set(&tconInfoReconnectCount, 0);
1817 
1818 	atomic_set(&buf_alloc_count, 0);
1819 	atomic_set(&small_buf_alloc_count, 0);
1820 #ifdef CONFIG_CIFS_STATS2
1821 	atomic_set(&total_buf_alloc_count, 0);
1822 	atomic_set(&total_small_buf_alloc_count, 0);
1823 	if (slow_rsp_threshold < 1)
1824 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1825 	else if (slow_rsp_threshold > 32767)
1826 		cifs_dbg(VFS,
1827 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1828 #endif /* CONFIG_CIFS_STATS2 */
1829 
1830 	atomic_set(&mid_count, 0);
1831 	GlobalCurrentXid = 0;
1832 	GlobalTotalActiveXid = 0;
1833 	GlobalMaxActiveXid = 0;
1834 	spin_lock_init(&cifs_tcp_ses_lock);
1835 	spin_lock_init(&GlobalMid_Lock);
1836 
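	/*
	 * Random per-load cookie, used by the byte-range locking code when
	 * hashing lock owners so raw kernel pointers are not used as keys.
	 */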
1837 	cifs_lock_secret = get_random_u32();
1838 
1839 	if (cifs_max_pending < 2) {
1840 		cifs_max_pending = 2;
1841 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1842 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1843 		cifs_max_pending = CIFS_MAX_REQ;
1844 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1845 			 CIFS_MAX_REQ);
1846 	}
1847 
1848 	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1849 	if (dir_cache_timeout > 65000) {
1850 		dir_cache_timeout = 65000;
1851 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1852 	}
1853 
1854 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1855 	if (!cifsiod_wq) {
1856 		rc = -ENOMEM;
1857 		goto out_clean_proc;
1858 	}
1859 
1860 	/*
1861 	 * Consider, in the future, setting limit != 0, perhaps to
1862 	 * min(num_of_cores - 1, 3), so that we don't launch too many worker
1863 	 * threads; however, Documentation/core-api/workqueue.rst recommends 0.
1864 	 */
1865 
1866 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1867 	decrypt_wq = alloc_workqueue("smb3decryptd",
1868 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1869 	if (!decrypt_wq) {
1870 		rc = -ENOMEM;
1871 		goto out_destroy_cifsiod_wq;
1872 	}
1873 
1874 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1875 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1876 	if (!fileinfo_put_wq) {
1877 		rc = -ENOMEM;
1878 		goto out_destroy_decrypt_wq;
1879 	}
1880 
1881 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1882 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1883 	if (!cifsoplockd_wq) {
1884 		rc = -ENOMEM;
1885 		goto out_destroy_fileinfo_put_wq;
1886 	}
1887 
1888 	deferredclose_wq = alloc_workqueue("deferredclose",
1889 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1890 	if (!deferredclose_wq) {
1891 		rc = -ENOMEM;
1892 		goto out_destroy_cifsoplockd_wq;
1893 	}
1894 
1895 	serverclose_wq = alloc_workqueue("serverclose",
1896 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1897 	if (!serverclose_wq) {
1898 		rc = -ENOMEM;
1899 		goto out_destroy_deferredclose_wq;
1900 	}
1901 
1902 	rc = cifs_init_inodecache();
1903 	if (rc)
1904 		goto out_destroy_serverclose_wq;
1905 
1906 	rc = init_mids();
1907 	if (rc)
1908 		goto out_destroy_inodecache;
1909 
1910 	rc = cifs_init_request_bufs();
1911 	if (rc)
1912 		goto out_destroy_mids;
1913 
1914 #ifdef CONFIG_CIFS_DFS_UPCALL
1915 	rc = dfs_cache_init();
1916 	if (rc)
1917 		goto out_destroy_request_bufs;
1918 #endif /* CONFIG_CIFS_DFS_UPCALL */
1919 #ifdef CONFIG_CIFS_UPCALL
1920 	rc = init_cifs_spnego();
1921 	if (rc)
1922 		goto out_destroy_dfs_cache;
1923 #endif /* CONFIG_CIFS_UPCALL */
1924 #ifdef CONFIG_CIFS_SWN_UPCALL
1925 	rc = cifs_genl_init();
1926 	if (rc)
1927 		goto out_register_key_type;
1928 #endif /* CONFIG_CIFS_SWN_UPCALL */
1929 
1930 	rc = init_cifs_idmap();
1931 	if (rc)
1932 		goto out_cifs_swn_init;
1933 
1934 	rc = register_filesystem(&cifs_fs_type);
1935 	if (rc)
1936 		goto out_init_cifs_idmap;
1937 
1938 	rc = register_filesystem(&smb3_fs_type);
1939 	if (rc) {
1940 		unregister_filesystem(&cifs_fs_type);
1941 		goto out_init_cifs_idmap;
1942 	}
1943 
1944 	return 0;
1945 
1946 out_init_cifs_idmap:
1947 	exit_cifs_idmap();
1948 out_cifs_swn_init:
1949 #ifdef CONFIG_CIFS_SWN_UPCALL
1950 	cifs_genl_exit();
1951 out_register_key_type:
1952 #endif
1953 #ifdef CONFIG_CIFS_UPCALL
1954 	exit_cifs_spnego();
1955 out_destroy_dfs_cache:
1956 #endif
1957 #ifdef CONFIG_CIFS_DFS_UPCALL
1958 	dfs_cache_destroy();
1959 out_destroy_request_bufs:
1960 #endif
1961 	cifs_destroy_request_bufs();
1962 out_destroy_mids:
1963 	destroy_mids();
1964 out_destroy_inodecache:
1965 	cifs_destroy_inodecache();
1966 out_destroy_serverclose_wq:
1967 	destroy_workqueue(serverclose_wq);
1968 out_destroy_deferredclose_wq:
1969 	destroy_workqueue(deferredclose_wq);
1970 out_destroy_cifsoplockd_wq:
1971 	destroy_workqueue(cifsoplockd_wq);
1972 out_destroy_fileinfo_put_wq:
1973 	destroy_workqueue(fileinfo_put_wq);
1974 out_destroy_decrypt_wq:
1975 	destroy_workqueue(decrypt_wq);
1976 out_destroy_cifsiod_wq:
1977 	destroy_workqueue(cifsiod_wq);
1978 out_clean_proc:
1979 	cifs_proc_clean();
1980 	return rc;
1981 }
1982 
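/*
 * Module exit: roughly the reverse of init_cifs() - unregister both
 * filesystem types first so no new mounts can begin, then release the
 * upcall machinery, caches and workqueues, and finally the /proc entries.
 */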
1983 static void __exit
1984 exit_cifs(void)
1985 {
1986 	cifs_dbg(NOISY, "exit_smb3\n");
1987 	unregister_filesystem(&cifs_fs_type);
1988 	unregister_filesystem(&smb3_fs_type);
1989 	cifs_release_automount_timer();
1990 	exit_cifs_idmap();
1991 #ifdef CONFIG_CIFS_SWN_UPCALL
1992 	cifs_genl_exit();
1993 #endif
1994 #ifdef CONFIG_CIFS_UPCALL
1995 	exit_cifs_spnego();
1996 #endif
1997 #ifdef CONFIG_CIFS_DFS_UPCALL
1998 	dfs_cache_destroy();
1999 #endif
2000 	cifs_destroy_request_bufs();
2001 	destroy_mids();
2002 	cifs_destroy_inodecache();
2003 	destroy_workqueue(deferredclose_wq);
2004 	destroy_workqueue(cifsoplockd_wq);
2005 	destroy_workqueue(decrypt_wq);
2006 	destroy_workqueue(fileinfo_put_wq);
2007 	destroy_workqueue(serverclose_wq);
2008 	destroy_workqueue(cifsiod_wq);
2009 	cifs_proc_clean();
2010 }
2011 
2012 MODULE_AUTHOR("Steve French");
2013 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
2014 MODULE_DESCRIPTION
2015 	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2016 	"also older servers complying with the SNIA CIFS Specification)");
2017 MODULE_VERSION(CIFS_VERSION);
2018 MODULE_SOFTDEP("ecb");
2019 MODULE_SOFTDEP("hmac");
2020 MODULE_SOFTDEP("md5");
2021 MODULE_SOFTDEP("nls");
2022 MODULE_SOFTDEP("aes");
2023 MODULE_SOFTDEP("cmac");
2024 MODULE_SOFTDEP("sha256");
2025 MODULE_SOFTDEP("sha512");
2026 MODULE_SOFTDEP("aead2");
2027 MODULE_SOFTDEP("ccm");
2028 MODULE_SOFTDEP("gcm");
2029 module_init(init_cifs)
2030 module_exit(exit_cifs)
2031