1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/uuid.h>
29 #include <linux/xattr.h>
30 #include <uapi/linux/magic.h>
31 #include <net/ipv6.h>
32 #include "cifsfs.h"
33 #include "cifspdu.h"
34 #define DECLARE_GLOBALS_HERE
35 #include "cifsglob.h"
36 #include "cifsproto.h"
37 #include "cifs_debug.h"
38 #include "cifs_fs_sb.h"
39 #include <linux/mm.h>
40 #include <linux/key-type.h>
41 #include "cifs_spnego.h"
42 #include "fscache.h"
43 #ifdef CONFIG_CIFS_DFS_UPCALL
44 #include "dfs_cache.h"
45 #endif
46 #ifdef CONFIG_CIFS_SWN_UPCALL
47 #include "netlink.h"
48 #endif
49 #include "fs_context.h"
50 #include "cached_dir.h"
51
52 /*
53 * DOS dates from 1980/1/1 through 2107/12/31
54 * Protocol specifications indicate the range should be to 119, which
55 * limits maximum year to 2099. But this range has not been checked.
56 */
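/* Bits 15-9: years since 1980, 8-5: month, 4-0: day; DOS times store seconds/2 */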
57 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
58 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
59 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
60
61 int cifsFYI = 0;
62 bool traceSMB;
63 bool enable_oplocks = true;
64 bool linuxExtEnabled = true;
65 bool lookupCacheEnabled = true;
66 bool disable_legacy_dialects; /* false by default */
67 bool enable_gcm_256 = true;
68 bool require_gcm_256; /* false by default */
69 bool enable_negotiate_signing; /* false by default */
70 unsigned int global_secflags = CIFSSEC_DEF;
71 /* unsigned int ntlmv2_support = 0; */
72 unsigned int sign_CIFS_PDUs = 1;
73
74 /*
75 * Global transaction id (XID) information
76 */
77 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
79 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
80 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
81
82 /*
83 * Global counters, updated atomically
84 */
85 atomic_t sesInfoAllocCount;
86 atomic_t tconInfoAllocCount;
87 atomic_t tcpSesNextId;
88 atomic_t tcpSesAllocCount;
89 atomic_t tcpSesReconnectCount;
90 atomic_t tconInfoReconnectCount;
91
92 atomic_t mid_count;
93 atomic_t buf_alloc_count;
94 atomic_t small_buf_alloc_count;
95 #ifdef CONFIG_CIFS_STATS2
96 atomic_t total_buf_alloc_count;
97 atomic_t total_small_buf_alloc_count;
98 #endif /* STATS2 */
99 struct list_head cifs_tcp_ses_list;
100 spinlock_t cifs_tcp_ses_lock;
101 static const struct super_operations cifs_super_ops;
102 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
103 module_param(CIFSMaxBufSize, uint, 0444);
104 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
105 "for CIFS requests. "
106 "Default: 16384 Range: 8192 to 130048");
107 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
108 module_param(cifs_min_rcv, uint, 0444);
109 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
110 "1 to 64");
111 unsigned int cifs_min_small = 30;
112 module_param(cifs_min_small, uint, 0444);
113 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
114 "Range: 2 to 256");
115 unsigned int cifs_max_pending = CIFS_MAX_REQ;
116 module_param(cifs_max_pending, uint, 0444);
117 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
118 "CIFS/SMB1 dialect (N/A for SMB3) "
119 "Default: 32767 Range: 2 to 32767.");
120 unsigned int dir_cache_timeout = 30;
121 module_param(dir_cache_timeout, uint, 0644);
122 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
123 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
124 #ifdef CONFIG_CIFS_STATS2
125 unsigned int slow_rsp_threshold = 1;
126 module_param(slow_rsp_threshold, uint, 0644);
127 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
128 "before logging that a response is delayed. "
129 "Default: 1 (if set to 0 disables msg).");
130 #endif /* STATS2 */
131
132 module_param(enable_oplocks, bool, 0644);
133 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
134
135 module_param(enable_gcm_256, bool, 0644);
136 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
137
138 module_param(require_gcm_256, bool, 0644);
139 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
140
141 module_param(enable_negotiate_signing, bool, 0644);
142 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
143
144 module_param(disable_legacy_dialects, bool, 0644);
145 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
146 "helpful to restrict the ability to "
147 "override the default dialects (SMB2.1, "
148 "SMB3 and SMB3.02) on mount with old "
149 "dialects (CIFS/SMB1 and SMB2) since "
150 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
151 " and less secure. Default: n/N/0");
152
153 struct workqueue_struct *cifsiod_wq;
154 struct workqueue_struct *decrypt_wq;
155 struct workqueue_struct *fileinfo_put_wq;
156 struct workqueue_struct *cifsoplockd_wq;
157 struct workqueue_struct *deferredclose_wq;
158 struct workqueue_struct *serverclose_wq;
159 struct workqueue_struct *cfid_put_wq;
160 __u32 cifs_lock_secret;
161
162 /*
163 * Bumps refcount for cifs super block.
164 * Note that it should only be called if a reference to the VFS super block is
165 * already held, e.g. in the context of open-type syscalls. Otherwise it can race with
166 * atomic_dec_and_test in deactivate_locked_super.
167 */
168 void
169 cifs_sb_active(struct super_block *sb)
170 {
171 struct cifs_sb_info *server = CIFS_SB(sb);
172
173 if (atomic_inc_return(&server->active) == 1)
174 atomic_inc(&sb->s_active);
175 }
176
177 void
178 cifs_sb_deactive(struct super_block *sb)
179 {
180 struct cifs_sb_info *server = CIFS_SB(sb);
181
182 if (atomic_dec_and_test(&server->active))
183 deactivate_super(sb);
184 }
185
186 static int
187 cifs_read_super(struct super_block *sb)
188 {
189 struct inode *inode;
190 struct cifs_sb_info *cifs_sb;
191 struct cifs_tcon *tcon;
192 struct timespec64 ts;
193 int rc = 0;
194
195 cifs_sb = CIFS_SB(sb);
196 tcon = cifs_sb_master_tcon(cifs_sb);
197
198 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
199 sb->s_flags |= SB_POSIXACL;
200
201 if (tcon->snapshot_time)
202 sb->s_flags |= SB_RDONLY;
203
204 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
205 sb->s_maxbytes = MAX_LFS_FILESIZE;
206 else
207 sb->s_maxbytes = MAX_NON_LFS;
208
209 /*
210 * Some very old servers like DOS and OS/2 used 2 second granularity
211 * (while all current servers use 100ns granularity - see MS-DTYP)
212 * but 1 second is the maximum allowed granularity for the VFS
213 * so for old servers set time granularity to 1 second while for
214 * everything else (current servers) set it to 100ns.
215 */
216 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
217 ((tcon->ses->capabilities &
218 tcon->ses->server->vals->cap_nt_find) == 0) &&
219 !tcon->unix_ext) {
220 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
221 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
222 sb->s_time_min = ts.tv_sec;
223 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
224 cpu_to_le16(SMB_TIME_MAX), 0);
225 sb->s_time_max = ts.tv_sec;
226 } else {
227 /*
228 * Almost every server, including all SMB2+, uses DCE TIME
229 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
230 */
231 sb->s_time_gran = 100;
232 ts = cifs_NTtimeToUnix(0);
233 sb->s_time_min = ts.tv_sec;
234 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
235 sb->s_time_max = ts.tv_sec;
236 }
237
238 sb->s_magic = CIFS_SUPER_MAGIC;
239 sb->s_op = &cifs_super_ops;
240 sb->s_xattr = cifs_xattr_handlers;
241 rc = super_setup_bdi(sb);
242 if (rc)
243 goto out_no_root;
244 /* tune readahead according to rsize if readahead size not set on mount */
245 if (cifs_sb->ctx->rsize == 0)
246 cifs_sb->ctx->rsize =
247 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
248 if (cifs_sb->ctx->rasize)
249 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
250 else
251 sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
252
253 sb->s_blocksize = CIFS_MAX_MSGSIZE;
254 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
255 inode = cifs_root_iget(sb);
256
257 if (IS_ERR(inode)) {
258 rc = PTR_ERR(inode);
259 goto out_no_root;
260 }
261
262 if (tcon->nocase)
263 sb->s_d_op = &cifs_ci_dentry_ops;
264 else
265 sb->s_d_op = &cifs_dentry_ops;
266
267 sb->s_root = d_make_root(inode);
268 if (!sb->s_root) {
269 rc = -ENOMEM;
270 goto out_no_root;
271 }
272
273 #ifdef CONFIG_CIFS_NFSD_EXPORT
274 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
275 cifs_dbg(FYI, "export ops supported\n");
276 sb->s_export_op = &cifs_export_ops;
277 }
278 #endif /* CONFIG_CIFS_NFSD_EXPORT */
279
280 return 0;
281
282 out_no_root:
283 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
284 return rc;
285 }
286
287 static void cifs_kill_sb(struct super_block *sb)
288 {
289 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
290
291 /*
292 * We need to release all dentries for the cached directories
293 * before we kill the sb.
294 */
295 if (cifs_sb->root) {
296 close_all_cached_dirs(cifs_sb);
297
298 /* finally release root dentry */
299 dput(cifs_sb->root);
300 cifs_sb->root = NULL;
301 }
302
303 kill_anon_super(sb);
304 cifs_umount(cifs_sb);
305 }
306
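/*
 * statfs(2): derive f_namelen and f_fsid from the tree connection, then let
 * the dialect's queryfs op fill in the block counts.
 */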
307 static int
308 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
309 {
310 struct super_block *sb = dentry->d_sb;
311 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
312 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
313 struct TCP_Server_Info *server = tcon->ses->server;
314 unsigned int xid;
315 int rc = 0;
316 const char *full_path;
317 void *page;
318
319 xid = get_xid();
320 page = alloc_dentry_path();
321
322 full_path = build_path_from_dentry(dentry, page);
323 if (IS_ERR(full_path)) {
324 rc = PTR_ERR(full_path);
325 goto statfs_out;
326 }
327
328 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
329 buf->f_namelen =
330 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
331 else
332 buf->f_namelen = PATH_MAX;
333
334 buf->f_fsid.val[0] = tcon->vol_serial_number;
335 /* are using part of create time for more randomness, see man statfs */
336 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
337
338 buf->f_files = 0; /* undefined */
339 buf->f_ffree = 0; /* unlimited */
340
341 if (server->ops->queryfs)
342 rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
343
344 statfs_out:
345 free_dentry_path(page);
346 free_xid(xid);
347 return rc;
348 }
349
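/* fallocate(2) is only supported when the SMB dialect provides a fallocate op */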
350 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
351 {
352 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
353 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
354 struct TCP_Server_Info *server = tcon->ses->server;
355
356 if (server->ops->fallocate)
357 return server->ops->fallocate(file, tcon, mode, off, len);
358
359 return -EOPNOTSUPP;
360 }
361
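/*
 * With "noperm" trust the server's ACLs and only veto MAY_EXEC when no
 * execute bit is set; otherwise check the cached mode bits via
 * generic_permission().
 */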
362 static int cifs_permission(struct mnt_idmap *idmap,
363 struct inode *inode, int mask)
364 {
365 struct cifs_sb_info *cifs_sb;
366
367 cifs_sb = CIFS_SB(inode->i_sb);
368
369 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
370 if ((mask & MAY_EXEC) && !execute_ok(inode))
371 return -EACCES;
372 else
373 return 0;
374 } else /* file mode might have been restricted at mount time
375 on the client (above and beyond ACL on servers) for
376 servers which do not support setting and viewing mode bits,
377 so allowing client to check permissions is useful */
378 return generic_permission(&nop_mnt_idmap, inode, mask);
379 }
380
381 static struct kmem_cache *cifs_inode_cachep;
382 static struct kmem_cache *cifs_req_cachep;
383 static struct kmem_cache *cifs_mid_cachep;
384 static struct kmem_cache *cifs_sm_req_cachep;
385 mempool_t *cifs_sm_req_poolp;
386 mempool_t *cifs_req_poolp;
387 mempool_t *cifs_mid_poolp;
388
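/* Allocate a cifsInodeInfo; caching is off until the server grants an oplock/lease */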
389 static struct inode *
390 cifs_alloc_inode(struct super_block *sb)
391 {
392 struct cifsInodeInfo *cifs_inode;
393 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
394 if (!cifs_inode)
395 return NULL;
396 cifs_inode->cifsAttrs = 0x20; /* default */
397 cifs_inode->time = 0;
398 /*
399 * Until the file is open and we have gotten oplock info back from the
400 * server, can not assume caching of file data or metadata.
401 */
402 cifs_set_oplock_level(cifs_inode, 0);
403 cifs_inode->lease_granted = false;
404 cifs_inode->flags = 0;
405 spin_lock_init(&cifs_inode->writers_lock);
406 cifs_inode->writers = 0;
407 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
408 cifs_inode->server_eof = 0;
409 cifs_inode->uniqueid = 0;
410 cifs_inode->createtime = 0;
411 cifs_inode->epoch = 0;
412 spin_lock_init(&cifs_inode->open_file_lock);
413 generate_random_uuid(cifs_inode->lease_key);
414 cifs_inode->symlink_target = NULL;
415
416 /*
417 * Can not set i_flags here - they get immediately overwritten to zero
418 * by the VFS.
419 */
420 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
421 INIT_LIST_HEAD(&cifs_inode->openFileList);
422 INIT_LIST_HEAD(&cifs_inode->llist);
423 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
424 spin_lock_init(&cifs_inode->deferred_lock);
425 return &cifs_inode->netfs.inode;
426 }
427
428 static void
429 cifs_free_inode(struct inode *inode)
430 {
431 struct cifsInodeInfo *cinode = CIFS_I(inode);
432
433 if (S_ISLNK(inode->i_mode))
434 kfree(cinode->symlink_target);
435 kmem_cache_free(cifs_inode_cachep, cinode);
436 }
437
438 static void
439 cifs_evict_inode(struct inode *inode)
440 {
441 truncate_inode_pages_final(&inode->i_data);
442 if (inode->i_state & I_PINNING_FSCACHE_WB)
443 cifs_fscache_unuse_inode_cookie(inode, true);
444 cifs_fscache_release_inode_cookie(inode);
445 clear_inode(inode);
446 }
447
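/* Emit ",addr=" (and ",rdma" when applicable) for /proc/mounts */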
448 static void
449 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
450 {
451 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
452 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
453
454 seq_puts(s, ",addr=");
455
456 switch (server->dstaddr.ss_family) {
457 case AF_INET:
458 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
459 break;
460 case AF_INET6:
461 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
462 if (sa6->sin6_scope_id)
463 seq_printf(s, "%%%u", sa6->sin6_scope_id);
464 break;
465 default:
466 seq_puts(s, "(unknown)");
467 }
468 if (server->rdma)
469 seq_puts(s, ",rdma");
470 }
471
472 static void
473 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
474 {
475 if (ses->sectype == Unspecified) {
476 if (ses->user_name == NULL)
477 seq_puts(s, ",sec=none");
478 return;
479 }
480
481 seq_puts(s, ",sec=");
482
483 switch (ses->sectype) {
484 case NTLMv2:
485 seq_puts(s, "ntlmv2");
486 break;
487 case Kerberos:
488 seq_puts(s, "krb5");
489 break;
490 case RawNTLMSSP:
491 seq_puts(s, "ntlmssp");
492 break;
493 default:
494 /* shouldn't ever happen */
495 seq_puts(s, "unknown");
496 break;
497 }
498
499 if (ses->sign)
500 seq_puts(s, "i");
501
502 if (ses->sectype == Kerberos)
503 seq_printf(s, ",cruid=%u",
504 from_kuid_munged(&init_user_ns, ses->cred_uid));
505 }
506
507 static void
508 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
509 {
510 seq_puts(s, ",cache=");
511
512 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
513 seq_puts(s, "strict");
514 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
515 seq_puts(s, "none");
516 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
517 seq_puts(s, "singleclient"); /* assume only one client access */
518 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
519 seq_puts(s, "ro"); /* read only caching assumed */
520 else
521 seq_puts(s, "loose");
522 }
523
524 /*
525 * cifs_show_devname() is used so we show the mount device name with correct
526 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
527 */
528 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
529 {
530 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
531 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
532
533 if (devname == NULL)
534 seq_puts(m, "none");
535 else {
536 convert_delimiter(devname, '/');
537 /* escape all spaces in share names */
538 seq_escape(m, devname, " \t");
539 kfree(devname);
540 }
541 return 0;
542 }
543
544 /*
545 * cifs_show_options() is for displaying mount options in /proc/mounts.
546 * Not all settable options are displayed but most of the important
547 * ones are.
548 */
549 static int
550 cifs_show_options(struct seq_file *s, struct dentry *root)
551 {
552 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
553 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
554 struct sockaddr *srcaddr;
555 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
556
557 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
558 cifs_show_security(s, tcon->ses);
559 cifs_show_cache_flavor(s, cifs_sb);
560
561 if (tcon->no_lease)
562 seq_puts(s, ",nolease");
563 if (cifs_sb->ctx->multiuser)
564 seq_puts(s, ",multiuser");
565 else if (tcon->ses->user_name)
566 seq_show_option(s, "username", tcon->ses->user_name);
567
568 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
569 seq_show_option(s, "domain", tcon->ses->domainName);
570
571 if (srcaddr->sa_family != AF_UNSPEC) {
572 struct sockaddr_in *saddr4;
573 struct sockaddr_in6 *saddr6;
574 saddr4 = (struct sockaddr_in *)srcaddr;
575 saddr6 = (struct sockaddr_in6 *)srcaddr;
576 if (srcaddr->sa_family == AF_INET6)
577 seq_printf(s, ",srcaddr=%pI6c",
578 &saddr6->sin6_addr);
579 else if (srcaddr->sa_family == AF_INET)
580 seq_printf(s, ",srcaddr=%pI4",
581 &saddr4->sin_addr.s_addr);
582 else
583 seq_printf(s, ",srcaddr=BAD-AF:%i",
584 (int)(srcaddr->sa_family));
585 }
586
587 seq_printf(s, ",uid=%u",
588 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
589 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
590 seq_puts(s, ",forceuid");
591 else
592 seq_puts(s, ",noforceuid");
593
594 seq_printf(s, ",gid=%u",
595 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
596 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
597 seq_puts(s, ",forcegid");
598 else
599 seq_puts(s, ",noforcegid");
600
601 cifs_show_address(s, tcon->ses->server);
602
603 if (!tcon->unix_ext)
604 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
605 cifs_sb->ctx->file_mode,
606 cifs_sb->ctx->dir_mode);
607 if (cifs_sb->ctx->iocharset)
608 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
609 if (tcon->seal)
610 seq_puts(s, ",seal");
611 else if (tcon->ses->server->ignore_signature)
612 seq_puts(s, ",signloosely");
613 if (tcon->nocase)
614 seq_puts(s, ",nocase");
615 if (tcon->nodelete)
616 seq_puts(s, ",nodelete");
617 if (cifs_sb->ctx->no_sparse)
618 seq_puts(s, ",nosparse");
619 if (tcon->local_lease)
620 seq_puts(s, ",locallease");
621 if (tcon->retry)
622 seq_puts(s, ",hard");
623 else
624 seq_puts(s, ",soft");
625 if (tcon->use_persistent)
626 seq_puts(s, ",persistenthandles");
627 else if (tcon->use_resilient)
628 seq_puts(s, ",resilienthandles");
629 if (tcon->posix_extensions)
630 seq_puts(s, ",posix");
631 else if (tcon->unix_ext)
632 seq_puts(s, ",unix");
633 else
634 seq_puts(s, ",nounix");
635 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
636 seq_puts(s, ",nodfs");
637 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
638 seq_puts(s, ",posixpaths");
639 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
640 seq_puts(s, ",setuids");
641 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
642 seq_puts(s, ",idsfromsid");
643 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
644 seq_puts(s, ",serverino");
645 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
646 seq_puts(s, ",rwpidforward");
647 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
648 seq_puts(s, ",forcemand");
649 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
650 seq_puts(s, ",nouser_xattr");
651 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
652 seq_puts(s, ",mapchars");
653 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
654 seq_puts(s, ",mapposix");
655 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
656 seq_puts(s, ",sfu");
657 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
658 seq_puts(s, ",nobrl");
659 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
660 seq_puts(s, ",nohandlecache");
661 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
662 seq_puts(s, ",modefromsid");
663 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
664 seq_puts(s, ",cifsacl");
665 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
666 seq_puts(s, ",dynperm");
667 if (root->d_sb->s_flags & SB_POSIXACL)
668 seq_puts(s, ",acl");
669 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
670 seq_puts(s, ",mfsymlinks");
671 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
672 seq_puts(s, ",fsc");
673 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
674 seq_puts(s, ",nostrictsync");
675 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
676 seq_puts(s, ",noperm");
677 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
678 seq_printf(s, ",backupuid=%u",
679 from_kuid_munged(&init_user_ns,
680 cifs_sb->ctx->backupuid));
681 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
682 seq_printf(s, ",backupgid=%u",
683 from_kgid_munged(&init_user_ns,
684 cifs_sb->ctx->backupgid));
685 seq_show_option(s, "reparse",
686 cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
687
688 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
689 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
690 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
691 if (cifs_sb->ctx->rasize)
692 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
693 if (tcon->ses->server->min_offload)
694 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
695 if (tcon->ses->server->retrans)
696 seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
697 seq_printf(s, ",echo_interval=%lu",
698 tcon->ses->server->echo_interval / HZ);
699
700 /* Only display the following if overridden on mount */
701 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
702 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
703 if (tcon->ses->server->tcp_nodelay)
704 seq_puts(s, ",tcpnodelay");
705 if (tcon->ses->server->noautotune)
706 seq_puts(s, ",noautotune");
707 if (tcon->ses->server->noblocksnd)
708 seq_puts(s, ",noblocksend");
709 if (tcon->ses->server->nosharesock)
710 seq_puts(s, ",nosharesock");
711
712 if (tcon->snapshot_time)
713 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
714 if (tcon->handle_timeout)
715 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
716 if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
717 seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
718
719 /*
720 * Display file and directory attribute timeout in seconds.
721 * If the file and directory attribute timeouts are the same then actimeo
722 * was likely specified on mount
723 */
724 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
725 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
726 else {
727 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
728 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
729 }
730 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
731
732 if (tcon->ses->chan_max > 1)
733 seq_printf(s, ",multichannel,max_channels=%zu",
734 tcon->ses->chan_max);
735
736 if (tcon->use_witness)
737 seq_puts(s, ",witness");
738
739 return 0;
740 }
741
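/* umount -f: close deferred files and wake any threads still waiting on the server */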
742 static void cifs_umount_begin(struct super_block *sb)
743 {
744 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
745 struct cifs_tcon *tcon;
746
747 if (cifs_sb == NULL)
748 return;
749
750 tcon = cifs_sb_master_tcon(cifs_sb);
751
752 spin_lock(&cifs_tcp_ses_lock);
753 spin_lock(&tcon->tc_lock);
754 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
755 netfs_trace_tcon_ref_see_umount);
756 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
757 /* we have other mounts to same share or we have
758 already tried to umount this and woken up
759 all waiting network requests, nothing to do */
760 spin_unlock(&tcon->tc_lock);
761 spin_unlock(&cifs_tcp_ses_lock);
762 return;
763 }
764 /*
765 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
766 * fail later (e.g. due to open files). TID_EXITING will be set just before tdis req sent
767 */
768 spin_unlock(&tcon->tc_lock);
769 spin_unlock(&cifs_tcp_ses_lock);
770
771 cifs_close_all_deferred_files(tcon);
772 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
773 /* cancel_notify_requests(tcon); */
774 if (tcon->ses && tcon->ses->server) {
775 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
776 wake_up_all(&tcon->ses->server->request_q);
777 wake_up_all(&tcon->ses->server->response_q);
778 msleep(1); /* yield */
779 /* we have to kick the requests once more */
780 wake_up_all(&tcon->ses->server->response_q);
781 msleep(1);
782 }
783
784 return;
785 }
786
787 static int cifs_freeze(struct super_block *sb)
788 {
789 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
790 struct cifs_tcon *tcon;
791
792 if (cifs_sb == NULL)
793 return 0;
794
795 tcon = cifs_sb_master_tcon(cifs_sb);
796
797 cifs_close_all_deferred_files(tcon);
798 return 0;
799 }
800
801 #ifdef CONFIG_CIFS_STATS2
802 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
803 {
804 /* BB FIXME */
805 return 0;
806 }
807 #endif
808
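/* ->write_inode: nothing to write back here beyond unpinning the fscache cookie */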
809 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
810 {
811 fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
812 return 0;
813 }
814
815 static int cifs_drop_inode(struct inode *inode)
816 {
817 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
818
819 /* no serverino => unconditional eviction */
820 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
821 generic_drop_inode(inode);
822 }
823
824 static const struct super_operations cifs_super_ops = {
825 .statfs = cifs_statfs,
826 .alloc_inode = cifs_alloc_inode,
827 .write_inode = cifs_write_inode,
828 .free_inode = cifs_free_inode,
829 .drop_inode = cifs_drop_inode,
830 .evict_inode = cifs_evict_inode,
831 /* .show_path = cifs_show_path, */ /* Would we ever need show path? */
832 .show_devname = cifs_show_devname,
833 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
834 function unless later we add lazy close of inodes or unless the
835 kernel forgets to call us with the same number of releases (closes)
836 as opens */
837 .show_options = cifs_show_options,
838 .umount_begin = cifs_umount_begin,
839 .freeze_fs = cifs_freeze,
840 #ifdef CONFIG_CIFS_STATS2
841 .show_stats = cifs_show_stats,
842 #endif
843 };
844
845 /*
846 * Get root dentry from superblock according to prefix path mount option.
847 * Return dentry with refcount + 1 on success and NULL otherwise.
848 */
849 static struct dentry *
850 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
851 {
852 struct dentry *dentry;
853 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
854 char *full_path = NULL;
855 char *s, *p;
856 char sep;
857
858 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
859 return dget(sb->s_root);
860
861 full_path = cifs_build_path_to_root(ctx, cifs_sb,
862 cifs_sb_master_tcon(cifs_sb), 0);
863 if (full_path == NULL)
864 return ERR_PTR(-ENOMEM);
865
866 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
867
868 sep = CIFS_DIR_SEP(cifs_sb);
869 dentry = dget(sb->s_root);
870 s = full_path;
871
872 do {
873 struct inode *dir = d_inode(dentry);
874 struct dentry *child;
875
876 if (!S_ISDIR(dir->i_mode)) {
877 dput(dentry);
878 dentry = ERR_PTR(-ENOTDIR);
879 break;
880 }
881
882 /* skip separators */
883 while (*s == sep)
884 s++;
885 if (!*s)
886 break;
887 p = s++;
888 /* next separator */
889 while (*s && *s != sep)
890 s++;
891
892 child = lookup_positive_unlocked(p, dentry, s - p);
893 dput(dentry);
894 dentry = child;
895 } while (!IS_ERR(dentry));
896 kfree(full_path);
897 return dentry;
898 }
899
900 static int cifs_set_super(struct super_block *sb, void *data)
901 {
902 struct cifs_mnt_data *mnt_data = data;
903 sb->s_fs_info = mnt_data->cifs_sb;
904 return set_anon_super(sb, NULL);
905 }
906
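/*
 * Mount entry point shared by the "cifs" and "smb3" filesystem types:
 * duplicate the fs_context, connect to the server, then find or create a
 * matching superblock and return its root dentry.
 */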
907 struct dentry *
908 cifs_smb3_do_mount(struct file_system_type *fs_type,
909 int flags, struct smb3_fs_context *old_ctx)
910 {
911 struct cifs_mnt_data mnt_data;
912 struct cifs_sb_info *cifs_sb;
913 struct super_block *sb;
914 struct dentry *root;
915 int rc;
916
917 if (cifsFYI) {
918 cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
919 old_ctx->source, flags);
920 } else {
921 cifs_info("Attempting to mount %s\n", old_ctx->source);
922 }
923
924 cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
925 if (!cifs_sb)
926 return ERR_PTR(-ENOMEM);
927
928 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
929 if (!cifs_sb->ctx) {
930 root = ERR_PTR(-ENOMEM);
931 goto out;
932 }
933 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
934 if (rc) {
935 root = ERR_PTR(rc);
936 goto out;
937 }
938
939 rc = cifs_setup_cifs_sb(cifs_sb);
940 if (rc) {
941 root = ERR_PTR(rc);
942 goto out;
943 }
944
945 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
946 if (rc) {
947 if (!(flags & SB_SILENT))
948 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
949 rc);
950 root = ERR_PTR(rc);
951 goto out;
952 }
953
954 mnt_data.ctx = cifs_sb->ctx;
955 mnt_data.cifs_sb = cifs_sb;
956 mnt_data.flags = flags;
957
958 /* BB should we make this contingent on mount parm? */
959 flags |= SB_NODIRATIME | SB_NOATIME;
960
961 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
962 if (IS_ERR(sb)) {
963 cifs_umount(cifs_sb);
964 return ERR_CAST(sb);
965 }
966
967 if (sb->s_root) {
968 cifs_dbg(FYI, "Use existing superblock\n");
969 cifs_umount(cifs_sb);
970 cifs_sb = NULL;
971 } else {
972 rc = cifs_read_super(sb);
973 if (rc) {
974 root = ERR_PTR(rc);
975 goto out_super;
976 }
977
978 sb->s_flags |= SB_ACTIVE;
979 }
980
981 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
982 if (IS_ERR(root))
983 goto out_super;
984
985 if (cifs_sb)
986 cifs_sb->root = dget(root);
987
988 cifs_dbg(FYI, "dentry root is: %p\n", root);
989 return root;
990
991 out_super:
992 deactivate_locked_super(sb);
993 return root;
994 out:
995 kfree(cifs_sb->prepath);
996 smb3_cleanup_fs_context(cifs_sb->ctx);
997 kfree(cifs_sb);
998 return root;
999 }
1000
1001
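/* cache=loose read path: revalidate the mapping, then use the generic page-cache read */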
1002 static ssize_t
1003 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1004 {
1005 ssize_t rc;
1006 struct inode *inode = file_inode(iocb->ki_filp);
1007
1008 if (iocb->ki_flags & IOCB_DIRECT)
1009 return cifs_user_readv(iocb, iter);
1010
1011 rc = cifs_revalidate_mapping(inode);
1012 if (rc)
1013 return rc;
1014
1015 return generic_file_read_iter(iocb, iter);
1016 }
1017
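/*
 * Default write path: O_DIRECT writes go straight to the server (zapping the
 * page cache and oplock on success); otherwise write through the page cache
 * and start writeback unless a write-caching oplock/lease is held.
 */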
1018 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1019 {
1020 struct inode *inode = file_inode(iocb->ki_filp);
1021 struct cifsInodeInfo *cinode = CIFS_I(inode);
1022 ssize_t written;
1023 int rc;
1024
1025 if (iocb->ki_filp->f_flags & O_DIRECT) {
1026 written = cifs_user_writev(iocb, from);
1027 if (written > 0 && CIFS_CACHE_READ(cinode)) {
1028 cifs_zap_mapping(inode);
1029 cifs_dbg(FYI,
1030 "Set no oplock for inode=%p after a write operation\n",
1031 inode);
1032 cinode->oplock = 0;
1033 }
1034 return written;
1035 }
1036
1037 written = cifs_get_writer(cinode);
1038 if (written)
1039 return written;
1040
1041 written = generic_file_write_iter(iocb, from);
1042
1043 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1044 goto out;
1045
1046 rc = filemap_fdatawrite(inode->i_mapping);
1047 if (rc)
1048 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1049 rc, inode);
1050
1051 out:
1052 cifs_put_writer(cinode);
1053 return written;
1054 }
1055
1056 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1057 {
1058 struct cifsFileInfo *cfile = file->private_data;
1059 struct cifs_tcon *tcon;
1060
1061 /*
1062 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1063 * the cached file length
1064 */
1065 if (whence != SEEK_SET && whence != SEEK_CUR) {
1066 int rc;
1067 struct inode *inode = file_inode(file);
1068
1069 /*
1070 * We need to be sure that all dirty pages are written and the
1071 * server has the newest file length.
1072 */
1073 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1074 inode->i_mapping->nrpages != 0) {
1075 rc = filemap_fdatawait(inode->i_mapping);
1076 if (rc) {
1077 mapping_set_error(inode->i_mapping, rc);
1078 return rc;
1079 }
1080 }
1081 /*
1082 * Some applications poll for the file length in this strange
1083 * way so we must seek to end on non-oplocked files by
1084 * setting the revalidate time to zero.
1085 */
1086 CIFS_I(inode)->time = 0;
1087
1088 rc = cifs_revalidate_file_attr(file);
1089 if (rc < 0)
1090 return (loff_t)rc;
1091 }
1092 if (cfile && cfile->tlink) {
1093 tcon = tlink_tcon(cfile->tlink);
1094 if (tcon->ses->server->ops->llseek)
1095 return tcon->ses->server->ops->llseek(file, tcon,
1096 offset, whence);
1097 }
1098 return generic_file_llseek(file, offset, whence);
1099 }
1100
1101 static int
1102 cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
1103 {
1104 /*
1105 * Note that this is called by vfs setlease with i_lock held to
1106 * protect *lease from going away.
1107 */
1108 struct inode *inode = file_inode(file);
1109 struct cifsFileInfo *cfile = file->private_data;
1110
1111 if (!(S_ISREG(inode->i_mode)))
1112 return -EINVAL;
1113
1114 /* Check if file is oplocked if this is request for new lease */
1115 if (arg == F_UNLCK ||
1116 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1117 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1118 return generic_setlease(file, arg, lease, priv);
1119 else if (tlink_tcon(cfile->tlink)->local_lease &&
1120 !CIFS_CACHE_READ(CIFS_I(inode)))
1121 /*
1122 * If the server claims to support oplock on this file, then we
1123 * still need to check oplock even if the local_lease mount
1124 * option is set, but there are servers which do not support
1125 * oplock for which this mount option may be useful if the user
1126 * knows that the file won't be changed on the server by anyone
1127 * else.
1128 */
1129 return generic_setlease(file, arg, lease, priv);
1130 else
1131 return -EAGAIN;
1132 }
1133
1134 struct file_system_type cifs_fs_type = {
1135 .owner = THIS_MODULE,
1136 .name = "cifs",
1137 .init_fs_context = smb3_init_fs_context,
1138 .parameters = smb3_fs_parameters,
1139 .kill_sb = cifs_kill_sb,
1140 .fs_flags = FS_RENAME_DOES_D_MOVE,
1141 };
1142 MODULE_ALIAS_FS("cifs");
1143
1144 struct file_system_type smb3_fs_type = {
1145 .owner = THIS_MODULE,
1146 .name = "smb3",
1147 .init_fs_context = smb3_init_fs_context,
1148 .parameters = smb3_fs_parameters,
1149 .kill_sb = cifs_kill_sb,
1150 .fs_flags = FS_RENAME_DOES_D_MOVE,
1151 };
1152 MODULE_ALIAS_FS("smb3");
1153 MODULE_ALIAS("smb3");
1154
1155 const struct inode_operations cifs_dir_inode_ops = {
1156 .create = cifs_create,
1157 .atomic_open = cifs_atomic_open,
1158 .lookup = cifs_lookup,
1159 .getattr = cifs_getattr,
1160 .unlink = cifs_unlink,
1161 .link = cifs_hardlink,
1162 .mkdir = cifs_mkdir,
1163 .rmdir = cifs_rmdir,
1164 .rename = cifs_rename2,
1165 .permission = cifs_permission,
1166 .setattr = cifs_setattr,
1167 .symlink = cifs_symlink,
1168 .mknod = cifs_mknod,
1169 .listxattr = cifs_listxattr,
1170 .get_acl = cifs_get_acl,
1171 .set_acl = cifs_set_acl,
1172 };
1173
1174 const struct inode_operations cifs_file_inode_ops = {
1175 .setattr = cifs_setattr,
1176 .getattr = cifs_getattr,
1177 .permission = cifs_permission,
1178 .listxattr = cifs_listxattr,
1179 .fiemap = cifs_fiemap,
1180 .get_acl = cifs_get_acl,
1181 .set_acl = cifs_set_acl,
1182 };
1183
1184 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1185 struct delayed_call *done)
1186 {
1187 char *target_path;
1188
1189 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1190 if (!target_path)
1191 return ERR_PTR(-ENOMEM);
1192
1193 spin_lock(&inode->i_lock);
1194 if (likely(CIFS_I(inode)->symlink_target)) {
1195 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1196 } else {
1197 kfree(target_path);
1198 target_path = ERR_PTR(-EOPNOTSUPP);
1199 }
1200 spin_unlock(&inode->i_lock);
1201
1202 if (!IS_ERR(target_path))
1203 set_delayed_call(done, kfree_link, target_path);
1204
1205 return target_path;
1206 }
1207
1208 const struct inode_operations cifs_symlink_inode_ops = {
1209 .get_link = cifs_get_link,
1210 .setattr = cifs_setattr,
1211 .permission = cifs_permission,
1212 .listxattr = cifs_listxattr,
1213 };
1214
1215 /*
1216 * Advance the EOF marker to after the source range.
1217 */
1218 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1219 struct cifs_tcon *src_tcon,
1220 unsigned int xid, loff_t src_end)
1221 {
1222 struct cifsFileInfo *writeable_srcfile;
1223 int rc = -EINVAL;
1224
1225 writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1226 if (writeable_srcfile) {
1227 if (src_tcon->ses->server->ops->set_file_size)
1228 rc = src_tcon->ses->server->ops->set_file_size(
1229 xid, src_tcon, writeable_srcfile,
1230 src_inode->i_size, true /* no need to set sparse */);
1231 else
1232 rc = -ENOSYS;
1233 cifsFileInfo_put(writeable_srcfile);
1234 cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1235 }
1236
1237 if (rc < 0)
1238 goto set_failed;
1239
1240 netfs_resize_file(&src_cifsi->netfs, src_end);
1241 fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1242 return 0;
1243
1244 set_failed:
1245 return filemap_write_and_wait(src_inode->i_mapping);
1246 }
1247
1248 /*
1249 * Flush out either the folio that overlaps the beginning of a range in which
1250 * pos resides or the folio that overlaps the end of a range unless that folio
1251 * is entirely within the range we're going to invalidate. We extend the flush
1252 * bounds to encompass the folio.
1253 */
1254 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1255 bool first)
1256 {
1257 struct folio *folio;
1258 unsigned long long fpos, fend;
1259 pgoff_t index = pos / PAGE_SIZE;
1260 size_t size;
1261 int rc = 0;
1262
1263 folio = filemap_get_folio(inode->i_mapping, index);
1264 if (IS_ERR(folio))
1265 return 0;
1266
1267 size = folio_size(folio);
1268 fpos = folio_pos(folio);
1269 fend = fpos + size - 1;
1270 *_fstart = min_t(unsigned long long, *_fstart, fpos);
1271 *_fend = max_t(unsigned long long, *_fend, fend);
1272 if ((first && pos == fpos) || (!first && pos == fend))
1273 goto out;
1274
1275 rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1276 out:
1277 folio_put(folio);
1278 return rc;
1279 }
1280
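/* FICLONE/FICLONERANGE (reflink): clone a byte range via the dialect's duplicate_extents op */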
1281 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1282 struct file *dst_file, loff_t destoff, loff_t len,
1283 unsigned int remap_flags)
1284 {
1285 struct inode *src_inode = file_inode(src_file);
1286 struct inode *target_inode = file_inode(dst_file);
1287 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1288 struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1289 struct cifsFileInfo *smb_file_src = src_file->private_data;
1290 struct cifsFileInfo *smb_file_target = dst_file->private_data;
1291 struct cifs_tcon *target_tcon, *src_tcon;
1292 unsigned long long destend, fstart, fend, new_size;
1293 unsigned int xid;
1294 int rc;
1295
1296 if (remap_flags & REMAP_FILE_DEDUP)
1297 return -EOPNOTSUPP;
1298 if (remap_flags & ~REMAP_FILE_ADVISORY)
1299 return -EINVAL;
1300
1301 cifs_dbg(FYI, "clone range\n");
1302
1303 xid = get_xid();
1304
1305 if (!smb_file_src || !smb_file_target) {
1306 rc = -EBADF;
1307 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1308 goto out;
1309 }
1310
1311 src_tcon = tlink_tcon(smb_file_src->tlink);
1312 target_tcon = tlink_tcon(smb_file_target->tlink);
1313
1314 /*
1315 * Note: the cifs case is easier than btrfs since the server is responsible
1316 * for checking proper open modes and file type, and if it wants the
1317 * server could even support copying a range where source = target
1318 */
1319 lock_two_nondirectories(target_inode, src_inode);
1320
1321 if (len == 0)
1322 len = src_inode->i_size - off;
1323
1324 cifs_dbg(FYI, "clone range\n");
1325
1326 /* Flush the source buffer */
1327 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1328 off + len - 1);
1329 if (rc)
1330 goto unlock;
1331
1332 /* The server-side copy will fail if the source crosses the EOF marker.
1333 * Advance the EOF marker after the flush above to the end of the range
1334 * if it's short of that.
1335 */
1336 if (src_cifsi->netfs.remote_i_size < off + len) {
1337 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1338 if (rc < 0)
1339 goto unlock;
1340 }
1341
1342 new_size = destoff + len;
1343 destend = destoff + len - 1;
1344
1345 /* Flush the folios at either end of the destination range to prevent
1346 * accidental loss of dirty data outside of the range.
1347 */
1348 fstart = destoff;
1349 fend = destend;
1350
1351 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1352 if (rc)
1353 goto unlock;
1354 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1355 if (rc)
1356 goto unlock;
1357
1358 /* Discard all the folios that overlap the destination region. */
1359 cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1360 truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1361
1362 fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1363 i_size_read(target_inode), 0);
1364
1365 rc = -EOPNOTSUPP;
1366 if (target_tcon->ses->server->ops->duplicate_extents) {
1367 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1368 smb_file_src, smb_file_target, off, len, destoff);
1369 if (rc == 0 && new_size > i_size_read(target_inode)) {
1370 truncate_setsize(target_inode, new_size);
1371 netfs_resize_file(&target_cifsi->netfs, new_size);
1372 fscache_resize_cookie(cifs_inode_cookie(target_inode),
1373 new_size);
1374 }
1375 }
1376
1377 /* force revalidate of size and timestamps of target file now
1378 that target is updated on the server */
1379 CIFS_I(target_inode)->time = 0;
1380 unlock:
1381 /* although unlocking in the reverse order from locking is not
1382 strictly necessary here it is a little cleaner to be consistent */
1383 unlock_two_nondirectories(src_inode, target_inode);
1384 out:
1385 free_xid(xid);
1386 return rc < 0 ? rc : len;
1387 }
1388
1389 ssize_t cifs_file_copychunk_range(unsigned int xid,
1390 struct file *src_file, loff_t off,
1391 struct file *dst_file, loff_t destoff,
1392 size_t len, unsigned int flags)
1393 {
1394 struct inode *src_inode = file_inode(src_file);
1395 struct inode *target_inode = file_inode(dst_file);
1396 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1397 struct cifsFileInfo *smb_file_src;
1398 struct cifsFileInfo *smb_file_target;
1399 struct cifs_tcon *src_tcon;
1400 struct cifs_tcon *target_tcon;
1401 unsigned long long destend, fstart, fend;
1402 ssize_t rc;
1403
1404 cifs_dbg(FYI, "copychunk range\n");
1405
1406 if (!src_file->private_data || !dst_file->private_data) {
1407 rc = -EBADF;
1408 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1409 goto out;
1410 }
1411
1412 rc = -EXDEV;
1413 smb_file_target = dst_file->private_data;
1414 smb_file_src = src_file->private_data;
1415 src_tcon = tlink_tcon(smb_file_src->tlink);
1416 target_tcon = tlink_tcon(smb_file_target->tlink);
1417
1418 if (src_tcon->ses != target_tcon->ses) {
1419 cifs_dbg(FYI, "source and target of copy not on same server\n");
1420 goto out;
1421 }
1422
1423 rc = -EOPNOTSUPP;
1424 if (!target_tcon->ses->server->ops->copychunk_range)
1425 goto out;
1426
1427 /*
1428 * Note: the cifs case is easier than btrfs since the server is responsible
1429 * for checking proper open modes and file type, and if it wants the
1430 * server could even support copying a range where source = target
1431 */
1432 lock_two_nondirectories(target_inode, src_inode);
1433
1434 cifs_dbg(FYI, "about to flush pages\n");
1435
1436 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1437 off + len - 1);
1438 if (rc)
1439 goto unlock;
1440
1441 /* The server-side copy will fail if the source crosses the EOF marker.
1442 * Advance the EOF marker after the flush above to the end of the range
1443 * if it's short of that.
1444 */
1445 if (src_cifsi->server_eof < off + len) {
1446 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1447 if (rc < 0)
1448 goto unlock;
1449 }
1450
1451 destend = destoff + len - 1;
1452
1453 /* Flush the folios at either end of the destination range to prevent
1454 * accidental loss of dirty data outside of the range.
1455 */
1456 fstart = destoff;
1457 fend = destend;
1458
1459 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1460 if (rc)
1461 goto unlock;
1462 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1463 if (rc)
1464 goto unlock;
1465
1466 /* Discard all the folios that overlap the destination region. */
1467 truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1468
1469 rc = file_modified(dst_file);
1470 if (!rc) {
1471 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1472 smb_file_src, smb_file_target, off, len, destoff);
1473 if (rc > 0 && destoff + rc > i_size_read(target_inode))
1474 truncate_setsize(target_inode, destoff + rc);
1475 }
1476
1477 file_accessed(src_file);
1478
1479 /* force revalidate of size and timestamps of target file now
1480 * that target is updated on the server
1481 */
1482 CIFS_I(target_inode)->time = 0;
1483
1484 unlock:
1485 /* although unlocking in the reverse order from locking is not
1486 * strictly necessary here it is a little cleaner to be consistent
1487 */
1488 unlock_two_nondirectories(src_inode, target_inode);
1489
1490 out:
1491 return rc;
1492 }
1493
1494 /*
1495 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1496 * is a dummy operation.
1497 */
1498 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1499 {
1500 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1501 file, datasync);
1502
1503 return 0;
1504 }
1505
1506 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1507 struct file *dst_file, loff_t destoff,
1508 size_t len, unsigned int flags)
1509 {
1510 unsigned int xid = get_xid();
1511 ssize_t rc;
1512 struct cifsFileInfo *cfile = dst_file->private_data;
1513
1514 if (cfile->swapfile) {
1515 rc = -EOPNOTSUPP;
1516 free_xid(xid);
1517 return rc;
1518 }
1519
1520 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1521 len, flags);
1522 free_xid(xid);
1523
1524 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1525 rc = generic_copy_file_range(src_file, off, dst_file,
1526 destoff, len, flags);
1527 return rc;
1528 }
1529
1530 const struct file_operations cifs_file_ops = {
1531 .read_iter = cifs_loose_read_iter,
1532 .write_iter = cifs_file_write_iter,
1533 .open = cifs_open,
1534 .release = cifs_close,
1535 .lock = cifs_lock,
1536 .flock = cifs_flock,
1537 .fsync = cifs_fsync,
1538 .flush = cifs_flush,
1539 .mmap = cifs_file_mmap,
1540 .splice_read = filemap_splice_read,
1541 .splice_write = iter_file_splice_write,
1542 .llseek = cifs_llseek,
1543 .unlocked_ioctl = cifs_ioctl,
1544 .copy_file_range = cifs_copy_file_range,
1545 .remap_file_range = cifs_remap_file_range,
1546 .setlease = cifs_setlease,
1547 .fallocate = cifs_fallocate,
1548 };
1549
1550 const struct file_operations cifs_file_strict_ops = {
1551 .read_iter = cifs_strict_readv,
1552 .write_iter = cifs_strict_writev,
1553 .open = cifs_open,
1554 .release = cifs_close,
1555 .lock = cifs_lock,
1556 .flock = cifs_flock,
1557 .fsync = cifs_strict_fsync,
1558 .flush = cifs_flush,
1559 .mmap = cifs_file_strict_mmap,
1560 .splice_read = filemap_splice_read,
1561 .splice_write = iter_file_splice_write,
1562 .llseek = cifs_llseek,
1563 .unlocked_ioctl = cifs_ioctl,
1564 .copy_file_range = cifs_copy_file_range,
1565 .remap_file_range = cifs_remap_file_range,
1566 .setlease = cifs_setlease,
1567 .fallocate = cifs_fallocate,
1568 };
1569
1570 const struct file_operations cifs_file_direct_ops = {
1571 .read_iter = cifs_direct_readv,
1572 .write_iter = cifs_direct_writev,
1573 .open = cifs_open,
1574 .release = cifs_close,
1575 .lock = cifs_lock,
1576 .flock = cifs_flock,
1577 .fsync = cifs_fsync,
1578 .flush = cifs_flush,
1579 .mmap = cifs_file_mmap,
1580 .splice_read = copy_splice_read,
1581 .splice_write = iter_file_splice_write,
1582 .unlocked_ioctl = cifs_ioctl,
1583 .copy_file_range = cifs_copy_file_range,
1584 .remap_file_range = cifs_remap_file_range,
1585 .llseek = cifs_llseek,
1586 .setlease = cifs_setlease,
1587 .fallocate = cifs_fallocate,
1588 };
1589
1590 const struct file_operations cifs_file_nobrl_ops = {
1591 .read_iter = cifs_loose_read_iter,
1592 .write_iter = cifs_file_write_iter,
1593 .open = cifs_open,
1594 .release = cifs_close,
1595 .fsync = cifs_fsync,
1596 .flush = cifs_flush,
1597 .mmap = cifs_file_mmap,
1598 .splice_read = filemap_splice_read,
1599 .splice_write = iter_file_splice_write,
1600 .llseek = cifs_llseek,
1601 .unlocked_ioctl = cifs_ioctl,
1602 .copy_file_range = cifs_copy_file_range,
1603 .remap_file_range = cifs_remap_file_range,
1604 .setlease = cifs_setlease,
1605 .fallocate = cifs_fallocate,
1606 };
1607
1608 const struct file_operations cifs_file_strict_nobrl_ops = {
1609 .read_iter = cifs_strict_readv,
1610 .write_iter = cifs_strict_writev,
1611 .open = cifs_open,
1612 .release = cifs_close,
1613 .fsync = cifs_strict_fsync,
1614 .flush = cifs_flush,
1615 .mmap = cifs_file_strict_mmap,
1616 .splice_read = filemap_splice_read,
1617 .splice_write = iter_file_splice_write,
1618 .llseek = cifs_llseek,
1619 .unlocked_ioctl = cifs_ioctl,
1620 .copy_file_range = cifs_copy_file_range,
1621 .remap_file_range = cifs_remap_file_range,
1622 .setlease = cifs_setlease,
1623 .fallocate = cifs_fallocate,
1624 };
1625
1626 const struct file_operations cifs_file_direct_nobrl_ops = {
1627 .read_iter = cifs_direct_readv,
1628 .write_iter = cifs_direct_writev,
1629 .open = cifs_open,
1630 .release = cifs_close,
1631 .fsync = cifs_fsync,
1632 .flush = cifs_flush,
1633 .mmap = cifs_file_mmap,
1634 .splice_read = copy_splice_read,
1635 .splice_write = iter_file_splice_write,
1636 .unlocked_ioctl = cifs_ioctl,
1637 .copy_file_range = cifs_copy_file_range,
1638 .remap_file_range = cifs_remap_file_range,
1639 .llseek = cifs_llseek,
1640 .setlease = cifs_setlease,
1641 .fallocate = cifs_fallocate,
1642 };
1643
1644 const struct file_operations cifs_dir_ops = {
1645 .iterate_shared = cifs_readdir,
1646 .release = cifs_closedir,
1647 .read = generic_read_dir,
1648 .unlocked_ioctl = cifs_ioctl,
1649 .copy_file_range = cifs_copy_file_range,
1650 .remap_file_range = cifs_remap_file_range,
1651 .llseek = generic_file_llseek,
1652 .fsync = cifs_dir_fsync,
1653 };
1654
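/* Slab constructor: initialize fields that persist for the life of the slab object */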
1655 static void
1656 cifs_init_once(void *inode)
1657 {
1658 struct cifsInodeInfo *cifsi = inode;
1659
1660 inode_init_once(&cifsi->netfs.inode);
1661 init_rwsem(&cifsi->lock_sem);
1662 }
1663
1664 static int __init
1665 cifs_init_inodecache(void)
1666 {
1667 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1668 sizeof(struct cifsInodeInfo),
1669 0, (SLAB_RECLAIM_ACCOUNT|
1670 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1671 cifs_init_once);
1672 if (cifs_inode_cachep == NULL)
1673 return -ENOMEM;
1674
1675 return 0;
1676 }
1677
1678 static void
1679 cifs_destroy_inodecache(void)
1680 {
1681 /*
1682 * Make sure all delayed rcu free inodes are flushed before we
1683 * destroy cache.
1684 */
1685 rcu_barrier();
1686 kmem_cache_destroy(cifs_inode_cachep);
1687 }
1688
1689 static int
1690 cifs_init_request_bufs(void)
1691 {
1692 /*
1693 * SMB2 maximum header size is bigger than CIFS one - no problems to
1694 * allocate some more bytes for CIFS.
1695 */
1696 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1697
1698 if (CIFSMaxBufSize < 8192) {
1699 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1700 Unicode path name has to fit in any SMB/CIFS path based frames */
1701 CIFSMaxBufSize = 8192;
1702 } else if (CIFSMaxBufSize > 1024*127) {
1703 CIFSMaxBufSize = 1024 * 127;
1704 } else {
1705 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1706 }
1707 /*
1708 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1709 CIFSMaxBufSize, CIFSMaxBufSize);
1710 */
1711 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1712 CIFSMaxBufSize + max_hdr_size, 0,
1713 SLAB_HWCACHE_ALIGN, 0,
1714 CIFSMaxBufSize + max_hdr_size,
1715 NULL);
1716 if (cifs_req_cachep == NULL)
1717 return -ENOMEM;
1718
1719 if (cifs_min_rcv < 1)
1720 cifs_min_rcv = 1;
1721 else if (cifs_min_rcv > 64) {
1722 cifs_min_rcv = 64;
1723 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1724 }
1725
1726 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1727 cifs_req_cachep);
1728
1729 if (cifs_req_poolp == NULL) {
1730 kmem_cache_destroy(cifs_req_cachep);
1731 return -ENOMEM;
1732 }
	/*
	 * MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	 * almost all handle based requests (but not write response, nor is it
	 * sufficient for path based requests). A smaller size would have
	 * been more efficient (compacting multiple slab items on one 4k page)
	 * for the case in which debug was on, but this larger size allows
	 * more SMBs to use small buffer alloc and is still much more
	 * efficient to alloc 1 per page off the slab compared to 17K (5page)
	 * alloc of large cifs buffers even when page debugging is on.
	 */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

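/*
 * mid_q_entry structures track requests in flight (one per outstanding
 * multiplex id), so back them with a small mempool that guarantees a few
 * entries are always available.
 */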
static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}

static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
	/*
	 * Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
			 "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

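	/*
	 * The workqueues below use WQ_MEM_RECLAIM so each has a rescuer
	 * thread and can make forward progress under memory pressure, and
	 * WQ_FREEZABLE so queued work is quiesced across suspend.
	 */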
	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	cfid_put_wq = alloc_workqueue("cfid_put_wq",
				      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cfid_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_serverclose_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_cfid_put_wq;

	rc = init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

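	/*
	 * Register both filesystem types so the module can be mounted as
	 * either "cifs" or "smb3"; if the second registration fails, the
	 * first is rolled back before bailing out.
	 */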
	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

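	/* Error paths: tear down in the reverse order of the setup above */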
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_cfid_put_wq:
	destroy_workqueue(cfid_put_wq);
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);	/* paired with the allocation in init_cifs() */
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}

MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("pre: ecb");
MODULE_SOFTDEP("pre: hmac");
MODULE_SOFTDEP("pre: md5");
MODULE_SOFTDEP("pre: nls");
MODULE_SOFTDEP("pre: aes");
MODULE_SOFTDEP("pre: cmac");
MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: sha512");
MODULE_SOFTDEP("pre: aead2");
MODULE_SOFTDEP("pre: ccm");
MODULE_SOFTDEP("pre: gcm");
module_init(init_cifs)
module_exit(exit_cifs)