// SPDX-License-Identifier: LGPL-2.1
/*
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * Common Internet FileSystem (CIFS) client
 *
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/xattr.h>
#include <uapi/linux/magic.h>
#include <net/ipv6.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#include <linux/key-type.h>
#include "cifs_spnego.h"
#include "fscache.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif
#ifdef CONFIG_CIFS_SWN_UPCALL
#include "netlink.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"

/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;		/* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid;	/* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 * Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif /* STATS2 */
struct list_head cifs_tcp_ses_list;
spinlock_t cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
		 "for CIFS requests. "
		 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
		 "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
		 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
		 "CIFS/SMB1 dialect (N/A for SMB3) "
		 "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
		 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
		 "before logging that a response is delayed. "
		 "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
		 "helpful to restrict the ability to "
		 "override the default dialects (SMB2.1, "
		 "SMB3 and SMB3.02) on mount with old "
		 "dialects (CIFS/SMB1 and SMB2) since "
		 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
		 " and less secure. Default: n/N/0");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
__u32 cifs_lock_secret;

/*
 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}

void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}

static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}

static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}

static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}

static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);

	return -EOPNOTSUPP;
}

static int cifs_permission(struct mnt_idmap *idmap,
			   struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(&nop_mnt_idmap, inode, mask);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;

	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}

static void
cifs_free_inode(struct inode *inode)
{
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(cinode->symlink_target);
	kmem_cache_free(cifs_inode_cachep, cinode);
}

static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_FSCACHE_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}

static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}

static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}

static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
		seq_puts(s, "singleclient"); /* assume only one client access */
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
		seq_puts(s, "ro"); /* read only caching assumed */
	else
		seq_puts(s, "loose");
}

/*
 * cifs_show_devname() is used so we show the mount device name with correct
 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
 */
static int cifs_show_devname(struct seq_file *m, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);

	if (devname == NULL)
		seq_puts(m, "none");
	else {
		convert_delimiter(devname, '/');
		/* escape all spaces in share names */
		seq_escape(m, devname, " \t");
		kfree(devname);
	}
	return 0;
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
			   cifs_sb->ctx->file_mode,
			   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}

static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files). TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}

static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return 0;

	tcon = cifs_sb_master_tcon(cifs_sb);

	cifs_close_all_deferred_files(tcon);
	return 0;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
	return 0;
}

static int cifs_drop_inode(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);
}

static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode = cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode = cifs_drop_inode,
	.evict_inode = cifs_evict_inode,
/*	.show_path = cifs_show_path, */ /* Would we ever need show path? */
	.show_devname = cifs_show_devname,
/*	.delete_inode = cifs_delete_inode, */ /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.freeze_fs = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};

/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}

static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}

struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
		   int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}

static ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_DIRECT)
		return cifs_user_readv(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return generic_file_read_iter(iocb, iter);
}

static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}

static int
cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}

struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");

struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");

const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
			  struct delayed_call *done)
{
	char *target_path;

	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!target_path)
		return ERR_PTR(-ENOMEM);

	spin_lock(&inode->i_lock);
	if (likely(CIFS_I(inode)->symlink_target)) {
		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
	} else {
		kfree(target_path);
		target_path = ERR_PTR(-EOPNOTSUPP);
	}
	spin_unlock(&inode->i_lock);

	if (!IS_ERR(target_path))
		set_delayed_call(done, kfree_link, target_path);

	return target_path;
}

const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};

/*
 * Advance the EOF marker to after the source range.
 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;

	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	netfs_resize_file(&src_cifsi->netfs, src_end);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	return filemap_write_and_wait(src_inode->i_mapping);
}

/*
 * Flush out either the folio that overlaps the beginning of a range in which
 * pos resides or the folio that overlaps the end of a range unless that folio
 * is entirely within the range we're going to invalidate. We extend the flush
 * bounds to encompass the folio.
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend = max_t(unsigned long long, *_fend, fend);
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}

static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > i_size_read(target_inode)) {
			truncate_setsize(target_inode, new_size);
			netfs_resize_file(&target_cifsi->netfs, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}

ssize_t cifs_file_copychunk_range(unsigned int xid,
				  struct file *src_file, loff_t off,
				  struct file *dst_file, loff_t destoff,
				  size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	unsigned long long destend, fstart, fend;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->server_eof < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc > 0 && destoff + rc > i_size_read(target_inode))
			truncate_setsize(target_inode, destoff + rc);
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}

/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}

static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
				    struct file *dst_file, loff_t destoff,
				    size_t len, unsigned int flags)
{
	unsigned int xid = get_xid();
	ssize_t rc;
	struct cifsFileInfo *cfile = dst_file->private_data;

	if (cfile->swapfile) {
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
				       len, flags);
	free_xid(xid);

	if (rc == -EOPNOTSUPP || rc == -EXDEV)
		rc = generic_copy_file_range(src_file, off, dst_file,
					     destoff, len, flags);
	return rc;
}

const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};

static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}

static int __init
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}

static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
		/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
		   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00;	/* Round size to even 512 byte mult */
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests). A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}

static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
	/*
	 * Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
			 "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
					  WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_deferredclose_wq;

	rc = init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}

MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	 "also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)