1 // SPDX-License-Identifier: LGPL-2.1 2 /* 3 * 4 * Copyright (C) International Business Machines Corp., 2002,2008 5 * Author(s): Steve French (sfrench@us.ibm.com) 6 * 7 * Common Internet FileSystem (CIFS) client 8 * 9 */ 10 11 /* Note that BB means BUGBUG (ie something to fix eventually) */ 12 13 #include <linux/module.h> 14 #include <linux/fs.h> 15 #include <linux/filelock.h> 16 #include <linux/mount.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/list.h> 20 #include <linux/seq_file.h> 21 #include <linux/vfs.h> 22 #include <linux/mempool.h> 23 #include <linux/delay.h> 24 #include <linux/kthread.h> 25 #include <linux/freezer.h> 26 #include <linux/namei.h> 27 #include <linux/random.h> 28 #include <linux/uuid.h> 29 #include <linux/xattr.h> 30 #include <uapi/linux/magic.h> 31 #include <net/ipv6.h> 32 #include "cifsfs.h" 33 #include "cifspdu.h" 34 #define DECLARE_GLOBALS_HERE 35 #include "cifsglob.h" 36 #include "cifsproto.h" 37 #include "cifs_debug.h" 38 #include "cifs_fs_sb.h" 39 #include <linux/mm.h> 40 #include <linux/key-type.h> 41 #include "cifs_spnego.h" 42 #include "fscache.h" 43 #ifdef CONFIG_CIFS_DFS_UPCALL 44 #include "dfs_cache.h" 45 #endif 46 #ifdef CONFIG_CIFS_SWN_UPCALL 47 #include "netlink.h" 48 #endif 49 #include "fs_context.h" 50 #include "cached_dir.h" 51 52 /* 53 * DOS dates from 1980/1/1 through 2107/12/31 54 * Protocol specifications indicate the range should be to 119, which 55 * limits maximum year to 2099. But this range has not been checked. 
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

/* Debug/behavior knobs; several are also exported as module parameters below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 * Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head cifs_tcp_ses_list;
spinlock_t cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				     "before logging that a response is delayed. "
				     "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
					  "helpful to restrict the ability to "
					  "override the default dialects (SMB2.1, "
					  "SMB3 and SMB3.02) on mount with old "
					  "dialects (CIFS/SMB1 and SMB2) since "
					  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
					  " and less secure. Default: n/N/0");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
__u32 cifs_lock_secret;

/*
 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	/* first active user also pins the VFS superblock */
	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}

/* Drops the refcount taken by cifs_sb_active(); last ref tears the sb down */
void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}

/*
 * Fill in a superblock at mount time: size limits, time granularity and
 * range, operations tables, readahead sizing and the root dentry.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* snapshot mounts are views of the past, so make them read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		/* legacy SMB1 server without NT find support: DOS time range */
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}

/* Tear down a superblock: drop cached dir dentries and the root before umount */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}

/*
 * statfs(2) handler.  Name length and fsid come from cached tcon state;
 * block/space counts are filled in by the dialect's queryfs op when present.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}

/* fallocate(2): delegate to the dialect-specific implementation, if any */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);

	return -EOPNOTSUPP;
}

static int cifs_permission(struct mnt_idmap
*idmap,
			   struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	/* "noperm" mount: only enforce execute permission locally */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		on the client (above and beyond ACL on servers) for
		servers which do not support setting and viewing mode bits,
		so allowing client to check permissions is useful */
		return generic_permission(&nop_mnt_idmap, inode, mask);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

/* Allocate and minimally initialize a cifs inode for @sb */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}

static void
cifs_free_inode(struct inode *inode)
{
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	/* free any cached symlink target before returning the inode to its cache */
	if (S_ISLNK(inode->i_mode))
		kfree(cinode->symlink_target);
	kmem_cache_free(cifs_inode_cachep, cinode);
}

static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_FSCACHE_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}

/* Emit the ",addr=" mount option showing the server's destination address */
static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}

/* Emit the ",sec=" mount option reflecting the session's security type */
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s,
"unknown"); 482 break; 483 } 484 485 if (ses->sign) 486 seq_puts(s, "i"); 487 488 if (ses->sectype == Kerberos) 489 seq_printf(s, ",cruid=%u", 490 from_kuid_munged(&init_user_ns, ses->cred_uid)); 491 } 492 493 static void 494 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb) 495 { 496 seq_puts(s, ",cache="); 497 498 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) 499 seq_puts(s, "strict"); 500 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) 501 seq_puts(s, "none"); 502 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE) 503 seq_puts(s, "singleclient"); /* assume only one client access */ 504 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) 505 seq_puts(s, "ro"); /* read only caching assumed */ 506 else 507 seq_puts(s, "loose"); 508 } 509 510 /* 511 * cifs_show_devname() is used so we show the mount device name with correct 512 * format (e.g. forward slashes vs. back slashes) in /proc/mounts 513 */ 514 static int cifs_show_devname(struct seq_file *m, struct dentry *root) 515 { 516 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); 517 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL); 518 519 if (devname == NULL) 520 seq_puts(m, "none"); 521 else { 522 convert_delimiter(devname, '/'); 523 /* escape all spaces in share names */ 524 seq_escape(m, devname, " \t"); 525 kfree(devname); 526 } 527 return 0; 528 } 529 530 /* 531 * cifs_show_options() is for displaying mount options in /proc/mounts. 532 * Not all settable options are displayed but most of the important 533 * ones are. 
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* show srcaddr only when one was explicitly bound (family set) */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}

/*
 * Called at the start of umount so waiting network requests can be woken up;
 * the actual teardown of the tree connection happens later.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).
TID_EXITING will be set just before tdis req sent 743 */ 744 spin_unlock(&tcon->tc_lock); 745 spin_unlock(&cifs_tcp_ses_lock); 746 747 cifs_close_all_deferred_files(tcon); 748 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ 749 /* cancel_notify_requests(tcon); */ 750 if (tcon->ses && tcon->ses->server) { 751 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n"); 752 wake_up_all(&tcon->ses->server->request_q); 753 wake_up_all(&tcon->ses->server->response_q); 754 msleep(1); /* yield */ 755 /* we have to kick the requests once more */ 756 wake_up_all(&tcon->ses->server->response_q); 757 msleep(1); 758 } 759 760 return; 761 } 762 763 static int cifs_freeze(struct super_block *sb) 764 { 765 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 766 struct cifs_tcon *tcon; 767 768 if (cifs_sb == NULL) 769 return 0; 770 771 tcon = cifs_sb_master_tcon(cifs_sb); 772 773 cifs_close_all_deferred_files(tcon); 774 return 0; 775 } 776 777 #ifdef CONFIG_CIFS_STATS2 778 static int cifs_show_stats(struct seq_file *s, struct dentry *root) 779 { 780 /* BB FIXME */ 781 return 0; 782 } 783 #endif 784 785 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc) 786 { 787 fscache_unpin_writeback(wbc, cifs_inode_cookie(inode)); 788 return 0; 789 } 790 791 static int cifs_drop_inode(struct inode *inode) 792 { 793 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 794 795 /* no serverino => unconditional eviction */ 796 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) || 797 generic_drop_inode(inode); 798 } 799 800 static const struct super_operations cifs_super_ops = { 801 .statfs = cifs_statfs, 802 .alloc_inode = cifs_alloc_inode, 803 .write_inode = cifs_write_inode, 804 .free_inode = cifs_free_inode, 805 .drop_inode = cifs_drop_inode, 806 .evict_inode = cifs_evict_inode, 807 /* .show_path = cifs_show_path, */ /* Would we ever need show path? 
 */
	.show_devname = cifs_show_devname,
/* .delete_inode = cifs_delete_inode, */ /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.freeze_fs = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};

/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with a prefix path, sb->s_root itself is already the mount root */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk down the path one component at a time */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}

/* sget() "set" callback: attach the prepared cifs_sb to the new superblock */
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}

struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	int rc;
	struct super_block *sb = NULL;
	struct cifs_sb_info *cifs_sb = NULL;
	struct cifs_mnt_data mnt_data;
	struct dentry *root;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	if (cifs_sb == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	/* keep a private copy of the fs context for this superblock */
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		root = ERR_CAST(sb);
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
		goto out;
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		/* reusing an existing sb: release the cifs_sb we just built */
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* cifs_sb is NULL here when an existing superblock was reused */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	if (cifs_sb) {
		if (!sb || IS_ERR(sb)) {	/* otherwise kill_sb will handle */
			kfree(cifs_sb->prepath);
			smb3_cleanup_fs_context(cifs_sb->ctx);
			kfree(cifs_sb);
		}
	}
	return root;
}


static ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_DIRECT)
		return cifs_user_readv(iocb, iter);

	/* make sure the page cache is current before a cached read */
	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return generic_file_read_iter(iocb, iter);
}

static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			/* direct write bypassed the cache; stop trusting it */
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	/* no write cache permission: push the data to the server now */
	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
1043 struct cifs_tcon *tcon; 1044 1045 /* 1046 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate 1047 * the cached file length 1048 */ 1049 if (whence != SEEK_SET && whence != SEEK_CUR) { 1050 int rc; 1051 struct inode *inode = file_inode(file); 1052 1053 /* 1054 * We need to be sure that all dirty pages are written and the 1055 * server has the newest file length. 1056 */ 1057 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping && 1058 inode->i_mapping->nrpages != 0) { 1059 rc = filemap_fdatawait(inode->i_mapping); 1060 if (rc) { 1061 mapping_set_error(inode->i_mapping, rc); 1062 return rc; 1063 } 1064 } 1065 /* 1066 * Some applications poll for the file length in this strange 1067 * way so we must seek to end on non-oplocked files by 1068 * setting the revalidate time to zero. 1069 */ 1070 CIFS_I(inode)->time = 0; 1071 1072 rc = cifs_revalidate_file_attr(file); 1073 if (rc < 0) 1074 return (loff_t)rc; 1075 } 1076 if (cfile && cfile->tlink) { 1077 tcon = tlink_tcon(cfile->tlink); 1078 if (tcon->ses->server->ops->llseek) 1079 return tcon->ses->server->ops->llseek(file, tcon, 1080 offset, whence); 1081 } 1082 return generic_file_llseek(file, offset, whence); 1083 } 1084 1085 static int 1086 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv) 1087 { 1088 /* 1089 * Note that this is called by vfs setlease with i_lock held to 1090 * protect *lease from going away. 
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* leases only make sense on regular files */
	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}

/* "cifs" filesystem type: classic mount syntax, same fs_context backend */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");

/* "smb3" alias type: identical behavior, SMB3-flavored mount name */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");

/* inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

/* inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

/*
 * ->get_link: return a copy of the cached symlink target.
 * The copy is taken under i_lock because symlink_target may be replaced
 * concurrently; the allocation is handed to the VFS via a delayed call so
 * it is freed (kfree_link) after the caller is done with it.
 * Returns ERR_PTR(-EOPNOTSUPP) when no target has been cached.
 */
const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
			  struct delayed_call *done)
{
	char *target_path;

	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!target_path)
		return ERR_PTR(-ENOMEM);

	spin_lock(&inode->i_lock);
	if (likely(CIFS_I(inode)->symlink_target)) {
		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
	} else {
		kfree(target_path);
		target_path = ERR_PTR(-EOPNOTSUPP);
	}
	spin_unlock(&inode->i_lock);

	if (!IS_ERR(target_path))
		set_delayed_call(done, kfree_link, target_path);

	return target_path;
}

/* inode operations for symlinks */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};

/*
 * ->remap_file_range (clone/dedup): server-side duplicate-extents if the
 * dialect supports it.  len == 0 means "to end of source file".
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *target_tcon;
	unsigned int xid;
	int rc;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	smb_file_target = dst_file->private_data;
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means clone from off to end of source file */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush first and last page first */
	truncate_inode_pages_range(&target_inode->i_data, destoff,
				   PAGE_ALIGN(destoff + len)-1);

	if (target_tcon->ses->server->ops->duplicate_extents)
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	else
		rc = -EOPNOTSUPP;

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}

/*
 * Server-side copy via SMB2+ COPYCHUNK.  Both endpoints must be open on the
 * same session (same server) and the dialect must provide copychunk_range.
 * Flushes the source range first so the server copies current data.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* cross-server copy cannot be done server-side */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	/* push dirty source pages so the server copies up-to-date data */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* should we flush first and last page first */
	truncate_inode_pages(&target_inode->i_data, 0);

	rc = file_modified(dst_file);
	if (!rc)
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);

	file_accessed(src_file);

	/* force revalidate of size and timestamps
of target file now 1319 * that target is updated on the server 1320 */ 1321 CIFS_I(target_inode)->time = 0; 1322 1323 unlock: 1324 /* although unlocking in the reverse order from locking is not 1325 * strictly necessary here it is a little cleaner to be consistent 1326 */ 1327 unlock_two_nondirectories(src_inode, target_inode); 1328 1329 out: 1330 return rc; 1331 } 1332 1333 /* 1334 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync() 1335 * is a dummy operation. 1336 */ 1337 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) 1338 { 1339 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n", 1340 file, datasync); 1341 1342 return 0; 1343 } 1344 1345 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, 1346 struct file *dst_file, loff_t destoff, 1347 size_t len, unsigned int flags) 1348 { 1349 unsigned int xid = get_xid(); 1350 ssize_t rc; 1351 struct cifsFileInfo *cfile = dst_file->private_data; 1352 1353 if (cfile->swapfile) { 1354 rc = -EOPNOTSUPP; 1355 free_xid(xid); 1356 return rc; 1357 } 1358 1359 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff, 1360 len, flags); 1361 free_xid(xid); 1362 1363 if (rc == -EOPNOTSUPP || rc == -EXDEV) 1364 rc = generic_copy_file_range(src_file, off, dst_file, 1365 destoff, len, flags); 1366 return rc; 1367 } 1368 1369 const struct file_operations cifs_file_ops = { 1370 .read_iter = cifs_loose_read_iter, 1371 .write_iter = cifs_file_write_iter, 1372 .open = cifs_open, 1373 .release = cifs_close, 1374 .lock = cifs_lock, 1375 .flock = cifs_flock, 1376 .fsync = cifs_fsync, 1377 .flush = cifs_flush, 1378 .mmap = cifs_file_mmap, 1379 .splice_read = cifs_splice_read, 1380 .splice_write = iter_file_splice_write, 1381 .llseek = cifs_llseek, 1382 .unlocked_ioctl = cifs_ioctl, 1383 .copy_file_range = cifs_copy_file_range, 1384 .remap_file_range = cifs_remap_file_range, 1385 .setlease = cifs_setlease, 1386 .fallocate = cifs_fallocate, 1387 }; 

/* file operations for strict-cache mounts (cache only under oplock) */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = cifs_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* file operations for uncached (forcedirectio) mounts */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = direct_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* as cifs_file_ops but without byte-range lock support (nobrl mount) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = cifs_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* strict-cache variant without byte-range locks */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = cifs_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* direct-I/O variant without byte-range locks */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = direct_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* file operations for directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};

/*
 * Slab constructor for cifs inodes: runs once per slab object, so only
 * once-per-lifetime initialization (embedded inode, lock_sem) belongs here.
 */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}

/* create the cifs inode slab cache; returns 0 or -ENOMEM */
static int __init
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static
void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}

/*
 * Create the large and small SMB request buffer caches and their mempools.
 * Clamps the module parameters (CIFSMaxBufSize, cifs_min_rcv,
 * cifs_min_small) into their valid ranges first.  On any failure, every
 * cache/pool created so far is torn down before returning -ENOMEM.
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	/* usercopy whitelist covers the whole buffer (request payload) */
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests). A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

/* tear down request buffer pools/caches; pools before their backing caches */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

/* create the mid (multiplex id / in-flight request) cache and mempool */
static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}

/*
 * Module init: set up global state, workqueues, caches, upcalls and finally
 * register the "cifs" and "smb3" filesystem types.  Error unwinding runs
 * the goto chain below in strict reverse order of setup; the labels are
 * interleaved with #ifdefs so each optional feature unwinds only when built.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	/* clamp module parameter into its valid range */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_deferredclose_wq;

	rc = init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		/* keep both fs types registered or neither */
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

/* module exit: mirror image of init_cifs(), tearing down in reverse */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_dfs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}

MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)