// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/xattr.h>
#include <uapi/linux/magic.h>
#include <net/ipv6.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#include <linux/key-type.h>
#include "cifs_spnego.h"
#include "fscache.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif
#ifdef CONFIG_CIFS_SWN_UPCALL
#include "netlink.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"

/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;		/* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid;	/* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 * Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif /* STATS2 */
struct list_head cifs_tcp_ses_list;
spinlock_t cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
			       "1 to 64");
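/*
 * The 0444 module parameters above and below are read-only at runtime: they
 * can only be set at module load time (e.g. "modprobe cifs cifs_min_rcv=8")
 * and are then visible under /sys/module/cifs/parameters/.
 */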
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				     "before logging that a response is delayed. "
				     "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
					  "helpful to restrict the ability to "
					  "override the default dialects (SMB2.1, "
					  "SMB3 and SMB3.02) on mount with old "
					  "dialects (CIFS/SMB1 and SMB2) since "
					  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
					  " and less secure. Default: n/N/0");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
__u32 cifs_lock_secret;
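/*
 * The workqueues above are allocated in init_cifs() and destroyed in
 * exit_cifs() at the bottom of this file.
 */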
/*
 * Bumps refcount for cifs super block.
 * Note that it should only be called if a reference to the VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}

void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}

static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}

static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}

static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* we are using part of the create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}

static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);

	return -EOPNOTSUPP;
}
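/*
 * With the "noperm" mount option (CIFS_MOUNT_NO_PERM) the client skips the
 * usual VFS permission check, except for execute, and defers access control
 * to the server.
 */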
static int cifs_permission(struct mnt_idmap *idmap,
			   struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(&nop_mnt_idmap, inode, mask);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;

	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}

static void
cifs_free_inode(struct inode *inode)
{
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(cinode->symlink_target);
	kmem_cache_free(cifs_inode_cachep, cinode);
}

static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_FSCACHE_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}

static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}
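/*
 * The remaining show_* helpers below, like cifs_show_address() above, each
 * emit one piece of the option string assembled by cifs_show_options().
 */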
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}

static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
		seq_puts(s, "singleclient"); /* assume only one client access */
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
		seq_puts(s, "ro"); /* read only caching assumed */
	else
		seq_puts(s, "loose");
}

/*
 * cifs_show_devname() is used so we show the mount device name with correct
 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
 */
static int cifs_show_devname(struct seq_file *m, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);

	if (devname == NULL)
		seq_puts(m, "none");
	else {
		convert_delimiter(devname, '/');
		/* escape all spaces in share names */
		seq_escape(m, devname, " \t");
		kfree(devname);
	}
	return 0;
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
			   cifs_sb->ctx->file_mode,
			   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If the file and directory attribute timeouts are the same then
	 * actimeo was likely specified on mount.
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
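/*
 * Illustrative only - the exact set of options shown for a given mount in
 * /proc/mounts varies, but a typical line resembles:
 * //server/share /mnt cifs rw,vers=3.1.1,sec=ntlmssp,cache=strict,username=user,uid=0,...
 */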
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}

static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return 0;

	tcon = cifs_sb_master_tcon(cifs_sb);

	cifs_close_all_deferred_files(tcon);
	return 0;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
	return 0;
}

static int cifs_drop_inode(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);
}

static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode = cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode = cifs_drop_inode,
	.evict_inode = cifs_evict_inode,
/*	.show_path = cifs_show_path, */ /* Would we ever need show path? */
	.show_devname = cifs_show_devname,
/*	.delete_inode = cifs_delete_inode, */ /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.freeze_fs = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
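/*
 * Superblock teardown is handled by cifs_kill_sb() above via the .kill_sb
 * hook in cifs_fs_type/smb3_fs_type below, so no .put_super method is needed
 * here.
 */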
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
					    cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}

static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}

struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
		   int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}

static ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_DIRECT)
		return cifs_user_readv(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return generic_file_read_iter(iocb, iter);
}

static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
static int
cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}

struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");

struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");

const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
			  struct delayed_call *done)
{
	char *target_path;

	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!target_path)
		return ERR_PTR(-ENOMEM);

	spin_lock(&inode->i_lock);
	if (likely(CIFS_I(inode)->symlink_target)) {
		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
	} else {
		kfree(target_path);
		target_path = ERR_PTR(-EOPNOTSUPP);
	}
	spin_unlock(&inode->i_lock);

	if (!IS_ERR(target_path))
		set_delayed_call(done, kfree_link, target_path);

	return target_path;
}

const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
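/*
 * cifs_remap_file_range() below backs clone-style ioctls such as
 * FICLONE/FICLONERANGE; on SMB3 shares that support it this maps to the
 * server-side duplicate-extents operation (see ->duplicate_extents in the
 * server ops).
 */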
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *target_tcon;
	unsigned int xid;
	int rc;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	smb_file_target = dst_file->private_data;
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush first and last page first */
	truncate_inode_pages_range(&target_inode->i_data, destoff,
				   PAGE_ALIGN(destoff + len) - 1);

	if (target_tcon->ses->server->ops->duplicate_extents)
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	else
		rc = -EOPNOTSUPP;

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
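/*
 * cifs_file_copychunk_range() implements copy_file_range() using the SMB2/3
 * server-side copy (copychunk) when both files are on the same session;
 * callers such as cifs_copy_file_range() below fall back to a generic copy
 * when that is not possible.
 */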
ssize_t cifs_file_copychunk_range(unsigned int xid,
				  struct file *src_file, loff_t off,
				  struct file *dst_file, loff_t destoff,
				  size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* should we flush first and last page first */
	truncate_inode_pages(&target_inode->i_data, 0);

	rc = file_modified(dst_file);
	if (!rc)
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}

static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
				    struct file *dst_file, loff_t destoff,
				    size_t len, unsigned int flags)
{
	unsigned int xid = get_xid();
	ssize_t rc;
	struct cifsFileInfo *cfile = dst_file->private_data;

	if (cfile->swapfile) {
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
				       len, flags);
	free_xid(xid);

	if (rc == -EOPNOTSUPP || rc == -EXDEV)
		rc = generic_copy_file_range(src_file, off, dst_file,
					     destoff, len, flags);
	return rc;
}

const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
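/*
 * The remaining *_nobrl tables, like cifs_file_nobrl_ops above, omit the
 * .lock and .flock methods; they are used when POSIX byte-range locking is
 * disabled (e.g. the "nobrl" mount option).
 */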
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};

static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}

static int __init
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
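/*
 * cifs_req_cachep below backs full-size request buffers (CIFSMaxBufSize plus
 * SMB2 header) and cifs_sm_req_cachep backs MAX_CIFS_SMALL_BUFFER_SIZE
 * buffers; the mempools built on top of them guarantee a minimum number of
 * buffers even under memory pressure.
 */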
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
		/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
		   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}

static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
	/*
	 * Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
			 "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
					  WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_deferredclose_wq;

	rc = init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_dfs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}

MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)