// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2011
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 */
#include <linux/fs.h>
#include <linux/net.h>
#include <linux/string.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/pagevec.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/inet.h>
#include <linux/module.h>
#include <keys/user-type.h>
#include <net/ipv6.h>
#include <linux/parser.h>
#include <linux/bvec.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
#include "fscache.h"
#include "smb2proto.h"
#include "smbdirect.h"
#include "dns_resolve.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs.h"
#include "dfs_cache.h"
#endif
#include "fs_context.h"
#include "cifs_swn.h"

extern mempool_t *cifs_req_poolp;
extern bool disable_legacy_dialects;

/* FIXME: should these be tunable? */
#define TLINK_ERROR_EXPIRE	(1 * HZ)
#define TLINK_IDLE_EXPIRE	(600 * HZ)

/* Drop the connection to not overload the server */
#define MAX_STATUS_IO_TIMEOUT	5

static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);

/*
 * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
 * get their ip addresses changed at some point.
 *
 * This should be called with server->srv_mutex held.
 */
static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
{
	int rc;
	int len;
	char *unc;
	struct sockaddr_storage ss;

	if (!server->hostname)
		return -EINVAL;

	/* if server hostname isn't populated, there's nothing to do here */
	if (server->hostname[0] == '\0')
		return 0;

	len = strlen(server->hostname) + 3;

	unc = kmalloc(len, GFP_KERNEL);
	if (!unc) {
		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
		return -ENOMEM;
	}
	scnprintf(unc, len, "\\\\%s", server->hostname);

	spin_lock(&server->srv_lock);
	ss = server->dstaddr;
	spin_unlock(&server->srv_lock);

	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
	kfree(unc);

	if (rc < 0) {
		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
			 __func__, server->hostname, rc);
	} else {
		spin_lock(&server->srv_lock);
		memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr));
		spin_unlock(&server->srv_lock);
		rc = 0;
	}

	return rc;
}

static void smb2_query_server_interfaces(struct work_struct *work)
{
	int rc;
	struct cifs_tcon *tcon = container_of(work,
					      struct cifs_tcon,
					      query_interfaces.work);

	/*
	 * query server network interfaces, in case they change
	 */
	rc = SMB3_request_interfaces(0, tcon, false);
	if (rc) {
		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
			 __func__, rc);
	}

	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
}

/*
 * Update the tcpStatus for the server.
 * This is used to signal the cifsd thread to call cifs_reconnect.
 * ONLY the cifsd thread should call cifs_reconnect; any other
 * thread must use this function instead.
 *
 * @server: the tcp ses for which reconnect is needed
 * @all_channels: if this needs to be done for all channels
 */
void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
				bool all_channels)
{
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	int i;

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	/* if we need to signal just this channel */
	if (!all_channels) {
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsExiting)
			server->tcpStatus = CifsNeedReconnect;
		spin_unlock(&server->srv_lock);
		return;
	}

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		spin_lock(&ses->chan_lock);
		for (i = 0; i < ses->chan_count; i++) {
			spin_lock(&ses->chans[i].server->srv_lock);
			ses->chans[i].server->tcpStatus = CifsNeedReconnect;
			spin_unlock(&ses->chans[i].server->srv_lock);
		}
		spin_unlock(&ses->chan_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
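
/*
 * Illustrative usage (a sketch, not upstream text): code running outside the
 * demultiplex (cifsd) thread that detects a broken connection is expected to
 * use the helper above rather than calling cifs_reconnect() directly, e.g.
 *
 *	cifs_signal_cifsd_for_reconnect(server, false);
 *
 * and then let cifsd notice CifsNeedReconnect and perform the reconnect.
 */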
/*
 * Mark all sessions and tcons for reconnect.
 * IMPORTANT: make sure that this gets called only from
 * the cifsd thread. Any other thread must use
 * cifs_signal_cifsd_for_reconnect.
 *
 * @server: the tcp ses for which reconnect is needed
 * @server needs to be previously set to CifsNeedReconnect.
 * @mark_smb_session: whether even sessions need to be marked
 */
void
cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
				      bool mark_smb_session)
{
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses, *nses;
	struct cifs_tcon *tcon;

	/*
	 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
	 * are not used until reconnected.
	 */
	cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
		/* check if iface is still active */
		spin_lock(&ses->chan_lock);
		if (!cifs_chan_is_iface_active(ses, server)) {
			spin_unlock(&ses->chan_lock);
			cifs_chan_update_iface(ses, server);
			spin_lock(&ses->chan_lock);
		}

		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
			spin_unlock(&ses->chan_lock);
			continue;
		}

		if (mark_smb_session)
			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
		else
			cifs_chan_set_need_reconnect(ses, server);

		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
			 __func__, ses->chans_need_reconnect);

		/* If all channels need reconnect, then tcon needs reconnect */
		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
			spin_unlock(&ses->chan_lock);
			continue;
		}
		spin_unlock(&ses->chan_lock);

		spin_lock(&ses->ses_lock);
		ses->ses_status = SES_NEED_RECON;
		spin_unlock(&ses->ses_lock);

		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			tcon->need_reconnect = true;
			spin_lock(&tcon->tc_lock);
			tcon->status = TID_NEED_RECON;
			spin_unlock(&tcon->tc_lock);
		}
		if (ses->tcon_ipc) {
			ses->tcon_ipc->need_reconnect = true;
			spin_lock(&ses->tcon_ipc->tc_lock);
			ses->tcon_ipc->status = TID_NEED_RECON;
			spin_unlock(&ses->tcon_ipc->tc_lock);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}

static void
cifs_abort_connection(struct TCP_Server_Info *server)
{
	struct mid_q_entry *mid, *nmid;
	struct list_head retry_list;

	server->maxBuf = 0;
	server->max_read = 0;

	/* do not want to be sending data on a socket we are freeing */
	cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
	cifs_server_lock(server);
	if (server->ssocket) {
		cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		kernel_sock_shutdown(server->ssocket, SHUT_WR);
		cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}
	server->sequence_number = 0;
	server->session_estab = false;
	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	server->lstrp = jiffies;

	/* mark submitted MIDs for retry and issue callback */
	INIT_LIST_HEAD(&retry_list);
	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
	spin_lock(&server->mid_lock);
	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
		kref_get(&mid->refcount);
		if (mid->mid_state == MID_REQUEST_SUBMITTED)
			mid->mid_state = MID_RETRY_NEEDED;
		list_move(&mid->qhead, &retry_list);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&server->mid_lock);
	cifs_server_unlock(server);

	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
	list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
		list_del_init(&mid->qhead);
		mid->callback(mid);
		release_mid(mid);
	}

	if (cifs_rdma_enabled(server)) {
		cifs_server_lock(server);
		smbd_destroy(server);
		cifs_server_unlock(server);
	}
}

static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
{
	spin_lock(&server->srv_lock);
	server->nr_targets = num_targets;
	if (server->tcpStatus == CifsExiting) {
		/* the demux thread will exit normally next time through the loop */
		spin_unlock(&server->srv_lock);
		wake_up(&server->response_q);
		return false;
	}

	cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
	trace_smb3_reconnect(server->CurrentMid, server->conn_id,
			     server->hostname);
	server->tcpStatus = CifsNeedReconnect;

	spin_unlock(&server->srv_lock);
	return true;
}

/*
 * cifs tcp session reconnection
 *
 * mark tcp session as reconnecting so it is temporarily locked
 * mark all smb sessions as reconnecting for tcp session
 * reconnect tcp session
 * wake up waiters on reconnection? - (not needed currently)
 *
 * if mark_smb_session is passed as true, unconditionally mark
 * the smb session (and tcon) for reconnect as well. This value
 * doesn't really matter for the non-multichannel scenario.
 *
 */
static int __cifs_reconnect(struct TCP_Server_Info *server,
			    bool mark_smb_session)
{
	int rc = 0;

	if (!cifs_tcp_ses_needs_reconnect(server, 1))
		return 0;

	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);

	cifs_abort_connection(server);

	do {
		try_to_freeze();
		cifs_server_lock(server);

		if (!cifs_swn_set_server_dstaddr(server)) {
			/* resolve the hostname again to make sure that IP address is up-to-date */
			rc = reconn_set_ipaddr_from_hostname(server);
			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
		}

		if (cifs_rdma_enabled(server))
			rc = smbd_reconnect(server);
		else
			rc = generic_ip_connect(server);
		if (rc) {
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			msleep(3000);
		} else {
			atomic_inc(&tcpSesReconnectCount);
			set_credits(server, 1);
			spin_lock(&server->srv_lock);
			if (server->tcpStatus != CifsExiting)
				server->tcpStatus = CifsNeedNegotiate;
			spin_unlock(&server->srv_lock);
			cifs_swn_reset_server_dstaddr(server);
			cifs_server_unlock(server);
			mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
		}
	} while (server->tcpStatus == CifsNeedReconnect);

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}

#ifdef CONFIG_CIFS_DFS_UPCALL
static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
{
	int rc;
	char *hostname;

	if (!cifs_swn_set_server_dstaddr(server)) {
		if (server->hostname != target) {
			hostname = extract_hostname(target);
			if (!IS_ERR(hostname)) {
				spin_lock(&server->srv_lock);
				kfree(server->hostname);
				server->hostname = hostname;
				spin_unlock(&server->srv_lock);
			} else {
				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
					 __func__, PTR_ERR(hostname));
				cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
					 server->hostname);
			}
		}
		/* resolve the hostname again to make sure that IP address is up-to-date. */
		rc = reconn_set_ipaddr_from_hostname(server);
		cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
	}
	/* Reconnect the socket */
	if (cifs_rdma_enabled(server))
		rc = smbd_reconnect(server);
	else
		rc = generic_ip_connect(server);

	return rc;
}

static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
				     struct dfs_cache_tgt_iterator **target_hint)
{
	int rc;
	struct dfs_cache_tgt_iterator *tit;

	*target_hint = NULL;

	/* If dfs target list is empty, then reconnect to last server */
	tit = dfs_cache_get_tgt_iterator(tl);
	if (!tit)
		return __reconnect_target_unlocked(server, server->hostname);

	/* Otherwise, try every dfs target in @tl */
	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
		rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
		if (!rc) {
			*target_hint = tit;
			break;
		}
	}
	return rc;
}

static int reconnect_dfs_server(struct TCP_Server_Info *server)
{
	struct dfs_cache_tgt_iterator *target_hint = NULL;
	DFS_CACHE_TGT_LIST(tl);
	int num_targets = 0;
	int rc = 0;

	/*
	 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
	 *
	 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
	 * targets (server->nr_targets). It's also possible that the cached referral was cleared
	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
	 * refreshing the referral, so, in this case, default it to 1.
	 */
	mutex_lock(&server->refpath_lock);
	if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
		num_targets = dfs_cache_get_nr_tgts(&tl);
	mutex_unlock(&server->refpath_lock);
	if (!num_targets)
		num_targets = 1;

	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
		return 0;

	/*
	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
	 * different server or share during failover. It could be improved by adding some logic to
	 * only do that in case it connects to a different server or share, though.
	 */
	cifs_mark_tcp_ses_conns_for_reconnect(server, true);

	cifs_abort_connection(server);

	do {
		try_to_freeze();
		cifs_server_lock(server);

		rc = reconnect_target_unlocked(server, &tl, &target_hint);
		if (rc) {
			/* Failed to reconnect socket */
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			msleep(3000);
			continue;
		}
		/*
		 * Socket was created.  Update tcp session status to CifsNeedNegotiate so that a
		 * process waiting for reconnect will know it needs to re-establish session and tcon
		 * through the reconnected target server.
		 */
		atomic_inc(&tcpSesReconnectCount);
		set_credits(server, 1);
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsExiting)
			server->tcpStatus = CifsNeedNegotiate;
		spin_unlock(&server->srv_lock);
		cifs_swn_reset_server_dstaddr(server);
		cifs_server_unlock(server);
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
	} while (server->tcpStatus == CifsNeedReconnect);

	mutex_lock(&server->refpath_lock);
	dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
	mutex_unlock(&server->refpath_lock);
	dfs_cache_free_tgts(&tl);

	/* Need to set up echo worker again once connection has been established */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}

int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
	mutex_lock(&server->refpath_lock);
	if (!server->leaf_fullpath) {
		mutex_unlock(&server->refpath_lock);
		return __cifs_reconnect(server, mark_smb_session);
	}
	mutex_unlock(&server->refpath_lock);

	return reconnect_dfs_server(server);
}
#else
int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
	return __cifs_reconnect(server, mark_smb_session);
}
#endif
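
/*
 * Illustrative note (a sketch, not upstream text): the echo machinery below
 * keeps the connection health-checked. cifs_echo_request() re-queues itself
 * every server->echo_interval jiffies, and server_unresponsive() later
 * declares the server dead once nothing has been received for
 * 3 * server->echo_interval, at which point it calls cifs_reconnect().
 */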
static void
cifs_echo_request(struct work_struct *work)
{
	int rc;
	struct TCP_Server_Info *server = container_of(work,
					struct TCP_Server_Info, echo.work);

	/*
	 * We cannot send an echo if it is disabled.
	 * Also, no need to ping if we got a response recently.
	 */

	if (server->tcpStatus == CifsNeedReconnect ||
	    server->tcpStatus == CifsExiting ||
	    server->tcpStatus == CifsNew ||
	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
	    time_before(jiffies, server->lstrp + server->echo_interval - HZ))
		goto requeue_echo;

	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
	cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);

	/* Check witness registrations */
	cifs_swn_check();

requeue_echo:
	queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
}

static bool
allocate_buffers(struct TCP_Server_Info *server)
{
	if (!server->bigbuf) {
		server->bigbuf = (char *)cifs_buf_get();
		if (!server->bigbuf) {
			cifs_server_dbg(VFS, "No memory for large SMB response\n");
			msleep(3000);
			/* retry will check if exiting */
			return false;
		}
	} else if (server->large_buf) {
		/* we are reusing a dirty large buf, clear its start */
		memset(server->bigbuf, 0, HEADER_SIZE(server));
	}

	if (!server->smallbuf) {
		server->smallbuf = (char *)cifs_small_buf_get();
		if (!server->smallbuf) {
			cifs_server_dbg(VFS, "No memory for SMB response\n");
			msleep(1000);
			/* retry will check if exiting */
			return false;
		}
		/* beginning of smb buffer is cleared in our buf_get */
	} else {
		/* if existing small buf clear beginning */
		memset(server->smallbuf, 0, HEADER_SIZE(server));
	}

	return true;
}

static bool
server_unresponsive(struct TCP_Server_Info *server)
{
	/*
	 * We need to wait 3 echo intervals to make sure we handle such
	 * situations right:
	 * 1s  client sends a normal SMB request
	 * 2s  client gets a response
	 * 30s echo workqueue job pops, and decides we got a response recently
	 *     and don't need to send another
	 * ...
	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
	 *     a response in >60s.
	 */
	spin_lock(&server->srv_lock);
	if ((server->tcpStatus == CifsGood ||
	     server->tcpStatus == CifsNeedNegotiate) &&
	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
		spin_unlock(&server->srv_lock);
		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
				(3 * server->echo_interval) / HZ);
		cifs_reconnect(server, false);
		return true;
	}
	spin_unlock(&server->srv_lock);

	return false;
}

static inline bool
zero_credits(struct TCP_Server_Info *server)
{
	int val;

	spin_lock(&server->req_lock);
	val = server->credits + server->echo_credits + server->oplock_credits;
	if (server->in_flight == 0 && val == 0) {
		spin_unlock(&server->req_lock);
		return true;
	}
	spin_unlock(&server->req_lock);
	return false;
}

static int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
{
	int length = 0;
	int total_read;

	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
		try_to_freeze();

		/* reconnect if no credits and no requests in flight */
		if (zero_credits(server)) {
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}

		if (server_unresponsive(server))
			return -ECONNABORTED;
		if (cifs_rdma_enabled(server) && server->smbd_conn)
			length = smbd_recv(server->smbd_conn, smb_msg);
		else
			length = sock_recvmsg(server->ssocket, smb_msg, 0);

		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ESHUTDOWN;
		}

		if (server->tcpStatus == CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
		spin_unlock(&server->srv_lock);

		if (length == -ERESTARTSYS ||
		    length == -EAGAIN ||
		    length == -EINTR) {
			/*
			 * Minimum sleep to prevent looping, allowing socket
			 * to clear and app threads to set tcpStatus
			 * CifsNeedReconnect if server hung.
			 */
			usleep_range(1000, 2000);
			length = 0;
			continue;
		}

		if (length <= 0) {
			cifs_dbg(FYI, "Received no data or error: %d\n", length);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
	}
	return total_read;
}

int
cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
		      unsigned int to_read)
{
	struct msghdr smb_msg = {};
	struct kvec iov = {.iov_base = buf, .iov_len = to_read};

	iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);

	return cifs_readv_from_socket(server, &smb_msg);
}

ssize_t
cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
{
	struct msghdr smb_msg = {};

	/*
	 * iov_iter_discard already sets smb_msg.type and count and iov_offset
	 * and cifs_readv_from_socket sets msg_control and msg_controllen
	 * so little to initialize in struct msghdr
	 */
	iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);

	return cifs_readv_from_socket(server, &smb_msg);
}

int
cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
			   unsigned int page_offset, unsigned int to_read)
{
	struct msghdr smb_msg = {};
	struct bio_vec bv;

	bvec_set_page(&bv, page, to_read, page_offset);
	iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
	return cifs_readv_from_socket(server, &smb_msg);
}

int
cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter,
			   unsigned int to_read)
{
	struct msghdr smb_msg = { .msg_iter = *iter };
	int ret;

	iov_iter_truncate(&smb_msg.msg_iter, to_read);
	ret = cifs_readv_from_socket(server, &smb_msg);
	if (ret > 0)
		iov_iter_advance(iter, ret);
	return ret;
}

static bool
is_smb_response(struct TCP_Server_Info *server, unsigned char type)
{
	/*
	 * The first byte of the (big-endian) length field is actually not
	 * part of the length but the type, with the most common value,
	 * zero, meaning regular data.
	 */
	switch (type) {
	case RFC1002_SESSION_MESSAGE:
		/* Regular SMB response */
		return true;
	case RFC1002_SESSION_KEEP_ALIVE:
		cifs_dbg(FYI, "RFC 1002 session keep alive\n");
		break;
	case RFC1002_POSITIVE_SESSION_RESPONSE:
		cifs_dbg(FYI, "RFC 1002 positive session response\n");
		break;
	case RFC1002_NEGATIVE_SESSION_RESPONSE:
		/*
		 * We get this from Windows 98 instead of an error on
		 * SMB negprot response.
		 */
		cifs_dbg(FYI, "RFC 1002 negative session response\n");
		/* give server a second to clean up */
		msleep(1000);
		/*
		 * Always try 445 first on reconnect since we get NACK
		 * on some if we ever connected to port 139 (the NACK
		 * is since we do not begin with RFC1001 session
		 * initialize frame).
		 */
		cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
		cifs_reconnect(server, true);
		break;
	default:
		cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
		cifs_reconnect(server, true);
	}

	return false;
}

void
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
	mid->when_received = jiffies;
#endif
	spin_lock(&mid->server->mid_lock);
	if (!malformed)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		mid->mid_state = MID_RESPONSE_MALFORMED;
	/*
	 * Trying to handle/dequeue a mid after the send_recv()
	 * function has finished processing it is a bug.
	 */
	if (mid->mid_flags & MID_DELETED) {
		spin_unlock(&mid->server->mid_lock);
		pr_warn_once("trying to dequeue a deleted mid\n");
	} else {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
		spin_unlock(&mid->server->mid_lock);
	}
}

static unsigned int
smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
{
	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;

	/*
	 * SMB1 does not use credits.
	 */
	if (is_smb1(server))
		return 0;

	return le16_to_cpu(shdr->CreditRequest);
}

static void
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
	   char *buf, int malformed)
{
	if (server->ops->check_trans2 &&
	    server->ops->check_trans2(mid, server, buf, malformed))
		return;
	mid->credits_received = smb2_get_credits_from_hdr(buf, server);
	mid->resp_buf = buf;
	mid->large_buf = server->large_buf;
	/* Was previous buf put in mpx struct for multi-rsp? */
	if (!mid->multiRsp) {
		/* smb buffer will be freed by user thread */
		if (server->large_buf)
			server->bigbuf = NULL;
		else
			server->smallbuf = NULL;
	}
	dequeue_mid(mid, malformed);
}

int
cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
{
	bool srv_sign_required = server->sec_mode & server->vals->signing_required;
	bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
	bool mnt_sign_enabled;

	/*
	 * Is signing required by mnt options? If not then check
	 * global_secflags to see if it is there.
	 */
	if (!mnt_sign_required)
		mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
						CIFSSEC_MUST_SIGN);

	/*
	 * If signing is required then it's automatically enabled too,
	 * otherwise, check to see if the secflags allow it.
	 */
	mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
				(global_secflags & CIFSSEC_MAY_SIGN);

	/* If server requires signing, does client allow it? */
	if (srv_sign_required) {
		if (!mnt_sign_enabled) {
			cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
			return -EOPNOTSUPP;
		}
		server->sign = true;
	}

	/* If client requires signing, does server allow it? */
	if (mnt_sign_required) {
		if (!srv_sign_enabled) {
			cifs_dbg(VFS, "Server does not support signing!\n");
			return -EOPNOTSUPP;
		}
		server->sign = true;
	}

	if (cifs_rdma_enabled(server) && server->sign)
		cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");

	return 0;
}

static noinline_for_stack void
clean_demultiplex_info(struct TCP_Server_Info *server)
{
	int length;

	/* take it off the list, if it's not already */
	spin_lock(&server->srv_lock);
	list_del_init(&server->tcp_ses_list);
	spin_unlock(&server->srv_lock);

	cancel_delayed_work_sync(&server->echo);

	spin_lock(&server->srv_lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&server->srv_lock);
	wake_up_all(&server->response_q);

	/* check if we have blocked requests that need to free */
	spin_lock(&server->req_lock);
	if (server->credits <= 0)
		server->credits = 1;
	spin_unlock(&server->req_lock);
	/*
	 * Although there should not be any requests blocked on this queue, it
	 * cannot hurt to be paranoid and try to wake up requests that may
	 * have been blocked when more than 50 at a time were on the wire to
	 * the same server - they now will see the session is in exit state
	 * and get out of SendReceive.
	 */
	wake_up_all(&server->request_q);
	/* give those requests time to exit */
	msleep(125);
	if (cifs_rdma_enabled(server))
		smbd_destroy(server);
	if (server->ssocket) {
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}

	if (!list_empty(&server->pending_mid_q)) {
		struct list_head dispose_list;
		struct mid_q_entry *mid_entry;
		struct list_head *tmp, *tmp2;

		INIT_LIST_HEAD(&dispose_list);
		spin_lock(&server->mid_lock);
		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
			kref_get(&mid_entry->refcount);
			mid_entry->mid_state = MID_SHUTDOWN;
			list_move(&mid_entry->qhead, &dispose_list);
			mid_entry->mid_flags |= MID_DELETED;
		}
		spin_unlock(&server->mid_lock);

		/* now walk dispose list and issue callbacks */
		list_for_each_safe(tmp, tmp2, &dispose_list) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
			list_del_init(&mid_entry->qhead);
			mid_entry->callback(mid_entry);
			release_mid(mid_entry);
		}
		/* 1/8th of sec is more than enough time for them to exit */
		msleep(125);
	}

	if (!list_empty(&server->pending_mid_q)) {
		/*
		 * mpx threads have not exited yet; give them at least the smb
		 * send timeout time for long ops.
		 *
		 * Due to delays on oplock break requests, we need to wait at
		 * least 45 seconds before giving up on a request getting a
		 * response and going ahead and killing cifsd.
		 */
		cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
		msleep(46000);
		/*
		 * If threads still have not exited they are probably never
		 * coming home; not much else we can do but free the memory.
		 */
	}

	kfree(server->leaf_fullpath);
	kfree(server);

	length = atomic_dec_return(&tcpSesAllocCount);
	if (length > 0)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
}

static int
standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length;
	char *buf = server->smallbuf;
	unsigned int pdu_length = server->pdu_size;

	/* make sure this will fit in a large buffer */
	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
	    HEADER_PREAMBLE_SIZE(server)) {
		cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
		cifs_reconnect(server, true);
		return -ECONNABORTED;
	}

	/* switch to large buffer if too big for a small one */
	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
		server->large_buf = true;
		memcpy(server->bigbuf, buf, server->total_read);
		buf = server->bigbuf;
	}

	/* now read the rest */
	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
				       pdu_length - MID_HEADER_SIZE(server));

	if (length < 0)
		return length;
	server->total_read += length;

	dump_smb(buf, server->total_read);

	return cifs_handle_standard(server, mid);
}

int
cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
	int rc;

	/*
	 * We know that we received enough to get to the MID as we
	 * checked the pdu_length earlier. Now check to see
	 * if the rest of the header is OK.
	 *
	 * 48 bytes is enough to display the header and a little bit
	 * into the payload for debugging purposes.
	 */
	rc = server->ops->check_message(buf, server->total_read, server);
	if (rc)
		cifs_dump_mem("Bad SMB: ", buf,
			      min_t(unsigned int, server->total_read, 48));

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server))
		return -1;

	if (!mid)
		return rc;

	handle_mid(mid, server, buf, rc);
	return 0;
}
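
/*
 * Illustrative note (a sketch, not upstream text): SMB2/SMB3 flow control is
 * credit based. Each response header carries a CreditRequest field; the
 * helper below adds that value to server->credits and wakes waiters on
 * server->request_q, so e.g. a response granting 10 credits lets roughly ten
 * more requests be issued. SMB1 traffic is skipped entirely (is_smb1()).
 */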
static void
smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
{
	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
	int scredits, in_flight;

	/*
	 * SMB1 does not use credits.
	 */
	if (is_smb1(server))
		return;

	if (shdr->CreditRequest) {
		spin_lock(&server->req_lock);
		server->credits += le16_to_cpu(shdr->CreditRequest);
		scredits = server->credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);

		trace_smb3_hdr_credits(server->CurrentMid,
				       server->conn_id, server->hostname, scredits,
				       le16_to_cpu(shdr->CreditRequest), in_flight);
		cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
				__func__, le16_to_cpu(shdr->CreditRequest),
				scredits);
	}
}

static int
cifs_demultiplex_thread(void *p)
{
	int i, num_mids, length;
	struct TCP_Server_Info *server = p;
	unsigned int pdu_length;
	unsigned int next_offset;
	char *buf = NULL;
	struct task_struct *task_to_wake = NULL;
	struct mid_q_entry *mids[MAX_COMPOUND];
	char *bufs[MAX_COMPOUND];
	unsigned int noreclaim_flag, num_io_timeout = 0;
	bool pending_reconnect = false;

	noreclaim_flag = memalloc_noreclaim_save();
	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));

	length = atomic_inc_return(&tcpSesAllocCount);
	if (length > 1)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);

	set_freezable();
	allow_kernel_signal(SIGKILL);
	while (server->tcpStatus != CifsExiting) {
		if (try_to_freeze())
			continue;

		if (!allocate_buffers(server))
			continue;

		server->large_buf = false;
		buf = server->smallbuf;
		pdu_length = 4; /* enough to get RFC1001 header */

		length = cifs_read_from_socket(server, buf, pdu_length);
		if (length < 0)
			continue;

		if (is_smb1(server))
			server->total_read = length;
		else
			server->total_read = 0;

		/*
		 * The right amount was read from socket - 4 bytes,
		 * so we can now interpret the length field.
		 */
		pdu_length = get_rfc1002_length(buf);

		cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
		if (!is_smb_response(server, buf[0]))
			continue;

		pending_reconnect = false;
next_pdu:
		server->pdu_size = pdu_length;

		/* make sure we have enough to get to the MID */
		if (server->pdu_size < MID_HEADER_SIZE(server)) {
			cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
					server->pdu_size);
			cifs_reconnect(server, true);
			continue;
		}

		/* read down to the MID */
		length = cifs_read_from_socket(server,
					       buf + HEADER_PREAMBLE_SIZE(server),
					       MID_HEADER_SIZE(server));
		if (length < 0)
			continue;
		server->total_read += length;

		if (server->ops->next_header) {
			if (server->ops->next_header(server, buf, &next_offset)) {
				cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
					 __func__, next_offset);
				cifs_reconnect(server, true);
				continue;
			}
			if (next_offset)
				server->pdu_size = next_offset;
		}

		memset(mids, 0, sizeof(mids));
		memset(bufs, 0, sizeof(bufs));
		num_mids = 0;

		if (server->ops->is_transform_hdr &&
		    server->ops->receive_transform &&
		    server->ops->is_transform_hdr(buf)) {
			length = server->ops->receive_transform(server,
								mids,
								bufs,
								&num_mids);
		} else {
			mids[0] = server->ops->find_mid(server, buf);
			bufs[0] = buf;
			num_mids = 1;

			if (!mids[0] || !mids[0]->receive)
				length = standard_receive3(server, mids[0]);
			else
				length = mids[0]->receive(server, mids[0]);
		}

		if (length < 0) {
			for (i = 0; i < num_mids; i++)
				if (mids[i])
					release_mid(mids[i]);
			continue;
		}

		if (server->ops->is_status_io_timeout &&
		    server->ops->is_status_io_timeout(buf)) {
			num_io_timeout++;
			if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
				cifs_server_dbg(VFS,
						"Number of request timeouts exceeded %d. Reconnecting",
						MAX_STATUS_IO_TIMEOUT);

				pending_reconnect = true;
				num_io_timeout = 0;
			}
		}

		server->lstrp = jiffies;

		for (i = 0; i < num_mids; i++) {
			if (mids[i] != NULL) {
				mids[i]->resp_buf_size = server->pdu_size;

				if (bufs[i] != NULL) {
					if (server->ops->is_network_name_deleted &&
					    server->ops->is_network_name_deleted(bufs[i],
										 server)) {
						cifs_server_dbg(FYI,
								"Share deleted. Reconnect needed");
					}
				}

				if (!mids[i]->multiRsp || mids[i]->multiEnd)
					mids[i]->callback(mids[i]);

				release_mid(mids[i]);
			} else if (server->ops->is_oplock_break &&
				   server->ops->is_oplock_break(bufs[i],
								 server)) {
				smb2_add_credits_from_hdr(bufs[i], server);
				cifs_dbg(FYI, "Received oplock break\n");
			} else {
				cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
						atomic_read(&mid_count));
				cifs_dump_mem("Received Data is: ", bufs[i],
					      HEADER_SIZE(server));
				smb2_add_credits_from_hdr(bufs[i], server);
#ifdef CONFIG_CIFS_DEBUG2
				if (server->ops->dump_detail)
					server->ops->dump_detail(bufs[i],
								 server);
				cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
			}
		}

		if (pdu_length > server->pdu_size) {
			if (!allocate_buffers(server))
				continue;
			pdu_length -= server->pdu_size;
			server->total_read = 0;
			server->large_buf = false;
			buf = server->smallbuf;
			goto next_pdu;
		}

		/* do this reconnect at the very end after processing all MIDs */
		if (pending_reconnect)
			cifs_reconnect(server, true);

	} /* end while !EXITING */

	/* buffer usually freed in free_mid - need to free it here on exit */
	cifs_buf_release(server->bigbuf);
	if (server->smallbuf) /* no sense logging a debug message if NULL */
		cifs_small_buf_release(server->smallbuf);

	task_to_wake = xchg(&server->tsk, NULL);
	clean_demultiplex_info(server);

	/* if server->tsk was NULL then wait for a signal before exiting */
	if (!task_to_wake) {
		set_current_state(TASK_INTERRUPTIBLE);
		while (!signal_pending(current)) {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		set_current_state(TASK_RUNNING);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	module_put_and_kthread_exit(0);
}

int
cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
	struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
	struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
	struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
	struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;

	switch (srcaddr->sa_family) {
	case AF_UNSPEC:
		switch (rhs->sa_family) {
		case AF_UNSPEC:
			return 0;
		case AF_INET:
		case AF_INET6:
			return 1;
		default:
			return -1;
		}
	case AF_INET: {
		switch (rhs->sa_family) {
		case AF_UNSPEC:
			return -1;
		case AF_INET:
			return memcmp(saddr4, vaddr4,
				      sizeof(struct sockaddr_in));
		case AF_INET6:
			return 1;
		default:
			return -1;
		}
	}
	case AF_INET6: {
		switch (rhs->sa_family) {
		case AF_UNSPEC:
		case AF_INET:
			return -1;
		case AF_INET6:
			return memcmp(saddr6,
				      vaddr6,
				      sizeof(struct sockaddr_in6));
		default:
			return -1;
		}
	}
	default:
		return -1; /* don't expect to be here */
	}
}

/*
 * Returns true if srcaddr isn't specified and rhs isn't specified, or
 * if srcaddr is specified and matches the IP address of the rhs argument
 */
bool
cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
	switch (srcaddr->sa_family) {
	case AF_UNSPEC:
		return (rhs->sa_family == AF_UNSPEC);
	case AF_INET: {
		struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
		struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;

		return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
	}
	case AF_INET6: {
		struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;

		return (ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr)
			&& saddr6->sin6_scope_id == vaddr6->sin6_scope_id);
	}
	default:
		WARN_ON(1);
		return false; /* don't expect to be here */
	}
}

/*
 * If no port is specified in addr structure, we try to match with port 445
 * and, if that fails, with port 139. It should be called only if address
 * families of server and addr are equal.
 */
static bool
match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
{
	__be16 port, *sport;

	/* SMBDirect manages its own ports, don't match it here */
	if (server->rdma)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
		port = ((struct sockaddr_in *) addr)->sin_port;
		break;
	case AF_INET6:
		sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
		port = ((struct sockaddr_in6 *) addr)->sin6_port;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	if (!port) {
		port = htons(CIFS_PORT);
		if (port == *sport)
			return true;

		port = htons(RFC1001_PORT);
	}

	return port == *sport;
}

static bool match_server_address(struct TCP_Server_Info *server, struct sockaddr *addr)
{
	if (!cifs_match_ipaddr(addr, (struct sockaddr *)&server->dstaddr))
		return false;

	return true;
}

static bool
match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
	/*
	 * The select_sectype function should either return the ctx->sectype
	 * that was specified, or "Unspecified" if that sectype was not
	 * compatible with the given NEGOTIATE request.
	 */
	if (server->ops->select_sectype(server, ctx->sectype)
	     == Unspecified)
		return false;

	/*
	 * Now check if signing mode is acceptable. No need to check
	 * global_secflags at this point since if MUST_SIGN is set then
	 * the server->sign had better be too.
	 */
	if (ctx->sign && !server->sign)
		return false;

	return true;
}

/* this function must be called with srv_lock held */
static int match_server(struct TCP_Server_Info *server,
			struct smb3_fs_context *ctx,
			bool match_super)
{
	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;

	lockdep_assert_held(&server->srv_lock);

	if (ctx->nosharesock)
		return 0;

	/* this server does not share socket */
	if (server->nosharesock)
		return 0;

	/* If multidialect negotiation, see if existing sessions match one */
	if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
		if (server->vals->protocol_id < SMB30_PROT_ID)
			return 0;
	} else if (strcmp(ctx->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		if (server->vals->protocol_id < SMB21_PROT_ID)
			return 0;
	} else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
		return 0;

	if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
		return 0;

	if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr,
			       (struct sockaddr *)&server->srcaddr))
		return 0;
	/*
	 * When matching cifs.ko superblocks (@match_super == true), we can't
	 * really match either @server->leaf_fullpath or @server->dstaddr
	 * directly since this @server might belong to a completely different
	 * server -- in case of domain-based DFS referrals or DFS links -- as
	 * provided earlier by mount(2) through 'source' and 'ip' options.
	 *
	 * Otherwise, match the DFS referral in @server->leaf_fullpath or the
	 * destination address in @server->dstaddr.
	 *
	 * When using 'nodfs' mount option, we avoid sharing it with DFS
	 * connections as they might failover.
	 */
	if (!match_super) {
		if (!ctx->nodfs) {
			if (server->leaf_fullpath) {
				if (!ctx->leaf_fullpath ||
				    strcasecmp(server->leaf_fullpath,
					       ctx->leaf_fullpath))
					return 0;
			} else if (ctx->leaf_fullpath) {
				return 0;
			}
		} else if (server->leaf_fullpath) {
			return 0;
		}
	}

	/*
	 * Match for a regular connection (address/hostname/port) which has no
	 * DFS referrals set.
	 */
	if (!server->leaf_fullpath &&
	    (strcasecmp(server->hostname, ctx->server_hostname) ||
	     !match_server_address(server, addr) ||
	     !match_port(server, addr)))
		return 0;

	if (!match_security(server, ctx))
		return 0;

	if (server->echo_interval != ctx->echo_interval * HZ)
		return 0;

	if (server->rdma != ctx->rdma)
		return 0;

	if (server->ignore_signature != ctx->ignore_signature)
		return 0;

	if (server->min_offload != ctx->min_offload)
		return 0;

	return 1;
}

struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		spin_lock(&server->srv_lock);
		/*
		 * Skip ses channels since they're only handled in lower layers
		 * (e.g. cifs_send_recv).
		 */
		if (SERVER_IS_CHAN(server) ||
		    !match_server(server, ctx, false)) {
			spin_unlock(&server->srv_lock);
			continue;
		}
		spin_unlock(&server->srv_lock);

		++server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		cifs_dbg(FYI, "Existing tcp session with server found\n");
		return server;
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}

void
cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
{
	struct task_struct *task;

	spin_lock(&cifs_tcp_ses_lock);
	if (--server->srv_count > 0) {
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	/* srv_count can never go negative */
	WARN_ON(server->srv_count < 0);

	put_net(cifs_net_ns(server));

	list_del_init(&server->tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	/* For secondary channels, we pick up ref-count on the primary server */
	if (SERVER_IS_CHAN(server))
		cifs_put_tcp_session(server->primary_server, from_reconnect);

	cancel_delayed_work_sync(&server->echo);

	if (from_reconnect)
		/*
		 * Avoid deadlock here: reconnect work calls
		 * cifs_put_tcp_session() at its end. Need to be sure
		 * that reconnect work does nothing with server pointer after
		 * that step.
		 */
		cancel_delayed_work(&server->reconnect);
	else
		cancel_delayed_work_sync(&server->reconnect);

	spin_lock(&server->srv_lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&server->srv_lock);

	cifs_crypto_secmech_release(server);

	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	kfree(server->hostname);
	server->hostname = NULL;

	task = xchg(&server->tsk, NULL);
	if (task)
		send_sig(SIGKILL, task, 1);
}

struct TCP_Server_Info *
cifs_get_tcp_session(struct smb3_fs_context *ctx,
		     struct TCP_Server_Info *primary_server)
{
	struct TCP_Server_Info *tcp_ses = NULL;
	int rc;

	cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);

	/* see if we already have a matching tcp_ses */
	tcp_ses = cifs_find_tcp_session(ctx);
	if (tcp_ses)
		return tcp_ses;

	tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
	if (!tcp_ses) {
		rc = -ENOMEM;
		goto out_err;
	}

	tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
	if (!tcp_ses->hostname) {
		rc = -ENOMEM;
		goto out_err;
	}

	if (ctx->leaf_fullpath) {
		tcp_ses->leaf_fullpath = kstrdup(ctx->leaf_fullpath, GFP_KERNEL);
		if (!tcp_ses->leaf_fullpath) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	if (ctx->nosharesock)
		tcp_ses->nosharesock = true;

	tcp_ses->ops = ctx->ops;
	tcp_ses->vals = ctx->vals;
	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));

	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
	tcp_ses->noblockcnt = ctx->rootfs;
	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
	tcp_ses->noautotune = ctx->noautotune;
	tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
	tcp_ses->rdma = ctx->rdma;
	tcp_ses->in_flight = 0;
	tcp_ses->max_in_flight = 0;
	tcp_ses->credits = 1;
	if (primary_server) {
		spin_lock(&cifs_tcp_ses_lock);
		++primary_server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		tcp_ses->primary_server = primary_server;
	}
	init_waitqueue_head(&tcp_ses->response_q);
	init_waitqueue_head(&tcp_ses->request_q);
	INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
	mutex_init(&tcp_ses->_srv_mutex);
	memcpy(tcp_ses->workstation_RFC1001_name,
	       ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	memcpy(tcp_ses->server_RFC1001_name,
	       ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	tcp_ses->session_estab = false;
	tcp_ses->sequence_number = 0;
	tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
	tcp_ses->reconnect_instance = 1;
	tcp_ses->lstrp = jiffies;
	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
	spin_lock_init(&tcp_ses->req_lock);
	spin_lock_init(&tcp_ses->srv_lock);
	spin_lock_init(&tcp_ses->mid_lock);
	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
	mutex_init(&tcp_ses->reconnect_mutex);
#ifdef CONFIG_CIFS_DFS_UPCALL
	mutex_init(&tcp_ses->refpath_lock);
#endif
	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
	       sizeof(tcp_ses->srcaddr));
	memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
	       sizeof(tcp_ses->dstaddr));
	if (ctx->use_client_guid)
		memcpy(tcp_ses->client_guid, ctx->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
	else
		generate_random_uuid(tcp_ses->client_guid);
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread is not created yet
	 * no need to spinlock this init of tcpStatus or srv_count
	 */
	tcp_ses->tcpStatus = CifsNew;
	++tcp_ses->srv_count;

	if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
	    ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
		tcp_ses->echo_interval = ctx->echo_interval * HZ;
	else
		tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
	if (tcp_ses->rdma) {
#ifndef CONFIG_CIFS_SMB_DIRECT
		cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
		rc = -ENOENT;
		goto out_err_crypto_release;
#endif
		tcp_ses->smbd_conn = smbd_get_connection(
			tcp_ses, (struct sockaddr *)&ctx->dstaddr);
		if (tcp_ses->smbd_conn) {
			cifs_dbg(VFS, "RDMA transport established\n");
			rc = 0;
			goto smbd_connected;
		} else {
			rc = -ENOENT;
			goto out_err_crypto_release;
		}
	}
	rc = ip_connect(tcp_ses);
	if (rc < 0) {
		cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
		goto out_err_crypto_release;
	}
smbd_connected:
	/*
	 * since we're in a cifs function already, we know that
	 * this will succeed. No need for try_module_get().
	 */
	__module_get(THIS_MODULE);
	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
				   tcp_ses, "cifsd");
	if (IS_ERR(tcp_ses->tsk)) {
		rc = PTR_ERR(tcp_ses->tsk);
		cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
		module_put(THIS_MODULE);
		goto out_err_crypto_release;
	}
	tcp_ses->min_offload = ctx->min_offload;
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread is not created yet
	 * no need to spinlock this update of tcpStatus
	 */
	spin_lock(&tcp_ses->srv_lock);
	tcp_ses->tcpStatus = CifsNeedNegotiate;
	spin_unlock(&tcp_ses->srv_lock);

	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
	else
		tcp_ses->max_credits = ctx->max_credits;

	tcp_ses->nr_targets = 1;
	tcp_ses->ignore_signature = ctx->ignore_signature;
	/* thread spawned, put it on the list */
	spin_lock(&cifs_tcp_ses_lock);
	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	/* queue echo request delayed work */
	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);

	return tcp_ses;

out_err_crypto_release:
	cifs_crypto_secmech_release(tcp_ses);

	put_net(cifs_net_ns(tcp_ses));

out_err:
	if (tcp_ses) {
		if (SERVER_IS_CHAN(tcp_ses))
			cifs_put_tcp_session(tcp_ses->primary_server, false);
		kfree(tcp_ses->hostname);
		kfree(tcp_ses->leaf_fullpath);
		if (tcp_ses->ssocket)
			sock_release(tcp_ses->ssocket);
		kfree(tcp_ses);
	}
	return ERR_PTR(rc);
}
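
/*
 * Illustrative pairing (a sketch, not upstream text): a caller that obtains a
 * server reference via cifs_get_tcp_session() (or cifs_find_tcp_session())
 * is expected to drop it later with cifs_put_tcp_session(server, 0), which
 * tears the connection down once srv_count reaches zero.
 */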
/* this function must be called with ses_lock and chan_lock held */
static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	if (ctx->sectype != Unspecified &&
	    ctx->sectype != ses->sectype)
		return 0;

	/*
	 * If an existing session is limited to fewer channels than
	 * requested, it should not be reused
	 */
	if (ses->chan_max < ctx->max_channels)
		return 0;

	switch (ses->sectype) {
	case Kerberos:
		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
			return 0;
		break;
	default:
		/* NULL username means anonymous session */
		if (ses->user_name == NULL) {
			if (!ctx->nullauth)
				return 0;
			break;
		}

		/* anything else takes username/password */
		if (strncmp(ses->user_name,
			    ctx->username ? ctx->username : "",
			    CIFS_MAX_USERNAME_LEN))
			return 0;
		if ((ctx->username && strlen(ctx->username) != 0) &&
		    ses->password != NULL &&
		    strncmp(ses->password,
			    ctx->password ? ctx->password : "",
			    CIFS_MAX_PASSWORD_LEN))
			return 0;
	}

	if (strcmp(ctx->local_nls->charset, ses->local_nls->charset))
		return 0;

	return 1;
}

/**
 * cifs_setup_ipc - helper to setup the IPC tcon for the session
 * @ses: smb session to issue the request on
 * @ctx: the superblock configuration context to use for building the
 *       new tree connection for the IPC (interprocess communication RPC)
 *
 * A new IPC connection is made and stored in the session
 * tcon_ipc. The IPC tcon has the same lifetime as the session.
 */
static int
cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	int rc = 0, xid;
	struct cifs_tcon *tcon;
	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
	bool seal = false;
	struct TCP_Server_Info *server = ses->server;

	/*
	 * If the mount request that resulted in the creation of the
	 * session requires encryption, force IPC to be encrypted too.
	 */
	if (ctx->seal) {
		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
			seal = true;
		else {
			cifs_server_dbg(VFS,
					"IPC: server doesn't support encryption\n");
			return -EOPNOTSUPP;
		}
	}

	/* no need to setup directory caching on IPC share, so pass in false */
	tcon = tcon_info_alloc(false);
	if (tcon == NULL)
		return -ENOMEM;

	spin_lock(&server->srv_lock);
	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
	spin_unlock(&server->srv_lock);

	xid = get_xid();
	tcon->ses = ses;
	tcon->ipc = true;
	tcon->seal = seal;
	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
	free_xid(xid);

	if (rc) {
		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
		tconInfoFree(tcon);
		goto out;
	}

	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);

	spin_lock(&tcon->tc_lock);
	tcon->status = TID_GOOD;
	spin_unlock(&tcon->tc_lock);
	ses->tcon_ipc = tcon;
out:
	return rc;
}
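
/*
 * Illustrative note (a sketch, not upstream text): for a server whose
 * hostname is, say, "srv1", cifs_setup_ipc() above tree-connects to the
 * hidden share "\\srv1\IPC$" (optionally sealed) and caches the resulting
 * tcon in ses->tcon_ipc, where it lives for the lifetime of the session and
 * is released by cifs_free_ipc() below.
 */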
1937 */ 1938 static int 1939 cifs_free_ipc(struct cifs_ses *ses) 1940 { 1941 struct cifs_tcon *tcon = ses->tcon_ipc; 1942 1943 if (tcon == NULL) 1944 return 0; 1945 1946 tconInfoFree(tcon); 1947 ses->tcon_ipc = NULL; 1948 return 0; 1949 } 1950 1951 static struct cifs_ses * 1952 cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) 1953 { 1954 struct cifs_ses *ses, *ret = NULL; 1955 1956 spin_lock(&cifs_tcp_ses_lock); 1957 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 1958 spin_lock(&ses->ses_lock); 1959 if (ses->ses_status == SES_EXITING) { 1960 spin_unlock(&ses->ses_lock); 1961 continue; 1962 } 1963 spin_lock(&ses->chan_lock); 1964 if (match_session(ses, ctx)) { 1965 spin_unlock(&ses->chan_lock); 1966 spin_unlock(&ses->ses_lock); 1967 ret = ses; 1968 break; 1969 } 1970 spin_unlock(&ses->chan_lock); 1971 spin_unlock(&ses->ses_lock); 1972 } 1973 if (ret) 1974 cifs_smb_ses_inc_refcount(ret); 1975 spin_unlock(&cifs_tcp_ses_lock); 1976 return ret; 1977 } 1978 1979 void __cifs_put_smb_ses(struct cifs_ses *ses) 1980 { 1981 unsigned int rc, xid; 1982 unsigned int chan_count; 1983 struct TCP_Server_Info *server = ses->server; 1984 1985 spin_lock(&ses->ses_lock); 1986 if (ses->ses_status == SES_EXITING) { 1987 spin_unlock(&ses->ses_lock); 1988 return; 1989 } 1990 spin_unlock(&ses->ses_lock); 1991 1992 cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count); 1993 cifs_dbg(FYI, 1994 "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE"); 1995 1996 spin_lock(&cifs_tcp_ses_lock); 1997 if (--ses->ses_count > 0) { 1998 spin_unlock(&cifs_tcp_ses_lock); 1999 return; 2000 } 2001 spin_lock(&ses->ses_lock); 2002 if (ses->ses_status == SES_GOOD) 2003 ses->ses_status = SES_EXITING; 2004 spin_unlock(&ses->ses_lock); 2005 spin_unlock(&cifs_tcp_ses_lock); 2006 2007 /* ses_count can never go negative */ 2008 WARN_ON(ses->ses_count < 0); 2009 2010 spin_lock(&ses->ses_lock); 2011 if (ses->ses_status == SES_EXITING && server->ops->logoff) { 2012 spin_unlock(&ses->ses_lock); 2013 cifs_free_ipc(ses); 2014 xid = get_xid(); 2015 rc = server->ops->logoff(xid, ses); 2016 if (rc) 2017 cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n", 2018 __func__, rc); 2019 _free_xid(xid); 2020 } else { 2021 spin_unlock(&ses->ses_lock); 2022 cifs_free_ipc(ses); 2023 } 2024 2025 spin_lock(&cifs_tcp_ses_lock); 2026 list_del_init(&ses->smb_ses_list); 2027 spin_unlock(&cifs_tcp_ses_lock); 2028 2029 chan_count = ses->chan_count; 2030 2031 /* close any extra channels */ 2032 if (chan_count > 1) { 2033 int i; 2034 2035 for (i = 1; i < chan_count; i++) { 2036 if (ses->chans[i].iface) { 2037 kref_put(&ses->chans[i].iface->refcount, release_iface); 2038 ses->chans[i].iface = NULL; 2039 } 2040 cifs_put_tcp_session(ses->chans[i].server, 0); 2041 ses->chans[i].server = NULL; 2042 } 2043 } 2044 2045 /* we now account for primary channel in iface->refcount */ 2046 if (ses->chans[0].iface) { 2047 kref_put(&ses->chans[0].iface->refcount, release_iface); 2048 ses->chans[0].server = NULL; 2049 } 2050 2051 sesInfoFree(ses); 2052 cifs_put_tcp_session(server, 0); 2053 } 2054 2055 #ifdef CONFIG_KEYS 2056 2057 /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ 2058 #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) 2059 2060 /* Populate username and pw fields from keyring if possible */ 2061 static int 2062 cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses) 2063 { 2064 int rc = 0; 2065 int is_domain = 0; 2066 const char *delim, *payload; 2067 char 
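	/*
	 * The logon keys looked up below are expected to carry a
	 * "user:password" payload under a "cifs:a:<address>" or
	 * "cifs:d:<domain>" description. They are normally added from
	 * userspace, e.g. (hypothetical address and credentials):
	 *   keyctl add logon cifs:a:192.168.1.50 'jsmith:Secret123' @s
	 */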
*desc; 2068 ssize_t len; 2069 struct key *key; 2070 struct TCP_Server_Info *server = ses->server; 2071 struct sockaddr_in *sa; 2072 struct sockaddr_in6 *sa6; 2073 const struct user_key_payload *upayload; 2074 2075 desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL); 2076 if (!desc) 2077 return -ENOMEM; 2078 2079 /* try to find an address key first */ 2080 switch (server->dstaddr.ss_family) { 2081 case AF_INET: 2082 sa = (struct sockaddr_in *)&server->dstaddr; 2083 sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr); 2084 break; 2085 case AF_INET6: 2086 sa6 = (struct sockaddr_in6 *)&server->dstaddr; 2087 sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr); 2088 break; 2089 default: 2090 cifs_dbg(FYI, "Bad ss_family (%hu)\n", 2091 server->dstaddr.ss_family); 2092 rc = -EINVAL; 2093 goto out_err; 2094 } 2095 2096 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); 2097 key = request_key(&key_type_logon, desc, ""); 2098 if (IS_ERR(key)) { 2099 if (!ses->domainName) { 2100 cifs_dbg(FYI, "domainName is NULL\n"); 2101 rc = PTR_ERR(key); 2102 goto out_err; 2103 } 2104 2105 /* didn't work, try to find a domain key */ 2106 sprintf(desc, "cifs:d:%s", ses->domainName); 2107 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); 2108 key = request_key(&key_type_logon, desc, ""); 2109 if (IS_ERR(key)) { 2110 rc = PTR_ERR(key); 2111 goto out_err; 2112 } 2113 is_domain = 1; 2114 } 2115 2116 down_read(&key->sem); 2117 upayload = user_key_payload_locked(key); 2118 if (IS_ERR_OR_NULL(upayload)) { 2119 rc = upayload ? PTR_ERR(upayload) : -EINVAL; 2120 goto out_key_put; 2121 } 2122 2123 /* find first : in payload */ 2124 payload = upayload->data; 2125 delim = strnchr(payload, upayload->datalen, ':'); 2126 cifs_dbg(FYI, "payload=%s\n", payload); 2127 if (!delim) { 2128 cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n", 2129 upayload->datalen); 2130 rc = -EINVAL; 2131 goto out_key_put; 2132 } 2133 2134 len = delim - payload; 2135 if (len > CIFS_MAX_USERNAME_LEN || len <= 0) { 2136 cifs_dbg(FYI, "Bad value from username search (len=%zd)\n", 2137 len); 2138 rc = -EINVAL; 2139 goto out_key_put; 2140 } 2141 2142 ctx->username = kstrndup(payload, len, GFP_KERNEL); 2143 if (!ctx->username) { 2144 cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n", 2145 len); 2146 rc = -ENOMEM; 2147 goto out_key_put; 2148 } 2149 cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username); 2150 2151 len = key->datalen - (len + 1); 2152 if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) { 2153 cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len); 2154 rc = -EINVAL; 2155 kfree(ctx->username); 2156 ctx->username = NULL; 2157 goto out_key_put; 2158 } 2159 2160 ++delim; 2161 ctx->password = kstrndup(delim, len, GFP_KERNEL); 2162 if (!ctx->password) { 2163 cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n", 2164 len); 2165 rc = -ENOMEM; 2166 kfree(ctx->username); 2167 ctx->username = NULL; 2168 goto out_key_put; 2169 } 2170 2171 /* 2172 * If we have a domain key then we must set the domainName in the 2173 * for the request. 
2174 */ 2175 if (is_domain && ses->domainName) { 2176 ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL); 2177 if (!ctx->domainname) { 2178 cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n", 2179 len); 2180 rc = -ENOMEM; 2181 kfree(ctx->username); 2182 ctx->username = NULL; 2183 kfree_sensitive(ctx->password); 2184 ctx->password = NULL; 2185 goto out_key_put; 2186 } 2187 } 2188 2189 strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name)); 2190 2191 out_key_put: 2192 up_read(&key->sem); 2193 key_put(key); 2194 out_err: 2195 kfree(desc); 2196 cifs_dbg(FYI, "%s: returning %d\n", __func__, rc); 2197 return rc; 2198 } 2199 #else /* ! CONFIG_KEYS */ 2200 static inline int 2201 cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)), 2202 struct cifs_ses *ses __attribute__((unused))) 2203 { 2204 return -ENOSYS; 2205 } 2206 #endif /* CONFIG_KEYS */ 2207 2208 /** 2209 * cifs_get_smb_ses - get a session matching @ctx data from @server 2210 * @server: server to setup the session to 2211 * @ctx: superblock configuration context to use to setup the session 2212 * 2213 * This function assumes it is being called from cifs_mount() where we 2214 * already got a server reference (server refcount +1). See 2215 * cifs_get_tcon() for refcount explanations. 2216 */ 2217 struct cifs_ses * 2218 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) 2219 { 2220 int rc = 0; 2221 unsigned int xid; 2222 struct cifs_ses *ses; 2223 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; 2224 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; 2225 2226 xid = get_xid(); 2227 2228 ses = cifs_find_smb_ses(server, ctx); 2229 if (ses) { 2230 cifs_dbg(FYI, "Existing smb sess found (status=%d)\n", 2231 ses->ses_status); 2232 2233 spin_lock(&ses->chan_lock); 2234 if (cifs_chan_needs_reconnect(ses, server)) { 2235 spin_unlock(&ses->chan_lock); 2236 cifs_dbg(FYI, "Session needs reconnect\n"); 2237 2238 mutex_lock(&ses->session_mutex); 2239 rc = cifs_negotiate_protocol(xid, ses, server); 2240 if (rc) { 2241 mutex_unlock(&ses->session_mutex); 2242 /* problem -- put our ses reference */ 2243 cifs_put_smb_ses(ses); 2244 free_xid(xid); 2245 return ERR_PTR(rc); 2246 } 2247 2248 rc = cifs_setup_session(xid, ses, server, 2249 ctx->local_nls); 2250 if (rc) { 2251 mutex_unlock(&ses->session_mutex); 2252 /* problem -- put our reference */ 2253 cifs_put_smb_ses(ses); 2254 free_xid(xid); 2255 return ERR_PTR(rc); 2256 } 2257 mutex_unlock(&ses->session_mutex); 2258 2259 spin_lock(&ses->chan_lock); 2260 } 2261 spin_unlock(&ses->chan_lock); 2262 2263 /* existing SMB ses has a server reference already */ 2264 cifs_put_tcp_session(server, 0); 2265 free_xid(xid); 2266 return ses; 2267 } 2268 2269 rc = -ENOMEM; 2270 2271 cifs_dbg(FYI, "Existing smb sess not found\n"); 2272 ses = sesInfoAlloc(); 2273 if (ses == NULL) 2274 goto get_ses_fail; 2275 2276 /* new SMB session uses our server ref */ 2277 ses->server = server; 2278 if (server->dstaddr.ss_family == AF_INET6) 2279 sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr); 2280 else 2281 sprintf(ses->ip_addr, "%pI4", &addr->sin_addr); 2282 2283 if (ctx->username) { 2284 ses->user_name = kstrdup(ctx->username, GFP_KERNEL); 2285 if (!ses->user_name) 2286 goto get_ses_fail; 2287 } 2288 2289 /* ctx->password freed at unmount */ 2290 if (ctx->password) { 2291 ses->password = kstrdup(ctx->password, GFP_KERNEL); 2292 if (!ses->password) 2293 goto get_ses_fail; 2294 } 2295 if (ctx->domainname) { 2296 
ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL); 2297 if (!ses->domainName) 2298 goto get_ses_fail; 2299 } 2300 2301 strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name)); 2302 2303 if (ctx->domainauto) 2304 ses->domainAuto = ctx->domainauto; 2305 ses->cred_uid = ctx->cred_uid; 2306 ses->linux_uid = ctx->linux_uid; 2307 2308 ses->sectype = ctx->sectype; 2309 ses->sign = ctx->sign; 2310 ses->local_nls = load_nls(ctx->local_nls->charset); 2311 2312 /* add server as first channel */ 2313 spin_lock(&ses->chan_lock); 2314 ses->chans[0].server = server; 2315 ses->chan_count = 1; 2316 ses->chan_max = ctx->multichannel ? ctx->max_channels:1; 2317 ses->chans_need_reconnect = 1; 2318 spin_unlock(&ses->chan_lock); 2319 2320 mutex_lock(&ses->session_mutex); 2321 rc = cifs_negotiate_protocol(xid, ses, server); 2322 if (!rc) 2323 rc = cifs_setup_session(xid, ses, server, ctx->local_nls); 2324 mutex_unlock(&ses->session_mutex); 2325 2326 /* each channel uses a different signing key */ 2327 spin_lock(&ses->chan_lock); 2328 memcpy(ses->chans[0].signkey, ses->smb3signingkey, 2329 sizeof(ses->smb3signingkey)); 2330 spin_unlock(&ses->chan_lock); 2331 2332 if (rc) 2333 goto get_ses_fail; 2334 2335 /* 2336 * success, put it on the list and add it as first channel 2337 * note: the session becomes active soon after this. So you'll 2338 * need to lock before changing something in the session. 2339 */ 2340 spin_lock(&cifs_tcp_ses_lock); 2341 ses->dfs_root_ses = ctx->dfs_root_ses; 2342 if (ses->dfs_root_ses) 2343 ses->dfs_root_ses->ses_count++; 2344 list_add(&ses->smb_ses_list, &server->smb_ses_list); 2345 spin_unlock(&cifs_tcp_ses_lock); 2346 2347 cifs_setup_ipc(ses, ctx); 2348 2349 free_xid(xid); 2350 2351 return ses; 2352 2353 get_ses_fail: 2354 sesInfoFree(ses); 2355 free_xid(xid); 2356 return ERR_PTR(rc); 2357 } 2358 2359 /* this function must be called with tc_lock held */ 2360 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) 2361 { 2362 struct TCP_Server_Info *server = tcon->ses->server; 2363 2364 if (tcon->status == TID_EXITING) 2365 return 0; 2366 2367 if (tcon->origin_fullpath) { 2368 if (!ctx->source || 2369 !dfs_src_pathname_equal(ctx->source, 2370 tcon->origin_fullpath)) 2371 return 0; 2372 } else if (!server->leaf_fullpath && 2373 strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) { 2374 return 0; 2375 } 2376 if (tcon->seal != ctx->seal) 2377 return 0; 2378 if (tcon->snapshot_time != ctx->snapshot_time) 2379 return 0; 2380 if (tcon->handle_timeout != ctx->handle_timeout) 2381 return 0; 2382 if (tcon->no_lease != ctx->no_lease) 2383 return 0; 2384 if (tcon->nodelete != ctx->nodelete) 2385 return 0; 2386 return 1; 2387 } 2388 2389 static struct cifs_tcon * 2390 cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) 2391 { 2392 struct cifs_tcon *tcon; 2393 2394 spin_lock(&cifs_tcp_ses_lock); 2395 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 2396 spin_lock(&tcon->tc_lock); 2397 if (!match_tcon(tcon, ctx)) { 2398 spin_unlock(&tcon->tc_lock); 2399 continue; 2400 } 2401 ++tcon->tc_count; 2402 spin_unlock(&tcon->tc_lock); 2403 spin_unlock(&cifs_tcp_ses_lock); 2404 return tcon; 2405 } 2406 spin_unlock(&cifs_tcp_ses_lock); 2407 return NULL; 2408 } 2409 2410 void 2411 cifs_put_tcon(struct cifs_tcon *tcon) 2412 { 2413 unsigned int xid; 2414 struct cifs_ses *ses; 2415 2416 /* 2417 * IPC tcon share the lifetime of their session and are 2418 * destroyed in the session put function 2419 */ 2420 if (tcon == NULL || tcon->ipc) 2421 
		return;

	ses = tcon->ses;
	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if (--tcon->tc_count > 0) {
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	/* tc_count can never go negative */
	WARN_ON(tcon->tc_count < 0);

	list_del_init(&tcon->tcon_list);
	tcon->status = TID_EXITING;
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel polling of interfaces */
	cancel_delayed_work_sync(&tcon->query_interfaces);
#ifdef CONFIG_CIFS_DFS_UPCALL
	cancel_delayed_work_sync(&tcon->dfs_cache_work);
#endif

	if (tcon->use_witness) {
		int rc;

		rc = cifs_swn_unregister(tcon);
		if (rc < 0) {
			cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
				 __func__, rc);
		}
	}

	xid = get_xid();
	if (ses->server->ops->tree_disconnect)
		ses->server->ops->tree_disconnect(xid, tcon);
	_free_xid(xid);

	cifs_fscache_release_super_cookie(tcon);
	tconInfoFree(tcon);
	cifs_put_smb_ses(ses);
}

/**
 * cifs_get_tcon - get a tcon matching @ctx data from @ses
 * @ses: smb session to issue the request on
 * @ctx: the superblock configuration context to use for building the
 *       new tree connection
 *
 * - tcon refcount is the number of mount points using the tcon.
 * - ses refcount is the number of tcons using the session.
 *
 * 1. This function assumes it is being called from cifs_mount() where
 *    we already got a session reference (ses refcount +1).
 *
 * 2. Since we're in the context of adding a mount point, the end
 *    result should be either:
 *
 * a) a new tcon already allocated with refcount=1 (1 mount point) and
 *    its session refcount incremented (1 new tcon). This +1 was
 *    already done in (1).
 *
 * b) an existing tcon with refcount+1 (add a mount point to it) and
 *    identical ses refcount (no new tcon). Because of (1) we need to
 *    decrement the ses refcount.
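 *
 * For example (hypothetical share): mounting //srv/data twice takes path
 * (a) and then path (b), leaving one tcon with tc_count=2 and one ses
 * whose refcount stays at 1, because the second mount's extra ses
 * reference from (1) is dropped again here.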
2488 */ 2489 static struct cifs_tcon * 2490 cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) 2491 { 2492 struct cifs_tcon *tcon; 2493 bool nohandlecache; 2494 int rc, xid; 2495 2496 tcon = cifs_find_tcon(ses, ctx); 2497 if (tcon) { 2498 /* 2499 * tcon has refcount already incremented but we need to 2500 * decrement extra ses reference gotten by caller (case b) 2501 */ 2502 cifs_dbg(FYI, "Found match on UNC path\n"); 2503 cifs_put_smb_ses(ses); 2504 return tcon; 2505 } 2506 2507 if (!ses->server->ops->tree_connect) { 2508 rc = -ENOSYS; 2509 goto out_fail; 2510 } 2511 2512 if (ses->server->dialect >= SMB20_PROT_ID && 2513 (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)) 2514 nohandlecache = ctx->nohandlecache; 2515 else 2516 nohandlecache = true; 2517 tcon = tcon_info_alloc(!nohandlecache); 2518 if (tcon == NULL) { 2519 rc = -ENOMEM; 2520 goto out_fail; 2521 } 2522 tcon->nohandlecache = nohandlecache; 2523 2524 if (ctx->snapshot_time) { 2525 if (ses->server->vals->protocol_id == 0) { 2526 cifs_dbg(VFS, 2527 "Use SMB2 or later for snapshot mount option\n"); 2528 rc = -EOPNOTSUPP; 2529 goto out_fail; 2530 } else 2531 tcon->snapshot_time = ctx->snapshot_time; 2532 } 2533 2534 if (ctx->handle_timeout) { 2535 if (ses->server->vals->protocol_id == 0) { 2536 cifs_dbg(VFS, 2537 "Use SMB2.1 or later for handle timeout option\n"); 2538 rc = -EOPNOTSUPP; 2539 goto out_fail; 2540 } else 2541 tcon->handle_timeout = ctx->handle_timeout; 2542 } 2543 2544 tcon->ses = ses; 2545 if (ctx->password) { 2546 tcon->password = kstrdup(ctx->password, GFP_KERNEL); 2547 if (!tcon->password) { 2548 rc = -ENOMEM; 2549 goto out_fail; 2550 } 2551 } 2552 2553 if (ctx->seal) { 2554 if (ses->server->vals->protocol_id == 0) { 2555 cifs_dbg(VFS, 2556 "SMB3 or later required for encryption\n"); 2557 rc = -EOPNOTSUPP; 2558 goto out_fail; 2559 } else if (tcon->ses->server->capabilities & 2560 SMB2_GLOBAL_CAP_ENCRYPTION) 2561 tcon->seal = true; 2562 else { 2563 cifs_dbg(VFS, "Encryption is not supported on share\n"); 2564 rc = -EOPNOTSUPP; 2565 goto out_fail; 2566 } 2567 } 2568 2569 if (ctx->linux_ext) { 2570 if (ses->server->posix_ext_supported) { 2571 tcon->posix_extensions = true; 2572 pr_warn_once("SMB3.11 POSIX Extensions are experimental\n"); 2573 } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) || 2574 (strcmp(ses->server->vals->version_string, 2575 SMB3ANY_VERSION_STRING) == 0) || 2576 (strcmp(ses->server->vals->version_string, 2577 SMBDEFAULT_VERSION_STRING) == 0)) { 2578 cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n"); 2579 rc = -EOPNOTSUPP; 2580 goto out_fail; 2581 } else { 2582 cifs_dbg(VFS, "Check vers= mount option. 
SMB3.11 " 2583 "disabled but required for POSIX extensions\n"); 2584 rc = -EOPNOTSUPP; 2585 goto out_fail; 2586 } 2587 } 2588 2589 xid = get_xid(); 2590 rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon, 2591 ctx->local_nls); 2592 free_xid(xid); 2593 cifs_dbg(FYI, "Tcon rc = %d\n", rc); 2594 if (rc) 2595 goto out_fail; 2596 2597 tcon->use_persistent = false; 2598 /* check if SMB2 or later, CIFS does not support persistent handles */ 2599 if (ctx->persistent) { 2600 if (ses->server->vals->protocol_id == 0) { 2601 cifs_dbg(VFS, 2602 "SMB3 or later required for persistent handles\n"); 2603 rc = -EOPNOTSUPP; 2604 goto out_fail; 2605 } else if (ses->server->capabilities & 2606 SMB2_GLOBAL_CAP_PERSISTENT_HANDLES) 2607 tcon->use_persistent = true; 2608 else /* persistent handles requested but not supported */ { 2609 cifs_dbg(VFS, 2610 "Persistent handles not supported on share\n"); 2611 rc = -EOPNOTSUPP; 2612 goto out_fail; 2613 } 2614 } else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY) 2615 && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES) 2616 && (ctx->nopersistent == false)) { 2617 cifs_dbg(FYI, "enabling persistent handles\n"); 2618 tcon->use_persistent = true; 2619 } else if (ctx->resilient) { 2620 if (ses->server->vals->protocol_id == 0) { 2621 cifs_dbg(VFS, 2622 "SMB2.1 or later required for resilient handles\n"); 2623 rc = -EOPNOTSUPP; 2624 goto out_fail; 2625 } 2626 tcon->use_resilient = true; 2627 } 2628 2629 tcon->use_witness = false; 2630 if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) { 2631 if (ses->server->vals->protocol_id >= SMB30_PROT_ID) { 2632 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) { 2633 /* 2634 * Set witness in use flag in first place 2635 * to retry registration in the echo task 2636 */ 2637 tcon->use_witness = true; 2638 /* And try to register immediately */ 2639 rc = cifs_swn_register(tcon); 2640 if (rc < 0) { 2641 cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc); 2642 goto out_fail; 2643 } 2644 } else { 2645 /* TODO: try to extend for non-cluster uses (eg multichannel) */ 2646 cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n"); 2647 rc = -EOPNOTSUPP; 2648 goto out_fail; 2649 } 2650 } else { 2651 cifs_dbg(VFS, "SMB3 or later required for witness option\n"); 2652 rc = -EOPNOTSUPP; 2653 goto out_fail; 2654 } 2655 } 2656 2657 /* If the user really knows what they are doing they can override */ 2658 if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) { 2659 if (ctx->cache_ro) 2660 cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n"); 2661 else if (ctx->cache_rw) 2662 cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n"); 2663 } 2664 2665 if (ctx->no_lease) { 2666 if (ses->server->vals->protocol_id == 0) { 2667 cifs_dbg(VFS, 2668 "SMB2 or later required for nolease option\n"); 2669 rc = -EOPNOTSUPP; 2670 goto out_fail; 2671 } else 2672 tcon->no_lease = ctx->no_lease; 2673 } 2674 2675 /* 2676 * We can have only one retry value for a connection to a share so for 2677 * resources mounted more than once to the same server share the last 2678 * value passed in for the retry flag is used. 
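	 * For example, if the same share is first mounted with "hard" and
	 * later with "soft", the shared tcon ends up with retry disabled
	 * and both mounts see soft-mount behaviour.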
2679 */ 2680 tcon->retry = ctx->retry; 2681 tcon->nocase = ctx->nocase; 2682 tcon->broken_sparse_sup = ctx->no_sparse; 2683 tcon->max_cached_dirs = ctx->max_cached_dirs; 2684 tcon->nodelete = ctx->nodelete; 2685 tcon->local_lease = ctx->local_lease; 2686 INIT_LIST_HEAD(&tcon->pending_opens); 2687 tcon->status = TID_GOOD; 2688 2689 INIT_DELAYED_WORK(&tcon->query_interfaces, 2690 smb2_query_server_interfaces); 2691 if (ses->server->dialect >= SMB30_PROT_ID && 2692 (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { 2693 /* schedule query interfaces poll */ 2694 queue_delayed_work(cifsiod_wq, &tcon->query_interfaces, 2695 (SMB_INTERFACE_POLL_INTERVAL * HZ)); 2696 } 2697 #ifdef CONFIG_CIFS_DFS_UPCALL 2698 INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh); 2699 #endif 2700 spin_lock(&cifs_tcp_ses_lock); 2701 list_add(&tcon->tcon_list, &ses->tcon_list); 2702 spin_unlock(&cifs_tcp_ses_lock); 2703 2704 return tcon; 2705 2706 out_fail: 2707 tconInfoFree(tcon); 2708 return ERR_PTR(rc); 2709 } 2710 2711 void 2712 cifs_put_tlink(struct tcon_link *tlink) 2713 { 2714 if (!tlink || IS_ERR(tlink)) 2715 return; 2716 2717 if (!atomic_dec_and_test(&tlink->tl_count) || 2718 test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) { 2719 tlink->tl_time = jiffies; 2720 return; 2721 } 2722 2723 if (!IS_ERR(tlink_tcon(tlink))) 2724 cifs_put_tcon(tlink_tcon(tlink)); 2725 kfree(tlink); 2726 return; 2727 } 2728 2729 static int 2730 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) 2731 { 2732 struct cifs_sb_info *old = CIFS_SB(sb); 2733 struct cifs_sb_info *new = mnt_data->cifs_sb; 2734 unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK; 2735 unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK; 2736 2737 if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK)) 2738 return 0; 2739 2740 if (old->mnt_cifs_serverino_autodisabled) 2741 newflags &= ~CIFS_MOUNT_SERVER_INUM; 2742 2743 if (oldflags != newflags) 2744 return 0; 2745 2746 /* 2747 * We want to share sb only if we don't specify an r/wsize or 2748 * specified r/wsize is greater than or equal to existing one. 
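	 * For example, an existing superblock mounted with wsize=65536 can
	 * be shared by a new mount asking for wsize=1048576 (or leaving it
	 * unset), but not by one asking for wsize=4096.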
2749 */ 2750 if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize) 2751 return 0; 2752 2753 if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize) 2754 return 0; 2755 2756 if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) || 2757 !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid)) 2758 return 0; 2759 2760 if (old->ctx->file_mode != new->ctx->file_mode || 2761 old->ctx->dir_mode != new->ctx->dir_mode) 2762 return 0; 2763 2764 if (strcmp(old->local_nls->charset, new->local_nls->charset)) 2765 return 0; 2766 2767 if (old->ctx->acregmax != new->ctx->acregmax) 2768 return 0; 2769 if (old->ctx->acdirmax != new->ctx->acdirmax) 2770 return 0; 2771 if (old->ctx->closetimeo != new->ctx->closetimeo) 2772 return 0; 2773 2774 return 1; 2775 } 2776 2777 static int match_prepath(struct super_block *sb, 2778 struct cifs_tcon *tcon, 2779 struct cifs_mnt_data *mnt_data) 2780 { 2781 struct smb3_fs_context *ctx = mnt_data->ctx; 2782 struct cifs_sb_info *old = CIFS_SB(sb); 2783 struct cifs_sb_info *new = mnt_data->cifs_sb; 2784 bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && 2785 old->prepath; 2786 bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) && 2787 new->prepath; 2788 2789 if (tcon->origin_fullpath && 2790 dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source)) 2791 return 1; 2792 2793 if (old_set && new_set && !strcmp(new->prepath, old->prepath)) 2794 return 1; 2795 else if (!old_set && !new_set) 2796 return 1; 2797 2798 return 0; 2799 } 2800 2801 int 2802 cifs_match_super(struct super_block *sb, void *data) 2803 { 2804 struct cifs_mnt_data *mnt_data = data; 2805 struct smb3_fs_context *ctx; 2806 struct cifs_sb_info *cifs_sb; 2807 struct TCP_Server_Info *tcp_srv; 2808 struct cifs_ses *ses; 2809 struct cifs_tcon *tcon; 2810 struct tcon_link *tlink; 2811 int rc = 0; 2812 2813 spin_lock(&cifs_tcp_ses_lock); 2814 cifs_sb = CIFS_SB(sb); 2815 2816 /* We do not want to use a superblock that has been shutdown */ 2817 if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) { 2818 spin_unlock(&cifs_tcp_ses_lock); 2819 return 0; 2820 } 2821 2822 tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); 2823 if (IS_ERR_OR_NULL(tlink)) { 2824 pr_warn_once("%s: skip super matching due to bad tlink(%p)\n", 2825 __func__, tlink); 2826 spin_unlock(&cifs_tcp_ses_lock); 2827 return 0; 2828 } 2829 tcon = tlink_tcon(tlink); 2830 ses = tcon->ses; 2831 tcp_srv = ses->server; 2832 2833 ctx = mnt_data->ctx; 2834 2835 spin_lock(&tcp_srv->srv_lock); 2836 spin_lock(&ses->ses_lock); 2837 spin_lock(&ses->chan_lock); 2838 spin_lock(&tcon->tc_lock); 2839 if (!match_server(tcp_srv, ctx, true) || 2840 !match_session(ses, ctx) || 2841 !match_tcon(tcon, ctx) || 2842 !match_prepath(sb, tcon, mnt_data)) { 2843 rc = 0; 2844 goto out; 2845 } 2846 2847 rc = compare_mount_options(sb, mnt_data); 2848 out: 2849 spin_unlock(&tcon->tc_lock); 2850 spin_unlock(&ses->chan_lock); 2851 spin_unlock(&ses->ses_lock); 2852 spin_unlock(&tcp_srv->srv_lock); 2853 2854 spin_unlock(&cifs_tcp_ses_lock); 2855 cifs_put_tlink(tlink); 2856 return rc; 2857 } 2858 2859 #ifdef CONFIG_DEBUG_LOCK_ALLOC 2860 static struct lock_class_key cifs_key[2]; 2861 static struct lock_class_key cifs_slock_key[2]; 2862 2863 static inline void 2864 cifs_reclassify_socket4(struct socket *sock) 2865 { 2866 struct sock *sk = sock->sk; 2867 BUG_ON(!sock_allow_reclassification(sk)); 2868 sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS", 2869 &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]); 2870 } 2871 2872 static inline void 2873 
cifs_reclassify_socket6(struct socket *sock) 2874 { 2875 struct sock *sk = sock->sk; 2876 BUG_ON(!sock_allow_reclassification(sk)); 2877 sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS", 2878 &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]); 2879 } 2880 #else 2881 static inline void 2882 cifs_reclassify_socket4(struct socket *sock) 2883 { 2884 } 2885 2886 static inline void 2887 cifs_reclassify_socket6(struct socket *sock) 2888 { 2889 } 2890 #endif 2891 2892 /* See RFC1001 section 14 on representation of Netbios names */ 2893 static void rfc1002mangle(char *target, char *source, unsigned int length) 2894 { 2895 unsigned int i, j; 2896 2897 for (i = 0, j = 0; i < (length); i++) { 2898 /* mask a nibble at a time and encode */ 2899 target[j] = 'A' + (0x0F & (source[i] >> 4)); 2900 target[j+1] = 'A' + (0x0F & source[i]); 2901 j += 2; 2902 } 2903 2904 } 2905 2906 static int 2907 bind_socket(struct TCP_Server_Info *server) 2908 { 2909 int rc = 0; 2910 if (server->srcaddr.ss_family != AF_UNSPEC) { 2911 /* Bind to the specified local IP address */ 2912 struct socket *socket = server->ssocket; 2913 rc = kernel_bind(socket, 2914 (struct sockaddr *) &server->srcaddr, 2915 sizeof(server->srcaddr)); 2916 if (rc < 0) { 2917 struct sockaddr_in *saddr4; 2918 struct sockaddr_in6 *saddr6; 2919 saddr4 = (struct sockaddr_in *)&server->srcaddr; 2920 saddr6 = (struct sockaddr_in6 *)&server->srcaddr; 2921 if (saddr6->sin6_family == AF_INET6) 2922 cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n", 2923 &saddr6->sin6_addr, rc); 2924 else 2925 cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n", 2926 &saddr4->sin_addr.s_addr, rc); 2927 } 2928 } 2929 return rc; 2930 } 2931 2932 static int 2933 ip_rfc1001_connect(struct TCP_Server_Info *server) 2934 { 2935 int rc = 0; 2936 /* 2937 * some servers require RFC1001 sessinit before sending 2938 * negprot - BB check reconnection in case where second 2939 * sessinit is sent but no second negprot 2940 */ 2941 struct rfc1002_session_packet req = {}; 2942 struct smb_hdr *smb_buf = (struct smb_hdr *)&req; 2943 unsigned int len; 2944 2945 req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name); 2946 2947 if (server->server_RFC1001_name[0] != 0) 2948 rfc1002mangle(req.trailer.session_req.called_name, 2949 server->server_RFC1001_name, 2950 RFC1001_NAME_LEN_WITH_NULL); 2951 else 2952 rfc1002mangle(req.trailer.session_req.called_name, 2953 DEFAULT_CIFS_CALLED_NAME, 2954 RFC1001_NAME_LEN_WITH_NULL); 2955 2956 req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name); 2957 2958 /* calling name ends in null (byte 16) from old smb convention */ 2959 if (server->workstation_RFC1001_name[0] != 0) 2960 rfc1002mangle(req.trailer.session_req.calling_name, 2961 server->workstation_RFC1001_name, 2962 RFC1001_NAME_LEN_WITH_NULL); 2963 else 2964 rfc1002mangle(req.trailer.session_req.calling_name, 2965 "LINUX_CIFS_CLNT", 2966 RFC1001_NAME_LEN_WITH_NULL); 2967 2968 /* 2969 * As per rfc1002, @len must be the number of bytes that follows the 2970 * length field of a rfc1002 session request payload. 2971 */ 2972 len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req); 2973 2974 smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len); 2975 rc = smb_send(server, smb_buf, len); 2976 /* 2977 * RFC1001 layer in at least one server requires very short break before 2978 * negprot presumably because not expecting negprot to follow so fast. 
2979 * This is a simple solution that works without complicating the code 2980 * and causes no significant slowing down on mount for everyone else 2981 */ 2982 usleep_range(1000, 2000); 2983 2984 return rc; 2985 } 2986 2987 static int 2988 generic_ip_connect(struct TCP_Server_Info *server) 2989 { 2990 struct sockaddr *saddr; 2991 struct socket *socket; 2992 int slen, sfamily; 2993 __be16 sport; 2994 int rc = 0; 2995 2996 saddr = (struct sockaddr *) &server->dstaddr; 2997 2998 if (server->dstaddr.ss_family == AF_INET6) { 2999 struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr; 3000 3001 sport = ipv6->sin6_port; 3002 slen = sizeof(struct sockaddr_in6); 3003 sfamily = AF_INET6; 3004 cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr, 3005 ntohs(sport)); 3006 } else { 3007 struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr; 3008 3009 sport = ipv4->sin_port; 3010 slen = sizeof(struct sockaddr_in); 3011 sfamily = AF_INET; 3012 cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr, 3013 ntohs(sport)); 3014 } 3015 3016 if (server->ssocket) { 3017 socket = server->ssocket; 3018 } else { 3019 rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM, 3020 IPPROTO_TCP, &server->ssocket, 1); 3021 if (rc < 0) { 3022 cifs_server_dbg(VFS, "Error %d creating socket\n", rc); 3023 return rc; 3024 } 3025 3026 /* BB other socket options to set KEEPALIVE, NODELAY? */ 3027 cifs_dbg(FYI, "Socket created\n"); 3028 socket = server->ssocket; 3029 socket->sk->sk_allocation = GFP_NOFS; 3030 socket->sk->sk_use_task_frag = false; 3031 if (sfamily == AF_INET6) 3032 cifs_reclassify_socket6(socket); 3033 else 3034 cifs_reclassify_socket4(socket); 3035 } 3036 3037 rc = bind_socket(server); 3038 if (rc < 0) 3039 return rc; 3040 3041 /* 3042 * Eventually check for other socket options to change from 3043 * the default. sock_setsockopt not used because it expects 3044 * user space buffer 3045 */ 3046 socket->sk->sk_rcvtimeo = 7 * HZ; 3047 socket->sk->sk_sndtimeo = 5 * HZ; 3048 3049 /* make the bufsizes depend on wsize/rsize and max requests */ 3050 if (server->noautotune) { 3051 if (socket->sk->sk_sndbuf < (200 * 1024)) 3052 socket->sk->sk_sndbuf = 200 * 1024; 3053 if (socket->sk->sk_rcvbuf < (140 * 1024)) 3054 socket->sk->sk_rcvbuf = 140 * 1024; 3055 } 3056 3057 if (server->tcp_nodelay) 3058 tcp_sock_set_nodelay(socket->sk); 3059 3060 cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n", 3061 socket->sk->sk_sndbuf, 3062 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); 3063 3064 rc = kernel_connect(socket, saddr, slen, 3065 server->noblockcnt ? O_NONBLOCK : 0); 3066 /* 3067 * When mounting SMB root file systems, we do not want to block in 3068 * connect. Otherwise bail out and then let cifs_reconnect() perform 3069 * reconnect failover - if possible. 
3070 */ 3071 if (server->noblockcnt && rc == -EINPROGRESS) 3072 rc = 0; 3073 if (rc < 0) { 3074 cifs_dbg(FYI, "Error %d connecting to server\n", rc); 3075 trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc); 3076 sock_release(socket); 3077 server->ssocket = NULL; 3078 return rc; 3079 } 3080 trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr); 3081 if (sport == htons(RFC1001_PORT)) 3082 rc = ip_rfc1001_connect(server); 3083 3084 return rc; 3085 } 3086 3087 static int 3088 ip_connect(struct TCP_Server_Info *server) 3089 { 3090 __be16 *sport; 3091 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; 3092 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; 3093 3094 if (server->dstaddr.ss_family == AF_INET6) 3095 sport = &addr6->sin6_port; 3096 else 3097 sport = &addr->sin_port; 3098 3099 if (*sport == 0) { 3100 int rc; 3101 3102 /* try with 445 port at first */ 3103 *sport = htons(CIFS_PORT); 3104 3105 rc = generic_ip_connect(server); 3106 if (rc >= 0) 3107 return rc; 3108 3109 /* if it failed, try with 139 port */ 3110 *sport = htons(RFC1001_PORT); 3111 } 3112 3113 return generic_ip_connect(server); 3114 } 3115 3116 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 3117 void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, 3118 struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) 3119 { 3120 /* 3121 * If we are reconnecting then should we check to see if 3122 * any requested capabilities changed locally e.g. via 3123 * remount but we can not do much about it here 3124 * if they have (even if we could detect it by the following) 3125 * Perhaps we could add a backpointer to array of sb from tcon 3126 * or if we change to make all sb to same share the same 3127 * sb as NFS - then we only have one backpointer to sb. 3128 * What if we wanted to mount the server share twice once with 3129 * and once without posixacls or posix paths? 
3130 */ 3131 __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 3132 3133 if (ctx && ctx->no_linux_ext) { 3134 tcon->fsUnixInfo.Capability = 0; 3135 tcon->unix_ext = 0; /* Unix Extensions disabled */ 3136 cifs_dbg(FYI, "Linux protocol extensions disabled\n"); 3137 return; 3138 } else if (ctx) 3139 tcon->unix_ext = 1; /* Unix Extensions supported */ 3140 3141 if (!tcon->unix_ext) { 3142 cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n"); 3143 return; 3144 } 3145 3146 if (!CIFSSMBQFSUnixInfo(xid, tcon)) { 3147 __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 3148 cifs_dbg(FYI, "unix caps which server supports %lld\n", cap); 3149 /* 3150 * check for reconnect case in which we do not 3151 * want to change the mount behavior if we can avoid it 3152 */ 3153 if (ctx == NULL) { 3154 /* 3155 * turn off POSIX ACL and PATHNAMES if not set 3156 * originally at mount time 3157 */ 3158 if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0) 3159 cap &= ~CIFS_UNIX_POSIX_ACL_CAP; 3160 if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { 3161 if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) 3162 cifs_dbg(VFS, "POSIXPATH support change\n"); 3163 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; 3164 } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { 3165 cifs_dbg(VFS, "possible reconnect error\n"); 3166 cifs_dbg(VFS, "server disabled POSIX path support\n"); 3167 } 3168 } 3169 3170 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) 3171 cifs_dbg(VFS, "per-share encryption not supported yet\n"); 3172 3173 cap &= CIFS_UNIX_CAP_MASK; 3174 if (ctx && ctx->no_psx_acl) 3175 cap &= ~CIFS_UNIX_POSIX_ACL_CAP; 3176 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { 3177 cifs_dbg(FYI, "negotiated posix acl support\n"); 3178 if (cifs_sb) 3179 cifs_sb->mnt_cifs_flags |= 3180 CIFS_MOUNT_POSIXACL; 3181 } 3182 3183 if (ctx && ctx->posix_paths == 0) 3184 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; 3185 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { 3186 cifs_dbg(FYI, "negotiate posix pathnames\n"); 3187 if (cifs_sb) 3188 cifs_sb->mnt_cifs_flags |= 3189 CIFS_MOUNT_POSIX_PATHS; 3190 } 3191 3192 cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap); 3193 #ifdef CONFIG_CIFS_DEBUG2 3194 if (cap & CIFS_UNIX_FCNTL_CAP) 3195 cifs_dbg(FYI, "FCNTL cap\n"); 3196 if (cap & CIFS_UNIX_EXTATTR_CAP) 3197 cifs_dbg(FYI, "EXTATTR cap\n"); 3198 if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) 3199 cifs_dbg(FYI, "POSIX path cap\n"); 3200 if (cap & CIFS_UNIX_XATTR_CAP) 3201 cifs_dbg(FYI, "XATTR cap\n"); 3202 if (cap & CIFS_UNIX_POSIX_ACL_CAP) 3203 cifs_dbg(FYI, "POSIX ACL cap\n"); 3204 if (cap & CIFS_UNIX_LARGE_READ_CAP) 3205 cifs_dbg(FYI, "very large read cap\n"); 3206 if (cap & CIFS_UNIX_LARGE_WRITE_CAP) 3207 cifs_dbg(FYI, "very large write cap\n"); 3208 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP) 3209 cifs_dbg(FYI, "transport encryption cap\n"); 3210 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) 3211 cifs_dbg(FYI, "mandatory transport encryption cap\n"); 3212 #endif /* CIFS_DEBUG2 */ 3213 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { 3214 if (ctx == NULL) 3215 cifs_dbg(FYI, "resetting capabilities failed\n"); 3216 else 3217 cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. 
Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n"); 3218 3219 } 3220 } 3221 } 3222 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3223 3224 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb) 3225 { 3226 struct smb3_fs_context *ctx = cifs_sb->ctx; 3227 3228 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); 3229 3230 spin_lock_init(&cifs_sb->tlink_tree_lock); 3231 cifs_sb->tlink_tree = RB_ROOT; 3232 3233 cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n", 3234 ctx->file_mode, ctx->dir_mode); 3235 3236 /* this is needed for ASCII cp to Unicode converts */ 3237 if (ctx->iocharset == NULL) { 3238 /* load_nls_default cannot return null */ 3239 cifs_sb->local_nls = load_nls_default(); 3240 } else { 3241 cifs_sb->local_nls = load_nls(ctx->iocharset); 3242 if (cifs_sb->local_nls == NULL) { 3243 cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n", 3244 ctx->iocharset); 3245 return -ELIBACC; 3246 } 3247 } 3248 ctx->local_nls = cifs_sb->local_nls; 3249 3250 smb3_update_mnt_flags(cifs_sb); 3251 3252 if (ctx->direct_io) 3253 cifs_dbg(FYI, "mounting share using direct i/o\n"); 3254 if (ctx->cache_ro) { 3255 cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n"); 3256 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE; 3257 } else if (ctx->cache_rw) { 3258 cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n"); 3259 cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE | 3260 CIFS_MOUNT_RW_CACHE); 3261 } 3262 3263 if ((ctx->cifs_acl) && (ctx->dynperm)) 3264 cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n"); 3265 3266 if (ctx->prepath) { 3267 cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL); 3268 if (cifs_sb->prepath == NULL) 3269 return -ENOMEM; 3270 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3271 } 3272 3273 return 0; 3274 } 3275 3276 /* Release all succeed connections */ 3277 void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx) 3278 { 3279 int rc = 0; 3280 3281 if (mnt_ctx->tcon) 3282 cifs_put_tcon(mnt_ctx->tcon); 3283 else if (mnt_ctx->ses) 3284 cifs_put_smb_ses(mnt_ctx->ses); 3285 else if (mnt_ctx->server) 3286 cifs_put_tcp_session(mnt_ctx->server, 0); 3287 mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS; 3288 free_xid(mnt_ctx->xid); 3289 } 3290 3291 int cifs_mount_get_session(struct cifs_mount_ctx *mnt_ctx) 3292 { 3293 struct TCP_Server_Info *server = NULL; 3294 struct smb3_fs_context *ctx; 3295 struct cifs_ses *ses = NULL; 3296 unsigned int xid; 3297 int rc = 0; 3298 3299 xid = get_xid(); 3300 3301 if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->fs_ctx)) { 3302 rc = -EINVAL; 3303 goto out; 3304 } 3305 ctx = mnt_ctx->fs_ctx; 3306 3307 /* get a reference to a tcp session */ 3308 server = cifs_get_tcp_session(ctx, NULL); 3309 if (IS_ERR(server)) { 3310 rc = PTR_ERR(server); 3311 server = NULL; 3312 goto out; 3313 } 3314 3315 /* get a reference to a SMB session */ 3316 ses = cifs_get_smb_ses(server, ctx); 3317 if (IS_ERR(ses)) { 3318 rc = PTR_ERR(ses); 3319 ses = NULL; 3320 goto out; 3321 } 3322 3323 if ((ctx->persistent == true) && (!(ses->server->capabilities & 3324 SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) { 3325 cifs_server_dbg(VFS, "persistent handles not supported by server\n"); 3326 rc = -EOPNOTSUPP; 3327 } 3328 3329 out: 3330 mnt_ctx->xid = xid; 3331 mnt_ctx->server = server; 3332 mnt_ctx->ses = ses; 3333 mnt_ctx->tcon = NULL; 3334 3335 
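	/*
	 * Note: on failure the references taken above (server and/or ses)
	 * are intentionally left in mnt_ctx; the caller releases them via
	 * cifs_mount_put_conns().
	 */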
return rc; 3336 } 3337 3338 int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx) 3339 { 3340 struct TCP_Server_Info *server; 3341 struct cifs_sb_info *cifs_sb; 3342 struct smb3_fs_context *ctx; 3343 struct cifs_tcon *tcon = NULL; 3344 int rc = 0; 3345 3346 if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->server || !mnt_ctx->ses || !mnt_ctx->fs_ctx || 3347 !mnt_ctx->cifs_sb)) { 3348 rc = -EINVAL; 3349 goto out; 3350 } 3351 server = mnt_ctx->server; 3352 ctx = mnt_ctx->fs_ctx; 3353 cifs_sb = mnt_ctx->cifs_sb; 3354 3355 /* search for existing tcon to this server share */ 3356 tcon = cifs_get_tcon(mnt_ctx->ses, ctx); 3357 if (IS_ERR(tcon)) { 3358 rc = PTR_ERR(tcon); 3359 tcon = NULL; 3360 goto out; 3361 } 3362 3363 /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */ 3364 if (tcon->posix_extensions) 3365 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; 3366 3367 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 3368 /* tell server which Unix caps we support */ 3369 if (cap_unix(tcon->ses)) { 3370 /* 3371 * reset of caps checks mount to see if unix extensions disabled 3372 * for just this mount. 3373 */ 3374 reset_cifs_unix_caps(mnt_ctx->xid, tcon, cifs_sb, ctx); 3375 spin_lock(&tcon->ses->server->srv_lock); 3376 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && 3377 (le64_to_cpu(tcon->fsUnixInfo.Capability) & 3378 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { 3379 spin_unlock(&tcon->ses->server->srv_lock); 3380 rc = -EACCES; 3381 goto out; 3382 } 3383 spin_unlock(&tcon->ses->server->srv_lock); 3384 } else 3385 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3386 tcon->unix_ext = 0; /* server does not support them */ 3387 3388 /* do not care if a following call succeed - informational */ 3389 if (!tcon->pipe && server->ops->qfs_tcon) { 3390 server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb); 3391 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) { 3392 if (tcon->fsDevInfo.DeviceCharacteristics & 3393 cpu_to_le32(FILE_READ_ONLY_DEVICE)) 3394 cifs_dbg(VFS, "mounted to read only share\n"); 3395 else if ((cifs_sb->mnt_cifs_flags & 3396 CIFS_MOUNT_RW_CACHE) == 0) 3397 cifs_dbg(VFS, "read only mount of RW share\n"); 3398 /* no need to log a RW mount of a typical RW share */ 3399 } 3400 } 3401 3402 /* 3403 * Clamp the rsize/wsize mount arguments if they are too big for the server 3404 * and set the rsize/wsize to the negotiated values if not passed in by 3405 * the user on mount 3406 */ 3407 if ((cifs_sb->ctx->wsize == 0) || 3408 (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) 3409 cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx); 3410 if ((cifs_sb->ctx->rsize == 0) || 3411 (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx))) 3412 cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx); 3413 3414 /* 3415 * The cookie is initialized from volume info returned above. 3416 * Inside cifs_fscache_get_super_cookie it checks 3417 * that we do not get super cookie twice. 
3418 */ 3419 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) 3420 cifs_fscache_get_super_cookie(tcon); 3421 3422 out: 3423 mnt_ctx->tcon = tcon; 3424 return rc; 3425 } 3426 3427 static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, 3428 struct cifs_tcon *tcon) 3429 { 3430 struct tcon_link *tlink; 3431 3432 /* hang the tcon off of the superblock */ 3433 tlink = kzalloc(sizeof(*tlink), GFP_KERNEL); 3434 if (tlink == NULL) 3435 return -ENOMEM; 3436 3437 tlink->tl_uid = ses->linux_uid; 3438 tlink->tl_tcon = tcon; 3439 tlink->tl_time = jiffies; 3440 set_bit(TCON_LINK_MASTER, &tlink->tl_flags); 3441 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); 3442 3443 cifs_sb->master_tlink = tlink; 3444 spin_lock(&cifs_sb->tlink_tree_lock); 3445 tlink_rb_insert(&cifs_sb->tlink_tree, tlink); 3446 spin_unlock(&cifs_sb->tlink_tree_lock); 3447 3448 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, 3449 TLINK_IDLE_EXPIRE); 3450 return 0; 3451 } 3452 3453 static int 3454 cifs_are_all_path_components_accessible(struct TCP_Server_Info *server, 3455 unsigned int xid, 3456 struct cifs_tcon *tcon, 3457 struct cifs_sb_info *cifs_sb, 3458 char *full_path, 3459 int added_treename) 3460 { 3461 int rc; 3462 char *s; 3463 char sep, tmp; 3464 int skip = added_treename ? 1 : 0; 3465 3466 sep = CIFS_DIR_SEP(cifs_sb); 3467 s = full_path; 3468 3469 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ""); 3470 while (rc == 0) { 3471 /* skip separators */ 3472 while (*s == sep) 3473 s++; 3474 if (!*s) 3475 break; 3476 /* next separator */ 3477 while (*s && *s != sep) 3478 s++; 3479 /* 3480 * if the treename is added, we then have to skip the first 3481 * part within the separators 3482 */ 3483 if (skip) { 3484 skip = 0; 3485 continue; 3486 } 3487 /* 3488 * temporarily null-terminate the path at the end of 3489 * the current component 3490 */ 3491 tmp = *s; 3492 *s = 0; 3493 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, 3494 full_path); 3495 *s = tmp; 3496 } 3497 return rc; 3498 } 3499 3500 /* 3501 * Check if path is remote (i.e. a DFS share). 3502 * 3503 * Return -EREMOTE if it is, otherwise 0 or -errno. 
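 * (A DFS link below the mounted share typically surfaces here as
 * STATUS_PATH_NOT_COVERED from the server, which the is_path_accessible
 * probe maps to -EREMOTE.)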
3504 */ 3505 int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx) 3506 { 3507 int rc; 3508 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; 3509 struct TCP_Server_Info *server = mnt_ctx->server; 3510 unsigned int xid = mnt_ctx->xid; 3511 struct cifs_tcon *tcon = mnt_ctx->tcon; 3512 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; 3513 char *full_path; 3514 3515 if (!server->ops->is_path_accessible) 3516 return -EOPNOTSUPP; 3517 3518 /* 3519 * cifs_build_path_to_root works only when we have a valid tcon 3520 */ 3521 full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon, 3522 tcon->Flags & SMB_SHARE_IS_IN_DFS); 3523 if (full_path == NULL) 3524 return -ENOMEM; 3525 3526 cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path); 3527 3528 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, 3529 full_path); 3530 if (rc != 0 && rc != -EREMOTE) 3531 goto out; 3532 3533 if (rc != -EREMOTE) { 3534 rc = cifs_are_all_path_components_accessible(server, xid, tcon, 3535 cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS); 3536 if (rc != 0) { 3537 cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); 3538 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3539 rc = 0; 3540 } 3541 } 3542 3543 out: 3544 kfree(full_path); 3545 return rc; 3546 } 3547 3548 #ifdef CONFIG_CIFS_DFS_UPCALL 3549 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) 3550 { 3551 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, }; 3552 bool isdfs; 3553 int rc; 3554 3555 INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list); 3556 3557 rc = dfs_mount_share(&mnt_ctx, &isdfs); 3558 if (rc) 3559 goto error; 3560 if (!isdfs) 3561 goto out; 3562 3563 /* 3564 * After reconnecting to a different server, unique ids won't match anymore, so we disable 3565 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE). 3566 */ 3567 cifs_autodisable_serverino(cifs_sb); 3568 /* 3569 * Force the use of prefix path to support failover on DFS paths that resolve to targets 3570 * that have different prefix paths. 3571 */ 3572 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3573 kfree(cifs_sb->prepath); 3574 cifs_sb->prepath = ctx->prepath; 3575 ctx->prepath = NULL; 3576 3577 out: 3578 cifs_try_adding_channels(mnt_ctx.ses); 3579 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon); 3580 if (rc) 3581 goto error; 3582 3583 free_xid(mnt_ctx.xid); 3584 return rc; 3585 3586 error: 3587 dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list); 3588 cifs_mount_put_conns(&mnt_ctx); 3589 return rc; 3590 } 3591 #else 3592 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) 3593 { 3594 int rc = 0; 3595 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, }; 3596 3597 rc = cifs_mount_get_session(&mnt_ctx); 3598 if (rc) 3599 goto error; 3600 3601 rc = cifs_mount_get_tcon(&mnt_ctx); 3602 if (rc) 3603 goto error; 3604 3605 rc = cifs_is_path_remote(&mnt_ctx); 3606 if (rc == -EREMOTE) 3607 rc = -EOPNOTSUPP; 3608 if (rc) 3609 goto error; 3610 3611 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon); 3612 if (rc) 3613 goto error; 3614 3615 free_xid(mnt_ctx.xid); 3616 return rc; 3617 3618 error: 3619 cifs_mount_put_conns(&mnt_ctx); 3620 return rc; 3621 } 3622 #endif 3623 3624 /* 3625 * Issue a TREE_CONNECT request. 
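 * This is the legacy SMB1 implementation behind server->ops->tree_connect;
 * SMB2/SMB3 mounts go through the SMB2 TREE_CONNECT code instead.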
3626 */ 3627 int 3628 CIFSTCon(const unsigned int xid, struct cifs_ses *ses, 3629 const char *tree, struct cifs_tcon *tcon, 3630 const struct nls_table *nls_codepage) 3631 { 3632 struct smb_hdr *smb_buffer; 3633 struct smb_hdr *smb_buffer_response; 3634 TCONX_REQ *pSMB; 3635 TCONX_RSP *pSMBr; 3636 unsigned char *bcc_ptr; 3637 int rc = 0; 3638 int length; 3639 __u16 bytes_left, count; 3640 3641 if (ses == NULL) 3642 return -EIO; 3643 3644 smb_buffer = cifs_buf_get(); 3645 if (smb_buffer == NULL) 3646 return -ENOMEM; 3647 3648 smb_buffer_response = smb_buffer; 3649 3650 header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, 3651 NULL /*no tid */ , 4 /*wct */ ); 3652 3653 smb_buffer->Mid = get_next_mid(ses->server); 3654 smb_buffer->Uid = ses->Suid; 3655 pSMB = (TCONX_REQ *) smb_buffer; 3656 pSMBr = (TCONX_RSP *) smb_buffer_response; 3657 3658 pSMB->AndXCommand = 0xFF; 3659 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); 3660 bcc_ptr = &pSMB->Password[0]; 3661 3662 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ 3663 *bcc_ptr = 0; /* password is null byte */ 3664 bcc_ptr++; /* skip password */ 3665 /* already aligned so no need to do it below */ 3666 3667 if (ses->server->sign) 3668 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 3669 3670 if (ses->capabilities & CAP_STATUS32) { 3671 smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS; 3672 } 3673 if (ses->capabilities & CAP_DFS) { 3674 smb_buffer->Flags2 |= SMBFLG2_DFS; 3675 } 3676 if (ses->capabilities & CAP_UNICODE) { 3677 smb_buffer->Flags2 |= SMBFLG2_UNICODE; 3678 length = 3679 cifs_strtoUTF16((__le16 *) bcc_ptr, tree, 3680 6 /* max utf8 char length in bytes */ * 3681 (/* server len*/ + 256 /* share len */), nls_codepage); 3682 bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ 3683 bcc_ptr += 2; /* skip trailing null */ 3684 } else { /* ASCII */ 3685 strcpy(bcc_ptr, tree); 3686 bcc_ptr += strlen(tree) + 1; 3687 } 3688 strcpy(bcc_ptr, "?????"); 3689 bcc_ptr += strlen("?????"); 3690 bcc_ptr += 1; 3691 count = bcc_ptr - &pSMB->Password[0]; 3692 be32_add_cpu(&pSMB->hdr.smb_buf_length, count); 3693 pSMB->ByteCount = cpu_to_le16(count); 3694 3695 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 3696 0); 3697 3698 /* above now done in SendReceive */ 3699 if (rc == 0) { 3700 bool is_unicode; 3701 3702 tcon->tid = smb_buffer_response->Tid; 3703 bcc_ptr = pByteArea(smb_buffer_response); 3704 bytes_left = get_bcc(smb_buffer_response); 3705 length = strnlen(bcc_ptr, bytes_left - 2); 3706 if (smb_buffer->Flags2 & SMBFLG2_UNICODE) 3707 is_unicode = true; 3708 else 3709 is_unicode = false; 3710 3711 3712 /* skip service field (NB: this field is always ASCII) */ 3713 if (length == 3) { 3714 if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && 3715 (bcc_ptr[2] == 'C')) { 3716 cifs_dbg(FYI, "IPC connection\n"); 3717 tcon->ipc = true; 3718 tcon->pipe = true; 3719 } 3720 } else if (length == 2) { 3721 if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { 3722 /* the most common case */ 3723 cifs_dbg(FYI, "disk share connection\n"); 3724 } 3725 } 3726 bcc_ptr += length + 1; 3727 bytes_left -= (length + 1); 3728 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name)); 3729 3730 /* mostly informational -- no need to fail on error here */ 3731 kfree(tcon->nativeFileSystem); 3732 tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr, 3733 bytes_left, is_unicode, 3734 nls_codepage); 3735 3736 cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem); 3737 3738 if ((smb_buffer_response->WordCount == 3) || 3739 
(smb_buffer_response->WordCount == 7)) 3740 /* field is in same location */ 3741 tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); 3742 else 3743 tcon->Flags = 0; 3744 cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags); 3745 } 3746 3747 cifs_buf_release(smb_buffer); 3748 return rc; 3749 } 3750 3751 static void delayed_free(struct rcu_head *p) 3752 { 3753 struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu); 3754 3755 unload_nls(cifs_sb->local_nls); 3756 smb3_cleanup_fs_context(cifs_sb->ctx); 3757 kfree(cifs_sb); 3758 } 3759 3760 void 3761 cifs_umount(struct cifs_sb_info *cifs_sb) 3762 { 3763 struct rb_root *root = &cifs_sb->tlink_tree; 3764 struct rb_node *node; 3765 struct tcon_link *tlink; 3766 3767 cancel_delayed_work_sync(&cifs_sb->prune_tlinks); 3768 3769 spin_lock(&cifs_sb->tlink_tree_lock); 3770 while ((node = rb_first(root))) { 3771 tlink = rb_entry(node, struct tcon_link, tl_rbnode); 3772 cifs_get_tlink(tlink); 3773 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); 3774 rb_erase(node, root); 3775 3776 spin_unlock(&cifs_sb->tlink_tree_lock); 3777 cifs_put_tlink(tlink); 3778 spin_lock(&cifs_sb->tlink_tree_lock); 3779 } 3780 spin_unlock(&cifs_sb->tlink_tree_lock); 3781 3782 kfree(cifs_sb->prepath); 3783 call_rcu(&cifs_sb->rcu, delayed_free); 3784 } 3785 3786 int 3787 cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, 3788 struct TCP_Server_Info *server) 3789 { 3790 int rc = 0; 3791 3792 if (!server->ops->need_neg || !server->ops->negotiate) 3793 return -ENOSYS; 3794 3795 /* only send once per connect */ 3796 spin_lock(&server->srv_lock); 3797 if (server->tcpStatus != CifsGood && 3798 server->tcpStatus != CifsNew && 3799 server->tcpStatus != CifsNeedNegotiate) { 3800 spin_unlock(&server->srv_lock); 3801 return -EHOSTDOWN; 3802 } 3803 3804 if (!server->ops->need_neg(server) && 3805 server->tcpStatus == CifsGood) { 3806 spin_unlock(&server->srv_lock); 3807 return 0; 3808 } 3809 3810 server->tcpStatus = CifsInNegotiate; 3811 spin_unlock(&server->srv_lock); 3812 3813 rc = server->ops->negotiate(xid, ses, server); 3814 if (rc == 0) { 3815 spin_lock(&server->srv_lock); 3816 if (server->tcpStatus == CifsInNegotiate) 3817 server->tcpStatus = CifsGood; 3818 else 3819 rc = -EHOSTDOWN; 3820 spin_unlock(&server->srv_lock); 3821 } else { 3822 spin_lock(&server->srv_lock); 3823 if (server->tcpStatus == CifsInNegotiate) 3824 server->tcpStatus = CifsNeedNegotiate; 3825 spin_unlock(&server->srv_lock); 3826 } 3827 3828 return rc; 3829 } 3830 3831 int 3832 cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, 3833 struct TCP_Server_Info *server, 3834 struct nls_table *nls_info) 3835 { 3836 int rc = -ENOSYS; 3837 struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ? 
int
cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   struct nls_table *nls_info)
{
	int rc = -ENOSYS;
	struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ?
		server->primary_server : server;
	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
	bool is_binding = false;

	spin_lock(&ses->ses_lock);
	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
		 __func__, ses->chans_need_reconnect);

	if (ses->ses_status != SES_GOOD &&
	    ses->ses_status != SES_NEW &&
	    ses->ses_status != SES_NEED_RECON) {
		spin_unlock(&ses->ses_lock);
		return -EHOSTDOWN;
	}

	/* only send once per connect */
	spin_lock(&ses->chan_lock);
	if (CIFS_ALL_CHANS_GOOD(ses)) {
		if (ses->ses_status == SES_NEED_RECON)
			ses->ses_status = SES_GOOD;
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
		return 0;
	}

	cifs_chan_set_in_reconnect(ses, server);
	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
	spin_unlock(&ses->chan_lock);

	if (!is_binding) {
		ses->ses_status = SES_IN_SETUP;

		/* force iface_list refresh */
		ses->iface_last_update = 0;
	}
	spin_unlock(&ses->ses_lock);

	/* update ses ip_addr only for primary chan */
	if (server == pserver) {
		if (server->dstaddr.ss_family == AF_INET6)
			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
		else
			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
	}

	if (!is_binding) {
		ses->capabilities = server->capabilities;
		if (!linuxExtEnabled)
			ses->capabilities &= (~server->vals->cap_unix);

		if (ses->auth_key.response) {
			cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
				 ses->auth_key.response);
			kfree_sensitive(ses->auth_key.response);
			ses->auth_key.response = NULL;
			ses->auth_key.len = 0;
		}
	}

	cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
		 server->sec_mode, server->capabilities, server->timeAdj);

	if (server->ops->sess_setup)
		rc = server->ops->sess_setup(xid, ses, server, nls_info);

	if (rc) {
		cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_IN_SETUP)
			ses->ses_status = SES_NEED_RECON;
		spin_lock(&ses->chan_lock);
		cifs_chan_clear_in_reconnect(ses, server);
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
	} else {
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_IN_SETUP)
			ses->ses_status = SES_GOOD;
		spin_lock(&ses->chan_lock);
		cifs_chan_clear_in_reconnect(ses, server);
		cifs_chan_clear_need_reconnect(ses, server);
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
	}

	return rc;
}

static int
cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
{
	ctx->sectype = ses->sectype;

	/* krb5 is special, since we don't need username or pw */
	if (ctx->sectype == Kerberos)
		return 0;

	return cifs_set_cifscreds(ctx, ses);
}

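/*
 * Build a private tcon for @fsuid on a multiuser mount: clone the volume
 * context from the master tcon, look up credentials for that uid, and
 * establish a session and tree connection over the master's existing TCP
 * connection.
 */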
static struct cifs_tcon *
cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
	int rc;
	struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
	struct cifs_ses *ses;
	struct cifs_tcon *tcon = NULL;
	struct smb3_fs_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ctx->local_nls = cifs_sb->local_nls;
	ctx->linux_uid = fsuid;
	ctx->cred_uid = fsuid;
	ctx->UNC = master_tcon->tree_name;
	ctx->retry = master_tcon->retry;
	ctx->nocase = master_tcon->nocase;
	ctx->nohandlecache = master_tcon->nohandlecache;
	ctx->local_lease = master_tcon->local_lease;
	ctx->no_lease = master_tcon->no_lease;
	ctx->resilient = master_tcon->use_resilient;
	ctx->persistent = master_tcon->use_persistent;
	ctx->handle_timeout = master_tcon->handle_timeout;
	ctx->no_linux_ext = !master_tcon->unix_ext;
	ctx->linux_ext = master_tcon->posix_extensions;
	ctx->sectype = master_tcon->ses->sectype;
	ctx->sign = master_tcon->ses->sign;
	ctx->seal = master_tcon->seal;
	ctx->witness = master_tcon->use_witness;

	rc = cifs_set_vol_auth(ctx, master_tcon->ses);
	if (rc) {
		tcon = ERR_PTR(rc);
		goto out;
	}

	/* get a reference for the same TCP session */
	spin_lock(&cifs_tcp_ses_lock);
	++master_tcon->ses->server->srv_count;
	spin_unlock(&cifs_tcp_ses_lock);

	ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
	if (IS_ERR(ses)) {
		tcon = (struct cifs_tcon *)ses;
		cifs_put_tcp_session(master_tcon->ses->server, 0);
		goto out;
	}

	tcon = cifs_get_tcon(ses, ctx);
	if (IS_ERR(tcon)) {
		cifs_put_smb_ses(ses);
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(ses))
		reset_cifs_unix_caps(0, tcon, NULL, ctx);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

out:
	kfree(ctx->username);
	kfree_sensitive(ctx->password);
	kfree(ctx);

	return tcon;
}

struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
	return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
}

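/*
 * On multiuser mounts each user gets its own tcon_link, keyed by fsuid and
 * kept in cifs_sb->tlink_tree under cifs_sb->tlink_tree_lock.  The two
 * helpers below are the search and insert primitives for that rbtree.
 */
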
/* find and return a tlink with given uid */
static struct tcon_link *
tlink_rb_search(struct rb_root *root, kuid_t uid)
{
	struct rb_node *node = root->rb_node;
	struct tcon_link *tlink;

	while (node) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);

		if (uid_gt(tlink->tl_uid, uid))
			node = node->rb_left;
		else if (uid_lt(tlink->tl_uid, uid))
			node = node->rb_right;
		else
			return tlink;
	}
	return NULL;
}

/* insert a tcon_link into the tree */
static void
tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct tcon_link *tlink;

	while (*new) {
		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
		parent = *new;

		if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_tlink->tl_rbnode, parent, new);
	rb_insert_color(&new_tlink->tl_rbnode, root);
}

/*
 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
 * current task.
 *
 * If the superblock doesn't refer to a multiuser mount, then just return
 * the master tcon for the mount.
 *
 * First, search the rbtree for an existing tcon for this fsuid. If one
 * exists, then check to see if it's pending construction. If it is then wait
 * for construction to complete. Once it's no longer pending, check to see if
 * it failed and either return an error or retry construction, depending on
 * the timeout.
 *
 * If one doesn't exist then insert a new tcon_link struct into the tree and
 * try to construct a new one.
 */
struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
	int ret;
	kuid_t fsuid = current_fsuid();
	struct tcon_link *tlink, *newtlink;

	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));

	spin_lock(&cifs_sb->tlink_tree_lock);
	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
	if (tlink)
		cifs_get_tlink(tlink);
	spin_unlock(&cifs_sb->tlink_tree_lock);

	if (tlink == NULL) {
		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
		if (newtlink == NULL)
			return ERR_PTR(-ENOMEM);
		newtlink->tl_uid = fsuid;
		newtlink->tl_tcon = ERR_PTR(-EACCES);
		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
		cifs_get_tlink(newtlink);

		spin_lock(&cifs_sb->tlink_tree_lock);
		/* was one inserted after previous search? */
		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
		if (tlink) {
			cifs_get_tlink(tlink);
			spin_unlock(&cifs_sb->tlink_tree_lock);
			kfree(newtlink);
			goto wait_for_construction;
		}
		tlink = newtlink;
		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
		spin_unlock(&cifs_sb->tlink_tree_lock);
	} else {
wait_for_construction:
		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
				  TASK_INTERRUPTIBLE);
		if (ret) {
			cifs_put_tlink(tlink);
			return ERR_PTR(-ERESTARTSYS);
		}

		/* if it's good, return it */
		if (!IS_ERR(tlink->tl_tcon))
			return tlink;

		/* return error if we tried this already recently */
		if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
			cifs_put_tlink(tlink);
			return ERR_PTR(-EACCES);
		}

		if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
			goto wait_for_construction;
	}

	tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
	clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
	wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);

	if (IS_ERR(tlink->tl_tcon)) {
		cifs_put_tlink(tlink);
		return ERR_PTR(-EACCES);
	}

	return tlink;
}

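/*
 * Illustrative usage sketch (not copied from any single caller): a
 * successful cifs_sb_tlink() must be balanced by cifs_put_tlink() once
 * the tcon is no longer needed:
 *
 *	tlink = cifs_sb_tlink(cifs_sb);
 *	if (IS_ERR(tlink))
 *		return PTR_ERR(tlink);
 *	tcon = tlink_tcon(tlink);
 *	... issue SMB request(s) against tcon ...
 *	cifs_put_tlink(tlink);
 */
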
/*
 * periodic workqueue job that scans tcon_tree for a superblock and closes
 * out tcons.
 */
static void
cifs_prune_tlinks(struct work_struct *work)
{
	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
						    prune_tlinks.work);
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct rb_node *tmp;
	struct tcon_link *tlink;

	/*
	 * Because we drop the spinlock in the loop in order to put the tlink
	 * it's not guarded against removal of links from the tree. The only
	 * places that remove entries from the tree are this function and
	 * umounts. Because this function is non-reentrant and is canceled
	 * before umount can proceed, this is safe.
	 */
	spin_lock(&cifs_sb->tlink_tree_lock);
	node = rb_first(root);
	while (node != NULL) {
		tmp = node;
		node = rb_next(tmp);
		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);

		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
		    atomic_read(&tlink->tl_count) != 0 ||
		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
			continue;

		cifs_get_tlink(tlink);
		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
		rb_erase(tmp, root);

		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
			   TLINK_IDLE_EXPIRE);
}

#ifndef CONFIG_CIFS_DFS_UPCALL
int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
{
	int rc;
	const struct smb_version_operations *ops = tcon->ses->server->ops;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_GOOD) {
		spin_unlock(&tcon->tc_lock);
		return 0;
	}

	if (tcon->status != TID_NEW &&
	    tcon->status != TID_NEED_TCON) {
		spin_unlock(&tcon->tc_lock);
		return -EHOSTDOWN;
	}

	tcon->status = TID_IN_TCON;
	spin_unlock(&tcon->tc_lock);

	rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc);
	if (rc) {
		spin_lock(&tcon->tc_lock);
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_NEED_TCON;
		spin_unlock(&tcon->tc_lock);
	} else {
		spin_lock(&tcon->tc_lock);
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_GOOD;
		tcon->need_reconnect = false;
		spin_unlock(&tcon->tc_lock);
	}

	return rc;
}
#endif