1 // SPDX-License-Identifier: LGPL-2.1 2 /* 3 * 4 * Copyright (C) International Business Machines Corp., 2002,2011 5 * Author(s): Steve French (sfrench@us.ibm.com) 6 * 7 */ 8 #include <linux/fs.h> 9 #include <linux/net.h> 10 #include <linux/string.h> 11 #include <linux/sched/mm.h> 12 #include <linux/sched/signal.h> 13 #include <linux/list.h> 14 #include <linux/wait.h> 15 #include <linux/slab.h> 16 #include <linux/pagemap.h> 17 #include <linux/ctype.h> 18 #include <linux/utsname.h> 19 #include <linux/mempool.h> 20 #include <linux/delay.h> 21 #include <linux/completion.h> 22 #include <linux/kthread.h> 23 #include <linux/pagevec.h> 24 #include <linux/freezer.h> 25 #include <linux/namei.h> 26 #include <linux/uuid.h> 27 #include <linux/uaccess.h> 28 #include <asm/processor.h> 29 #include <linux/inet.h> 30 #include <linux/module.h> 31 #include <keys/user-type.h> 32 #include <net/ipv6.h> 33 #include <linux/parser.h> 34 #include <linux/bvec.h> 35 #include "cifspdu.h" 36 #include "cifsglob.h" 37 #include "cifsproto.h" 38 #include "cifs_unicode.h" 39 #include "cifs_debug.h" 40 #include "cifs_fs_sb.h" 41 #include "ntlmssp.h" 42 #include "nterr.h" 43 #include "rfc1002pdu.h" 44 #include "fscache.h" 45 #include "smb2proto.h" 46 #include "smbdirect.h" 47 #include "dns_resolve.h" 48 #ifdef CONFIG_CIFS_DFS_UPCALL 49 #include "dfs.h" 50 #include "dfs_cache.h" 51 #endif 52 #include "fs_context.h" 53 #include "cifs_swn.h" 54 55 extern mempool_t *cifs_req_poolp; 56 extern bool disable_legacy_dialects; 57 58 /* FIXME: should these be tunable? 
 */
#define TLINK_ERROR_EXPIRE	(1 * HZ)
#define TLINK_IDLE_EXPIRE	(600 * HZ)

/* Drop the connection to not overload the server */
#define MAX_STATUS_IO_TIMEOUT   5

static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);

/*
 * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
 * get their ip addresses changed at some point.
 *
 * This should be called with server->srv_mutex held.
 */
static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
{
	int rc;
	int len;
	char *unc;
	struct sockaddr_storage ss;

	if (!server->hostname)
		return -EINVAL;

	/* if server hostname isn't populated, there's nothing to do here */
	if (server->hostname[0] == '\0')
		return 0;

	/* +3: the two leading backslashes of the UNC prefix plus the NUL */
	len = strlen(server->hostname) + 3;

	unc = kmalloc(len, GFP_KERNEL);
	if (!unc) {
		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
		return -ENOMEM;
	}
	scnprintf(unc, len, "\\\\%s", server->hostname);

	/* resolve into a stack copy so srv_lock is not held across DNS */
	spin_lock(&server->srv_lock);
	ss = server->dstaddr;
	spin_unlock(&server->srv_lock);

	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
	kfree(unc);

	if (rc < 0) {
		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
			 __func__, server->hostname, rc);
	} else {
		/* publish the freshly resolved address under srv_lock */
		spin_lock(&server->srv_lock);
		memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr));
		spin_unlock(&server->srv_lock);
		rc = 0;
	}

	return rc;
}

/*
 * Delayed work: periodically re-query the server's network interfaces in
 * case they changed; re-arms itself unless the server reports the request
 * as unsupported (-EOPNOTSUPP), in which case polling stops for good.
 */
static void smb2_query_server_interfaces(struct work_struct *work)
{
	int rc;
	int xid;
	struct cifs_tcon *tcon = container_of(work,
					struct cifs_tcon,
					query_interfaces.work);
	struct TCP_Server_Info *server = tcon->ses->server;

	/*
	 * query server network interfaces, in case they change
	 */
	if (!server->ops->query_server_interfaces)
		return;

	xid = get_xid();
	rc = server->ops->query_server_interfaces(xid, tcon, false);
	free_xid(xid);

	if (rc) {
		/* server does not support the query: do not re-arm */
		if (rc == -EOPNOTSUPP)
			return;

		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
				__func__, rc);
	}

	/* re-arm ourselves for the next poll interval */
	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
}

/*
 * Update the tcpStatus for the server.
 * This is used to signal the cifsd thread to call cifs_reconnect
 * ONLY cifsd thread should call cifs_reconnect. For any other
 * thread, use this function
 *
 * @server: the tcp ses for which reconnect is needed
 * @all_channels: if this needs to be done for all channels
 */
void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
				bool all_channels)
{
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	int i;

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	/* if we need to signal just this channel */
	if (!all_channels) {
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsExiting)
			server->tcpStatus = CifsNeedReconnect;
		spin_unlock(&server->srv_lock);
		return;
	}

	/* otherwise flag every channel of every session on the primary */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		spin_lock(&ses->chan_lock);
		for (i = 0; i < ses->chan_count; i++) {
			if (!ses->chans[i].server)
				continue;

			spin_lock(&ses->chans[i].server->srv_lock);
			if (ses->chans[i].server->tcpStatus != CifsExiting)
				ses->chans[i].server->tcpStatus = CifsNeedReconnect;
			spin_unlock(&ses->chans[i].server->srv_lock);
		}
		spin_unlock(&ses->chan_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);
}

/*
 * Mark all sessions and tcons for reconnect.
 * IMPORTANT: make sure that this gets called only from
 * cifsd thread. For any other thread, use
 * cifs_signal_cifsd_for_reconnect
 *
 * @server: the tcp ses for which reconnect is needed
 * @server needs to be previously set to CifsNeedReconnect.
 * @mark_smb_session: whether even sessions need to be marked
 */
void
cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
				      bool mark_smb_session)
{
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses, *nses;
	struct cifs_tcon *tcon;

	/*
	 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
	 * are not used until reconnected.
	 */
	cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	/*
	 * if the server has been marked for termination, there is a
	 * chance that the remaining channels all need reconnect. To be
	 * on the safer side, mark the session and trees for reconnect
	 * for this scenario. This might cause a few redundant session
	 * setup and tree connect requests, but it is better than not doing
	 * a tree connect when needed, and all following requests failing
	 */
	if (server->terminate) {
		mark_smb_session = true;
		server = pserver;
	}

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
		/* check if iface is still active */
		spin_lock(&ses->chan_lock);
		if (cifs_ses_get_chan_index(ses, server) ==
		    CIFS_INVAL_CHAN_INDEX) {
			spin_unlock(&ses->chan_lock);
			continue;
		}

		if (!cifs_chan_is_iface_active(ses, server)) {
			/* drop chan_lock around the iface update */
			spin_unlock(&ses->chan_lock);
			cifs_chan_update_iface(ses, server);
			spin_lock(&ses->chan_lock);
		}

		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
			spin_unlock(&ses->chan_lock);
			continue;
		}

		if (mark_smb_session)
			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
		else
			cifs_chan_set_need_reconnect(ses, server);

		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
			 __func__, ses->chans_need_reconnect);

		/* If all channels need reconnect, then tcon needs reconnect */
		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
			spin_unlock(&ses->chan_lock);
			continue;
		}
		spin_unlock(&ses->chan_lock);

		spin_lock(&ses->ses_lock);
		ses->ses_status = SES_NEED_RECON;
		spin_unlock(&ses->ses_lock);

		/* mark every tree connection (and the IPC one) stale too */
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			tcon->need_reconnect = true;
			spin_lock(&tcon->tc_lock);
			tcon->status = TID_NEED_RECON;
			spin_unlock(&tcon->tc_lock);

			cancel_delayed_work(&tcon->query_interfaces);
		}
		if (ses->tcon_ipc) {
			ses->tcon_ipc->need_reconnect = true;
			spin_lock(&ses->tcon_ipc->tc_lock);
			ses->tcon_ipc->status = TID_NEED_RECON;
			spin_unlock(&ses->tcon_ipc->tc_lock);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}

/*
 * Tear down the transport (socket or smbdirect) and fail every in-flight
 * mid so its issuer can retry: shut down and release the socket, reset
 * signing/session state, then move all pending mids to a private list and
 * invoke their callbacks with MID_RETRY_NEEDED.
 */
static void
cifs_abort_connection(struct TCP_Server_Info *server)
{
	struct mid_q_entry *mid, *nmid;
	struct list_head retry_list;

	server->maxBuf = 0;
	server->max_read = 0;

	/* do not want to be sending data on a socket we are freeing */
	cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
	cifs_server_lock(server);
	if (server->ssocket) {
		cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		kernel_sock_shutdown(server->ssocket, SHUT_WR);
		cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}
	server->sequence_number = 0;
	server->session_estab = false;
	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	server->lstrp = jiffies;

	/* mark submitted MIDs for retry and issue callback */
	INIT_LIST_HEAD(&retry_list);
	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
	spin_lock(&server->mid_lock);
	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
		kref_get(&mid->refcount);
		if (mid->mid_state == MID_REQUEST_SUBMITTED)
			mid->mid_state = MID_RETRY_NEEDED;
		list_move(&mid->qhead, &retry_list);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&server->mid_lock);
	cifs_server_unlock(server);

	/* callbacks may sleep, so run them without mid_lock held */
	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
	list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
		list_del_init(&mid->qhead);
		mid->callback(mid);
		release_mid(mid);
	}

	if (cifs_rdma_enabled(server)) {
		cifs_server_lock(server);
		smbd_destroy(server);
		cifs_server_unlock(server);
	}
}

/*
 * Flip the server to CifsNeedReconnect (recording @num_targets for the
 * reconnect wait logic) and report whether a reconnect should proceed.
 * Returns false if the server is already exiting; in that case waiters
 * on response_q are woken instead.
 */
static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
{
	spin_lock(&server->srv_lock);
	server->nr_targets = num_targets;
	if (server->tcpStatus == CifsExiting) {
		/* the demux thread will exit normally next time through the loop */
		spin_unlock(&server->srv_lock);
		wake_up(&server->response_q);
		return false;
	}

	cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
	trace_smb3_reconnect(server->CurrentMid, server->conn_id,
			     server->hostname);
	server->tcpStatus = CifsNeedReconnect;

	spin_unlock(&server->srv_lock);
	return true;
}

/*
 * cifs tcp session reconnection
 *
 * mark tcp session as reconnecting so temporarily locked
 * mark all smb sessions as reconnecting for tcp session
 * reconnect tcp session
 * wake up waiters on reconnection? - (not needed currently)
 *
 * if mark_smb_session is passed as true, unconditionally mark
 * the smb session (and tcon) for reconnect as well. This value
 * doesn't really matter for non-multichannel scenario.
 *
 */
static int __cifs_reconnect(struct TCP_Server_Info *server,
			    bool mark_smb_session)
{
	int rc = 0;

	if (!cifs_tcp_ses_needs_reconnect(server, 1))
		return 0;

	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);

	cifs_abort_connection(server);

	/* keep retrying (with a 3s backoff) until we connect or exit */
	do {
		try_to_freeze();
		cifs_server_lock(server);

		if (!cifs_swn_set_server_dstaddr(server)) {
			/* resolve the hostname again to make sure that IP address is up-to-date */
			rc = reconn_set_ipaddr_from_hostname(server);
			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
		}

		if (cifs_rdma_enabled(server))
			rc = smbd_reconnect(server);
		else
			rc = generic_ip_connect(server);
		if (rc) {
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			msleep(3000);
		} else {
			atomic_inc(&tcpSesReconnectCount);
			set_credits(server, 1);
			spin_lock(&server->srv_lock);
			if (server->tcpStatus != CifsExiting)
				server->tcpStatus = CifsNeedNegotiate;
			spin_unlock(&server->srv_lock);
			cifs_swn_reset_server_dstaddr(server);
			cifs_server_unlock(server);
			mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
		}
	} while (server->tcpStatus == CifsNeedReconnect);

	/* restart the echo worker now that negotiation is pending again */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}

#ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * Attempt a socket reconnect to a single DFS @target: swap in the new
 * hostname (unless @target is already the current hostname), re-resolve
 * it, then redo the TCP or smbdirect connect.
 */
static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
{
	int rc;
	char *hostname;

	if (!cifs_swn_set_server_dstaddr(server)) {
		if (server->hostname != target) {
			hostname = extract_hostname(target);
			if (!IS_ERR(hostname)) {
				spin_lock(&server->srv_lock);
				kfree(server->hostname);
				server->hostname = hostname;
				spin_unlock(&server->srv_lock);
			} else {
				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
					 __func__, PTR_ERR(hostname));
				cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
					 server->hostname);
			}
		}
		/* resolve the hostname again to make sure that IP address is up-to-date. */
		rc = reconn_set_ipaddr_from_hostname(server);
		cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
	}
	/* Reconnect the socket */
	if (cifs_rdma_enabled(server))
		rc = smbd_reconnect(server);
	else
		rc = generic_ip_connect(server);

	return rc;
}

/*
 * Walk the DFS target list @tl and connect to the first target that
 * works, returning it through @target_hint; with an empty list, fall
 * back to the last known hostname.
 */
static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
				     struct dfs_cache_tgt_iterator **target_hint)
{
	int rc;
	struct dfs_cache_tgt_iterator *tit;

	*target_hint = NULL;

	/* If dfs target list is empty, then reconnect to last server */
	tit = dfs_cache_get_tgt_iterator(tl);
	if (!tit)
		return __reconnect_target_unlocked(server, server->hostname);

	/* Otherwise, try every dfs target in @tl */
	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
		rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
		if (!rc) {
			*target_hint = tit;
			break;
		}
	}
	return rc;
}

static int reconnect_dfs_server(struct TCP_Server_Info *server)
{
	struct dfs_cache_tgt_iterator *target_hint = NULL;
	DFS_CACHE_TGT_LIST(tl);
	int num_targets = 0;
	int rc = 0;

	/*
	 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
	 *
	 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
	 * targets (server->nr_targets).
	 * It's also possible that the cached referral was cleared
	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
	 * refreshing the referral, so, in this case, default it to 1.
	 */
	mutex_lock(&server->refpath_lock);
	if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
		num_targets = dfs_cache_get_nr_tgts(&tl);
	mutex_unlock(&server->refpath_lock);
	if (!num_targets)
		num_targets = 1;

	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
		return 0;

	/*
	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
	 * different server or share during failover. It could be improved by adding some logic to
	 * only do that in case it connects to a different server or share, though.
	 */
	cifs_mark_tcp_ses_conns_for_reconnect(server, true);

	cifs_abort_connection(server);

	/* retry the target list (3s backoff on failure) until connected or exiting */
	do {
		try_to_freeze();
		cifs_server_lock(server);

		rc = reconnect_target_unlocked(server, &tl, &target_hint);
		if (rc) {
			/* Failed to reconnect socket */
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			msleep(3000);
			continue;
		}
		/*
		 * Socket was created. Update tcp session status to CifsNeedNegotiate so that a
		 * process waiting for reconnect will know it needs to re-establish session and tcon
		 * through the reconnected target server.
		 */
		atomic_inc(&tcpSesReconnectCount);
		set_credits(server, 1);
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsExiting)
			server->tcpStatus = CifsNeedNegotiate;
		spin_unlock(&server->srv_lock);
		cifs_swn_reset_server_dstaddr(server);
		cifs_server_unlock(server);
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
	} while (server->tcpStatus == CifsNeedReconnect);

	/* remember which target worked so later lookups prefer it */
	mutex_lock(&server->refpath_lock);
	dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
	mutex_unlock(&server->refpath_lock);
	dfs_cache_free_tgts(&tl);

	/* Need to set up echo worker again once connection has been established */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}

/*
 * Entry point for reconnecting a TCP session from the cifsd thread: DFS
 * mounts (leaf_fullpath set) go through the target-failover path, plain
 * mounts through __cifs_reconnect().
 */
int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
	mutex_lock(&server->refpath_lock);
	if (!server->leaf_fullpath) {
		mutex_unlock(&server->refpath_lock);
		return __cifs_reconnect(server, mark_smb_session);
	}
	mutex_unlock(&server->refpath_lock);

	return reconnect_dfs_server(server);
}
#else
/* Without DFS support, every reconnect goes through the plain path. */
int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
	return __cifs_reconnect(server, mark_smb_session);
}
#endif

/*
 * Periodic delayed work: send an SMB echo to keep the session alive and
 * detect dead servers; also triggers witness (swn) registration checks.
 */
static void
cifs_echo_request(struct work_struct *work)
{
	int rc;
	struct TCP_Server_Info *server = container_of(work,
					struct TCP_Server_Info, echo.work);

	/*
	 * We cannot send an echo if it is disabled.
	 * Also, no need to ping if we got a response recently.
598 */ 599 600 if (server->tcpStatus == CifsNeedReconnect || 601 server->tcpStatus == CifsExiting || 602 server->tcpStatus == CifsNew || 603 (server->ops->can_echo && !server->ops->can_echo(server)) || 604 time_before(jiffies, server->lstrp + server->echo_interval - HZ)) 605 goto requeue_echo; 606 607 rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS; 608 cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc); 609 610 /* Check witness registrations */ 611 cifs_swn_check(); 612 613 requeue_echo: 614 queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval); 615 } 616 617 static bool 618 allocate_buffers(struct TCP_Server_Info *server) 619 { 620 if (!server->bigbuf) { 621 server->bigbuf = (char *)cifs_buf_get(); 622 if (!server->bigbuf) { 623 cifs_server_dbg(VFS, "No memory for large SMB response\n"); 624 msleep(3000); 625 /* retry will check if exiting */ 626 return false; 627 } 628 } else if (server->large_buf) { 629 /* we are reusing a dirty large buf, clear its start */ 630 memset(server->bigbuf, 0, HEADER_SIZE(server)); 631 } 632 633 if (!server->smallbuf) { 634 server->smallbuf = (char *)cifs_small_buf_get(); 635 if (!server->smallbuf) { 636 cifs_server_dbg(VFS, "No memory for SMB response\n"); 637 msleep(1000); 638 /* retry will check if exiting */ 639 return false; 640 } 641 /* beginning of smb buffer is cleared in our buf_get */ 642 } else { 643 /* if existing small buf clear beginning */ 644 memset(server->smallbuf, 0, HEADER_SIZE(server)); 645 } 646 647 return true; 648 } 649 650 static bool 651 server_unresponsive(struct TCP_Server_Info *server) 652 { 653 /* 654 * We need to wait 3 echo intervals to make sure we handle such 655 * situations right: 656 * 1s client sends a normal SMB request 657 * 2s client gets a response 658 * 30s echo workqueue job pops, and decides we got a response recently 659 * and don't need to send another 660 * ... 
	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
	 * a response in >60s.
	 */
	spin_lock(&server->srv_lock);
	if ((server->tcpStatus == CifsGood ||
	     server->tcpStatus == CifsNeedNegotiate) &&
	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
		spin_unlock(&server->srv_lock);
		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
			 (3 * server->echo_interval) / HZ);
		cifs_reconnect(server, false);
		return true;
	}
	spin_unlock(&server->srv_lock);

	return false;
}

/*
 * True when the server has no credits of any kind and no requests in
 * flight, i.e. no response can ever arrive to grant more credits; the
 * caller breaks that deadlock by reconnecting.
 */
static inline bool
zero_credits(struct TCP_Server_Info *server)
{
	int val;

	spin_lock(&server->req_lock);
	val = server->credits + server->echo_credits + server->oplock_credits;
	if (server->in_flight == 0 && val == 0) {
		spin_unlock(&server->req_lock);
		return true;
	}
	spin_unlock(&server->req_lock);
	return false;
}

/*
 * Pull bytes off the transport (socket or smbdirect) into @smb_msg until
 * the iterator is exhausted, reconnecting on stalls, credit starvation
 * or hard errors. Returns total bytes read or a negative errno.
 */
static int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
{
	int length = 0;
	int total_read;

	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
		try_to_freeze();

		/* reconnect if no credits and no requests in flight */
		if (zero_credits(server)) {
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}

		if (server_unresponsive(server))
			return -ECONNABORTED;
		if (cifs_rdma_enabled(server) && server->smbd_conn)
			length = smbd_recv(server->smbd_conn, smb_msg);
		else
			length = sock_recvmsg(server->ssocket, smb_msg, 0);

		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ESHUTDOWN;
		}

		if (server->tcpStatus == CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
		spin_unlock(&server->srv_lock);

		if (length == -ERESTARTSYS ||
		    length == -EAGAIN ||
		    length == -EINTR) {
			/*
			 * Minimum sleep to prevent looping, allowing socket
			 * to clear and app threads to set tcpStatus
			 * CifsNeedReconnect if server hung.
			 */
			usleep_range(1000, 2000);
			length = 0;
			continue;
		}

		if (length <= 0) {
			cifs_dbg(FYI, "Received no data or error: %d\n", length);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
	}
	return total_read;
}

/* Read exactly @to_read bytes from the server into the flat buffer @buf. */
int
cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
		      unsigned int to_read)
{
	struct msghdr smb_msg = {};
	struct kvec iov = {.iov_base = buf, .iov_len = to_read};
	iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);

	return cifs_readv_from_socket(server, &smb_msg);
}

/* Consume and discard @to_read bytes from the server. */
ssize_t
cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
{
	struct msghdr smb_msg = {};

	/*
	 * iov_iter_discard already sets smb_msg.type and count and iov_offset
	 * and cifs_readv_from_socket sets msg_control and msg_controllen
	 * so little to initialize in struct msghdr
	 */
	iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);

	return cifs_readv_from_socket(server, &smb_msg);
}

/* Read @to_read bytes from the server into @page at @page_offset. */
int
cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
			   unsigned int page_offset, unsigned int to_read)
{
	struct msghdr smb_msg = {};
	struct bio_vec bv;

	bvec_set_page(&bv, page, to_read, page_offset);
	iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
	return cifs_readv_from_socket(server, &smb_msg);
}

/* Read up to @to_read bytes into @iter, advancing it by the amount read. */
int
cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter,
			   unsigned int to_read)
{
	struct msghdr smb_msg = { .msg_iter = *iter };
	int ret;

	iov_iter_truncate(&smb_msg.msg_iter, to_read);
	ret = cifs_readv_from_socket(server, &smb_msg);
	if (ret > 0)
		iov_iter_advance(iter, ret);
	return ret;
}

/*
 * Decide from the RFC 1002 type byte whether the incoming frame is a real
 * SMB response (only SESSION_MESSAGE is). Keep-alives and session
 * responses are logged and dropped; a negative session response or an
 * unknown type forces a reconnect.
 */
static bool
is_smb_response(struct TCP_Server_Info *server, unsigned char type)
{
	/*
	 * The first byte big endian of the length field,
	 * is actually not part of the length but the type
	 * with the most common, zero, as regular data.
	 */
	switch (type) {
	case RFC1002_SESSION_MESSAGE:
		/* Regular SMB response */
		return true;
	case RFC1002_SESSION_KEEP_ALIVE:
		cifs_dbg(FYI, "RFC 1002 session keep alive\n");
		break;
	case RFC1002_POSITIVE_SESSION_RESPONSE:
		cifs_dbg(FYI, "RFC 1002 positive session response\n");
		break;
	case RFC1002_NEGATIVE_SESSION_RESPONSE:
		/*
		 * We get this from Windows 98 instead of an error on
		 * SMB negprot response.
		 */
		cifs_dbg(FYI, "RFC 1002 negative session response\n");
		/* give server a second to clean up */
		msleep(1000);
		/*
		 * Always try 445 first on reconnect since we get NACK
		 * on some if we ever connected to port 139 (the NACK
		 * is since we do not begin with RFC1001 session
		 * initialize frame).
		 */
		cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
		cifs_reconnect(server, true);
		break;
	default:
		cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
		cifs_reconnect(server, true);
	}

	return false;
}

/*
 * Remove @mid from the pending queue, recording whether its response was
 * well-formed; tolerates (and warns once about) mids already removed.
 */
void
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
	mid->when_received = jiffies;
#endif
	spin_lock(&mid->server->mid_lock);
	if (!malformed)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		mid->mid_state = MID_RESPONSE_MALFORMED;
	/*
	 * Trying to handle/dequeue a mid after the send_recv()
	 * function has finished processing it is a bug.
861 */ 862 if (mid->mid_flags & MID_DELETED) { 863 spin_unlock(&mid->server->mid_lock); 864 pr_warn_once("trying to dequeue a deleted mid\n"); 865 } else { 866 list_del_init(&mid->qhead); 867 mid->mid_flags |= MID_DELETED; 868 spin_unlock(&mid->server->mid_lock); 869 } 870 } 871 872 static unsigned int 873 smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server) 874 { 875 struct smb2_hdr *shdr = (struct smb2_hdr *)buffer; 876 877 /* 878 * SMB1 does not use credits. 879 */ 880 if (is_smb1(server)) 881 return 0; 882 883 return le16_to_cpu(shdr->CreditRequest); 884 } 885 886 static void 887 handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server, 888 char *buf, int malformed) 889 { 890 if (server->ops->check_trans2 && 891 server->ops->check_trans2(mid, server, buf, malformed)) 892 return; 893 mid->credits_received = smb2_get_credits_from_hdr(buf, server); 894 mid->resp_buf = buf; 895 mid->large_buf = server->large_buf; 896 /* Was previous buf put in mpx struct for multi-rsp? */ 897 if (!mid->multiRsp) { 898 /* smb buffer will be freed by user thread */ 899 if (server->large_buf) 900 server->bigbuf = NULL; 901 else 902 server->smallbuf = NULL; 903 } 904 dequeue_mid(mid, malformed); 905 } 906 907 int 908 cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) 909 { 910 bool srv_sign_required = server->sec_mode & server->vals->signing_required; 911 bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; 912 bool mnt_sign_enabled; 913 914 /* 915 * Is signing required by mnt options? If not then check 916 * global_secflags to see if it is there. 917 */ 918 if (!mnt_sign_required) 919 mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) == 920 CIFSSEC_MUST_SIGN); 921 922 /* 923 * If signing is required then it's automatically enabled too, 924 * otherwise, check to see if the secflags allow it. 925 */ 926 mnt_sign_enabled = mnt_sign_required ? 
mnt_sign_required : 927 (global_secflags & CIFSSEC_MAY_SIGN); 928 929 /* If server requires signing, does client allow it? */ 930 if (srv_sign_required) { 931 if (!mnt_sign_enabled) { 932 cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n"); 933 return -EOPNOTSUPP; 934 } 935 server->sign = true; 936 } 937 938 /* If client requires signing, does server allow it? */ 939 if (mnt_sign_required) { 940 if (!srv_sign_enabled) { 941 cifs_dbg(VFS, "Server does not support signing!\n"); 942 return -EOPNOTSUPP; 943 } 944 server->sign = true; 945 } 946 947 if (cifs_rdma_enabled(server) && server->sign) 948 cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n"); 949 950 return 0; 951 } 952 953 static noinline_for_stack void 954 clean_demultiplex_info(struct TCP_Server_Info *server) 955 { 956 int length; 957 958 /* take it off the list, if it's not already */ 959 spin_lock(&server->srv_lock); 960 list_del_init(&server->tcp_ses_list); 961 spin_unlock(&server->srv_lock); 962 963 cancel_delayed_work_sync(&server->echo); 964 965 spin_lock(&server->srv_lock); 966 server->tcpStatus = CifsExiting; 967 spin_unlock(&server->srv_lock); 968 wake_up_all(&server->response_q); 969 970 /* check if we have blocked requests that need to free */ 971 spin_lock(&server->req_lock); 972 if (server->credits <= 0) 973 server->credits = 1; 974 spin_unlock(&server->req_lock); 975 /* 976 * Although there should not be any requests blocked on this queue it 977 * can not hurt to be paranoid and try to wake up requests that may 978 * haven been blocked when more than 50 at time were on the wire to the 979 * same server - they now will see the session is in exit state and get 980 * out of SendReceive. 
	 * out of SendReceive.
	 */
	wake_up_all(&server->request_q);
	/* give those requests time to exit */
	msleep(125);
	if (cifs_rdma_enabled(server))
		smbd_destroy(server);
	if (server->ssocket) {
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}

	if (!list_empty(&server->pending_mid_q)) {
		struct list_head dispose_list;
		struct mid_q_entry *mid_entry;
		struct list_head *tmp, *tmp2;

		/* move remaining mids to a private list, then run callbacks unlocked */
		INIT_LIST_HEAD(&dispose_list);
		spin_lock(&server->mid_lock);
		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
			kref_get(&mid_entry->refcount);
			mid_entry->mid_state = MID_SHUTDOWN;
			list_move(&mid_entry->qhead, &dispose_list);
			mid_entry->mid_flags |= MID_DELETED;
		}
		spin_unlock(&server->mid_lock);

		/* now walk dispose list and issue callbacks */
		list_for_each_safe(tmp, tmp2, &dispose_list) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
			list_del_init(&mid_entry->qhead);
			mid_entry->callback(mid_entry);
			release_mid(mid_entry);
		}
		/* 1/8th of sec is more than enough time for them to exit */
		msleep(125);
	}

	if (!list_empty(&server->pending_mid_q)) {
		/*
		 * mpx threads have not exited yet give them at least the smb
		 * send timeout time for long ops.
		 *
		 * Due to delays on oplock break requests, we need to wait at
		 * least 45 seconds before giving up on a request getting a
		 * response and going ahead and killing cifsd.
		 */
		cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
		msleep(46000);
		/*
		 * If threads still have not exited they are probably never
		 * coming home not much else we can do but free the memory.
		 */
	}

	kfree(server->leaf_fullpath);
	kfree(server);

	/* shrink the request mempool back down for the remaining sessions */
	length = atomic_dec_return(&tcpSesAllocCount);
	if (length > 0)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
}

/*
 * Read the remainder of an SMB response into the small (or, when too big,
 * the large) buffer after the header has been received, then hand the
 * complete frame to cifs_handle_standard().
 */
static int
standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length;
	char *buf = server->smallbuf;
	unsigned int pdu_length = server->pdu_size;

	/* make sure this will fit in a large buffer */
	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
	    HEADER_PREAMBLE_SIZE(server)) {
		cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
		cifs_reconnect(server, true);
		return -ECONNABORTED;
	}

	/* switch to large buffer if too big for a small one */
	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
		server->large_buf = true;
		memcpy(server->bigbuf, buf, server->total_read);
		buf = server->bigbuf;
	}

	/* now read the rest */
	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
				       pdu_length - MID_HEADER_SIZE(server));

	if (length < 0)
		return length;
	server->total_read += length;

	dump_smb(buf, server->total_read);

	return cifs_handle_standard(server, mid);
}

/*
 * Validate a fully received frame and, when @mid is known, attach the
 * buffer to it; returns 0 when the mid was handled, or rc / -1 when the
 * caller should deal with the buffer itself.
 */
int
cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
	int rc;

	/*
	 * We know that we received enough to get to the MID as we
	 * checked the pdu_length earlier. Now check to see
	 * if the rest of the header is OK.
	 *
	 * 48 bytes is enough to display the header and a little bit
	 * into the payload for debugging purposes.
1094 */ 1095 rc = server->ops->check_message(buf, server->total_read, server); 1096 if (rc) 1097 cifs_dump_mem("Bad SMB: ", buf, 1098 min_t(unsigned int, server->total_read, 48)); 1099 1100 if (server->ops->is_session_expired && 1101 server->ops->is_session_expired(buf)) { 1102 cifs_reconnect(server, true); 1103 return -1; 1104 } 1105 1106 if (server->ops->is_status_pending && 1107 server->ops->is_status_pending(buf, server)) 1108 return -1; 1109 1110 if (!mid) 1111 return rc; 1112 1113 handle_mid(mid, server, buf, rc); 1114 return 0; 1115 } 1116 1117 static void 1118 smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server) 1119 { 1120 struct smb2_hdr *shdr = (struct smb2_hdr *)buffer; 1121 int scredits, in_flight; 1122 1123 /* 1124 * SMB1 does not use credits. 1125 */ 1126 if (is_smb1(server)) 1127 return; 1128 1129 if (shdr->CreditRequest) { 1130 spin_lock(&server->req_lock); 1131 server->credits += le16_to_cpu(shdr->CreditRequest); 1132 scredits = server->credits; 1133 in_flight = server->in_flight; 1134 spin_unlock(&server->req_lock); 1135 wake_up(&server->request_q); 1136 1137 trace_smb3_hdr_credits(server->CurrentMid, 1138 server->conn_id, server->hostname, scredits, 1139 le16_to_cpu(shdr->CreditRequest), in_flight); 1140 cifs_server_dbg(FYI, "%s: added %u credits total=%d\n", 1141 __func__, le16_to_cpu(shdr->CreditRequest), 1142 scredits); 1143 } 1144 } 1145 1146 1147 static int 1148 cifs_demultiplex_thread(void *p) 1149 { 1150 int i, num_mids, length; 1151 struct TCP_Server_Info *server = p; 1152 unsigned int pdu_length; 1153 unsigned int next_offset; 1154 char *buf = NULL; 1155 struct task_struct *task_to_wake = NULL; 1156 struct mid_q_entry *mids[MAX_COMPOUND]; 1157 char *bufs[MAX_COMPOUND]; 1158 unsigned int noreclaim_flag, num_io_timeout = 0; 1159 bool pending_reconnect = false; 1160 1161 noreclaim_flag = memalloc_noreclaim_save(); 1162 cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current)); 1163 1164 length = 
atomic_inc_return(&tcpSesAllocCount); 1165 if (length > 1) 1166 mempool_resize(cifs_req_poolp, length + cifs_min_rcv); 1167 1168 set_freezable(); 1169 allow_kernel_signal(SIGKILL); 1170 while (server->tcpStatus != CifsExiting) { 1171 if (try_to_freeze()) 1172 continue; 1173 1174 if (!allocate_buffers(server)) 1175 continue; 1176 1177 server->large_buf = false; 1178 buf = server->smallbuf; 1179 pdu_length = 4; /* enough to get RFC1001 header */ 1180 1181 length = cifs_read_from_socket(server, buf, pdu_length); 1182 if (length < 0) 1183 continue; 1184 1185 if (is_smb1(server)) 1186 server->total_read = length; 1187 else 1188 server->total_read = 0; 1189 1190 /* 1191 * The right amount was read from socket - 4 bytes, 1192 * so we can now interpret the length field. 1193 */ 1194 pdu_length = get_rfc1002_length(buf); 1195 1196 cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length); 1197 if (!is_smb_response(server, buf[0])) 1198 continue; 1199 1200 pending_reconnect = false; 1201 next_pdu: 1202 server->pdu_size = pdu_length; 1203 1204 /* make sure we have enough to get to the MID */ 1205 if (server->pdu_size < MID_HEADER_SIZE(server)) { 1206 cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n", 1207 server->pdu_size); 1208 cifs_reconnect(server, true); 1209 continue; 1210 } 1211 1212 /* read down to the MID */ 1213 length = cifs_read_from_socket(server, 1214 buf + HEADER_PREAMBLE_SIZE(server), 1215 MID_HEADER_SIZE(server)); 1216 if (length < 0) 1217 continue; 1218 server->total_read += length; 1219 1220 if (server->ops->next_header) { 1221 if (server->ops->next_header(server, buf, &next_offset)) { 1222 cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n", 1223 __func__, next_offset); 1224 cifs_reconnect(server, true); 1225 continue; 1226 } 1227 if (next_offset) 1228 server->pdu_size = next_offset; 1229 } 1230 1231 memset(mids, 0, sizeof(mids)); 1232 memset(bufs, 0, sizeof(bufs)); 1233 num_mids = 0; 1234 1235 if (server->ops->is_transform_hdr && 1236 
server->ops->receive_transform && 1237 server->ops->is_transform_hdr(buf)) { 1238 length = server->ops->receive_transform(server, 1239 mids, 1240 bufs, 1241 &num_mids); 1242 } else { 1243 mids[0] = server->ops->find_mid(server, buf); 1244 bufs[0] = buf; 1245 num_mids = 1; 1246 1247 if (!mids[0] || !mids[0]->receive) 1248 length = standard_receive3(server, mids[0]); 1249 else 1250 length = mids[0]->receive(server, mids[0]); 1251 } 1252 1253 if (length < 0) { 1254 for (i = 0; i < num_mids; i++) 1255 if (mids[i]) 1256 release_mid(mids[i]); 1257 continue; 1258 } 1259 1260 if (server->ops->is_status_io_timeout && 1261 server->ops->is_status_io_timeout(buf)) { 1262 num_io_timeout++; 1263 if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) { 1264 cifs_server_dbg(VFS, 1265 "Number of request timeouts exceeded %d. Reconnecting", 1266 MAX_STATUS_IO_TIMEOUT); 1267 1268 pending_reconnect = true; 1269 num_io_timeout = 0; 1270 } 1271 } 1272 1273 server->lstrp = jiffies; 1274 1275 for (i = 0; i < num_mids; i++) { 1276 if (mids[i] != NULL) { 1277 mids[i]->resp_buf_size = server->pdu_size; 1278 1279 if (bufs[i] != NULL) { 1280 if (server->ops->is_network_name_deleted && 1281 server->ops->is_network_name_deleted(bufs[i], 1282 server)) { 1283 cifs_server_dbg(FYI, 1284 "Share deleted. Reconnect needed"); 1285 } 1286 } 1287 1288 if (!mids[i]->multiRsp || mids[i]->multiEnd) 1289 mids[i]->callback(mids[i]); 1290 1291 release_mid(mids[i]); 1292 } else if (server->ops->is_oplock_break && 1293 server->ops->is_oplock_break(bufs[i], 1294 server)) { 1295 smb2_add_credits_from_hdr(bufs[i], server); 1296 cifs_dbg(FYI, "Received oplock break\n"); 1297 } else { 1298 cifs_server_dbg(VFS, "No task to wake, unknown frame received! 
NumMids %d\n", 1299 atomic_read(&mid_count)); 1300 cifs_dump_mem("Received Data is: ", bufs[i], 1301 HEADER_SIZE(server)); 1302 smb2_add_credits_from_hdr(bufs[i], server); 1303 #ifdef CONFIG_CIFS_DEBUG2 1304 if (server->ops->dump_detail) 1305 server->ops->dump_detail(bufs[i], 1306 server); 1307 cifs_dump_mids(server); 1308 #endif /* CIFS_DEBUG2 */ 1309 } 1310 } 1311 1312 if (pdu_length > server->pdu_size) { 1313 if (!allocate_buffers(server)) 1314 continue; 1315 pdu_length -= server->pdu_size; 1316 server->total_read = 0; 1317 server->large_buf = false; 1318 buf = server->smallbuf; 1319 goto next_pdu; 1320 } 1321 1322 /* do this reconnect at the very end after processing all MIDs */ 1323 if (pending_reconnect) 1324 cifs_reconnect(server, true); 1325 1326 } /* end while !EXITING */ 1327 1328 /* buffer usually freed in free_mid - need to free it here on exit */ 1329 cifs_buf_release(server->bigbuf); 1330 if (server->smallbuf) /* no sense logging a debug message if NULL */ 1331 cifs_small_buf_release(server->smallbuf); 1332 1333 task_to_wake = xchg(&server->tsk, NULL); 1334 clean_demultiplex_info(server); 1335 1336 /* if server->tsk was NULL then wait for a signal before exiting */ 1337 if (!task_to_wake) { 1338 set_current_state(TASK_INTERRUPTIBLE); 1339 while (!signal_pending(current)) { 1340 schedule(); 1341 set_current_state(TASK_INTERRUPTIBLE); 1342 } 1343 set_current_state(TASK_RUNNING); 1344 } 1345 1346 memalloc_noreclaim_restore(noreclaim_flag); 1347 module_put_and_kthread_exit(0); 1348 } 1349 1350 int 1351 cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs) 1352 { 1353 struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr; 1354 struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs; 1355 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; 1356 struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs; 1357 1358 switch (srcaddr->sa_family) { 1359 case AF_UNSPEC: 1360 switch (rhs->sa_family) { 1361 case AF_UNSPEC: 1362 return 0; 1363 
case AF_INET: 1364 case AF_INET6: 1365 return 1; 1366 default: 1367 return -1; 1368 } 1369 case AF_INET: { 1370 switch (rhs->sa_family) { 1371 case AF_UNSPEC: 1372 return -1; 1373 case AF_INET: 1374 return memcmp(saddr4, vaddr4, 1375 sizeof(struct sockaddr_in)); 1376 case AF_INET6: 1377 return 1; 1378 default: 1379 return -1; 1380 } 1381 } 1382 case AF_INET6: { 1383 switch (rhs->sa_family) { 1384 case AF_UNSPEC: 1385 case AF_INET: 1386 return -1; 1387 case AF_INET6: 1388 return memcmp(saddr6, 1389 vaddr6, 1390 sizeof(struct sockaddr_in6)); 1391 default: 1392 return -1; 1393 } 1394 } 1395 default: 1396 return -1; /* don't expect to be here */ 1397 } 1398 } 1399 1400 /* 1401 * Returns true if srcaddr isn't specified and rhs isn't specified, or 1402 * if srcaddr is specified and matches the IP address of the rhs argument 1403 */ 1404 bool 1405 cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs) 1406 { 1407 switch (srcaddr->sa_family) { 1408 case AF_UNSPEC: 1409 return (rhs->sa_family == AF_UNSPEC); 1410 case AF_INET: { 1411 struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr; 1412 struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs; 1413 return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr); 1414 } 1415 case AF_INET6: { 1416 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; 1417 struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs; 1418 return (ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr) 1419 && saddr6->sin6_scope_id == vaddr6->sin6_scope_id); 1420 } 1421 default: 1422 WARN_ON(1); 1423 return false; /* don't expect to be here */ 1424 } 1425 } 1426 1427 /* 1428 * If no port is specified in addr structure, we try to match with 445 port 1429 * and if it fails - with 139 ports. It should be called only if address 1430 * families of server and addr are equal. 
 */
static bool
match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
{
	__be16 port, *sport;

	/* SMBDirect manages its own ports, don't match it here */
	if (server->rdma)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
		port = ((struct sockaddr_in *) addr)->sin_port;
		break;
	case AF_INET6:
		sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
		port = ((struct sockaddr_in6 *) addr)->sin6_port;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	/* no explicit port requested: accept either SMB port */
	if (!port) {
		port = htons(CIFS_PORT);
		if (port == *sport)
			return true;

		port = htons(RFC1001_PORT);
	}

	return port == *sport;
}

/* true iff @addr matches the destination address of @server */
static bool match_server_address(struct TCP_Server_Info *server, struct sockaddr *addr)
{
	if (!cifs_match_ipaddr(addr, (struct sockaddr *)&server->dstaddr))
		return false;

	return true;
}

/*
 * Check whether the security settings negotiated on @server are compatible
 * with what the mount context @ctx asks for (sectype and signing).
 */
static bool
match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
	/*
	 * The select_sectype function should either return the ctx->sectype
	 * that was specified, or "Unspecified" if that sectype was not
	 * compatible with the given NEGOTIATE request.
	 */
	if (server->ops->select_sectype(server, ctx->sectype)
	     == Unspecified)
		return false;

	/*
	 * Now check if signing mode is acceptable. No need to check
	 * global_secflags at this point since if MUST_SIGN is set then
	 * the server->sign had better be too.
	 */
	if (ctx->sign && !server->sign)
		return false;

	return true;
}

/*
 * Decide whether an existing TCP connection can be shared for the mount
 * described by @ctx. Returns 1 on a match, 0 otherwise.
 *
 * this function must be called with srv_lock held
 */
static int match_server(struct TCP_Server_Info *server,
			struct smb3_fs_context *ctx,
			bool match_super)
{
	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;

	lockdep_assert_held(&server->srv_lock);

	if (ctx->nosharesock)
		return 0;

	/* this server does not share socket */
	if (server->nosharesock)
		return 0;

	/* If multidialect negotiation see if existing sessions match one */
	if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
		if (server->vals->protocol_id < SMB30_PROT_ID)
			return 0;
	} else if (strcmp(ctx->vals->version_string,
			  SMBDEFAULT_VERSION_STRING) == 0) {
		if (server->vals->protocol_id < SMB21_PROT_ID)
			return 0;
	} else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
		return 0;

	if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
		return 0;

	if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr,
			       (struct sockaddr *)&server->srcaddr))
		return 0;
	/*
	 * When matching cifs.ko superblocks (@match_super == true), we can't
	 * really match either @server->leaf_fullpath or @server->dstaddr
	 * directly since this @server might belong to a completely different
	 * server -- in case of domain-based DFS referrals or DFS links -- as
	 * provided earlier by mount(2) through 'source' and 'ip' options.
	 *
	 * Otherwise, match the DFS referral in @server->leaf_fullpath or the
	 * destination address in @server->dstaddr.
	 *
	 * When using 'nodfs' mount option, we avoid sharing it with DFS
	 * connections as they might failover.
	 */
	if (!match_super) {
		if (!ctx->nodfs) {
			if (server->leaf_fullpath) {
				if (!ctx->leaf_fullpath ||
				    strcasecmp(server->leaf_fullpath,
					       ctx->leaf_fullpath))
					return 0;
			} else if (ctx->leaf_fullpath) {
				return 0;
			}
		} else if (server->leaf_fullpath) {
			return 0;
		}
	}

	/*
	 * Match for a regular connection (address/hostname/port) which has no
	 * DFS referrals set.
	 */
	if (!server->leaf_fullpath &&
	    (strcasecmp(server->hostname, ctx->server_hostname) ||
	     !match_server_address(server, addr) ||
	     !match_port(server, addr)))
		return 0;

	if (!match_security(server, ctx))
		return 0;

	if (server->echo_interval != ctx->echo_interval * HZ)
		return 0;

	if (server->rdma != ctx->rdma)
		return 0;

	if (server->ignore_signature != ctx->ignore_signature)
		return 0;

	if (server->min_offload != ctx->min_offload)
		return 0;

	return 1;
}

/*
 * Look up an existing, shareable TCP connection matching @ctx.
 * On success the server's refcount (srv_count) is bumped and the
 * server is returned; NULL when no connection matches.
 */
struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		spin_lock(&server->srv_lock);
		/*
		 * Skip ses channels since they're only handled in lower layers
		 * (e.g. cifs_send_recv).
		 */
		if (SERVER_IS_CHAN(server) ||
		    !match_server(server, ctx, false)) {
			spin_unlock(&server->srv_lock);
			continue;
		}
		spin_unlock(&server->srv_lock);

		++server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		cifs_dbg(FYI, "Existing tcp session with server found\n");
		return server;
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}

/*
 * Drop a reference to @server; on the final put, tear the connection down:
 * cancel echo/reconnect work, mark the connection CifsExiting, release the
 * crypto machinery and session key, and SIGKILL the demultiplex thread
 * (which performs the remaining cleanup in clean_demultiplex_info()).
 */
void
cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
{
	struct task_struct *task;

	spin_lock(&cifs_tcp_ses_lock);
	if (--server->srv_count > 0) {
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	/* srv_count can never go negative */
	WARN_ON(server->srv_count < 0);

	put_net(cifs_net_ns(server));

	list_del_init(&server->tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	cancel_delayed_work_sync(&server->echo);

	if (from_reconnect)
		/*
		 * Avoid deadlock here: reconnect work calls
		 * cifs_put_tcp_session() at its end. Need to be sure
		 * that reconnect work does nothing with server pointer after
		 * that step.
		 */
		cancel_delayed_work(&server->reconnect);
	else
		cancel_delayed_work_sync(&server->reconnect);

	/* For secondary channels, we pick up ref-count on the primary server */
	if (SERVER_IS_CHAN(server))
		cifs_put_tcp_session(server->primary_server, from_reconnect);

	spin_lock(&server->srv_lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&server->srv_lock);

	cifs_crypto_secmech_release(server);

	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	kfree(server->hostname);
	server->hostname = NULL;

	/* wake the demultiplex thread so it can exit and finish cleanup */
	task = xchg(&server->tsk, NULL);
	if (task)
		send_sig(SIGKILL, task, 1);
}

/*
 * Find or create a TCP connection for @ctx. When @primary_server is non-NULL
 * the new connection is a secondary channel holding a reference on it.
 * Returns the server (refcounted) or an ERR_PTR on failure; the demultiplex
 * thread is spawned and echo work queued before the server goes on the list.
 */
struct TCP_Server_Info *
cifs_get_tcp_session(struct smb3_fs_context *ctx,
		     struct TCP_Server_Info *primary_server)
{
	struct TCP_Server_Info *tcp_ses = NULL;
	int rc;

	cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);

	/* see if we already have a matching tcp_ses */
	tcp_ses = cifs_find_tcp_session(ctx);
	if (tcp_ses)
		return tcp_ses;

	tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
	if (!tcp_ses) {
		rc = -ENOMEM;
		goto out_err;
	}

	tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
	if (!tcp_ses->hostname) {
		rc = -ENOMEM;
		goto out_err;
	}

	if (ctx->leaf_fullpath) {
		tcp_ses->leaf_fullpath = kstrdup(ctx->leaf_fullpath, GFP_KERNEL);
		if (!tcp_ses->leaf_fullpath) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	if (ctx->nosharesock)
		tcp_ses->nosharesock = true;

	tcp_ses->ops = ctx->ops;
	tcp_ses->vals = ctx->vals;
	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));

	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
	tcp_ses->noblockcnt = ctx->rootfs;
	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
	tcp_ses->noautotune = ctx->noautotune;
	tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
	tcp_ses->rdma = ctx->rdma;
	tcp_ses->in_flight = 0;
	tcp_ses->max_in_flight = 0;
	tcp_ses->credits = 1;
	if (primary_server) {
		spin_lock(&cifs_tcp_ses_lock);
		++primary_server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		tcp_ses->primary_server = primary_server;
	}
	init_waitqueue_head(&tcp_ses->response_q);
	init_waitqueue_head(&tcp_ses->request_q);
	INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
	mutex_init(&tcp_ses->_srv_mutex);
	memcpy(tcp_ses->workstation_RFC1001_name,
		ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	memcpy(tcp_ses->server_RFC1001_name,
		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	tcp_ses->session_estab = false;
	tcp_ses->sequence_number = 0;
	tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
	tcp_ses->reconnect_instance = 1;
	tcp_ses->lstrp = jiffies;
	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
	spin_lock_init(&tcp_ses->req_lock);
	spin_lock_init(&tcp_ses->srv_lock);
	spin_lock_init(&tcp_ses->mid_lock);
	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
	mutex_init(&tcp_ses->reconnect_mutex);
#ifdef CONFIG_CIFS_DFS_UPCALL
	mutex_init(&tcp_ses->refpath_lock);
#endif
	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
	       sizeof(tcp_ses->srcaddr));
	memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
		sizeof(tcp_ses->dstaddr));
	if (ctx->use_client_guid)
		memcpy(tcp_ses->client_guid, ctx->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
	else
		generate_random_uuid(tcp_ses->client_guid);
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread not created yet
	 * no need to spinlock this init of tcpStatus or srv_count
	 */
	tcp_ses->tcpStatus = CifsNew;
	++tcp_ses->srv_count;

	if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
	    ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
		tcp_ses->echo_interval = ctx->echo_interval * HZ;
	else
		tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
	if (tcp_ses->rdma) {
#ifndef CONFIG_CIFS_SMB_DIRECT
		cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
		rc = -ENOENT;
		goto out_err_crypto_release;
#endif
		tcp_ses->smbd_conn = smbd_get_connection(
			tcp_ses, (struct sockaddr *)&ctx->dstaddr);
		if (tcp_ses->smbd_conn) {
			cifs_dbg(VFS, "RDMA transport established\n");
			rc = 0;
			goto smbd_connected;
		} else {
			rc = -ENOENT;
			goto out_err_crypto_release;
		}
	}
	rc = ip_connect(tcp_ses);
	if (rc < 0) {
		cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
		goto out_err_crypto_release;
	}
smbd_connected:
	/*
	 * since we're in a cifs function already, we know that
	 * this will succeed. No need for try_module_get().
	 */
	__module_get(THIS_MODULE);
	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
				   tcp_ses, "cifsd");
	if (IS_ERR(tcp_ses->tsk)) {
		rc = PTR_ERR(tcp_ses->tsk);
		cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
		module_put(THIS_MODULE);
		goto out_err_crypto_release;
	}
	tcp_ses->min_offload = ctx->min_offload;
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread not created yet
	 * no need to spinlock this update of tcpStatus
	 */
	spin_lock(&tcp_ses->srv_lock);
	tcp_ses->tcpStatus = CifsNeedNegotiate;
	spin_unlock(&tcp_ses->srv_lock);

	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
	else
		tcp_ses->max_credits = ctx->max_credits;

	tcp_ses->nr_targets = 1;
	tcp_ses->ignore_signature = ctx->ignore_signature;
	/* thread spawned, put it on the list */
	spin_lock(&cifs_tcp_ses_lock);
	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	/* queue echo request delayed work */
	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);

	return tcp_ses;

out_err_crypto_release:
	cifs_crypto_secmech_release(tcp_ses);

	put_net(cifs_net_ns(tcp_ses));

out_err:
	if (tcp_ses) {
		if (SERVER_IS_CHAN(tcp_ses))
			cifs_put_tcp_session(tcp_ses->primary_server, false);
		kfree(tcp_ses->hostname);
		kfree(tcp_ses->leaf_fullpath);
		if (tcp_ses->ssocket)
			sock_release(tcp_ses->ssocket);
		kfree(tcp_ses);
	}
	return ERR_PTR(rc);
}

/*
 * Decide whether an existing SMB session can be reused for the mount
 * described by @ctx (sectype, credentials, channel count, charset).
 * Returns 1 on a match, 0 otherwise.
 *
 * this function must be called with ses_lock and chan_lock held
 */
static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	if (ctx->sectype != Unspecified &&
	    ctx->sectype != ses->sectype)
		return 0;

	/*
	 * If an
	 * existing session is limited to less channels than
	 * requested, it should not be reused
	 */
	if (ses->chan_max < ctx->max_channels)
		return 0;

	switch (ses->sectype) {
	case Kerberos:
		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
			return 0;
		break;
	default:
		/* NULL username means anonymous session */
		if (ses->user_name == NULL) {
			if (!ctx->nullauth)
				return 0;
			break;
		}

		/* anything else takes username/password */
		if (strncmp(ses->user_name,
			    ctx->username ? ctx->username : "",
			    CIFS_MAX_USERNAME_LEN))
			return 0;
		if ((ctx->username && strlen(ctx->username) != 0) &&
		    ses->password != NULL &&
		    strncmp(ses->password,
			    ctx->password ? ctx->password : "",
			    CIFS_MAX_PASSWORD_LEN))
			return 0;
	}

	if (strcmp(ctx->local_nls->charset, ses->local_nls->charset))
		return 0;

	return 1;
}

/**
 * cifs_setup_ipc - helper to setup the IPC tcon for the session
 * @ses: smb session to issue the request on
 * @ctx: the superblock configuration context to use for building the
 *       new tree connection for the IPC (interprocess communication RPC)
 *
 * A new IPC connection is made and stored in the session
 * tcon_ipc. The IPC tcon has the same lifetime as the session.
 *
 * Return: 0 on success, -EOPNOTSUPP if encryption was requested but the
 * server does not support it, -ENOMEM or a tree_connect error otherwise.
 */
static int
cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	int rc = 0, xid;
	struct cifs_tcon *tcon;
	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
	bool seal = false;
	struct TCP_Server_Info *server = ses->server;

	/*
	 * If the mount request that resulted in the creation of the
	 * session requires encryption, force IPC to be encrypted too.
	 */
	if (ctx->seal) {
		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
			seal = true;
		else {
			cifs_server_dbg(VFS,
				 "IPC: server doesn't support encryption\n");
			return -EOPNOTSUPP;
		}
	}

	/* no need to setup directory caching on IPC share, so pass in false */
	tcon = tcon_info_alloc(false);
	if (tcon == NULL)
		return -ENOMEM;

	spin_lock(&server->srv_lock);
	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
	spin_unlock(&server->srv_lock);

	xid = get_xid();
	tcon->ses = ses;
	tcon->ipc = true;
	tcon->seal = seal;
	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
	free_xid(xid);

	if (rc) {
		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
		tconInfoFree(tcon);
		goto out;
	}

	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);

	spin_lock(&tcon->tc_lock);
	tcon->status = TID_GOOD;
	spin_unlock(&tcon->tc_lock);
	ses->tcon_ipc = tcon;
out:
	return rc;
}

/**
 * cifs_free_ipc - helper to release the session IPC tcon
 * @ses: smb session to unmount the IPC from
 *
 * Needs to be called everytime a session is destroyed.
 *
 * On session close, the IPC is closed and the server must release all tcons of the session.
 * No need to send a tree disconnect here.
 *
 * Besides, it will make the server to not close durable and resilient files on session close, as
 * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
 *
 * Return: always 0.
 */
static int
cifs_free_ipc(struct cifs_ses *ses)
{
	struct cifs_tcon *tcon = ses->tcon_ipc;

	if (tcon == NULL)
		return 0;

	tconInfoFree(tcon);
	ses->tcon_ipc = NULL;
	return 0;
}

/*
 * Find an existing, non-exiting SMB session on @server matching @ctx.
 * On success the session's refcount is bumped (cifs_smb_ses_inc_refcount)
 * before cifs_tcp_ses_lock is dropped; returns NULL when nothing matches.
 */
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
	struct cifs_ses *ses, *ret = NULL;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_EXITING) {
			spin_unlock(&ses->ses_lock);
			continue;
		}
		spin_lock(&ses->chan_lock);
		if (match_session(ses, ctx)) {
			spin_unlock(&ses->chan_lock);
			spin_unlock(&ses->ses_lock);
			ret = ses;
			break;
		}
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
	}
	if (ret)
		cifs_smb_ses_inc_refcount(ret);
	spin_unlock(&cifs_tcp_ses_lock);
	return ret;
}

/*
 * Drop a reference to @ses; on the final put, log off the session (freeing
 * the IPC tcon first), release any extra channels and their iface refs,
 * free the session and drop its reference on the primary TCP connection.
 */
void __cifs_put_smb_ses(struct cifs_ses *ses)
{
	/*
	 * NOTE(review): rc is unsigned but server->ops->logoff() presumably
	 * returns negative error codes (printed with %d below) — consider
	 * making rc a plain int; verify against the logoff implementations.
	 */
	unsigned int rc, xid;
	unsigned int chan_count;
	struct TCP_Server_Info *server = ses->server;

	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_EXITING) {
		spin_unlock(&ses->ses_lock);
		return;
	}
	spin_unlock(&ses->ses_lock);

	cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
	cifs_dbg(FYI,
		 "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");

	spin_lock(&cifs_tcp_ses_lock);
	if (--ses->ses_count > 0) {
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_GOOD)
		ses->ses_status = SES_EXITING;
	spin_unlock(&ses->ses_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/* ses_count can never go negative */
	WARN_ON(ses->ses_count < 0);

	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_EXITING && server->ops->logoff) {
		spin_unlock(&ses->ses_lock);
		cifs_free_ipc(ses);
		xid = get_xid();
		rc = server->ops->logoff(xid, ses);
		if (rc)
			cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
				__func__, rc);
		_free_xid(xid);
	} else {
		spin_unlock(&ses->ses_lock);
		cifs_free_ipc(ses);
	}

	spin_lock(&cifs_tcp_ses_lock);
	list_del_init(&ses->smb_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	chan_count = ses->chan_count;

	/* close any extra channels */
	if (chan_count > 1) {
		int i;

		for (i = 1; i < chan_count; i++) {
			if (ses->chans[i].iface) {
				kref_put(&ses->chans[i].iface->refcount, release_iface);
				ses->chans[i].iface = NULL;
			}
			cifs_put_tcp_session(ses->chans[i].server, 0);
			ses->chans[i].server = NULL;
		}
	}

	/* we now account for primary channel in iface->refcount */
	if (ses->chans[0].iface) {
		kref_put(&ses->chans[0].iface->refcount, release_iface);
		ses->chans[0].server = NULL;
	}

	sesInfoFree(ses);
	cifs_put_tcp_session(server, 0);
}

#ifdef CONFIG_KEYS

/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)

/* Populate username and pw fields from keyring if possible */
static int
cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
{
	int rc = 0;
int is_domain = 0; 2101 const char *delim, *payload; 2102 char *desc; 2103 ssize_t len; 2104 struct key *key; 2105 struct TCP_Server_Info *server = ses->server; 2106 struct sockaddr_in *sa; 2107 struct sockaddr_in6 *sa6; 2108 const struct user_key_payload *upayload; 2109 2110 desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL); 2111 if (!desc) 2112 return -ENOMEM; 2113 2114 /* try to find an address key first */ 2115 switch (server->dstaddr.ss_family) { 2116 case AF_INET: 2117 sa = (struct sockaddr_in *)&server->dstaddr; 2118 sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr); 2119 break; 2120 case AF_INET6: 2121 sa6 = (struct sockaddr_in6 *)&server->dstaddr; 2122 sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr); 2123 break; 2124 default: 2125 cifs_dbg(FYI, "Bad ss_family (%hu)\n", 2126 server->dstaddr.ss_family); 2127 rc = -EINVAL; 2128 goto out_err; 2129 } 2130 2131 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); 2132 key = request_key(&key_type_logon, desc, ""); 2133 if (IS_ERR(key)) { 2134 if (!ses->domainName) { 2135 cifs_dbg(FYI, "domainName is NULL\n"); 2136 rc = PTR_ERR(key); 2137 goto out_err; 2138 } 2139 2140 /* didn't work, try to find a domain key */ 2141 sprintf(desc, "cifs:d:%s", ses->domainName); 2142 cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc); 2143 key = request_key(&key_type_logon, desc, ""); 2144 if (IS_ERR(key)) { 2145 rc = PTR_ERR(key); 2146 goto out_err; 2147 } 2148 is_domain = 1; 2149 } 2150 2151 down_read(&key->sem); 2152 upayload = user_key_payload_locked(key); 2153 if (IS_ERR_OR_NULL(upayload)) { 2154 rc = upayload ? 
PTR_ERR(upayload) : -EINVAL; 2155 goto out_key_put; 2156 } 2157 2158 /* find first : in payload */ 2159 payload = upayload->data; 2160 delim = strnchr(payload, upayload->datalen, ':'); 2161 cifs_dbg(FYI, "payload=%s\n", payload); 2162 if (!delim) { 2163 cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n", 2164 upayload->datalen); 2165 rc = -EINVAL; 2166 goto out_key_put; 2167 } 2168 2169 len = delim - payload; 2170 if (len > CIFS_MAX_USERNAME_LEN || len <= 0) { 2171 cifs_dbg(FYI, "Bad value from username search (len=%zd)\n", 2172 len); 2173 rc = -EINVAL; 2174 goto out_key_put; 2175 } 2176 2177 ctx->username = kstrndup(payload, len, GFP_KERNEL); 2178 if (!ctx->username) { 2179 cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n", 2180 len); 2181 rc = -ENOMEM; 2182 goto out_key_put; 2183 } 2184 cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username); 2185 2186 len = key->datalen - (len + 1); 2187 if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) { 2188 cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len); 2189 rc = -EINVAL; 2190 kfree(ctx->username); 2191 ctx->username = NULL; 2192 goto out_key_put; 2193 } 2194 2195 ++delim; 2196 ctx->password = kstrndup(delim, len, GFP_KERNEL); 2197 if (!ctx->password) { 2198 cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n", 2199 len); 2200 rc = -ENOMEM; 2201 kfree(ctx->username); 2202 ctx->username = NULL; 2203 goto out_key_put; 2204 } 2205 2206 /* 2207 * If we have a domain key then we must set the domainName in the 2208 * for the request. 
2209 */ 2210 if (is_domain && ses->domainName) { 2211 ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL); 2212 if (!ctx->domainname) { 2213 cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n", 2214 len); 2215 rc = -ENOMEM; 2216 kfree(ctx->username); 2217 ctx->username = NULL; 2218 kfree_sensitive(ctx->password); 2219 ctx->password = NULL; 2220 goto out_key_put; 2221 } 2222 } 2223 2224 strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name)); 2225 2226 out_key_put: 2227 up_read(&key->sem); 2228 key_put(key); 2229 out_err: 2230 kfree(desc); 2231 cifs_dbg(FYI, "%s: returning %d\n", __func__, rc); 2232 return rc; 2233 } 2234 #else /* ! CONFIG_KEYS */ 2235 static inline int 2236 cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)), 2237 struct cifs_ses *ses __attribute__((unused))) 2238 { 2239 return -ENOSYS; 2240 } 2241 #endif /* CONFIG_KEYS */ 2242 2243 /** 2244 * cifs_get_smb_ses - get a session matching @ctx data from @server 2245 * @server: server to setup the session to 2246 * @ctx: superblock configuration context to use to setup the session 2247 * 2248 * This function assumes it is being called from cifs_mount() where we 2249 * already got a server reference (server refcount +1). See 2250 * cifs_get_tcon() for refcount explanations. 
 */
struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
	int rc = 0;
	unsigned int xid;
	struct cifs_ses *ses;
	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;

	xid = get_xid();

	ses = cifs_find_smb_ses(server, ctx);
	if (ses) {
		cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
			 ses->ses_status);

		/* existing session found, but its channel may need reconnect */
		spin_lock(&ses->chan_lock);
		if (cifs_chan_needs_reconnect(ses, server)) {
			spin_unlock(&ses->chan_lock);
			cifs_dbg(FYI, "Session needs reconnect\n");

			mutex_lock(&ses->session_mutex);
			rc = cifs_negotiate_protocol(xid, ses, server);
			if (rc) {
				mutex_unlock(&ses->session_mutex);
				/* problem -- put our ses reference */
				cifs_put_smb_ses(ses);
				free_xid(xid);
				return ERR_PTR(rc);
			}

			rc = cifs_setup_session(xid, ses, server,
						ctx->local_nls);
			if (rc) {
				mutex_unlock(&ses->session_mutex);
				/* problem -- put our reference */
				cifs_put_smb_ses(ses);
				free_xid(xid);
				return ERR_PTR(rc);
			}
			mutex_unlock(&ses->session_mutex);

			spin_lock(&ses->chan_lock);
		}
		spin_unlock(&ses->chan_lock);

		/* existing SMB ses has a server reference already */
		cifs_put_tcp_session(server, 0);
		free_xid(xid);
		return ses;
	}

	rc = -ENOMEM;

	cifs_dbg(FYI, "Existing smb sess not found\n");
	ses = sesInfoAlloc();
	if (ses == NULL)
		goto get_ses_fail;

	/* new SMB session uses our server ref */
	ses->server = server;
	if (server->dstaddr.ss_family == AF_INET6)
		sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
	else
		sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);

	if (ctx->username) {
		ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
		if (!ses->user_name)
			goto get_ses_fail;
	}

	/* ctx->password freed at unmount */
	if (ctx->password) {
		ses->password = kstrdup(ctx->password, GFP_KERNEL);
		if (!ses->password)
			goto get_ses_fail;
	}
	if (ctx->domainname) {
		ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
		if (!ses->domainName)
			goto get_ses_fail;
	}

	strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));

	if (ctx->domainauto)
		ses->domainAuto = ctx->domainauto;
	ses->cred_uid = ctx->cred_uid;
	ses->linux_uid = ctx->linux_uid;

	ses->sectype = ctx->sectype;
	ses->sign = ctx->sign;
	ses->local_nls = load_nls(ctx->local_nls->charset);

	/* add server as first channel */
	spin_lock(&ses->chan_lock);
	ses->chans[0].server = server;
	ses->chan_count = 1;
	ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
	ses->chans_need_reconnect = 1;
	spin_unlock(&ses->chan_lock);

	mutex_lock(&ses->session_mutex);
	rc = cifs_negotiate_protocol(xid, ses, server);
	if (!rc)
		rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
	mutex_unlock(&ses->session_mutex);

	/* each channel uses a different signing key */
	spin_lock(&ses->chan_lock);
	memcpy(ses->chans[0].signkey, ses->smb3signingkey,
	       sizeof(ses->smb3signingkey));
	spin_unlock(&ses->chan_lock);

	if (rc)
		goto get_ses_fail;

	/*
	 * success, put it on the list and add it as first channel
	 * note: the session becomes active soon after this. So you'll
	 * need to lock before changing something in the session.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	ses->dfs_root_ses = ctx->dfs_root_ses;
	if (ses->dfs_root_ses)
		ses->dfs_root_ses->ses_count++;
	list_add(&ses->smb_ses_list, &server->smb_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_setup_ipc(ses, ctx);

	free_xid(xid);

	return ses;

get_ses_fail:
	sesInfoFree(ses);
	free_xid(xid);
	return ERR_PTR(rc);
}

/*
 * Return 1 if @tcon is compatible with the mount described by @ctx,
 * 0 otherwise.  This function must be called with tc_lock held.
 */
static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;

	if (tcon->status == TID_EXITING)
		return 0;

	if (tcon->origin_fullpath) {
		if (!ctx->source ||
		    !dfs_src_pathname_equal(ctx->source,
					    tcon->origin_fullpath))
			return 0;
	} else if (!server->leaf_fullpath &&
		   strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) {
		return 0;
	}
	if (tcon->seal != ctx->seal)
		return 0;
	if (tcon->snapshot_time != ctx->snapshot_time)
		return 0;
	if (tcon->handle_timeout != ctx->handle_timeout)
		return 0;
	if (tcon->no_lease != ctx->no_lease)
		return 0;
	if (tcon->nodelete != ctx->nodelete)
		return 0;
	return 1;
}

/*
 * Find a tcon in @ses' tcon list that matches @ctx and take an extra
 * reference on it; returns NULL when no match is found.
 */
static struct cifs_tcon *
cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	struct cifs_tcon *tcon;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
		spin_lock(&tcon->tc_lock);
		if (!match_tcon(tcon, ctx)) {
			spin_unlock(&tcon->tc_lock);
			continue;
		}
		++tcon->tc_count;
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return tcon;
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}

/*
 * Drop a reference to @tcon.  On the final put the tcon is unlinked,
 * the tree is disconnected on the wire and the session reference the
 * tcon holds is released.
 */
void
cifs_put_tcon(struct cifs_tcon *tcon)
{
	unsigned int xid;
	struct cifs_ses *ses;

	/*
	 * IPC tcon share the lifetime of their session and are
	 * destroyed in the session put function
	 */
	if (tcon == NULL || tcon->ipc)
		return;

	ses = tcon->ses;
	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if (--tcon->tc_count > 0) {
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	/* tc_count can never go negative */
	WARN_ON(tcon->tc_count < 0);

	list_del_init(&tcon->tcon_list);
	tcon->status = TID_EXITING;
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel polling of interfaces */
	cancel_delayed_work_sync(&tcon->query_interfaces);
#ifdef CONFIG_CIFS_DFS_UPCALL
	cancel_delayed_work_sync(&tcon->dfs_cache_work);
#endif

	if (tcon->use_witness) {
		int rc;

		rc = cifs_swn_unregister(tcon);
		if (rc < 0) {
			cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
				 __func__, rc);
		}
	}

	xid = get_xid();
	if (ses->server->ops->tree_disconnect)
		ses->server->ops->tree_disconnect(xid, tcon);
	_free_xid(xid);

	cifs_fscache_release_super_cookie(tcon);
	tconInfoFree(tcon);
	cifs_put_smb_ses(ses);
}

/**
 * cifs_get_tcon - get a tcon matching @ctx data from @ses
 * @ses: smb session to issue the request on
 * @ctx: the superblock configuration context to use for building the
 *
 * - tcon refcount is the number of mount points using the tcon.
 * - ses refcount is the number of tcon using the session.
 *
 * 1. This function assumes it is being called from cifs_mount() where
 *    we already got a session reference (ses refcount +1).
 *
 * 2.
Since we're in the context of adding a mount point, the end
 *    result should be either:
 *
 *    a) a new tcon already allocated with refcount=1 (1 mount point) and
 *       its session refcount incremented (1 new tcon). This +1 was
 *       already done in (1).
 *
 *    b) an existing tcon with refcount+1 (add a mount point to it) and
 *       identical ses refcount (no new tcon). Because of (1) we need to
 *       decrement the ses refcount.
 */
static struct cifs_tcon *
cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	struct cifs_tcon *tcon;
	bool nohandlecache;
	int rc, xid;

	tcon = cifs_find_tcon(ses, ctx);
	if (tcon) {
		/*
		 * tcon has refcount already incremented but we need to
		 * decrement extra ses reference gotten by caller (case b)
		 */
		cifs_dbg(FYI, "Found match on UNC path\n");
		cifs_put_smb_ses(ses);
		return tcon;
	}

	if (!ses->server->ops->tree_connect) {
		rc = -ENOSYS;
		goto out_fail;
	}

	/* directory lease (handle) caching requires SMB2+ with the cap */
	if (ses->server->dialect >= SMB20_PROT_ID &&
	    (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING))
		nohandlecache = ctx->nohandlecache;
	else
		nohandlecache = true;
	tcon = tcon_info_alloc(!nohandlecache);
	if (tcon == NULL) {
		rc = -ENOMEM;
		goto out_fail;
	}
	tcon->nohandlecache = nohandlecache;

	if (ctx->snapshot_time) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				 "Use SMB2 or later for snapshot mount option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->snapshot_time = ctx->snapshot_time;
	}

	if (ctx->handle_timeout) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				 "Use SMB2.1 or later for handle timeout option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->handle_timeout = ctx->handle_timeout;
	}

	tcon->ses = ses;
	if (ctx->password) {
		tcon->password = kstrdup(ctx->password, GFP_KERNEL);
		if (!tcon->password) {
			rc = -ENOMEM;
			goto out_fail;
		}
	}

	if (ctx->seal) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				 "SMB3 or later required for encryption\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else if (tcon->ses->server->capabilities &
					SMB2_GLOBAL_CAP_ENCRYPTION)
			tcon->seal = true;
		else {
			cifs_dbg(VFS, "Encryption is not supported on share\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	}

	if (ctx->linux_ext) {
		if (ses->server->posix_ext_supported) {
			tcon->posix_extensions = true;
			pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
		} else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
		    (strcmp(ses->server->vals->version_string,
		     SMB3ANY_VERSION_STRING) == 0) ||
		    (strcmp(ses->server->vals->version_string,
		     SMBDEFAULT_VERSION_STRING) == 0)) {
			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else {
			cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
				"disabled but required for POSIX extensions\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	}

	xid = get_xid();
	rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
					    ctx->local_nls);
	free_xid(xid);
	cifs_dbg(FYI, "Tcon rc = %d\n", rc);
	if (rc)
		goto out_fail;

	tcon->use_persistent = false;
	/* check if SMB2 or later, CIFS does not support persistent handles */
	if (ctx->persistent) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				 "SMB3 or later required for persistent handles\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else if (ses->server->capabilities &
			   SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
			tcon->use_persistent = true;
		else /* persistent handles requested but not supported */ {
			cifs_dbg(VFS,
				 "Persistent handles not supported on share\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	} else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
	     && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
	     && (ctx->nopersistent == false)) {
		cifs_dbg(FYI, "enabling persistent handles\n");
		tcon->use_persistent = true;
	} else if (ctx->resilient) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				 "SMB2.1 or later required for resilient handles\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
		tcon->use_resilient = true;
	}

	tcon->use_witness = false;
	if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
		if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
			if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
				/*
				 * Set witness in use flag in first place
				 * to retry registration in the echo task
				 */
				tcon->use_witness = true;
				/* And try to register immediately */
				rc = cifs_swn_register(tcon);
				if (rc < 0) {
					cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
					goto out_fail;
				}
			} else {
				/* TODO: try to extend for non-cluster uses (eg multichannel) */
				cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
				rc = -EOPNOTSUPP;
				goto out_fail;
			}
		} else {
			cifs_dbg(VFS, "SMB3 or later required for witness option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	}

	/* If the user really knows what they are doing they can override */
	if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
		if (ctx->cache_ro)
			cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
		else if (ctx->cache_rw)
			cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
	}

	if (ctx->no_lease) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				 "SMB2 or later required for nolease option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->no_lease = ctx->no_lease;
	}

	/*
	 * We can have only one retry value for a connection to a share so for
	 * resources mounted more than once to the same server share the last
	 * value passed in for the retry flag is used.
	 */
	tcon->retry = ctx->retry;
	tcon->nocase = ctx->nocase;
	tcon->broken_sparse_sup = ctx->no_sparse;
	tcon->max_cached_dirs = ctx->max_cached_dirs;
	tcon->nodelete = ctx->nodelete;
	tcon->local_lease = ctx->local_lease;
	INIT_LIST_HEAD(&tcon->pending_opens);
	tcon->status = TID_GOOD;

	INIT_DELAYED_WORK(&tcon->query_interfaces,
			  smb2_query_server_interfaces);
	if (ses->server->dialect >= SMB30_PROT_ID &&
	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
		/* schedule query interfaces poll */
		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
	}
#ifdef CONFIG_CIFS_DFS_UPCALL
	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
#endif
	spin_lock(&cifs_tcp_ses_lock);
	list_add(&tcon->tcon_list, &ses->tcon_list);
	spin_unlock(&cifs_tcp_ses_lock);

	return tcon;

out_fail:
	tconInfoFree(tcon);
	return ERR_PTR(rc);
}

/*
 * Drop a reference to @tlink; on the final put (and only when the link
 * is no longer in the tree) the underlying tcon reference is released
 * and the tlink freed.
 */
void
cifs_put_tlink(struct tcon_link *tlink)
{
	if (!tlink || IS_ERR(tlink))
		return;

	if (!atomic_dec_and_test(&tlink->tl_count) ||
	    test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
		/* remember last use for idle pruning */
		tlink->tl_time = jiffies;
		return;
	}

	if (!IS_ERR(tlink_tcon(tlink)))
		cifs_put_tcon(tlink_tcon(tlink));
	kfree(tlink);
	return;
}

/*
 * Return 1 when the mount options of existing superblock @sb are
 * compatible with the new mount described by @mnt_data, 0 otherwise.
 */
static int
compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
	struct cifs_sb_info *old = CIFS_SB(sb);
	struct cifs_sb_info *new = mnt_data->cifs_sb;
	unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
	unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;

	if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
		return 0;

	if (old->mnt_cifs_serverino_autodisabled)
		newflags &= ~CIFS_MOUNT_SERVER_INUM;

	if (oldflags != newflags)
		return 0;

	/*
	 * We want to share sb only if we don't specify an r/wsize or
	 * specified r/wsize is greater than or equal to existing one.
	 */
	if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
		return 0;

	if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
		return 0;

	if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
	    !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
		return 0;

	if (old->ctx->file_mode != new->ctx->file_mode ||
	    old->ctx->dir_mode != new->ctx->dir_mode)
		return 0;

	if (strcmp(old->local_nls->charset, new->local_nls->charset))
		return 0;

	if (old->ctx->acregmax != new->ctx->acregmax)
		return 0;
	if (old->ctx->acdirmax != new->ctx->acdirmax)
		return 0;
	if (old->ctx->closetimeo != new->ctx->closetimeo)
		return 0;

	return 1;
}

/*
 * Return 1 when the prefix path of the existing superblock and the new
 * mount match (or when the tcon's DFS origin path equals the new
 * source), 0 otherwise.
 */
static int match_prepath(struct super_block *sb,
			 struct cifs_tcon *tcon,
			 struct cifs_mnt_data *mnt_data)
{
	struct smb3_fs_context *ctx = mnt_data->ctx;
	struct cifs_sb_info *old = CIFS_SB(sb);
	struct cifs_sb_info *new = mnt_data->cifs_sb;
	bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
		old->prepath;
	bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
		new->prepath;

	if (tcon->origin_fullpath &&
	    dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source))
		return 1;

	if (old_set && new_set && !strcmp(new->prepath, old->prepath))
		return 1;
	else if (!old_set && !new_set)
		return 1;

	return 0;
}

/*
 * Superblock comparison callback (for sget()-style matching): returns
 * nonzero when existing superblock @sb can be reused for the mount
 * request in @data.
 */
int
cifs_match_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	struct smb3_fs_context *ctx;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *tcp_srv;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	int rc = 0;

	spin_lock(&cifs_tcp_ses_lock);
	cifs_sb = CIFS_SB(sb);

	/* We do not want to use a superblock that has been shutdown */
	if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
		spin_unlock(&cifs_tcp_ses_lock);
		return 0;
	}

	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
	if (IS_ERR_OR_NULL(tlink)) {
		pr_warn_once("%s: skip super matching due to bad tlink(%p)\n",
			     __func__, tlink);
		spin_unlock(&cifs_tcp_ses_lock);
		return 0;
	}
	tcon = tlink_tcon(tlink);
	ses = tcon->ses;
	tcp_srv = ses->server;

	ctx = mnt_data->ctx;

	/* lock ordering: srv_lock -> ses_lock -> chan_lock -> tc_lock */
	spin_lock(&tcp_srv->srv_lock);
	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	spin_lock(&tcon->tc_lock);
	if (!match_server(tcp_srv, ctx, true) ||
	    !match_session(ses, ctx) ||
	    !match_tcon(tcon, ctx) ||
	    !match_prepath(sb, tcon, mnt_data)) {
		rc = 0;
		goto out;
	}

	rc = compare_mount_options(sb, mnt_data);
out:
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);
	spin_unlock(&tcp_srv->srv_lock);

	spin_unlock(&cifs_tcp_ses_lock);
	cifs_put_tlink(tlink);
	return rc;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key cifs_key[2];
static struct lock_class_key cifs_slock_key[2];

/* give CIFS sockets their own lockdep class to avoid false positives */
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(!sock_allow_reclassification(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
		&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
}

static inline void
cifs_reclassify_socket6(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(!sock_allow_reclassification(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
		&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
}
#else
static
inline void
cifs_reclassify_socket4(struct socket *sock)
{
}

static inline void
cifs_reclassify_socket6(struct socket *sock)
{
}
#endif

/* See RFC1001 section 14 on representation of Netbios names */
static void rfc1002mangle(char *target, char *source, unsigned int length)
{
	unsigned int i, j;

	/* each source byte expands to two chars in the range 'A'..'P' */
	for (i = 0, j = 0; i < (length); i++) {
		/* mask a nibble at a time and encode */
		target[j] = 'A' + (0x0F & (source[i] >> 4));
		target[j+1] = 'A' + (0x0F & source[i]);
		j += 2;
	}

}

/*
 * Bind the server socket to the configured local source address, if
 * any; no-op when no source address was specified (AF_UNSPEC).
 */
static int
bind_socket(struct TCP_Server_Info *server)
{
	int rc = 0;
	if (server->srcaddr.ss_family != AF_UNSPEC) {
		/* Bind to the specified local IP address */
		struct socket *socket = server->ssocket;
		rc = kernel_bind(socket,
				 (struct sockaddr *) &server->srcaddr,
				 sizeof(server->srcaddr));
		if (rc < 0) {
			struct sockaddr_in *saddr4;
			struct sockaddr_in6 *saddr6;
			saddr4 = (struct sockaddr_in *)&server->srcaddr;
			saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
			if (saddr6->sin6_family == AF_INET6)
				cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
					&saddr6->sin6_addr, rc);
			else
				cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
					&saddr4->sin_addr.s_addr, rc);
		}
	}
	return rc;
}

/*
 * Send an RFC 1001 session request (required by some servers before
 * negprot when connecting on the NetBIOS session port).
 */
static int
ip_rfc1001_connect(struct TCP_Server_Info *server)
{
	int rc = 0;
	/*
	 * some servers require RFC1001 sessinit before sending
	 * negprot - BB check reconnection in case where second
	 * sessinit is sent but no second negprot
	 */
	struct rfc1002_session_packet req = {};
	struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
	unsigned int len;

	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);

	if (server->server_RFC1001_name[0] != 0)
		rfc1002mangle(req.trailer.session_req.called_name,
			      server->server_RFC1001_name,
			      RFC1001_NAME_LEN_WITH_NULL);
	else
		rfc1002mangle(req.trailer.session_req.called_name,
			      DEFAULT_CIFS_CALLED_NAME,
			      RFC1001_NAME_LEN_WITH_NULL);

	req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);

	/* calling name ends in null (byte 16) from old smb convention */
	if (server->workstation_RFC1001_name[0] != 0)
		rfc1002mangle(req.trailer.session_req.calling_name,
			      server->workstation_RFC1001_name,
			      RFC1001_NAME_LEN_WITH_NULL);
	else
		rfc1002mangle(req.trailer.session_req.calling_name,
			      "LINUX_CIFS_CLNT",
			      RFC1001_NAME_LEN_WITH_NULL);

	/*
	 * As per rfc1002, @len must be the number of bytes that follows the
	 * length field of a rfc1002 session request payload.
	 */
	len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);

	smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
	rc = smb_send(server, smb_buf, len);
	/*
	 * RFC1001 layer in at least one server requires very short break before
	 * negprot presumably because not expecting negprot to follow so fast.
	 * This is a simple solution that works without complicating the code
	 * and causes no significant slowing down on mount for everyone else
	 */
	usleep_range(1000, 2000);

	return rc;
}

/*
 * Create (if not already present), bind and connect the TCP socket for
 * @server; on the NetBIOS session port this also sends the RFC 1001
 * session request.  Returns 0 on success or a negative errno.
 */
static int
generic_ip_connect(struct TCP_Server_Info *server)
{
	struct sockaddr *saddr;
	struct socket *socket;
	int slen, sfamily;
	__be16 sport;
	int rc = 0;

	saddr = (struct sockaddr *) &server->dstaddr;

	if (server->dstaddr.ss_family == AF_INET6) {
		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;

		sport = ipv6->sin6_port;
		slen = sizeof(struct sockaddr_in6);
		sfamily = AF_INET6;
		cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
				ntohs(sport));
	} else {
		struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;

		sport = ipv4->sin_port;
		slen = sizeof(struct sockaddr_in);
		sfamily = AF_INET;
		cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
				ntohs(sport));
	}

	if (server->ssocket) {
		/* reconnect case: reuse the existing socket */
		socket = server->ssocket;
	} else {
		rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
				   IPPROTO_TCP, &server->ssocket, 1);
		if (rc < 0) {
			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
			return rc;
		}

		/* BB other socket options to set KEEPALIVE, NODELAY? */
		cifs_dbg(FYI, "Socket created\n");
		socket = server->ssocket;
		socket->sk->sk_allocation = GFP_NOFS;
		socket->sk->sk_use_task_frag = false;
		if (sfamily == AF_INET6)
			cifs_reclassify_socket6(socket);
		else
			cifs_reclassify_socket4(socket);
	}

	rc = bind_socket(server);
	if (rc < 0)
		return rc;

	/*
	 * Eventually check for other socket options to change from
	 * the default. sock_setsockopt not used because it expects
	 * user space buffer
	 */
	socket->sk->sk_rcvtimeo = 7 * HZ;
	socket->sk->sk_sndtimeo = 5 * HZ;

	/* make the bufsizes depend on wsize/rsize and max requests */
	if (server->noautotune) {
		if (socket->sk->sk_sndbuf < (200 * 1024))
			socket->sk->sk_sndbuf = 200 * 1024;
		if (socket->sk->sk_rcvbuf < (140 * 1024))
			socket->sk->sk_rcvbuf = 140 * 1024;
	}

	if (server->tcp_nodelay)
		tcp_sock_set_nodelay(socket->sk);

	cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
		 socket->sk->sk_sndbuf,
		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);

	rc = kernel_connect(socket, saddr, slen,
			    server->noblockcnt ? O_NONBLOCK : 0);
	/*
	 * When mounting SMB root file systems, we do not want to block in
	 * connect. Otherwise bail out and then let cifs_reconnect() perform
	 * reconnect failover - if possible.
	 */
	if (server->noblockcnt && rc == -EINPROGRESS)
		rc = 0;
	if (rc < 0) {
		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
		trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
		sock_release(socket);
		server->ssocket = NULL;
		return rc;
	}
	trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
	if (sport == htons(RFC1001_PORT))
		rc = ip_rfc1001_connect(server);

	return rc;
}

/*
 * Connect to the server.  When no port was specified, try the direct
 * SMB port (445) first and fall back to the NetBIOS port (139).
 */
static int
ip_connect(struct TCP_Server_Info *server)
{
	__be16 *sport;
	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;

	if (server->dstaddr.ss_family == AF_INET6)
		sport = &addr6->sin6_port;
	else
		sport = &addr->sin_port;

	if (*sport == 0) {
		int rc;

		/* try with 445 port at first */
		*sport = htons(CIFS_PORT);

		rc = generic_ip_connect(server);
		if (rc >= 0)
			return rc;

		/* if it failed, try with 139 port */
		*sport = htons(RFC1001_PORT);
	}

	return generic_ip_connect(server);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
			struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
	/*
	 * If we are reconnecting then should we check to see if
	 * any requested capabilities changed locally e.g. via
	 * remount but we can not do much about it here
	 * if they have (even if we could detect it by the following)
	 * Perhaps we could add a backpointer to array of sb from tcon
	 * or if we change to make all sb to same share the same
	 * sb as NFS - then we only have one backpointer to sb.
	 * What if we wanted to mount the server share twice once with
	 * and once without posixacls or posix paths?
	 */
	__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);

	if (ctx && ctx->no_linux_ext) {
		tcon->fsUnixInfo.Capability = 0;
		tcon->unix_ext = 0; /* Unix Extensions disabled */
		cifs_dbg(FYI, "Linux protocol extensions disabled\n");
		return;
	} else if (ctx)
		tcon->unix_ext = 1; /* Unix Extensions supported */

	if (!tcon->unix_ext) {
		cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
		return;
	}

	if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
		__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
		cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
		/*
		 * check for reconnect case in which we do not
		 * want to change the mount behavior if we can avoid it
		 */
		if (ctx == NULL) {
			/*
			 * turn off POSIX ACL and PATHNAMES if not set
			 * originally at mount time
			 */
			if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
				cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
			if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
				if (cap &
CIFS_UNIX_POSIX_PATHNAMES_CAP) 3197 cifs_dbg(VFS, "POSIXPATH support change\n"); 3198 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; 3199 } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { 3200 cifs_dbg(VFS, "possible reconnect error\n"); 3201 cifs_dbg(VFS, "server disabled POSIX path support\n"); 3202 } 3203 } 3204 3205 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) 3206 cifs_dbg(VFS, "per-share encryption not supported yet\n"); 3207 3208 cap &= CIFS_UNIX_CAP_MASK; 3209 if (ctx && ctx->no_psx_acl) 3210 cap &= ~CIFS_UNIX_POSIX_ACL_CAP; 3211 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { 3212 cifs_dbg(FYI, "negotiated posix acl support\n"); 3213 if (cifs_sb) 3214 cifs_sb->mnt_cifs_flags |= 3215 CIFS_MOUNT_POSIXACL; 3216 } 3217 3218 if (ctx && ctx->posix_paths == 0) 3219 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; 3220 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { 3221 cifs_dbg(FYI, "negotiate posix pathnames\n"); 3222 if (cifs_sb) 3223 cifs_sb->mnt_cifs_flags |= 3224 CIFS_MOUNT_POSIX_PATHS; 3225 } 3226 3227 cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap); 3228 #ifdef CONFIG_CIFS_DEBUG2 3229 if (cap & CIFS_UNIX_FCNTL_CAP) 3230 cifs_dbg(FYI, "FCNTL cap\n"); 3231 if (cap & CIFS_UNIX_EXTATTR_CAP) 3232 cifs_dbg(FYI, "EXTATTR cap\n"); 3233 if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) 3234 cifs_dbg(FYI, "POSIX path cap\n"); 3235 if (cap & CIFS_UNIX_XATTR_CAP) 3236 cifs_dbg(FYI, "XATTR cap\n"); 3237 if (cap & CIFS_UNIX_POSIX_ACL_CAP) 3238 cifs_dbg(FYI, "POSIX ACL cap\n"); 3239 if (cap & CIFS_UNIX_LARGE_READ_CAP) 3240 cifs_dbg(FYI, "very large read cap\n"); 3241 if (cap & CIFS_UNIX_LARGE_WRITE_CAP) 3242 cifs_dbg(FYI, "very large write cap\n"); 3243 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP) 3244 cifs_dbg(FYI, "transport encryption cap\n"); 3245 if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) 3246 cifs_dbg(FYI, "mandatory transport encryption cap\n"); 3247 #endif /* CIFS_DEBUG2 */ 3248 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { 3249 if (ctx == NULL) 3250 cifs_dbg(FYI, 
"resetting capabilities failed\n"); 3251 else 3252 cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n"); 3253 3254 } 3255 } 3256 } 3257 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3258 3259 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb) 3260 { 3261 struct smb3_fs_context *ctx = cifs_sb->ctx; 3262 3263 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); 3264 3265 spin_lock_init(&cifs_sb->tlink_tree_lock); 3266 cifs_sb->tlink_tree = RB_ROOT; 3267 3268 cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n", 3269 ctx->file_mode, ctx->dir_mode); 3270 3271 /* this is needed for ASCII cp to Unicode converts */ 3272 if (ctx->iocharset == NULL) { 3273 /* load_nls_default cannot return null */ 3274 cifs_sb->local_nls = load_nls_default(); 3275 } else { 3276 cifs_sb->local_nls = load_nls(ctx->iocharset); 3277 if (cifs_sb->local_nls == NULL) { 3278 cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n", 3279 ctx->iocharset); 3280 return -ELIBACC; 3281 } 3282 } 3283 ctx->local_nls = cifs_sb->local_nls; 3284 3285 smb3_update_mnt_flags(cifs_sb); 3286 3287 if (ctx->direct_io) 3288 cifs_dbg(FYI, "mounting share using direct i/o\n"); 3289 if (ctx->cache_ro) { 3290 cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n"); 3291 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE; 3292 } else if (ctx->cache_rw) { 3293 cifs_dbg(VFS, "mounting share in single client RW caching mode. 
Ensure that no other systems will be accessing the share.\n"); 3294 cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE | 3295 CIFS_MOUNT_RW_CACHE); 3296 } 3297 3298 if ((ctx->cifs_acl) && (ctx->dynperm)) 3299 cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n"); 3300 3301 if (ctx->prepath) { 3302 cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL); 3303 if (cifs_sb->prepath == NULL) 3304 return -ENOMEM; 3305 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3306 } 3307 3308 return 0; 3309 } 3310 3311 /* Release all succeed connections */ 3312 void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx) 3313 { 3314 int rc = 0; 3315 3316 if (mnt_ctx->tcon) 3317 cifs_put_tcon(mnt_ctx->tcon); 3318 else if (mnt_ctx->ses) 3319 cifs_put_smb_ses(mnt_ctx->ses); 3320 else if (mnt_ctx->server) 3321 cifs_put_tcp_session(mnt_ctx->server, 0); 3322 mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS; 3323 free_xid(mnt_ctx->xid); 3324 } 3325 3326 int cifs_mount_get_session(struct cifs_mount_ctx *mnt_ctx) 3327 { 3328 struct TCP_Server_Info *server = NULL; 3329 struct smb3_fs_context *ctx; 3330 struct cifs_ses *ses = NULL; 3331 unsigned int xid; 3332 int rc = 0; 3333 3334 xid = get_xid(); 3335 3336 if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->fs_ctx)) { 3337 rc = -EINVAL; 3338 goto out; 3339 } 3340 ctx = mnt_ctx->fs_ctx; 3341 3342 /* get a reference to a tcp session */ 3343 server = cifs_get_tcp_session(ctx, NULL); 3344 if (IS_ERR(server)) { 3345 rc = PTR_ERR(server); 3346 server = NULL; 3347 goto out; 3348 } 3349 3350 /* get a reference to a SMB session */ 3351 ses = cifs_get_smb_ses(server, ctx); 3352 if (IS_ERR(ses)) { 3353 rc = PTR_ERR(ses); 3354 ses = NULL; 3355 goto out; 3356 } 3357 3358 if ((ctx->persistent == true) && (!(ses->server->capabilities & 3359 SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) { 3360 cifs_server_dbg(VFS, "persistent handles not supported by server\n"); 3361 rc = -EOPNOTSUPP; 3362 } 3363 3364 out: 3365 mnt_ctx->xid = xid; 3366 
mnt_ctx->server = server; 3367 mnt_ctx->ses = ses; 3368 mnt_ctx->tcon = NULL; 3369 3370 return rc; 3371 } 3372 3373 int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx) 3374 { 3375 struct TCP_Server_Info *server; 3376 struct cifs_sb_info *cifs_sb; 3377 struct smb3_fs_context *ctx; 3378 struct cifs_tcon *tcon = NULL; 3379 int rc = 0; 3380 3381 if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->server || !mnt_ctx->ses || !mnt_ctx->fs_ctx || 3382 !mnt_ctx->cifs_sb)) { 3383 rc = -EINVAL; 3384 goto out; 3385 } 3386 server = mnt_ctx->server; 3387 ctx = mnt_ctx->fs_ctx; 3388 cifs_sb = mnt_ctx->cifs_sb; 3389 3390 /* search for existing tcon to this server share */ 3391 tcon = cifs_get_tcon(mnt_ctx->ses, ctx); 3392 if (IS_ERR(tcon)) { 3393 rc = PTR_ERR(tcon); 3394 tcon = NULL; 3395 goto out; 3396 } 3397 3398 /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */ 3399 if (tcon->posix_extensions) 3400 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; 3401 3402 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 3403 /* tell server which Unix caps we support */ 3404 if (cap_unix(tcon->ses)) { 3405 /* 3406 * reset of caps checks mount to see if unix extensions disabled 3407 * for just this mount. 
3408 */ 3409 reset_cifs_unix_caps(mnt_ctx->xid, tcon, cifs_sb, ctx); 3410 spin_lock(&tcon->ses->server->srv_lock); 3411 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && 3412 (le64_to_cpu(tcon->fsUnixInfo.Capability) & 3413 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { 3414 spin_unlock(&tcon->ses->server->srv_lock); 3415 rc = -EACCES; 3416 goto out; 3417 } 3418 spin_unlock(&tcon->ses->server->srv_lock); 3419 } else 3420 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3421 tcon->unix_ext = 0; /* server does not support them */ 3422 3423 /* do not care if a following call succeed - informational */ 3424 if (!tcon->pipe && server->ops->qfs_tcon) { 3425 server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb); 3426 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) { 3427 if (tcon->fsDevInfo.DeviceCharacteristics & 3428 cpu_to_le32(FILE_READ_ONLY_DEVICE)) 3429 cifs_dbg(VFS, "mounted to read only share\n"); 3430 else if ((cifs_sb->mnt_cifs_flags & 3431 CIFS_MOUNT_RW_CACHE) == 0) 3432 cifs_dbg(VFS, "read only mount of RW share\n"); 3433 /* no need to log a RW mount of a typical RW share */ 3434 } 3435 } 3436 3437 /* 3438 * Clamp the rsize/wsize mount arguments if they are too big for the server 3439 * and set the rsize/wsize to the negotiated values if not passed in by 3440 * the user on mount 3441 */ 3442 if ((cifs_sb->ctx->wsize == 0) || 3443 (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) { 3444 cifs_sb->ctx->wsize = 3445 round_down(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE); 3446 /* 3447 * in the very unlikely event that the server sent a max write size under PAGE_SIZE, 3448 * (which would get rounded down to 0) then reset wsize to absolute minimum eg 4096 3449 */ 3450 if (cifs_sb->ctx->wsize == 0) { 3451 cifs_sb->ctx->wsize = PAGE_SIZE; 3452 cifs_dbg(VFS, "wsize too small, reset to minimum ie PAGE_SIZE, usually 4096\n"); 3453 } 3454 } 3455 if ((cifs_sb->ctx->rsize == 0) || 3456 (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, 
ctx))) 3457 cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx); 3458 3459 /* 3460 * The cookie is initialized from volume info returned above. 3461 * Inside cifs_fscache_get_super_cookie it checks 3462 * that we do not get super cookie twice. 3463 */ 3464 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) 3465 cifs_fscache_get_super_cookie(tcon); 3466 3467 out: 3468 mnt_ctx->tcon = tcon; 3469 return rc; 3470 } 3471 3472 static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, 3473 struct cifs_tcon *tcon) 3474 { 3475 struct tcon_link *tlink; 3476 3477 /* hang the tcon off of the superblock */ 3478 tlink = kzalloc(sizeof(*tlink), GFP_KERNEL); 3479 if (tlink == NULL) 3480 return -ENOMEM; 3481 3482 tlink->tl_uid = ses->linux_uid; 3483 tlink->tl_tcon = tcon; 3484 tlink->tl_time = jiffies; 3485 set_bit(TCON_LINK_MASTER, &tlink->tl_flags); 3486 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); 3487 3488 cifs_sb->master_tlink = tlink; 3489 spin_lock(&cifs_sb->tlink_tree_lock); 3490 tlink_rb_insert(&cifs_sb->tlink_tree, tlink); 3491 spin_unlock(&cifs_sb->tlink_tree_lock); 3492 3493 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, 3494 TLINK_IDLE_EXPIRE); 3495 return 0; 3496 } 3497 3498 static int 3499 cifs_are_all_path_components_accessible(struct TCP_Server_Info *server, 3500 unsigned int xid, 3501 struct cifs_tcon *tcon, 3502 struct cifs_sb_info *cifs_sb, 3503 char *full_path, 3504 int added_treename) 3505 { 3506 int rc; 3507 char *s; 3508 char sep, tmp; 3509 int skip = added_treename ? 
1 : 0; 3510 3511 sep = CIFS_DIR_SEP(cifs_sb); 3512 s = full_path; 3513 3514 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ""); 3515 while (rc == 0) { 3516 /* skip separators */ 3517 while (*s == sep) 3518 s++; 3519 if (!*s) 3520 break; 3521 /* next separator */ 3522 while (*s && *s != sep) 3523 s++; 3524 /* 3525 * if the treename is added, we then have to skip the first 3526 * part within the separators 3527 */ 3528 if (skip) { 3529 skip = 0; 3530 continue; 3531 } 3532 /* 3533 * temporarily null-terminate the path at the end of 3534 * the current component 3535 */ 3536 tmp = *s; 3537 *s = 0; 3538 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, 3539 full_path); 3540 *s = tmp; 3541 } 3542 return rc; 3543 } 3544 3545 /* 3546 * Check if path is remote (i.e. a DFS share). 3547 * 3548 * Return -EREMOTE if it is, otherwise 0 or -errno. 3549 */ 3550 int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx) 3551 { 3552 int rc; 3553 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; 3554 struct TCP_Server_Info *server = mnt_ctx->server; 3555 unsigned int xid = mnt_ctx->xid; 3556 struct cifs_tcon *tcon = mnt_ctx->tcon; 3557 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; 3558 char *full_path; 3559 3560 if (!server->ops->is_path_accessible) 3561 return -EOPNOTSUPP; 3562 3563 /* 3564 * cifs_build_path_to_root works only when we have a valid tcon 3565 */ 3566 full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon, 3567 tcon->Flags & SMB_SHARE_IS_IN_DFS); 3568 if (full_path == NULL) 3569 return -ENOMEM; 3570 3571 cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path); 3572 3573 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, 3574 full_path); 3575 if (rc != 0 && rc != -EREMOTE) 3576 goto out; 3577 3578 if (rc != -EREMOTE) { 3579 rc = cifs_are_all_path_components_accessible(server, xid, tcon, 3580 cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS); 3581 if (rc != 0) { 3582 cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling 
CIFS_MOUNT_USE_PREFIX_PATH\n"); 3583 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3584 rc = 0; 3585 } 3586 } 3587 3588 out: 3589 kfree(full_path); 3590 return rc; 3591 } 3592 3593 #ifdef CONFIG_CIFS_DFS_UPCALL 3594 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) 3595 { 3596 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, }; 3597 bool isdfs; 3598 int rc; 3599 3600 INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list); 3601 3602 rc = dfs_mount_share(&mnt_ctx, &isdfs); 3603 if (rc) 3604 goto error; 3605 if (!isdfs) 3606 goto out; 3607 3608 /* 3609 * After reconnecting to a different server, unique ids won't match anymore, so we disable 3610 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE). 3611 */ 3612 cifs_autodisable_serverino(cifs_sb); 3613 /* 3614 * Force the use of prefix path to support failover on DFS paths that resolve to targets 3615 * that have different prefix paths. 3616 */ 3617 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 3618 kfree(cifs_sb->prepath); 3619 cifs_sb->prepath = ctx->prepath; 3620 ctx->prepath = NULL; 3621 3622 out: 3623 cifs_try_adding_channels(mnt_ctx.ses); 3624 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon); 3625 if (rc) 3626 goto error; 3627 3628 free_xid(mnt_ctx.xid); 3629 return rc; 3630 3631 error: 3632 dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list); 3633 cifs_mount_put_conns(&mnt_ctx); 3634 return rc; 3635 } 3636 #else 3637 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) 3638 { 3639 int rc = 0; 3640 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, }; 3641 3642 rc = cifs_mount_get_session(&mnt_ctx); 3643 if (rc) 3644 goto error; 3645 3646 rc = cifs_mount_get_tcon(&mnt_ctx); 3647 if (rc) 3648 goto error; 3649 3650 rc = cifs_is_path_remote(&mnt_ctx); 3651 if (rc == -EREMOTE) 3652 rc = -EOPNOTSUPP; 3653 if (rc) 3654 goto error; 3655 3656 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, 
mnt_ctx.tcon); 3657 if (rc) 3658 goto error; 3659 3660 free_xid(mnt_ctx.xid); 3661 return rc; 3662 3663 error: 3664 cifs_mount_put_conns(&mnt_ctx); 3665 return rc; 3666 } 3667 #endif 3668 3669 /* 3670 * Issue a TREE_CONNECT request. 3671 */ 3672 int 3673 CIFSTCon(const unsigned int xid, struct cifs_ses *ses, 3674 const char *tree, struct cifs_tcon *tcon, 3675 const struct nls_table *nls_codepage) 3676 { 3677 struct smb_hdr *smb_buffer; 3678 struct smb_hdr *smb_buffer_response; 3679 TCONX_REQ *pSMB; 3680 TCONX_RSP *pSMBr; 3681 unsigned char *bcc_ptr; 3682 int rc = 0; 3683 int length; 3684 __u16 bytes_left, count; 3685 3686 if (ses == NULL) 3687 return -EIO; 3688 3689 smb_buffer = cifs_buf_get(); 3690 if (smb_buffer == NULL) 3691 return -ENOMEM; 3692 3693 smb_buffer_response = smb_buffer; 3694 3695 header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, 3696 NULL /*no tid */ , 4 /*wct */ ); 3697 3698 smb_buffer->Mid = get_next_mid(ses->server); 3699 smb_buffer->Uid = ses->Suid; 3700 pSMB = (TCONX_REQ *) smb_buffer; 3701 pSMBr = (TCONX_RSP *) smb_buffer_response; 3702 3703 pSMB->AndXCommand = 0xFF; 3704 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); 3705 bcc_ptr = &pSMB->Password[0]; 3706 3707 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ 3708 *bcc_ptr = 0; /* password is null byte */ 3709 bcc_ptr++; /* skip password */ 3710 /* already aligned so no need to do it below */ 3711 3712 if (ses->server->sign) 3713 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 3714 3715 if (ses->capabilities & CAP_STATUS32) { 3716 smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS; 3717 } 3718 if (ses->capabilities & CAP_DFS) { 3719 smb_buffer->Flags2 |= SMBFLG2_DFS; 3720 } 3721 if (ses->capabilities & CAP_UNICODE) { 3722 smb_buffer->Flags2 |= SMBFLG2_UNICODE; 3723 length = 3724 cifs_strtoUTF16((__le16 *) bcc_ptr, tree, 3725 6 /* max utf8 char length in bytes */ * 3726 (/* server len*/ + 256 /* share len */), nls_codepage); 3727 bcc_ptr += 2 * length; /* convert num 16 bit words to bytes 
*/ 3728 bcc_ptr += 2; /* skip trailing null */ 3729 } else { /* ASCII */ 3730 strcpy(bcc_ptr, tree); 3731 bcc_ptr += strlen(tree) + 1; 3732 } 3733 strcpy(bcc_ptr, "?????"); 3734 bcc_ptr += strlen("?????"); 3735 bcc_ptr += 1; 3736 count = bcc_ptr - &pSMB->Password[0]; 3737 be32_add_cpu(&pSMB->hdr.smb_buf_length, count); 3738 pSMB->ByteCount = cpu_to_le16(count); 3739 3740 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 3741 0); 3742 3743 /* above now done in SendReceive */ 3744 if (rc == 0) { 3745 bool is_unicode; 3746 3747 tcon->tid = smb_buffer_response->Tid; 3748 bcc_ptr = pByteArea(smb_buffer_response); 3749 bytes_left = get_bcc(smb_buffer_response); 3750 length = strnlen(bcc_ptr, bytes_left - 2); 3751 if (smb_buffer->Flags2 & SMBFLG2_UNICODE) 3752 is_unicode = true; 3753 else 3754 is_unicode = false; 3755 3756 3757 /* skip service field (NB: this field is always ASCII) */ 3758 if (length == 3) { 3759 if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && 3760 (bcc_ptr[2] == 'C')) { 3761 cifs_dbg(FYI, "IPC connection\n"); 3762 tcon->ipc = true; 3763 tcon->pipe = true; 3764 } 3765 } else if (length == 2) { 3766 if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { 3767 /* the most common case */ 3768 cifs_dbg(FYI, "disk share connection\n"); 3769 } 3770 } 3771 bcc_ptr += length + 1; 3772 bytes_left -= (length + 1); 3773 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name)); 3774 3775 /* mostly informational -- no need to fail on error here */ 3776 kfree(tcon->nativeFileSystem); 3777 tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr, 3778 bytes_left, is_unicode, 3779 nls_codepage); 3780 3781 cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem); 3782 3783 if ((smb_buffer_response->WordCount == 3) || 3784 (smb_buffer_response->WordCount == 7)) 3785 /* field is in same location */ 3786 tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); 3787 else 3788 tcon->Flags = 0; 3789 cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags); 3790 } 3791 
3792 cifs_buf_release(smb_buffer); 3793 return rc; 3794 } 3795 3796 static void delayed_free(struct rcu_head *p) 3797 { 3798 struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu); 3799 3800 unload_nls(cifs_sb->local_nls); 3801 smb3_cleanup_fs_context(cifs_sb->ctx); 3802 kfree(cifs_sb); 3803 } 3804 3805 void 3806 cifs_umount(struct cifs_sb_info *cifs_sb) 3807 { 3808 struct rb_root *root = &cifs_sb->tlink_tree; 3809 struct rb_node *node; 3810 struct tcon_link *tlink; 3811 3812 cancel_delayed_work_sync(&cifs_sb->prune_tlinks); 3813 3814 spin_lock(&cifs_sb->tlink_tree_lock); 3815 while ((node = rb_first(root))) { 3816 tlink = rb_entry(node, struct tcon_link, tl_rbnode); 3817 cifs_get_tlink(tlink); 3818 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); 3819 rb_erase(node, root); 3820 3821 spin_unlock(&cifs_sb->tlink_tree_lock); 3822 cifs_put_tlink(tlink); 3823 spin_lock(&cifs_sb->tlink_tree_lock); 3824 } 3825 spin_unlock(&cifs_sb->tlink_tree_lock); 3826 3827 kfree(cifs_sb->prepath); 3828 call_rcu(&cifs_sb->rcu, delayed_free); 3829 } 3830 3831 int 3832 cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, 3833 struct TCP_Server_Info *server) 3834 { 3835 int rc = 0; 3836 3837 if (!server->ops->need_neg || !server->ops->negotiate) 3838 return -ENOSYS; 3839 3840 /* only send once per connect */ 3841 spin_lock(&server->srv_lock); 3842 if (server->tcpStatus != CifsGood && 3843 server->tcpStatus != CifsNew && 3844 server->tcpStatus != CifsNeedNegotiate) { 3845 spin_unlock(&server->srv_lock); 3846 return -EHOSTDOWN; 3847 } 3848 3849 if (!server->ops->need_neg(server) && 3850 server->tcpStatus == CifsGood) { 3851 spin_unlock(&server->srv_lock); 3852 return 0; 3853 } 3854 3855 server->tcpStatus = CifsInNegotiate; 3856 spin_unlock(&server->srv_lock); 3857 3858 rc = server->ops->negotiate(xid, ses, server); 3859 if (rc == 0) { 3860 spin_lock(&server->srv_lock); 3861 if (server->tcpStatus == CifsInNegotiate) 3862 server->tcpStatus = CifsGood; 3863 
else 3864 rc = -EHOSTDOWN; 3865 spin_unlock(&server->srv_lock); 3866 } else { 3867 spin_lock(&server->srv_lock); 3868 if (server->tcpStatus == CifsInNegotiate) 3869 server->tcpStatus = CifsNeedNegotiate; 3870 spin_unlock(&server->srv_lock); 3871 } 3872 3873 return rc; 3874 } 3875 3876 int 3877 cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, 3878 struct TCP_Server_Info *server, 3879 struct nls_table *nls_info) 3880 { 3881 int rc = -ENOSYS; 3882 struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ? server->primary_server : server; 3883 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr; 3884 struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr; 3885 bool is_binding = false; 3886 3887 spin_lock(&ses->ses_lock); 3888 cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n", 3889 __func__, ses->chans_need_reconnect); 3890 3891 if (ses->ses_status != SES_GOOD && 3892 ses->ses_status != SES_NEW && 3893 ses->ses_status != SES_NEED_RECON) { 3894 spin_unlock(&ses->ses_lock); 3895 return -EHOSTDOWN; 3896 } 3897 3898 /* only send once per connect */ 3899 spin_lock(&ses->chan_lock); 3900 if (CIFS_ALL_CHANS_GOOD(ses)) { 3901 if (ses->ses_status == SES_NEED_RECON) 3902 ses->ses_status = SES_GOOD; 3903 spin_unlock(&ses->chan_lock); 3904 spin_unlock(&ses->ses_lock); 3905 return 0; 3906 } 3907 3908 cifs_chan_set_in_reconnect(ses, server); 3909 is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses); 3910 spin_unlock(&ses->chan_lock); 3911 3912 if (!is_binding) { 3913 ses->ses_status = SES_IN_SETUP; 3914 3915 /* force iface_list refresh */ 3916 ses->iface_last_update = 0; 3917 } 3918 spin_unlock(&ses->ses_lock); 3919 3920 /* update ses ip_addr only for primary chan */ 3921 if (server == pserver) { 3922 if (server->dstaddr.ss_family == AF_INET6) 3923 scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr); 3924 else 3925 scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr); 3926 } 3927 3928 if (!is_binding) 
{ 3929 ses->capabilities = server->capabilities; 3930 if (!linuxExtEnabled) 3931 ses->capabilities &= (~server->vals->cap_unix); 3932 3933 if (ses->auth_key.response) { 3934 cifs_dbg(FYI, "Free previous auth_key.response = %p\n", 3935 ses->auth_key.response); 3936 kfree_sensitive(ses->auth_key.response); 3937 ses->auth_key.response = NULL; 3938 ses->auth_key.len = 0; 3939 } 3940 } 3941 3942 cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n", 3943 server->sec_mode, server->capabilities, server->timeAdj); 3944 3945 if (server->ops->sess_setup) 3946 rc = server->ops->sess_setup(xid, ses, server, nls_info); 3947 3948 if (rc) { 3949 cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc); 3950 spin_lock(&ses->ses_lock); 3951 if (ses->ses_status == SES_IN_SETUP) 3952 ses->ses_status = SES_NEED_RECON; 3953 spin_lock(&ses->chan_lock); 3954 cifs_chan_clear_in_reconnect(ses, server); 3955 spin_unlock(&ses->chan_lock); 3956 spin_unlock(&ses->ses_lock); 3957 } else { 3958 spin_lock(&ses->ses_lock); 3959 if (ses->ses_status == SES_IN_SETUP) 3960 ses->ses_status = SES_GOOD; 3961 spin_lock(&ses->chan_lock); 3962 cifs_chan_clear_in_reconnect(ses, server); 3963 cifs_chan_clear_need_reconnect(ses, server); 3964 spin_unlock(&ses->chan_lock); 3965 spin_unlock(&ses->ses_lock); 3966 } 3967 3968 return rc; 3969 } 3970 3971 static int 3972 cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses) 3973 { 3974 ctx->sectype = ses->sectype; 3975 3976 /* krb5 is special, since we don't need username or pw */ 3977 if (ctx->sectype == Kerberos) 3978 return 0; 3979 3980 return cifs_set_cifscreds(ctx, ses); 3981 } 3982 3983 static struct cifs_tcon * 3984 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) 3985 { 3986 int rc; 3987 struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb); 3988 struct cifs_ses *ses; 3989 struct cifs_tcon *tcon = NULL; 3990 struct smb3_fs_context *ctx; 3991 3992 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 3993 if (ctx == 
NULL) 3994 return ERR_PTR(-ENOMEM); 3995 3996 ctx->local_nls = cifs_sb->local_nls; 3997 ctx->linux_uid = fsuid; 3998 ctx->cred_uid = fsuid; 3999 ctx->UNC = master_tcon->tree_name; 4000 ctx->retry = master_tcon->retry; 4001 ctx->nocase = master_tcon->nocase; 4002 ctx->nohandlecache = master_tcon->nohandlecache; 4003 ctx->local_lease = master_tcon->local_lease; 4004 ctx->no_lease = master_tcon->no_lease; 4005 ctx->resilient = master_tcon->use_resilient; 4006 ctx->persistent = master_tcon->use_persistent; 4007 ctx->handle_timeout = master_tcon->handle_timeout; 4008 ctx->no_linux_ext = !master_tcon->unix_ext; 4009 ctx->linux_ext = master_tcon->posix_extensions; 4010 ctx->sectype = master_tcon->ses->sectype; 4011 ctx->sign = master_tcon->ses->sign; 4012 ctx->seal = master_tcon->seal; 4013 ctx->witness = master_tcon->use_witness; 4014 4015 rc = cifs_set_vol_auth(ctx, master_tcon->ses); 4016 if (rc) { 4017 tcon = ERR_PTR(rc); 4018 goto out; 4019 } 4020 4021 /* get a reference for the same TCP session */ 4022 spin_lock(&cifs_tcp_ses_lock); 4023 ++master_tcon->ses->server->srv_count; 4024 spin_unlock(&cifs_tcp_ses_lock); 4025 4026 ses = cifs_get_smb_ses(master_tcon->ses->server, ctx); 4027 if (IS_ERR(ses)) { 4028 tcon = (struct cifs_tcon *)ses; 4029 cifs_put_tcp_session(master_tcon->ses->server, 0); 4030 goto out; 4031 } 4032 4033 tcon = cifs_get_tcon(ses, ctx); 4034 if (IS_ERR(tcon)) { 4035 cifs_put_smb_ses(ses); 4036 goto out; 4037 } 4038 4039 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 4040 if (cap_unix(ses)) 4041 reset_cifs_unix_caps(0, tcon, NULL, ctx); 4042 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 4043 4044 out: 4045 kfree(ctx->username); 4046 kfree_sensitive(ctx->password); 4047 kfree(ctx); 4048 4049 return tcon; 4050 } 4051 4052 struct cifs_tcon * 4053 cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) 4054 { 4055 return tlink_tcon(cifs_sb_master_tlink(cifs_sb)); 4056 } 4057 4058 /* find and return a tlink with given uid */ 4059 static struct tcon_link * 4060 
tlink_rb_search(struct rb_root *root, kuid_t uid) 4061 { 4062 struct rb_node *node = root->rb_node; 4063 struct tcon_link *tlink; 4064 4065 while (node) { 4066 tlink = rb_entry(node, struct tcon_link, tl_rbnode); 4067 4068 if (uid_gt(tlink->tl_uid, uid)) 4069 node = node->rb_left; 4070 else if (uid_lt(tlink->tl_uid, uid)) 4071 node = node->rb_right; 4072 else 4073 return tlink; 4074 } 4075 return NULL; 4076 } 4077 4078 /* insert a tcon_link into the tree */ 4079 static void 4080 tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink) 4081 { 4082 struct rb_node **new = &(root->rb_node), *parent = NULL; 4083 struct tcon_link *tlink; 4084 4085 while (*new) { 4086 tlink = rb_entry(*new, struct tcon_link, tl_rbnode); 4087 parent = *new; 4088 4089 if (uid_gt(tlink->tl_uid, new_tlink->tl_uid)) 4090 new = &((*new)->rb_left); 4091 else 4092 new = &((*new)->rb_right); 4093 } 4094 4095 rb_link_node(&new_tlink->tl_rbnode, parent, new); 4096 rb_insert_color(&new_tlink->tl_rbnode, root); 4097 } 4098 4099 /* 4100 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the 4101 * current task. 4102 * 4103 * If the superblock doesn't refer to a multiuser mount, then just return 4104 * the master tcon for the mount. 4105 * 4106 * First, search the rbtree for an existing tcon for this fsuid. If one 4107 * exists, then check to see if it's pending construction. If it is then wait 4108 * for construction to complete. Once it's no longer pending, check to see if 4109 * it failed and either return an error or retry construction, depending on 4110 * the timeout. 4111 * 4112 * If one doesn't exist then insert a new tcon_link struct into the tree and 4113 * try to construct a new one. 
4114 */ 4115 struct tcon_link * 4116 cifs_sb_tlink(struct cifs_sb_info *cifs_sb) 4117 { 4118 int ret; 4119 kuid_t fsuid = current_fsuid(); 4120 struct tcon_link *tlink, *newtlink; 4121 4122 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 4123 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); 4124 4125 spin_lock(&cifs_sb->tlink_tree_lock); 4126 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); 4127 if (tlink) 4128 cifs_get_tlink(tlink); 4129 spin_unlock(&cifs_sb->tlink_tree_lock); 4130 4131 if (tlink == NULL) { 4132 newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL); 4133 if (newtlink == NULL) 4134 return ERR_PTR(-ENOMEM); 4135 newtlink->tl_uid = fsuid; 4136 newtlink->tl_tcon = ERR_PTR(-EACCES); 4137 set_bit(TCON_LINK_PENDING, &newtlink->tl_flags); 4138 set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags); 4139 cifs_get_tlink(newtlink); 4140 4141 spin_lock(&cifs_sb->tlink_tree_lock); 4142 /* was one inserted after previous search? */ 4143 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); 4144 if (tlink) { 4145 cifs_get_tlink(tlink); 4146 spin_unlock(&cifs_sb->tlink_tree_lock); 4147 kfree(newtlink); 4148 goto wait_for_construction; 4149 } 4150 tlink = newtlink; 4151 tlink_rb_insert(&cifs_sb->tlink_tree, tlink); 4152 spin_unlock(&cifs_sb->tlink_tree_lock); 4153 } else { 4154 wait_for_construction: 4155 ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING, 4156 TASK_INTERRUPTIBLE); 4157 if (ret) { 4158 cifs_put_tlink(tlink); 4159 return ERR_PTR(-ERESTARTSYS); 4160 } 4161 4162 /* if it's good, return it */ 4163 if (!IS_ERR(tlink->tl_tcon)) 4164 return tlink; 4165 4166 /* return error if we tried this already recently */ 4167 if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) { 4168 cifs_put_tlink(tlink); 4169 return ERR_PTR(-EACCES); 4170 } 4171 4172 if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags)) 4173 goto wait_for_construction; 4174 } 4175 4176 tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid); 4177 clear_bit(TCON_LINK_PENDING, 
&tlink->tl_flags); 4178 wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING); 4179 4180 if (IS_ERR(tlink->tl_tcon)) { 4181 cifs_put_tlink(tlink); 4182 return ERR_PTR(-EACCES); 4183 } 4184 4185 return tlink; 4186 } 4187 4188 /* 4189 * periodic workqueue job that scans tcon_tree for a superblock and closes 4190 * out tcons. 4191 */ 4192 static void 4193 cifs_prune_tlinks(struct work_struct *work) 4194 { 4195 struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info, 4196 prune_tlinks.work); 4197 struct rb_root *root = &cifs_sb->tlink_tree; 4198 struct rb_node *node; 4199 struct rb_node *tmp; 4200 struct tcon_link *tlink; 4201 4202 /* 4203 * Because we drop the spinlock in the loop in order to put the tlink 4204 * it's not guarded against removal of links from the tree. The only 4205 * places that remove entries from the tree are this function and 4206 * umounts. Because this function is non-reentrant and is canceled 4207 * before umount can proceed, this is safe. 4208 */ 4209 spin_lock(&cifs_sb->tlink_tree_lock); 4210 node = rb_first(root); 4211 while (node != NULL) { 4212 tmp = node; 4213 node = rb_next(tmp); 4214 tlink = rb_entry(tmp, struct tcon_link, tl_rbnode); 4215 4216 if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) || 4217 atomic_read(&tlink->tl_count) != 0 || 4218 time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies)) 4219 continue; 4220 4221 cifs_get_tlink(tlink); 4222 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); 4223 rb_erase(tmp, root); 4224 4225 spin_unlock(&cifs_sb->tlink_tree_lock); 4226 cifs_put_tlink(tlink); 4227 spin_lock(&cifs_sb->tlink_tree_lock); 4228 } 4229 spin_unlock(&cifs_sb->tlink_tree_lock); 4230 4231 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, 4232 TLINK_IDLE_EXPIRE); 4233 } 4234 4235 #ifndef CONFIG_CIFS_DFS_UPCALL 4236 int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc) 4237 { 4238 int rc; 4239 const struct smb_version_operations *ops = tcon->ses->server->ops; 4240 
4241 /* only send once per connect */ 4242 spin_lock(&tcon->tc_lock); 4243 4244 /* if tcon is marked for needing reconnect, update state */ 4245 if (tcon->need_reconnect) 4246 tcon->status = TID_NEED_TCON; 4247 4248 if (tcon->status == TID_GOOD) { 4249 spin_unlock(&tcon->tc_lock); 4250 return 0; 4251 } 4252 4253 if (tcon->status != TID_NEW && 4254 tcon->status != TID_NEED_TCON) { 4255 spin_unlock(&tcon->tc_lock); 4256 return -EHOSTDOWN; 4257 } 4258 4259 tcon->status = TID_IN_TCON; 4260 spin_unlock(&tcon->tc_lock); 4261 4262 rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc); 4263 if (rc) { 4264 spin_lock(&tcon->tc_lock); 4265 if (tcon->status == TID_IN_TCON) 4266 tcon->status = TID_NEED_TCON; 4267 spin_unlock(&tcon->tc_lock); 4268 } else { 4269 spin_lock(&tcon->tc_lock); 4270 if (tcon->status == TID_IN_TCON) 4271 tcon->status = TID_GOOD; 4272 tcon->need_reconnect = false; 4273 spin_unlock(&tcon->tc_lock); 4274 } 4275 4276 return rc; 4277 } 4278 #endif 4279