xref: /openbmc/linux/fs/smb/client/connect.c (revision 5e2af67d84450903d6a37df72a82e81ecc899eba)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2011
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  */
8 #include <linux/fs.h>
9 #include <linux/net.h>
10 #include <linux/string.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/signal.h>
13 #include <linux/list.h>
14 #include <linux/wait.h>
15 #include <linux/slab.h>
16 #include <linux/pagemap.h>
17 #include <linux/ctype.h>
18 #include <linux/utsname.h>
19 #include <linux/mempool.h>
20 #include <linux/delay.h>
21 #include <linux/completion.h>
22 #include <linux/kthread.h>
23 #include <linux/pagevec.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/uuid.h>
27 #include <linux/uaccess.h>
28 #include <asm/processor.h>
29 #include <linux/inet.h>
30 #include <linux/module.h>
31 #include <keys/user-type.h>
32 #include <net/ipv6.h>
33 #include <linux/parser.h>
34 #include <linux/bvec.h>
35 #include "cifspdu.h"
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
41 #include "ntlmssp.h"
42 #include "nterr.h"
43 #include "rfc1002pdu.h"
44 #include "fscache.h"
45 #include "smb2proto.h"
46 #include "smbdirect.h"
47 #include "dns_resolve.h"
48 #ifdef CONFIG_CIFS_DFS_UPCALL
49 #include "dfs.h"
50 #include "dfs_cache.h"
51 #endif
52 #include "fs_context.h"
53 #include "cifs_swn.h"
54 
55 extern mempool_t *cifs_req_poolp;
56 extern bool disable_legacy_dialects;
57 
58 /* FIXME: should these be tunable? */
59 #define TLINK_ERROR_EXPIRE	(1 * HZ)
60 #define TLINK_IDLE_EXPIRE	(600 * HZ)
61 
62 /* Drop the connection to not overload the server */
63 #define MAX_STATUS_IO_TIMEOUT   5
64 
65 static int ip_connect(struct TCP_Server_Info *server);
66 static int generic_ip_connect(struct TCP_Server_Info *server);
67 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
68 static void cifs_prune_tlinks(struct work_struct *work);
69 
70 /*
71  * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
72  * get their ip addresses changed at some point.
73  *
74  * This should be called with server->srv_mutex held.
75  */
static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
{
	int rc;
	int len;
	char *unc;
	struct sockaddr_storage ss;

	if (!server->hostname)
		return -EINVAL;

	/* if server hostname isn't populated, there's nothing to do here */
	if (server->hostname[0] == '\0')
		return 0;

	/* +3 covers the leading "\\" and the terminating NUL */
	len = strlen(server->hostname) + 3;

	/* build a UNC-style name ("\\hostname") for the DNS resolver */
	unc = kmalloc(len, GFP_KERNEL);
	if (!unc) {
		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
		return -ENOMEM;
	}
	scnprintf(unc, len, "\\\\%s", server->hostname);

	/* snapshot dstaddr under srv_lock; resolve without holding the lock */
	spin_lock(&server->srv_lock);
	ss = server->dstaddr;
	spin_unlock(&server->srv_lock);

	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
	kfree(unc);

	if (rc < 0) {
		/* resolution failed: keep the previously known address */
		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
			 __func__, server->hostname, rc);
	} else {
		/* publish the freshly resolved address under srv_lock */
		spin_lock(&server->srv_lock);
		memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr));
		spin_unlock(&server->srv_lock);
		rc = 0;
	}

	return rc;
}
118 
/*
 * Delayed worker that periodically re-queries the server's network
 * interfaces (used for multichannel), then re-arms itself.
 */
static void smb2_query_server_interfaces(struct work_struct *work)
{
	int rc;
	int xid;
	struct cifs_tcon *tcon = container_of(work,
					struct cifs_tcon,
					query_interfaces.work);
	struct TCP_Server_Info *server = tcon->ses->server;

	/*
	 * query server network interfaces, in case they change
	 */
	if (!server->ops->query_server_interfaces)
		return;

	xid = get_xid();
	rc = server->ops->query_server_interfaces(xid, tcon, false);
	free_xid(xid);

	if (rc) {
		/* server doesn't support the query: stop polling for good */
		if (rc == -EOPNOTSUPP)
			return;

		cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
				__func__, rc);
	}

	/* re-arm for the next poll interval (other errors are retried) */
	queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
			   (SMB_INTERFACE_POLL_INTERVAL * HZ));
}
149 
150 /*
151  * Update the tcpStatus for the server.
152  * This is used to signal the cifsd thread to call cifs_reconnect
153  * ONLY cifsd thread should call cifs_reconnect. For any other
154  * thread, use this function
155  *
156  * @server: the tcp ses for which reconnect is needed
157  * @all_channels: if this needs to be done for all channels
158  */
void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
				bool all_channels)
{
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	int i;

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	/* if we need to signal just this channel */
	if (!all_channels) {
		spin_lock(&server->srv_lock);
		/* never override CifsExiting; the demux thread is going away */
		if (server->tcpStatus != CifsExiting)
			server->tcpStatus = CifsNeedReconnect;
		spin_unlock(&server->srv_lock);
		return;
	}

	/*
	 * Mark every channel of every session on the primary server.
	 * Lock order: cifs_tcp_ses_lock -> chan_lock -> srv_lock.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		if (cifs_ses_exiting(ses))
			continue;
		spin_lock(&ses->chan_lock);
		for (i = 0; i < ses->chan_count; i++) {
			if (!ses->chans[i].server)
				continue;

			spin_lock(&ses->chans[i].server->srv_lock);
			if (ses->chans[i].server->tcpStatus != CifsExiting)
				ses->chans[i].server->tcpStatus = CifsNeedReconnect;
			spin_unlock(&ses->chans[i].server->srv_lock);
		}
		spin_unlock(&ses->chan_lock);
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
197 
198 /*
199  * Mark all sessions and tcons for reconnect.
200  * IMPORTANT: make sure that this gets called only from
201  * cifsd thread. For any other thread, use
202  * cifs_signal_cifsd_for_reconnect
203  *
204  * @server: the tcp ses for which reconnect is needed
205  * @server needs to be previously set to CifsNeedReconnect.
206  * @mark_smb_session: whether even sessions need to be marked
207  */
208 void
209 cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
210 				      bool mark_smb_session)
211 {
212 	struct TCP_Server_Info *pserver;
213 	struct cifs_ses *ses, *nses;
214 	struct cifs_tcon *tcon;
215 
216 	/*
217 	 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
218 	 * are not used until reconnected.
219 	 */
220 	cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);
221 
222 	/* If server is a channel, select the primary channel */
223 	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
224 
225 	/*
226 	 * if the server has been marked for termination, there is a
227 	 * chance that the remaining channels all need reconnect. To be
228 	 * on the safer side, mark the session and trees for reconnect
229 	 * for this scenario. This might cause a few redundant session
230 	 * setup and tree connect requests, but it is better than not doing
231 	 * a tree connect when needed, and all following requests failing
232 	 */
233 	if (server->terminate) {
234 		mark_smb_session = true;
235 		server = pserver;
236 	}
237 
238 	spin_lock(&cifs_tcp_ses_lock);
239 	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
240 		spin_lock(&ses->ses_lock);
241 		if (ses->ses_status == SES_EXITING) {
242 			spin_unlock(&ses->ses_lock);
243 			continue;
244 		}
245 		spin_unlock(&ses->ses_lock);
246 
247 		spin_lock(&ses->chan_lock);
248 		if (cifs_ses_get_chan_index(ses, server) ==
249 		    CIFS_INVAL_CHAN_INDEX) {
250 			spin_unlock(&ses->chan_lock);
251 			continue;
252 		}
253 
254 		if (!cifs_chan_is_iface_active(ses, server)) {
255 			spin_unlock(&ses->chan_lock);
256 			cifs_chan_update_iface(ses, server);
257 			spin_lock(&ses->chan_lock);
258 		}
259 
260 		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
261 			spin_unlock(&ses->chan_lock);
262 			continue;
263 		}
264 
265 		if (mark_smb_session)
266 			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
267 		else
268 			cifs_chan_set_need_reconnect(ses, server);
269 
270 		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
271 			 __func__, ses->chans_need_reconnect);
272 
273 		/* If all channels need reconnect, then tcon needs reconnect */
274 		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
275 			spin_unlock(&ses->chan_lock);
276 			continue;
277 		}
278 		spin_unlock(&ses->chan_lock);
279 
280 		spin_lock(&ses->ses_lock);
281 		ses->ses_status = SES_NEED_RECON;
282 		spin_unlock(&ses->ses_lock);
283 
284 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
285 			tcon->need_reconnect = true;
286 			spin_lock(&tcon->tc_lock);
287 			tcon->status = TID_NEED_RECON;
288 			spin_unlock(&tcon->tc_lock);
289 
290 			cancel_delayed_work(&tcon->query_interfaces);
291 		}
292 		if (ses->tcon_ipc) {
293 			ses->tcon_ipc->need_reconnect = true;
294 			spin_lock(&ses->tcon_ipc->tc_lock);
295 			ses->tcon_ipc->status = TID_NEED_RECON;
296 			spin_unlock(&ses->tcon_ipc->tc_lock);
297 		}
298 	}
299 	spin_unlock(&cifs_tcp_ses_lock);
300 }
301 
/*
 * Tear down the TCP (or smbdirect) transport of @server and fail all
 * in-flight mids with MID_RETRY_NEEDED so their callers can retry once
 * the session is re-established.
 */
static void
cifs_abort_connection(struct TCP_Server_Info *server)
{
	struct mid_q_entry *mid, *nmid;
	struct list_head retry_list;

	server->maxBuf = 0;
	server->max_read = 0;

	/* do not want to be sending data on a socket we are freeing */
	cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
	cifs_server_lock(server);
	if (server->ssocket) {
		cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		kernel_sock_shutdown(server->ssocket, SHUT_WR);
		cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}
	/* reset per-connection state; session key must be renegotiated */
	server->sequence_number = 0;
	server->session_estab = false;
	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	server->lstrp = jiffies;

	/* mark submitted MIDs for retry and issue callback */
	INIT_LIST_HEAD(&retry_list);
	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
	spin_lock(&server->mid_lock);
	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
		/* hold a ref so the mid survives until the callback below */
		kref_get(&mid->refcount);
		if (mid->mid_state == MID_REQUEST_SUBMITTED)
			mid->mid_state = MID_RETRY_NEEDED;
		list_move(&mid->qhead, &retry_list);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&server->mid_lock);
	cifs_server_unlock(server);

	/* callbacks are invoked outside mid_lock/server lock to avoid deadlock */
	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
	list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
		list_del_init(&mid->qhead);
		mid->callback(mid);
		release_mid(mid);
	}

	if (cifs_rdma_enabled(server)) {
		cifs_server_lock(server);
		smbd_destroy(server);
		cifs_server_unlock(server);
	}
}
357 
/*
 * Record @num_targets and flip the server to CifsNeedReconnect.
 * Returns false if the server is already exiting (caller must not
 * attempt a reconnect in that case), true otherwise.
 */
static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
{
	spin_lock(&server->srv_lock);
	server->nr_targets = num_targets;
	if (server->tcpStatus == CifsExiting) {
		/* the demux thread will exit normally next time through the loop */
		spin_unlock(&server->srv_lock);
		wake_up(&server->response_q);
		return false;
	}

	cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
	trace_smb3_reconnect(server->CurrentMid, server->conn_id,
			     server->hostname);
	server->tcpStatus = CifsNeedReconnect;

	spin_unlock(&server->srv_lock);
	return true;
}
377 
378 /*
379  * cifs tcp session reconnection
380  *
381  * mark tcp session as reconnecting so temporarily locked
382  * mark all smb sessions as reconnecting for tcp session
383  * reconnect tcp session
384  * wake up waiters on reconnection? - (not needed currently)
385  *
386  * if mark_smb_session is passed as true, unconditionally mark
387  * the smb session (and tcon) for reconnect as well. This value
388  * doesn't really matter for non-multichannel scenario.
389  *
390  */
static int __cifs_reconnect(struct TCP_Server_Info *server,
			    bool mark_smb_session)
{
	int rc = 0;

	/* bail out if the server is already exiting */
	if (!cifs_tcp_ses_needs_reconnect(server, 1))
		return 0;

	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);

	cifs_abort_connection(server);

	/* retry until connected or until the server starts exiting */
	do {
		try_to_freeze();
		cifs_server_lock(server);

		if (!cifs_swn_set_server_dstaddr(server)) {
			/* resolve the hostname again to make sure that IP address is up-to-date */
			rc = reconn_set_ipaddr_from_hostname(server);
			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
		}

		if (cifs_rdma_enabled(server))
			rc = smbd_reconnect(server);
		else
			rc = generic_ip_connect(server);
		if (rc) {
			/* back off before the next attempt */
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			msleep(3000);
		} else {
			atomic_inc(&tcpSesReconnectCount);
			set_credits(server, 1);
			spin_lock(&server->srv_lock);
			if (server->tcpStatus != CifsExiting)
				server->tcpStatus = CifsNeedNegotiate;
			spin_unlock(&server->srv_lock);
			cifs_swn_reset_server_dstaddr(server);
			cifs_server_unlock(server);
			mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
		}
		/*
		 * NOTE(review): tcpStatus is read here without srv_lock; a
		 * stale read only causes one extra loop iteration — confirm
		 * this is the intended tolerance.
		 */
	} while (server->tcpStatus == CifsNeedReconnect);

	/* once negotiable again, kick the echo worker immediately */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}
442 
443 #ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * Reconnect the transport of @server to a single DFS @target.
 * Caller must hold the server lock (hence "_unlocked" internals).
 */
static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
{
	int rc;
	char *hostname;

	if (!cifs_swn_set_server_dstaddr(server)) {
		/* swap in the target's hostname unless it's already ours */
		if (server->hostname != target) {
			hostname = extract_hostname(target);
			if (!IS_ERR(hostname)) {
				spin_lock(&server->srv_lock);
				kfree(server->hostname);
				server->hostname = hostname;
				spin_unlock(&server->srv_lock);
			} else {
				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
					 __func__, PTR_ERR(hostname));
				cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
					 server->hostname);
			}
		}
		/* resolve the hostname again to make sure that IP address is up-to-date. */
		rc = reconn_set_ipaddr_from_hostname(server);
		cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
	}
	/* Reconnect the socket */
	if (cifs_rdma_enabled(server))
		rc = smbd_reconnect(server);
	else
		rc = generic_ip_connect(server);

	return rc;
}
476 
/*
 * Try each DFS target in @tl until one connects; on success,
 * *@target_hint points at the iterator of the working target.
 */
static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
				     struct dfs_cache_tgt_iterator **target_hint)
{
	int rc;
	struct dfs_cache_tgt_iterator *tit;

	*target_hint = NULL;

	/* If dfs target list is empty, then reconnect to last server */
	tit = dfs_cache_get_tgt_iterator(tl);
	if (!tit)
		return __reconnect_target_unlocked(server, server->hostname);

	/* Otherwise, try every dfs target in @tl */
	for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
		rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
		if (!rc) {
			*target_hint = tit;
			break;
		}
	}
	return rc;
}
500 
/*
 * DFS-aware variant of __cifs_reconnect(): cycles through the cached
 * referral targets until the transport comes back, then records the
 * winning target as the new hint.
 */
static int reconnect_dfs_server(struct TCP_Server_Info *server)
{
	struct dfs_cache_tgt_iterator *target_hint = NULL;
	DFS_CACHE_TGT_LIST(tl);
	int num_targets = 0;
	int rc = 0;

	/*
	 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
	 *
	 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
	 * targets (server->nr_targets).  It's also possible that the cached referral was cleared
	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
	 * refreshing the referral, so, in this case, default it to 1.
	 */
	mutex_lock(&server->refpath_lock);
	if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
		num_targets = dfs_cache_get_nr_tgts(&tl);
	mutex_unlock(&server->refpath_lock);
	if (!num_targets)
		num_targets = 1;

	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
		return 0;

	/*
	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
	 * different server or share during failover.  It could be improved by adding some logic to
	 * only do that in case it connects to a different server or share, though.
	 */
	cifs_mark_tcp_ses_conns_for_reconnect(server, true);

	cifs_abort_connection(server);

	/* keep cycling through targets until connected or exiting */
	do {
		try_to_freeze();
		cifs_server_lock(server);

		rc = reconnect_target_unlocked(server, &tl, &target_hint);
		if (rc) {
			/* Failed to reconnect socket */
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			msleep(3000);
			continue;
		}
		/*
		 * Socket was created.  Update tcp session status to CifsNeedNegotiate so that a
		 * process waiting for reconnect will know it needs to re-establish session and tcon
		 * through the reconnected target server.
		 */
		atomic_inc(&tcpSesReconnectCount);
		set_credits(server, 1);
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsExiting)
			server->tcpStatus = CifsNeedNegotiate;
		spin_unlock(&server->srv_lock);
		cifs_swn_reset_server_dstaddr(server);
		cifs_server_unlock(server);
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
	} while (server->tcpStatus == CifsNeedReconnect);

	/* remember which target worked so future lookups prefer it */
	mutex_lock(&server->refpath_lock);
	dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
	mutex_unlock(&server->refpath_lock);
	dfs_cache_free_tgts(&tl);

	/* Need to set up echo worker again once connection has been established */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}
577 
/*
 * Reconnect entry point (DFS build): use the DFS failover path when the
 * server has a cached referral path, plain reconnect otherwise.
 */
int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
	mutex_lock(&server->refpath_lock);
	if (!server->leaf_fullpath) {
		mutex_unlock(&server->refpath_lock);
		return __cifs_reconnect(server, mark_smb_session);
	}
	mutex_unlock(&server->refpath_lock);

	return reconnect_dfs_server(server);
}
589 #else
/* Reconnect entry point (non-DFS build): plain reconnect only. */
int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
	return __cifs_reconnect(server, mark_smb_session);
}
594 #endif
595 
/*
 * Periodic worker that sends an SMB echo to keep the session alive and
 * to detect dead servers; always re-arms itself.
 */
static void
cifs_echo_request(struct work_struct *work)
{
	int rc;
	struct TCP_Server_Info *server = container_of(work,
					struct TCP_Server_Info, echo.work);

	/*
	 * We cannot send an echo if it is disabled.
	 * Also, no need to ping if we got a response recently.
	 */

	/*
	 * NOTE(review): tcpStatus is read without srv_lock here; worst case
	 * appears to be one skipped or extra echo — confirm intended.
	 */
	if (server->tcpStatus == CifsNeedReconnect ||
	    server->tcpStatus == CifsExiting ||
	    server->tcpStatus == CifsNew ||
	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
	    time_before(jiffies, server->lstrp + server->echo_interval - HZ))
		goto requeue_echo;

	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
	cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);

	/* Check witness registrations */
	cifs_swn_check();

requeue_echo:
	queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
}
624 
625 static bool
626 allocate_buffers(struct TCP_Server_Info *server)
627 {
628 	if (!server->bigbuf) {
629 		server->bigbuf = (char *)cifs_buf_get();
630 		if (!server->bigbuf) {
631 			cifs_server_dbg(VFS, "No memory for large SMB response\n");
632 			msleep(3000);
633 			/* retry will check if exiting */
634 			return false;
635 		}
636 	} else if (server->large_buf) {
637 		/* we are reusing a dirty large buf, clear its start */
638 		memset(server->bigbuf, 0, HEADER_SIZE(server));
639 	}
640 
641 	if (!server->smallbuf) {
642 		server->smallbuf = (char *)cifs_small_buf_get();
643 		if (!server->smallbuf) {
644 			cifs_server_dbg(VFS, "No memory for SMB response\n");
645 			msleep(1000);
646 			/* retry will check if exiting */
647 			return false;
648 		}
649 		/* beginning of smb buffer is cleared in our buf_get */
650 	} else {
651 		/* if existing small buf clear beginning */
652 		memset(server->smallbuf, 0, HEADER_SIZE(server));
653 	}
654 
655 	return true;
656 }
657 
static bool
server_unresponsive(struct TCP_Server_Info *server)
{
	/*
	 * We need to wait 3 echo intervals to make sure we handle such
	 * situations right:
	 * 1s  client sends a normal SMB request
	 * 2s  client gets a response
	 * 30s echo workqueue job pops, and decides we got a response recently
	 *     and don't need to send another
	 * ...
	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
	 *     a response in >60s.
	 */
	spin_lock(&server->srv_lock);
	if ((server->tcpStatus == CifsGood ||
	    server->tcpStatus == CifsNeedNegotiate) &&
	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
		/* drop the lock before reconnecting (cifs_reconnect takes it) */
		spin_unlock(&server->srv_lock);
		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
			 (3 * server->echo_interval) / HZ);
		cifs_reconnect(server, false);
		return true;
	}
	spin_unlock(&server->srv_lock);

	return false;
}
687 
688 static inline bool
689 zero_credits(struct TCP_Server_Info *server)
690 {
691 	int val;
692 
693 	spin_lock(&server->req_lock);
694 	val = server->credits + server->echo_credits + server->oplock_credits;
695 	if (server->in_flight == 0 && val == 0) {
696 		spin_unlock(&server->req_lock);
697 		return true;
698 	}
699 	spin_unlock(&server->req_lock);
700 	return false;
701 }
702 
/*
 * Read from the server's transport into @smb_msg until the iterator is
 * exhausted, handling transient errors, freeze, and reconnect triggers.
 * Returns the number of bytes read, or a negative error.
 */
static int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
{
	int length = 0;
	int total_read;

	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
		try_to_freeze();

		/* reconnect if no credits and no requests in flight */
		if (zero_credits(server)) {
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}

		if (server_unresponsive(server))
			return -ECONNABORTED;
		if (cifs_rdma_enabled(server) && server->smbd_conn)
			length = smbd_recv(server->smbd_conn, smb_msg);
		else
			length = sock_recvmsg(server->ssocket, smb_msg, 0);

		/* re-check server state after a potentially long recv */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ESHUTDOWN;
		}

		if (server->tcpStatus == CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
		spin_unlock(&server->srv_lock);

		if (length == -ERESTARTSYS ||
		    length == -EAGAIN ||
		    length == -EINTR) {
			/*
			 * Minimum sleep to prevent looping, allowing socket
			 * to clear and app threads to set tcpStatus
			 * CifsNeedReconnect if server hung.
			 */
			usleep_range(1000, 2000);
			length = 0;
			continue;
		}

		if (length <= 0) {
			/* 0 means orderly shutdown by the peer */
			cifs_dbg(FYI, "Received no data or error: %d\n", length);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
	}
	return total_read;
}
759 
760 int
761 cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
762 		      unsigned int to_read)
763 {
764 	struct msghdr smb_msg = {};
765 	struct kvec iov = {.iov_base = buf, .iov_len = to_read};
766 	iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
767 
768 	return cifs_readv_from_socket(server, &smb_msg);
769 }
770 
771 ssize_t
772 cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
773 {
774 	struct msghdr smb_msg = {};
775 
776 	/*
777 	 *  iov_iter_discard already sets smb_msg.type and count and iov_offset
778 	 *  and cifs_readv_from_socket sets msg_control and msg_controllen
779 	 *  so little to initialize in struct msghdr
780 	 */
781 	iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);
782 
783 	return cifs_readv_from_socket(server, &smb_msg);
784 }
785 
786 int
787 cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
788 	unsigned int page_offset, unsigned int to_read)
789 {
790 	struct msghdr smb_msg = {};
791 	struct bio_vec bv;
792 
793 	bvec_set_page(&bv, page, to_read, page_offset);
794 	iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
795 	return cifs_readv_from_socket(server, &smb_msg);
796 }
797 
798 int
799 cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter,
800 			   unsigned int to_read)
801 {
802 	struct msghdr smb_msg = { .msg_iter = *iter };
803 	int ret;
804 
805 	iov_iter_truncate(&smb_msg.msg_iter, to_read);
806 	ret = cifs_readv_from_socket(server, &smb_msg);
807 	if (ret > 0)
808 		iov_iter_advance(iter, ret);
809 	return ret;
810 }
811 
/*
 * Inspect the RFC 1002 message type byte of an incoming frame.
 * Returns true only for a regular session message (an SMB response);
 * other frame types are logged and may trigger a reconnect.
 */
static bool
is_smb_response(struct TCP_Server_Info *server, unsigned char type)
{
	/*
	 * The first byte big endian of the length field,
	 * is actually not part of the length but the type
	 * with the most common, zero, as regular data.
	 */
	switch (type) {
	case RFC1002_SESSION_MESSAGE:
		/* Regular SMB response */
		return true;
	case RFC1002_SESSION_KEEP_ALIVE:
		cifs_dbg(FYI, "RFC 1002 session keep alive\n");
		break;
	case RFC1002_POSITIVE_SESSION_RESPONSE:
		cifs_dbg(FYI, "RFC 1002 positive session response\n");
		break;
	case RFC1002_NEGATIVE_SESSION_RESPONSE:
		/*
		 * We get this from Windows 98 instead of an error on
		 * SMB negprot response.
		 */
		cifs_dbg(FYI, "RFC 1002 negative session response\n");
		/* give server a second to clean up */
		msleep(1000);
		/*
		 * Always try 445 first on reconnect since we get NACK
		 * on some if we ever connected to port 139 (the NACK
		 * is since we do not begin with RFC1001 session
		 * initialize frame).
		 */
		cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
		cifs_reconnect(server, true);
		break;
	default:
		cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
		cifs_reconnect(server, true);
	}

	return false;
}
854 
/*
 * Mark @mid as received (or malformed) and remove it from the server's
 * pending queue.  A mid already flagged MID_DELETED indicates a race
 * with send_recv() teardown and is reported rather than double-removed.
 */
void
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
	mid->when_received = jiffies;
#endif
	spin_lock(&mid->server->mid_lock);
	if (!malformed)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		mid->mid_state = MID_RESPONSE_MALFORMED;
	/*
	 * Trying to handle/dequeue a mid after the send_recv()
	 * function has finished processing it is a bug.
	 */
	if (mid->mid_flags & MID_DELETED) {
		spin_unlock(&mid->server->mid_lock);
		pr_warn_once("trying to dequeue a deleted mid\n");
	} else {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
		spin_unlock(&mid->server->mid_lock);
	}
}
879 
880 static unsigned int
881 smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
882 {
883 	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
884 
885 	/*
886 	 * SMB1 does not use credits.
887 	 */
888 	if (is_smb1(server))
889 		return 0;
890 
891 	return le16_to_cpu(shdr->CreditRequest);
892 }
893 
/*
 * Hand a received response buffer @buf to @mid and dequeue it.
 * Ownership of the buffer transfers to the mid's user thread, so the
 * server's buffer pointer is cleared to avoid reuse.
 */
static void
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
	   char *buf, int malformed)
{
	/* trans2 handler may consume the response itself */
	if (server->ops->check_trans2 &&
	    server->ops->check_trans2(mid, server, buf, malformed))
		return;
	mid->credits_received = smb2_get_credits_from_hdr(buf, server);
	mid->resp_buf = buf;
	mid->large_buf = server->large_buf;
	/* Was previous buf put in mpx struct for multi-rsp? */
	if (!mid->multiRsp) {
		/* smb buffer will be freed by user thread */
		if (server->large_buf)
			server->bigbuf = NULL;
		else
			server->smallbuf = NULL;
	}
	dequeue_mid(mid, malformed);
}
914 
915 int
916 cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
917 {
918 	bool srv_sign_required = server->sec_mode & server->vals->signing_required;
919 	bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
920 	bool mnt_sign_enabled;
921 
922 	/*
923 	 * Is signing required by mnt options? If not then check
924 	 * global_secflags to see if it is there.
925 	 */
926 	if (!mnt_sign_required)
927 		mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
928 						CIFSSEC_MUST_SIGN);
929 
930 	/*
931 	 * If signing is required then it's automatically enabled too,
932 	 * otherwise, check to see if the secflags allow it.
933 	 */
934 	mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
935 				(global_secflags & CIFSSEC_MAY_SIGN);
936 
937 	/* If server requires signing, does client allow it? */
938 	if (srv_sign_required) {
939 		if (!mnt_sign_enabled) {
940 			cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
941 			return -EOPNOTSUPP;
942 		}
943 		server->sign = true;
944 	}
945 
946 	/* If client requires signing, does server allow it? */
947 	if (mnt_sign_required) {
948 		if (!srv_sign_enabled) {
949 			cifs_dbg(VFS, "Server does not support signing!\n");
950 			return -EOPNOTSUPP;
951 		}
952 		server->sign = true;
953 	}
954 
955 	if (cifs_rdma_enabled(server) && server->sign)
956 		cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
957 
958 	return 0;
959 }
960 
/*
 * Final teardown of a TCP_Server_Info as its demultiplex thread exits:
 * unlink it, fail all pending mids, release the transport, and free the
 * structure.  Must only run once no new requests can reference @server.
 */
static noinline_for_stack void
clean_demultiplex_info(struct TCP_Server_Info *server)
{
	int length;

	/* take it off the list, if it's not already */
	spin_lock(&server->srv_lock);
	list_del_init(&server->tcp_ses_list);
	spin_unlock(&server->srv_lock);

	cancel_delayed_work_sync(&server->echo);

	spin_lock(&server->srv_lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&server->srv_lock);
	wake_up_all(&server->response_q);

	/* check if we have blocked requests that need to free */
	spin_lock(&server->req_lock);
	if (server->credits <= 0)
		server->credits = 1;
	spin_unlock(&server->req_lock);
	/*
	 * Although there should not be any requests blocked on this queue it
	 * can not hurt to be paranoid and try to wake up requests that may
	 * haven been blocked when more than 50 at time were on the wire to the
	 * same server - they now will see the session is in exit state and get
	 * out of SendReceive.
	 */
	wake_up_all(&server->request_q);
	/* give those requests time to exit */
	msleep(125);
	if (cifs_rdma_enabled(server))
		smbd_destroy(server);
	if (server->ssocket) {
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}

	if (!list_empty(&server->pending_mid_q)) {
		struct list_head dispose_list;
		struct mid_q_entry *mid_entry;
		struct list_head *tmp, *tmp2;

		/* move mids to a private list under mid_lock ... */
		INIT_LIST_HEAD(&dispose_list);
		spin_lock(&server->mid_lock);
		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
			kref_get(&mid_entry->refcount);
			mid_entry->mid_state = MID_SHUTDOWN;
			list_move(&mid_entry->qhead, &dispose_list);
			mid_entry->mid_flags |= MID_DELETED;
		}
		spin_unlock(&server->mid_lock);

		/* now walk dispose list and issue callbacks */
		list_for_each_safe(tmp, tmp2, &dispose_list) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
			list_del_init(&mid_entry->qhead);
			mid_entry->callback(mid_entry);
			release_mid(mid_entry);
		}
		/* 1/8th of sec is more than enough time for them to exit */
		msleep(125);
	}

	if (!list_empty(&server->pending_mid_q)) {
		/*
		 * mpx threads have not exited yet give them at least the smb
		 * send timeout time for long ops.
		 *
		 * Due to delays on oplock break requests, we need to wait at
		 * least 45 seconds before giving up on a request getting a
		 * response and going ahead and killing cifsd.
		 */
		cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
		msleep(46000);
		/*
		 * If threads still have not exited they are probably never
		 * coming home not much else we can do but free the memory.
		 */
	}

	kfree(server->leaf_fullpath);
	kfree(server);

	/* shrink the request mempool now that one fewer server exists */
	length = atomic_dec_return(&tcpSesAllocCount);
	if (length > 0)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
}
1053 
/*
 * Receive the body of a standard (non-transformed) SMB response.  The
 * caller has already read the preamble and MID header into the small
 * buffer; read the remainder of the PDU — switching to the large buffer
 * when needed — then hand the complete message to cifs_handle_standard().
 *
 * Returns a negative errno on socket-read failure or oversize PDU
 * (which also forces a reconnect), otherwise the result of
 * cifs_handle_standard().
 */
static int
standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length;
	char *buf = server->smallbuf;
	unsigned int pdu_length = server->pdu_size;

	/* make sure this will fit in a large buffer */
	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
	    HEADER_PREAMBLE_SIZE(server)) {
		cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
		cifs_reconnect(server, true);
		return -ECONNABORTED;
	}

	/* switch to large buffer if too big for a small one */
	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
		server->large_buf = true;
		/* carry over the bytes already read into the small buffer */
		memcpy(server->bigbuf, buf, server->total_read);
		buf = server->bigbuf;
	}

	/* now read the rest */
	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
				       pdu_length - MID_HEADER_SIZE(server));

	if (length < 0)
		return length;
	server->total_read += length;

	dump_smb(buf, server->total_read);

	return cifs_handle_standard(server, mid);
}
1088 
/*
 * Validate a fully-read SMB message and dispatch it to its mid.
 *
 * Returns 0 after handing the buffer to @mid via handle_mid(); -1 when
 * the session has expired (a reconnect is triggered) or the response is
 * STATUS_PENDING; or, when @mid is NULL, the check_message() result.
 */
int
cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
	int rc;

	/*
	 * We know that we received enough to get to the MID as we
	 * checked the pdu_length earlier. Now check to see
	 * if the rest of the header is OK.
	 *
	 * 48 bytes is enough to display the header and a little bit
	 * into the payload for debugging purposes.
	 */
	rc = server->ops->check_message(buf, server->total_read, server);
	if (rc)
		cifs_dump_mem("Bad SMB: ", buf,
			min_t(unsigned int, server->total_read, 48));

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	/* interim STATUS_PENDING responses carry no payload to deliver */
	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server))
		return -1;

	if (!mid)
		return rc;

	/* deliver the buffer (and any check_message error) to the waiter */
	handle_mid(mid, server, buf, rc);
	return 0;
}
1124 
/*
 * Reclaim the credits granted in an SMB2+ response header for frames
 * that bypass the normal mid callback path (oplock breaks and unmatched
 * frames — see the callers in cifs_demultiplex_thread()): add the
 * header's CreditRequest to server->credits and wake request waiters.
 * No-op for SMB1, which has no credit mechanism.
 */
static void
smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
{
	struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
	int scredits, in_flight;

	/*
	 * SMB1 does not use credits.
	 */
	if (is_smb1(server))
		return;

	if (shdr->CreditRequest) {
		spin_lock(&server->req_lock);
		server->credits += le16_to_cpu(shdr->CreditRequest);
		/* snapshot values under the lock for tracing below */
		scredits = server->credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);

		trace_smb3_hdr_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits,
				le16_to_cpu(shdr->CreditRequest), in_flight);
		cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
				__func__, le16_to_cpu(shdr->CreditRequest),
				scredits);
	}
}
1153 
1154 
/*
 * Per-connection receive loop ("cifsd" kthread).  Reads the 4-byte
 * RFC1001 length, then the MID header, then demultiplexes the PDU(s) —
 * possibly several compounded or encrypted responses per read — to the
 * mids waiting on them, or handles oplock breaks and unknown frames.
 * Loops until server->tcpStatus becomes CifsExiting, then tears the
 * connection down via clean_demultiplex_info().
 */
static int
cifs_demultiplex_thread(void *p)
{
	int i, num_mids, length;
	struct TCP_Server_Info *server = p;
	unsigned int pdu_length;
	unsigned int next_offset;
	char *buf = NULL;
	struct task_struct *task_to_wake = NULL;
	struct mid_q_entry *mids[MAX_COMPOUND];
	char *bufs[MAX_COMPOUND];
	unsigned int noreclaim_flag, num_io_timeout = 0;
	bool pending_reconnect = false;

	noreclaim_flag = memalloc_noreclaim_save();
	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));

	/* grow the shared request mempool with the connection count */
	length = atomic_inc_return(&tcpSesAllocCount);
	if (length > 1)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);

	set_freezable();
	allow_kernel_signal(SIGKILL);
	while (server->tcpStatus != CifsExiting) {
		if (try_to_freeze())
			continue;

		if (!allocate_buffers(server))
			continue;

		server->large_buf = false;
		buf = server->smallbuf;
		pdu_length = 4; /* enough to get RFC1001 header */

		length = cifs_read_from_socket(server, buf, pdu_length);
		if (length < 0)
			continue;

		if (is_smb1(server))
			server->total_read = length;
		else
			server->total_read = 0;

		/*
		 * The right amount was read from socket - 4 bytes,
		 * so we can now interpret the length field.
		 */
		pdu_length = get_rfc1002_length(buf);

		cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
		if (!is_smb_response(server, buf[0]))
			continue;

		pending_reconnect = false;
next_pdu:
		server->pdu_size = pdu_length;

		/* make sure we have enough to get to the MID */
		if (server->pdu_size < MID_HEADER_SIZE(server)) {
			cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
				 server->pdu_size);
			cifs_reconnect(server, true);
			continue;
		}

		/* read down to the MID */
		length = cifs_read_from_socket(server,
			     buf + HEADER_PREAMBLE_SIZE(server),
			     MID_HEADER_SIZE(server));
		if (length < 0)
			continue;
		server->total_read += length;

		if (server->ops->next_header) {
			/*
			 * SMB2+ responses may be compounded: next_header()
			 * reports the offset of the next response in this
			 * PDU, so process only up to that boundary now.
			 */
			if (server->ops->next_header(server, buf, &next_offset)) {
				cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
					 __func__, next_offset);
				cifs_reconnect(server, true);
				continue;
			}
			if (next_offset)
				server->pdu_size = next_offset;
		}

		memset(mids, 0, sizeof(mids));
		memset(bufs, 0, sizeof(bufs));
		num_mids = 0;

		if (server->ops->is_transform_hdr &&
		    server->ops->receive_transform &&
		    server->ops->is_transform_hdr(buf)) {
			/* encrypted frame: may decrypt into several mids */
			length = server->ops->receive_transform(server,
								mids,
								bufs,
								&num_mids);
		} else {
			mids[0] = server->ops->find_mid(server, buf);
			bufs[0] = buf;
			num_mids = 1;

			if (!mids[0] || !mids[0]->receive)
				length = standard_receive3(server, mids[0]);
			else
				length = mids[0]->receive(server, mids[0]);
		}

		if (length < 0) {
			/* receive failed: drop the refs find_mid/decrypt took */
			for (i = 0; i < num_mids; i++)
				if (mids[i])
					release_mid(mids[i]);
			continue;
		}

		if (server->ops->is_status_io_timeout &&
		    server->ops->is_status_io_timeout(buf)) {
			num_io_timeout++;
			if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
				cifs_server_dbg(VFS,
						"Number of request timeouts exceeded %d. Reconnecting",
						MAX_STATUS_IO_TIMEOUT);

				/* defer the reconnect until all mids below are handled */
				pending_reconnect = true;
				num_io_timeout = 0;
			}
		}

		/* note when this response was handled */
		server->lstrp = jiffies;

		for (i = 0; i < num_mids; i++) {
			if (mids[i] != NULL) {
				mids[i]->resp_buf_size = server->pdu_size;

				if (bufs[i] != NULL) {
					if (server->ops->is_network_name_deleted &&
					    server->ops->is_network_name_deleted(bufs[i],
										 server)) {
						cifs_server_dbg(FYI,
								"Share deleted. Reconnect needed");
					}
				}

				if (!mids[i]->multiRsp || mids[i]->multiEnd)
					mids[i]->callback(mids[i]);

				release_mid(mids[i]);
			} else if (server->ops->is_oplock_break &&
				   server->ops->is_oplock_break(bufs[i],
								server)) {
				smb2_add_credits_from_hdr(bufs[i], server);
				cifs_dbg(FYI, "Received oplock break\n");
			} else {
				/* no matching mid and not an oplock break */
				cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
						atomic_read(&mid_count));
				cifs_dump_mem("Received Data is: ", bufs[i],
					      HEADER_SIZE(server));
				smb2_add_credits_from_hdr(bufs[i], server);
#ifdef CONFIG_CIFS_DEBUG2
				if (server->ops->dump_detail)
					server->ops->dump_detail(bufs[i],
								 server);
				cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
			}
		}

		/* more compounded responses left in this RFC1001 frame? */
		if (pdu_length > server->pdu_size) {
			if (!allocate_buffers(server))
				continue;
			pdu_length -= server->pdu_size;
			server->total_read = 0;
			server->large_buf = false;
			buf = server->smallbuf;
			goto next_pdu;
		}

		/* do this reconnect at the very end after processing all MIDs */
		if (pending_reconnect)
			cifs_reconnect(server, true);

	} /* end while !EXITING */

	/* buffer usually freed in free_mid - need to free it here on exit */
	cifs_buf_release(server->bigbuf);
	if (server->smallbuf) /* no sense logging a debug message if NULL */
		cifs_small_buf_release(server->smallbuf);

	task_to_wake = xchg(&server->tsk, NULL);
	clean_demultiplex_info(server);

	/* if server->tsk was NULL then wait for a signal before exiting */
	if (!task_to_wake) {
		set_current_state(TASK_INTERRUPTIBLE);
		while (!signal_pending(current)) {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		set_current_state(TASK_RUNNING);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	module_put_and_kthread_exit(0);
}
1357 
/*
 * Three-way comparison of two socket addresses.
 *
 * Same-family addresses compare by memcmp() over the full
 * sockaddr_in/sockaddr_in6 structure (0 when identical).  Differing
 * families order as AF_UNSPEC > AF_INET > AF_INET6; any unrecognized
 * family compares as -1.
 */
int
cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
	struct sockaddr_in *lhs4 = (struct sockaddr_in *)srcaddr;
	struct sockaddr_in *rhs4 = (struct sockaddr_in *)rhs;
	struct sockaddr_in6 *lhs6 = (struct sockaddr_in6 *)srcaddr;
	struct sockaddr_in6 *rhs6 = (struct sockaddr_in6 *)rhs;

	if (srcaddr->sa_family == AF_UNSPEC) {
		if (rhs->sa_family == AF_UNSPEC)
			return 0;
		if (rhs->sa_family == AF_INET || rhs->sa_family == AF_INET6)
			return 1;
		return -1;
	}

	if (srcaddr->sa_family == AF_INET) {
		if (rhs->sa_family == AF_INET)
			return memcmp(lhs4, rhs4, sizeof(struct sockaddr_in));
		if (rhs->sa_family == AF_INET6)
			return 1;
		return -1;
	}

	if (srcaddr->sa_family == AF_INET6) {
		if (rhs->sa_family == AF_INET6)
			return memcmp(lhs6, rhs6, sizeof(struct sockaddr_in6));
		return -1;
	}

	return -1; /* don't expect to be here */
}
1407 
1408 /*
1409  * Returns true if srcaddr isn't specified and rhs isn't specified, or
1410  * if srcaddr is specified and matches the IP address of the rhs argument
1411  */
1412 bool
1413 cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
1414 {
1415 	switch (srcaddr->sa_family) {
1416 	case AF_UNSPEC:
1417 		return (rhs->sa_family == AF_UNSPEC);
1418 	case AF_INET: {
1419 		struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1420 		struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1421 		return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
1422 	}
1423 	case AF_INET6: {
1424 		struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1425 		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1426 		return (ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr)
1427 			&& saddr6->sin6_scope_id == vaddr6->sin6_scope_id);
1428 	}
1429 	default:
1430 		WARN_ON(1);
1431 		return false; /* don't expect to be here */
1432 	}
1433 }
1434 
1435 /*
1436  * If no port is specified in addr structure, we try to match with 445 port
1437  * and if it fails - with 139 ports. It should be called only if address
1438  * families of server and addr are equal.
1439  */
1440 static bool
1441 match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
1442 {
1443 	__be16 port, *sport;
1444 
1445 	/* SMBDirect manages its own ports, don't match it here */
1446 	if (server->rdma)
1447 		return true;
1448 
1449 	switch (addr->sa_family) {
1450 	case AF_INET:
1451 		sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
1452 		port = ((struct sockaddr_in *) addr)->sin_port;
1453 		break;
1454 	case AF_INET6:
1455 		sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
1456 		port = ((struct sockaddr_in6 *) addr)->sin6_port;
1457 		break;
1458 	default:
1459 		WARN_ON(1);
1460 		return false;
1461 	}
1462 
1463 	if (!port) {
1464 		port = htons(CIFS_PORT);
1465 		if (port == *sport)
1466 			return true;
1467 
1468 		port = htons(RFC1001_PORT);
1469 	}
1470 
1471 	return port == *sport;
1472 }
1473 
1474 static bool match_server_address(struct TCP_Server_Info *server, struct sockaddr *addr)
1475 {
1476 	if (!cifs_match_ipaddr(addr, (struct sockaddr *)&server->dstaddr))
1477 		return false;
1478 
1479 	return true;
1480 }
1481 
1482 static bool
1483 match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1484 {
1485 	/*
1486 	 * The select_sectype function should either return the ctx->sectype
1487 	 * that was specified, or "Unspecified" if that sectype was not
1488 	 * compatible with the given NEGOTIATE request.
1489 	 */
1490 	if (server->ops->select_sectype(server, ctx->sectype)
1491 	     == Unspecified)
1492 		return false;
1493 
1494 	/*
1495 	 * Now check if signing mode is acceptable. No need to check
1496 	 * global_secflags at this point since if MUST_SIGN is set then
1497 	 * the server->sign had better be too.
1498 	 */
1499 	if (ctx->sign && !server->sign)
1500 		return false;
1501 
1502 	return true;
1503 }
1504 
/*
 * Decide whether the existing connection @server may be shared for the
 * mount described by @ctx.  Returns 1 when every relevant property
 * matches (dialect, network namespace, source address, DFS path or
 * destination address/hostname/port, security, echo interval, rdma,
 * signature handling, min_offload), 0 otherwise.
 *
 * @match_super: true when matching against an existing superblock; in
 *	that case DFS paths and addresses are deliberately not compared
 *	(see the block comment below).
 */
/* this function must be called with srv_lock held */
static int match_server(struct TCP_Server_Info *server,
			struct smb3_fs_context *ctx,
			bool match_super)
{
	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;

	lockdep_assert_held(&server->srv_lock);

	if (ctx->nosharesock)
		return 0;

	/* this server does not share socket */
	if (server->nosharesock)
		return 0;

	/* If multidialect negotiation see if existing sessions match one */
	if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
		if (server->vals->protocol_id < SMB30_PROT_ID)
			return 0;
	} else if (strcmp(ctx->vals->version_string,
		   SMBDEFAULT_VERSION_STRING) == 0) {
		if (server->vals->protocol_id < SMB21_PROT_ID)
			return 0;
	} else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
		return 0;

	if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
		return 0;

	if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr,
			       (struct sockaddr *)&server->srcaddr))
		return 0;
	/*
	 * When matching cifs.ko superblocks (@match_super == true), we can't
	 * really match either @server->leaf_fullpath or @server->dstaddr
	 * directly since this @server might belong to a completely different
	 * server -- in case of domain-based DFS referrals or DFS links -- as
	 * provided earlier by mount(2) through 'source' and 'ip' options.
	 *
	 * Otherwise, match the DFS referral in @server->leaf_fullpath or the
	 * destination address in @server->dstaddr.
	 *
	 * When using 'nodfs' mount option, we avoid sharing it with DFS
	 * connections as they might failover.
	 */
	if (!match_super) {
		if (!ctx->nodfs) {
			if (server->leaf_fullpath) {
				if (!ctx->leaf_fullpath ||
				    strcasecmp(server->leaf_fullpath,
					       ctx->leaf_fullpath))
					return 0;
			} else if (ctx->leaf_fullpath) {
				return 0;
			}
		} else if (server->leaf_fullpath) {
			return 0;
		}
	}

	/*
	 * Match for a regular connection (address/hostname/port) which has no
	 * DFS referrals set.
	 */
	if (!server->leaf_fullpath &&
	    (strcasecmp(server->hostname, ctx->server_hostname) ||
	     !match_server_address(server, addr) ||
	     !match_port(server, addr)))
		return 0;

	if (!match_security(server, ctx))
		return 0;

	if (server->echo_interval != ctx->echo_interval * HZ)
		return 0;

	if (server->rdma != ctx->rdma)
		return 0;

	if (server->ignore_signature != ctx->ignore_signature)
		return 0;

	if (server->min_offload != ctx->min_offload)
		return 0;

	return 1;
}
1593 
/*
 * Look up an existing TCP connection matching @ctx on
 * cifs_tcp_ses_list.  On a hit, bump the server's refcount and return
 * it (caller must later drop the ref via cifs_put_tcp_session());
 * returns NULL when no shareable connection exists.
 */
struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		/* match_server() requires srv_lock to be held */
		spin_lock(&server->srv_lock);
		/*
		 * Skip ses channels since they're only handled in lower layers
		 * (e.g. cifs_send_recv).
		 */
		if (SERVER_IS_CHAN(server) ||
		    !match_server(server, ctx, false)) {
			spin_unlock(&server->srv_lock);
			continue;
		}
		spin_unlock(&server->srv_lock);

		++server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		cifs_dbg(FYI, "Existing tcp session with server found\n");
		return server;
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}
1621 
/*
 * Drop a reference on @server.  When the count reaches zero, start
 * connection teardown: unlink the server, cancel its echo and reconnect
 * work, release a secondary channel's reference on its primary, free
 * crypto material and credentials, and SIGKILL the demultiplex thread
 * (which performs the remaining cleanup on exit, see
 * clean_demultiplex_info()).
 *
 * @from_reconnect: nonzero when called from the reconnect work itself;
 *	the work is then cancelled without _sync to avoid deadlocking on
 *	our own work item.
 */
void
cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
{
	struct task_struct *task;

	spin_lock(&cifs_tcp_ses_lock);
	if (--server->srv_count > 0) {
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	/* srv_count can never go negative */
	WARN_ON(server->srv_count < 0);

	put_net(cifs_net_ns(server));

	list_del_init(&server->tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	cancel_delayed_work_sync(&server->echo);

	if (from_reconnect)
		/*
		 * Avoid deadlock here: reconnect work calls
		 * cifs_put_tcp_session() at its end. Need to be sure
		 * that reconnect work does nothing with server pointer after
		 * that step.
		 */
		cancel_delayed_work(&server->reconnect);
	else
		cancel_delayed_work_sync(&server->reconnect);

	/* For secondary channels, we pick up ref-count on the primary server */
	if (SERVER_IS_CHAN(server))
		cifs_put_tcp_session(server->primary_server, from_reconnect);

	spin_lock(&server->srv_lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&server->srv_lock);

	cifs_crypto_secmech_release(server);

	/* session key is sensitive material; wipe before freeing */
	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	kfree(server->hostname);
	server->hostname = NULL;

	/* kick the demultiplex thread so it exits and finishes teardown */
	task = xchg(&server->tsk, NULL);
	if (task)
		send_sig(SIGKILL, task, 1);
}
1674 
/*
 * Find or create a TCP connection for the mount described by @ctx.  An
 * existing shareable connection is returned with its refcount bumped;
 * otherwise a new TCP_Server_Info is allocated and initialized, the
 * transport is connected (TCP or SMBDirect), the "cifsd" demultiplex
 * thread is started, and the server is added to cifs_tcp_ses_list.
 *
 * @primary_server: non-NULL when creating a secondary channel; the new
 *	channel then holds a reference on the primary.
 *
 * Returns the server on success or an ERR_PTR() on failure.
 */
struct TCP_Server_Info *
cifs_get_tcp_session(struct smb3_fs_context *ctx,
		     struct TCP_Server_Info *primary_server)
{
	struct TCP_Server_Info *tcp_ses = NULL;
	int rc;

	cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);

	/* see if we already have a matching tcp_ses */
	tcp_ses = cifs_find_tcp_session(ctx);
	if (tcp_ses)
		return tcp_ses;

	tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
	if (!tcp_ses) {
		rc = -ENOMEM;
		goto out_err;
	}

	tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
	if (!tcp_ses->hostname) {
		rc = -ENOMEM;
		goto out_err;
	}

	if (ctx->leaf_fullpath) {
		tcp_ses->leaf_fullpath = kstrdup(ctx->leaf_fullpath, GFP_KERNEL);
		if (!tcp_ses->leaf_fullpath) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	if (ctx->nosharesock)
		tcp_ses->nosharesock = true;

	tcp_ses->ops = ctx->ops;
	tcp_ses->vals = ctx->vals;
	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));

	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
	tcp_ses->noblockcnt = ctx->rootfs;
	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
	tcp_ses->noautotune = ctx->noautotune;
	tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
	tcp_ses->rdma = ctx->rdma;
	tcp_ses->in_flight = 0;
	tcp_ses->max_in_flight = 0;
	tcp_ses->credits = 1;
	if (primary_server) {
		/* secondary channel: pin the primary while we exist */
		spin_lock(&cifs_tcp_ses_lock);
		++primary_server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		tcp_ses->primary_server = primary_server;
	}
	init_waitqueue_head(&tcp_ses->response_q);
	init_waitqueue_head(&tcp_ses->request_q);
	INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
	mutex_init(&tcp_ses->_srv_mutex);
	memcpy(tcp_ses->workstation_RFC1001_name,
		ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	memcpy(tcp_ses->server_RFC1001_name,
		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	tcp_ses->session_estab = false;
	tcp_ses->sequence_number = 0;
	tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
	tcp_ses->reconnect_instance = 1;
	tcp_ses->lstrp = jiffies;
	tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
	spin_lock_init(&tcp_ses->req_lock);
	spin_lock_init(&tcp_ses->srv_lock);
	spin_lock_init(&tcp_ses->mid_lock);
	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
	mutex_init(&tcp_ses->reconnect_mutex);
#ifdef CONFIG_CIFS_DFS_UPCALL
	mutex_init(&tcp_ses->refpath_lock);
#endif
	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
	       sizeof(tcp_ses->srcaddr));
	memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
		sizeof(tcp_ses->dstaddr));
	if (ctx->use_client_guid)
		memcpy(tcp_ses->client_guid, ctx->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
	else
		generate_random_uuid(tcp_ses->client_guid);
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread not created yet
	 * no need to spinlock this init of tcpStatus or srv_count
	 */
	tcp_ses->tcpStatus = CifsNew;
	++tcp_ses->srv_count;

	if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
		ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
		tcp_ses->echo_interval = ctx->echo_interval * HZ;
	else
		tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
	if (tcp_ses->rdma) {
#ifndef CONFIG_CIFS_SMB_DIRECT
		cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
		rc = -ENOENT;
		goto out_err_crypto_release;
#endif
		tcp_ses->smbd_conn = smbd_get_connection(
			tcp_ses, (struct sockaddr *)&ctx->dstaddr);
		if (tcp_ses->smbd_conn) {
			cifs_dbg(VFS, "RDMA transport established\n");
			rc = 0;
			goto smbd_connected;
		} else {
			rc = -ENOENT;
			goto out_err_crypto_release;
		}
	}
	rc = ip_connect(tcp_ses);
	if (rc < 0) {
		cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
		goto out_err_crypto_release;
	}
smbd_connected:
	/*
	 * since we're in a cifs function already, we know that
	 * this will succeed. No need for try_module_get().
	 */
	__module_get(THIS_MODULE);
	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
				  tcp_ses, "cifsd");
	if (IS_ERR(tcp_ses->tsk)) {
		rc = PTR_ERR(tcp_ses->tsk);
		cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
		module_put(THIS_MODULE);
		goto out_err_crypto_release;
	}
	tcp_ses->min_offload = ctx->min_offload;
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread not created yet
	 * no need to spinlock this update of tcpStatus
	 */
	spin_lock(&tcp_ses->srv_lock);
	tcp_ses->tcpStatus = CifsNeedNegotiate;
	spin_unlock(&tcp_ses->srv_lock);

	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
	else
		tcp_ses->max_credits = ctx->max_credits;

	tcp_ses->nr_targets = 1;
	tcp_ses->ignore_signature = ctx->ignore_signature;
	/* thread spawned, put it on the list */
	spin_lock(&cifs_tcp_ses_lock);
	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	/* queue echo request delayed work */
	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);

	return tcp_ses;

out_err_crypto_release:
	cifs_crypto_secmech_release(tcp_ses);

	put_net(cifs_net_ns(tcp_ses));

out_err:
	if (tcp_ses) {
		if (SERVER_IS_CHAN(tcp_ses))
			cifs_put_tcp_session(tcp_ses->primary_server, false);
		kfree(tcp_ses->hostname);
		kfree(tcp_ses->leaf_fullpath);
		if (tcp_ses->ssocket)
			sock_release(tcp_ses->ssocket);
		kfree(tcp_ses);
	}
	return ERR_PTR(rc);
}
1858 
/*
 * Decide whether the existing session @ses can be reused for a mount
 * described by @ctx: security type, DFS root session, channel count,
 * credentials (Kerberos uid, username/password, or null/anonymous
 * auth), and local charset must all be compatible.  Returns 1 on
 * match, 0 otherwise.
 */
/* this function must be called with ses_lock and chan_lock held */
static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	if (ctx->sectype != Unspecified &&
	    ctx->sectype != ses->sectype)
		return 0;

	if (ctx->dfs_root_ses != ses->dfs_root_ses)
		return 0;

	/*
	 * If an existing session is limited to less channels than
	 * requested, it should not be reused
	 */
	if (ses->chan_max < ctx->max_channels)
		return 0;

	switch (ses->sectype) {
	case Kerberos:
		/* Kerberos sessions are keyed by the requesting user's uid */
		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
			return 0;
		break;
	default:
		/* NULL username means anonymous session */
		if (ses->user_name == NULL) {
			if (!ctx->nullauth)
				return 0;
			break;
		}

		/* anything else takes username/password */
		if (strncmp(ses->user_name,
			    ctx->username ? ctx->username : "",
			    CIFS_MAX_USERNAME_LEN))
			return 0;
		if ((ctx->username && strlen(ctx->username) != 0) &&
		    ses->password != NULL &&
		    strncmp(ses->password,
			    ctx->password ? ctx->password : "",
			    CIFS_MAX_PASSWORD_LEN))
			return 0;
	}

	/* sessions converted with different charsets cannot be shared */
	if (strcmp(ctx->local_nls->charset, ses->local_nls->charset))
		return 0;

	return 1;
}
1907 
1908 /**
1909  * cifs_setup_ipc - helper to setup the IPC tcon for the session
1910  * @ses: smb session to issue the request on
1911  * @ctx: the superblock configuration context to use for building the
1912  *       new tree connection for the IPC (interprocess communication RPC)
1913  *
1914  * A new IPC connection is made and stored in the session
1915  * tcon_ipc. The IPC tcon has the same lifetime as the session.
1916  */
1917 static int
1918 cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
1919 {
1920 	int rc = 0, xid;
1921 	struct cifs_tcon *tcon;
1922 	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
1923 	bool seal = false;
1924 	struct TCP_Server_Info *server = ses->server;
1925 
1926 	/*
1927 	 * If the mount request that resulted in the creation of the
1928 	 * session requires encryption, force IPC to be encrypted too.
1929 	 */
1930 	if (ctx->seal) {
1931 		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
1932 			seal = true;
1933 		else {
1934 			cifs_server_dbg(VFS,
1935 				 "IPC: server doesn't support encryption\n");
1936 			return -EOPNOTSUPP;
1937 		}
1938 	}
1939 
1940 	/* no need to setup directory caching on IPC share, so pass in false */
1941 	tcon = tcon_info_alloc(false);
1942 	if (tcon == NULL)
1943 		return -ENOMEM;
1944 
1945 	spin_lock(&server->srv_lock);
1946 	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
1947 	spin_unlock(&server->srv_lock);
1948 
1949 	xid = get_xid();
1950 	tcon->ses = ses;
1951 	tcon->ipc = true;
1952 	tcon->seal = seal;
1953 	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
1954 	free_xid(xid);
1955 
1956 	if (rc) {
1957 		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
1958 		tconInfoFree(tcon);
1959 		goto out;
1960 	}
1961 
1962 	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
1963 
1964 	spin_lock(&tcon->tc_lock);
1965 	tcon->status = TID_GOOD;
1966 	spin_unlock(&tcon->tc_lock);
1967 	ses->tcon_ipc = tcon;
1968 out:
1969 	return rc;
1970 }
1971 
/*
 * Look up an existing, non-exiting SMB session on @server that matches
 * the mount context @ctx (see match_session()).  On a hit the session's
 * refcount is bumped via cifs_smb_ses_inc_refcount(); returns NULL when
 * no reusable session exists.
 */
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
	struct cifs_ses *ses, *ret = NULL;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_EXITING) {
			spin_unlock(&ses->ses_lock);
			continue;
		}
		/* match_session() must be called with chan_lock held too */
		spin_lock(&ses->chan_lock);
		if (match_session(ses, ctx)) {
			spin_unlock(&ses->chan_lock);
			spin_unlock(&ses->ses_lock);
			ret = ses;
			break;
		}
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
	}
	if (ret)
		cifs_smb_ses_inc_refcount(ret);
	spin_unlock(&cifs_tcp_ses_lock);
	return ret;
}
1999 
/*
 * Drop a reference on @ses and, when the last reference goes away, tear
 * the session down: free its IPC tcon, send SMB LOGOFF (when the session
 * was in a good state), unlink it from the server's session list, release
 * all secondary channels and finally the primary server reference.
 *
 * Lock ordering: cifs_tcp_ses_lock is taken before ses->ses_lock, which
 * is taken before ses->chan_lock.
 */
void __cifs_put_smb_ses(struct cifs_ses *ses)
{
	struct TCP_Server_Info *server = ses->server;
	struct cifs_tcon *tcon;
	unsigned int xid;
	size_t i;
	bool do_logoff;
	int rc;

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&ses->ses_lock);
	cifs_dbg(FYI, "%s: id=0x%llx ses_count=%d ses_status=%u ipc=%s\n",
		 __func__, ses->Suid, ses->ses_count, ses->ses_status,
		 ses->tcon_ipc ? ses->tcon_ipc->tree_name : "none");
	/* bail out if other users remain or teardown is already underway */
	if (ses->ses_status == SES_EXITING || --ses->ses_count > 0) {
		spin_unlock(&ses->ses_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/* ses_count can never go negative */
	WARN_ON(ses->ses_count < 0);

	spin_lock(&ses->chan_lock);
	cifs_chan_clear_need_reconnect(ses, server);
	spin_unlock(&ses->chan_lock);

	/* only send LOGOFF for a fully established session */
	do_logoff = ses->ses_status == SES_GOOD && server->ops->logoff;
	ses->ses_status = SES_EXITING;
	/* detach the IPC tcon under the lock; freed below without it */
	tcon = ses->tcon_ipc;
	ses->tcon_ipc = NULL;
	spin_unlock(&ses->ses_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/*
	 * On session close, the IPC is closed and the server must release all
	 * tcons of the session.  No need to send a tree disconnect here.
	 *
	 * Besides, it will make the server to not close durable and resilient
	 * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an
	 * SMB2 LOGOFF Request.
	 */
	tconInfoFree(tcon);
	if (do_logoff) {
		xid = get_xid();
		rc = server->ops->logoff(xid, ses);
		if (rc)
			cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
				__func__, rc);
		_free_xid(xid);
	}

	spin_lock(&cifs_tcp_ses_lock);
	list_del_init(&ses->smb_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	/* close any extra channels */
	for (i = 1; i < ses->chan_count; i++) {
		if (ses->chans[i].iface) {
			kref_put(&ses->chans[i].iface->refcount, release_iface);
			ses->chans[i].iface = NULL;
		}
		cifs_put_tcp_session(ses->chans[i].server, 0);
		ses->chans[i].server = NULL;
	}

	/* we now account for primary channel in iface->refcount */
	if (ses->chans[0].iface) {
		kref_put(&ses->chans[0].iface->refcount, release_iface);
		ses->chans[0].server = NULL;
	}

	sesInfoFree(ses);
	/* release the primary channel's TCP server reference last */
	cifs_put_tcp_session(server, 0);
}
2074 
2075 #ifdef CONFIG_KEYS
2076 
2077 /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
2078 #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
2079 
2080 /* Populate username and pw fields from keyring if possible */
2081 static int
2082 cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
2083 {
2084 	int rc = 0;
2085 	int is_domain = 0;
2086 	const char *delim, *payload;
2087 	char *desc;
2088 	ssize_t len;
2089 	struct key *key;
2090 	struct TCP_Server_Info *server = ses->server;
2091 	struct sockaddr_in *sa;
2092 	struct sockaddr_in6 *sa6;
2093 	const struct user_key_payload *upayload;
2094 
2095 	desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
2096 	if (!desc)
2097 		return -ENOMEM;
2098 
2099 	/* try to find an address key first */
2100 	switch (server->dstaddr.ss_family) {
2101 	case AF_INET:
2102 		sa = (struct sockaddr_in *)&server->dstaddr;
2103 		sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
2104 		break;
2105 	case AF_INET6:
2106 		sa6 = (struct sockaddr_in6 *)&server->dstaddr;
2107 		sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
2108 		break;
2109 	default:
2110 		cifs_dbg(FYI, "Bad ss_family (%hu)\n",
2111 			 server->dstaddr.ss_family);
2112 		rc = -EINVAL;
2113 		goto out_err;
2114 	}
2115 
2116 	cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
2117 	key = request_key(&key_type_logon, desc, "");
2118 	if (IS_ERR(key)) {
2119 		if (!ses->domainName) {
2120 			cifs_dbg(FYI, "domainName is NULL\n");
2121 			rc = PTR_ERR(key);
2122 			goto out_err;
2123 		}
2124 
2125 		/* didn't work, try to find a domain key */
2126 		sprintf(desc, "cifs:d:%s", ses->domainName);
2127 		cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
2128 		key = request_key(&key_type_logon, desc, "");
2129 		if (IS_ERR(key)) {
2130 			rc = PTR_ERR(key);
2131 			goto out_err;
2132 		}
2133 		is_domain = 1;
2134 	}
2135 
2136 	down_read(&key->sem);
2137 	upayload = user_key_payload_locked(key);
2138 	if (IS_ERR_OR_NULL(upayload)) {
2139 		rc = upayload ? PTR_ERR(upayload) : -EINVAL;
2140 		goto out_key_put;
2141 	}
2142 
2143 	/* find first : in payload */
2144 	payload = upayload->data;
2145 	delim = strnchr(payload, upayload->datalen, ':');
2146 	cifs_dbg(FYI, "payload=%s\n", payload);
2147 	if (!delim) {
2148 		cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
2149 			 upayload->datalen);
2150 		rc = -EINVAL;
2151 		goto out_key_put;
2152 	}
2153 
2154 	len = delim - payload;
2155 	if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
2156 		cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
2157 			 len);
2158 		rc = -EINVAL;
2159 		goto out_key_put;
2160 	}
2161 
2162 	ctx->username = kstrndup(payload, len, GFP_KERNEL);
2163 	if (!ctx->username) {
2164 		cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
2165 			 len);
2166 		rc = -ENOMEM;
2167 		goto out_key_put;
2168 	}
2169 	cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username);
2170 
2171 	len = key->datalen - (len + 1);
2172 	if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
2173 		cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
2174 		rc = -EINVAL;
2175 		kfree(ctx->username);
2176 		ctx->username = NULL;
2177 		goto out_key_put;
2178 	}
2179 
2180 	++delim;
2181 	/* BB consider adding support for password2 (Key Rotation) for multiuser in future */
2182 	ctx->password = kstrndup(delim, len, GFP_KERNEL);
2183 	if (!ctx->password) {
2184 		cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
2185 			 len);
2186 		rc = -ENOMEM;
2187 		kfree(ctx->username);
2188 		ctx->username = NULL;
2189 		goto out_key_put;
2190 	}
2191 
2192 	/*
2193 	 * If we have a domain key then we must set the domainName in the
2194 	 * for the request.
2195 	 */
2196 	if (is_domain && ses->domainName) {
2197 		ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL);
2198 		if (!ctx->domainname) {
2199 			cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
2200 				 len);
2201 			rc = -ENOMEM;
2202 			kfree(ctx->username);
2203 			ctx->username = NULL;
2204 			kfree_sensitive(ctx->password);
2205 			/* no need to free ctx->password2 since not allocated in this path */
2206 			ctx->password = NULL;
2207 			goto out_key_put;
2208 		}
2209 	}
2210 
2211 	strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name));
2212 
2213 out_key_put:
2214 	up_read(&key->sem);
2215 	key_put(key);
2216 out_err:
2217 	kfree(desc);
2218 	cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
2219 	return rc;
2220 }
#else /* ! CONFIG_KEYS */
/*
 * Keyring support is compiled out, so credentials can never be looked up
 * from the kernel keyring; report "not implemented" to the caller.
 */
static inline int
cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
		   struct cifs_ses *ses __attribute__((unused)))
{
	return -ENOSYS;
}
#endif /* CONFIG_KEYS */
2229 
2230 /**
2231  * cifs_get_smb_ses - get a session matching @ctx data from @server
2232  * @server: server to setup the session to
2233  * @ctx: superblock configuration context to use to setup the session
2234  *
2235  * This function assumes it is being called from cifs_mount() where we
2236  * already got a server reference (server refcount +1). See
2237  * cifs_get_tcon() for refcount explanations.
2238  */
struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
	int rc = 0;
	unsigned int xid;
	struct cifs_ses *ses;
	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;

	xid = get_xid();

	/* reuse an existing matching session when possible */
	ses = cifs_find_smb_ses(server, ctx);
	if (ses) {
		cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
			 ses->ses_status);

		spin_lock(&ses->chan_lock);
		if (cifs_chan_needs_reconnect(ses, server)) {
			spin_unlock(&ses->chan_lock);
			cifs_dbg(FYI, "Session needs reconnect\n");

			mutex_lock(&ses->session_mutex);
			rc = cifs_negotiate_protocol(xid, ses, server);
			if (rc) {
				mutex_unlock(&ses->session_mutex);
				/* problem -- put our ses reference */
				cifs_put_smb_ses(ses);
				free_xid(xid);
				return ERR_PTR(rc);
			}

			rc = cifs_setup_session(xid, ses, server,
						ctx->local_nls);
			if (rc) {
				mutex_unlock(&ses->session_mutex);
				/* problem -- put our reference */
				cifs_put_smb_ses(ses);
				free_xid(xid);
				return ERR_PTR(rc);
			}
			mutex_unlock(&ses->session_mutex);

			spin_lock(&ses->chan_lock);
		}
		spin_unlock(&ses->chan_lock);

		/* existing SMB ses has a server reference already */
		cifs_put_tcp_session(server, 0);
		free_xid(xid);
		return ses;
	}

	/* default error for the allocation failures below */
	rc = -ENOMEM;

	cifs_dbg(FYI, "Existing smb sess not found\n");
	ses = sesInfoAlloc();
	if (ses == NULL)
		goto get_ses_fail;

	/* new SMB session uses our server ref */
	ses->server = server;
	if (server->dstaddr.ss_family == AF_INET6)
		sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
	else
		sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);

	/* duplicate credentials from the mount context into the session */
	if (ctx->username) {
		ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
		if (!ses->user_name)
			goto get_ses_fail;
	}

	/* ctx->password freed at unmount */
	if (ctx->password) {
		ses->password = kstrdup(ctx->password, GFP_KERNEL);
		if (!ses->password)
			goto get_ses_fail;
	}
	/* ctx->password2 freed at unmount */
	if (ctx->password2) {
		ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
		if (!ses->password2)
			goto get_ses_fail;
	}
	if (ctx->domainname) {
		ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
		if (!ses->domainName)
			goto get_ses_fail;
	}

	strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));

	if (ctx->domainauto)
		ses->domainAuto = ctx->domainauto;
	ses->cred_uid = ctx->cred_uid;
	ses->linux_uid = ctx->linux_uid;

	ses->sectype = ctx->sectype;
	ses->sign = ctx->sign;
	ses->local_nls = load_nls(ctx->local_nls->charset);

	/* add server as first channel */
	spin_lock(&ses->chan_lock);
	ses->chans[0].server = server;
	ses->chan_count = 1;
	ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
	ses->chans_need_reconnect = 1;
	spin_unlock(&ses->chan_lock);

	mutex_lock(&ses->session_mutex);
	rc = cifs_negotiate_protocol(xid, ses, server);
	if (!rc)
		rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
	mutex_unlock(&ses->session_mutex);

	/* each channel uses a different signing key */
	spin_lock(&ses->chan_lock);
	memcpy(ses->chans[0].signkey, ses->smb3signingkey,
	       sizeof(ses->smb3signingkey));
	spin_unlock(&ses->chan_lock);

	if (rc)
		goto get_ses_fail;

	/*
	 * success, put it on the list and add it as first channel
	 * note: the session becomes active soon after this. So you'll
	 * need to lock before changing something in the session.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	if (ctx->dfs_root_ses)
		cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
	ses->dfs_root_ses = ctx->dfs_root_ses;
	list_add(&ses->smb_ses_list, &server->smb_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_setup_ipc(ses, ctx);

	free_xid(xid);

	return ses;

get_ses_fail:
	/* sesInfoFree() handles a partially-initialized session */
	sesInfoFree(ses);
	free_xid(xid);
	return ERR_PTR(rc);
}
2386 
2387 /* this function must be called with tc_lock held */
2388 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
2389 {
2390 	struct TCP_Server_Info *server = tcon->ses->server;
2391 
2392 	if (tcon->status == TID_EXITING)
2393 		return 0;
2394 
2395 	if (tcon->origin_fullpath) {
2396 		if (!ctx->source ||
2397 		    !dfs_src_pathname_equal(ctx->source,
2398 					    tcon->origin_fullpath))
2399 			return 0;
2400 	} else if (!server->leaf_fullpath &&
2401 		   strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) {
2402 		return 0;
2403 	}
2404 	if (tcon->seal != ctx->seal)
2405 		return 0;
2406 	if (tcon->snapshot_time != ctx->snapshot_time)
2407 		return 0;
2408 	if (tcon->handle_timeout != ctx->handle_timeout)
2409 		return 0;
2410 	if (tcon->no_lease != ctx->no_lease)
2411 		return 0;
2412 	if (tcon->nodelete != ctx->nodelete)
2413 		return 0;
2414 	return 1;
2415 }
2416 
2417 static struct cifs_tcon *
2418 cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
2419 {
2420 	struct cifs_tcon *tcon;
2421 
2422 	spin_lock(&cifs_tcp_ses_lock);
2423 	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
2424 		spin_lock(&tcon->tc_lock);
2425 		if (!match_tcon(tcon, ctx)) {
2426 			spin_unlock(&tcon->tc_lock);
2427 			continue;
2428 		}
2429 		++tcon->tc_count;
2430 		spin_unlock(&tcon->tc_lock);
2431 		spin_unlock(&cifs_tcp_ses_lock);
2432 		return tcon;
2433 	}
2434 	spin_unlock(&cifs_tcp_ses_lock);
2435 	return NULL;
2436 }
2437 
/*
 * Drop a reference on @tcon.  When the last reference goes away, the tcon
 * is unlinked from its session, background work is cancelled, witness
 * registration is undone, a tree disconnect is sent, and finally the
 * tcon is freed and its session reference released.
 */
void
cifs_put_tcon(struct cifs_tcon *tcon)
{
	unsigned int xid;
	struct cifs_ses *ses;

	/*
	 * IPC tcon share the lifetime of their session and are
	 * destroyed in the session put function
	 */
	if (tcon == NULL || tcon->ipc)
		return;

	ses = tcon->ses;
	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if (--tcon->tc_count > 0) {
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	/* tc_count can never go negative */
	WARN_ON(tcon->tc_count < 0);

	/* unlink while holding both locks so lookups can't find us */
	list_del_init(&tcon->tcon_list);
	tcon->status = TID_EXITING;
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel polling of interfaces */
	cancel_delayed_work_sync(&tcon->query_interfaces);
#ifdef CONFIG_CIFS_DFS_UPCALL
	cancel_delayed_work_sync(&tcon->dfs_cache_work);
#endif

	if (tcon->use_witness) {
		int rc;

		rc = cifs_swn_unregister(tcon);
		if (rc < 0) {
			cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
					__func__, rc);
		}
	}

	/* best-effort tree disconnect; errors are ignored on teardown */
	xid = get_xid();
	if (ses->server->ops->tree_disconnect)
		ses->server->ops->tree_disconnect(xid, tcon);
	_free_xid(xid);

	cifs_fscache_release_super_cookie(tcon);
	tconInfoFree(tcon);
	/* release the session reference this tcon held */
	cifs_put_smb_ses(ses);
}
2494 
2495 /**
2496  * cifs_get_tcon - get a tcon matching @ctx data from @ses
2497  * @ses: smb session to issue the request on
 * @ctx: the superblock configuration context to use for building the tcon
2499  *
2500  * - tcon refcount is the number of mount points using the tcon.
2501  * - ses refcount is the number of tcon using the session.
2502  *
2503  * 1. This function assumes it is being called from cifs_mount() where
2504  *    we already got a session reference (ses refcount +1).
2505  *
2506  * 2. Since we're in the context of adding a mount point, the end
2507  *    result should be either:
2508  *
2509  * a) a new tcon already allocated with refcount=1 (1 mount point) and
2510  *    its session refcount incremented (1 new tcon). This +1 was
2511  *    already done in (1).
2512  *
2513  * b) an existing tcon with refcount+1 (add a mount point to it) and
2514  *    identical ses refcount (no new tcon). Because of (1) we need to
2515  *    decrement the ses refcount.
2516  */
static struct cifs_tcon *
cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	struct cifs_tcon *tcon;
	bool nohandlecache;
	int rc, xid;

	/* reuse an existing matching tcon when possible */
	tcon = cifs_find_tcon(ses, ctx);
	if (tcon) {
		/*
		 * tcon has refcount already incremented but we need to
		 * decrement extra ses reference gotten by caller (case b)
		 */
		cifs_dbg(FYI, "Found match on UNC path\n");
		cifs_put_smb_ses(ses);
		return tcon;
	}

	if (!ses->server->ops->tree_connect) {
		rc = -ENOSYS;
		goto out_fail;
	}

	/* directory-handle caching needs SMB2+ with directory leasing */
	if (ses->server->dialect >= SMB20_PROT_ID &&
	    (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING))
		nohandlecache = ctx->nohandlecache;
	else
		nohandlecache = true;
	tcon = tcon_info_alloc(!nohandlecache);
	if (tcon == NULL) {
		rc = -ENOMEM;
		goto out_fail;
	}
	tcon->nohandlecache = nohandlecache;

	/* snapshot mounts require SMB2 or later */
	if (ctx->snapshot_time) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "Use SMB2 or later for snapshot mount option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->snapshot_time = ctx->snapshot_time;
	}

	if (ctx->handle_timeout) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "Use SMB2.1 or later for handle timeout option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->handle_timeout = ctx->handle_timeout;
	}

	tcon->ses = ses;
	if (ctx->password) {
		tcon->password = kstrdup(ctx->password, GFP_KERNEL);
		if (!tcon->password) {
			rc = -ENOMEM;
			goto out_fail;
		}
	}

	/* per-share encryption requires SMB3+ and server support */
	if (ctx->seal) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				 "SMB3 or later required for encryption\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else if (tcon->ses->server->capabilities &
					SMB2_GLOBAL_CAP_ENCRYPTION)
			tcon->seal = true;
		else {
			cifs_dbg(VFS, "Encryption is not supported on share\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	}

	/* SMB3.11 POSIX extensions need explicit server support */
	if (ctx->linux_ext) {
		if (ses->server->posix_ext_supported) {
			tcon->posix_extensions = true;
			pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
		} else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
		    (strcmp(ses->server->vals->version_string,
		     SMB3ANY_VERSION_STRING) == 0) ||
		    (strcmp(ses->server->vals->version_string,
		     SMBDEFAULT_VERSION_STRING) == 0)) {
			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else {
			cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
				"disabled but required for POSIX extensions\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	}

	/* issue the actual tree connect to the server */
	xid = get_xid();
	rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
					    ctx->local_nls);
	free_xid(xid);
	cifs_dbg(FYI, "Tcon rc = %d\n", rc);
	if (rc)
		goto out_fail;

	tcon->use_persistent = false;
	/* check if SMB2 or later, CIFS does not support persistent handles */
	if (ctx->persistent) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "SMB3 or later required for persistent handles\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else if (ses->server->capabilities &
			   SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
			tcon->use_persistent = true;
		else /* persistent handles requested but not supported */ {
			cifs_dbg(VFS,
				"Persistent handles not supported on share\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	} else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
	     && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
	     && (ctx->nopersistent == false)) {
		cifs_dbg(FYI, "enabling persistent handles\n");
		tcon->use_persistent = true;
	} else if (ctx->resilient) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "SMB2.1 or later required for resilient handles\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
		tcon->use_resilient = true;
	}

	/* witness (cluster failover) notifications, SMB3+ clustered shares */
	tcon->use_witness = false;
	if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
		if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
			if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
				/*
				 * Set witness in use flag in first place
				 * to retry registration in the echo task
				 */
				tcon->use_witness = true;
				/* And try to register immediately */
				rc = cifs_swn_register(tcon);
				if (rc < 0) {
					cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
					goto out_fail;
				}
			} else {
				/* TODO: try to extend for non-cluster uses (eg multichannel) */
				cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
				rc = -EOPNOTSUPP;
				goto out_fail;
			}
		} else {
			cifs_dbg(VFS, "SMB3 or later required for witness option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	}

	/* If the user really knows what they are doing they can override */
	if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
		if (ctx->cache_ro)
			cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
		else if (ctx->cache_rw)
			cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
	}

	if (ctx->no_lease) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				"SMB2 or later required for nolease option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->no_lease = ctx->no_lease;
	}

	/*
	 * We can have only one retry value for a connection to a share so for
	 * resources mounted more than once to the same server share the last
	 * value passed in for the retry flag is used.
	 */
	tcon->retry = ctx->retry;
	tcon->nocase = ctx->nocase;
	tcon->broken_sparse_sup = ctx->no_sparse;
	tcon->max_cached_dirs = ctx->max_cached_dirs;
	tcon->nodelete = ctx->nodelete;
	tcon->local_lease = ctx->local_lease;
	INIT_LIST_HEAD(&tcon->pending_opens);
	tcon->status = TID_GOOD;

	INIT_DELAYED_WORK(&tcon->query_interfaces,
			  smb2_query_server_interfaces);
	if (ses->server->dialect >= SMB30_PROT_ID &&
	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
		/* schedule query interfaces poll */
		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
	}
#ifdef CONFIG_CIFS_DFS_UPCALL
	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
#endif
	/* publish the fully set up tcon on the session's list */
	spin_lock(&cifs_tcp_ses_lock);
	list_add(&tcon->tcon_list, &ses->tcon_list);
	spin_unlock(&cifs_tcp_ses_lock);

	return tcon;

out_fail:
	/* tconInfoFree() also frees tcon->password */
	tconInfoFree(tcon);
	return ERR_PTR(rc);
}
2738 
2739 void
2740 cifs_put_tlink(struct tcon_link *tlink)
2741 {
2742 	if (!tlink || IS_ERR(tlink))
2743 		return;
2744 
2745 	if (!atomic_dec_and_test(&tlink->tl_count) ||
2746 	    test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
2747 		tlink->tl_time = jiffies;
2748 		return;
2749 	}
2750 
2751 	if (!IS_ERR(tlink_tcon(tlink)))
2752 		cifs_put_tcon(tlink_tcon(tlink));
2753 	kfree(tlink);
2754 	return;
2755 }
2756 
2757 static int
2758 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2759 {
2760 	struct cifs_sb_info *old = CIFS_SB(sb);
2761 	struct cifs_sb_info *new = mnt_data->cifs_sb;
2762 	unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
2763 	unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
2764 
2765 	if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
2766 		return 0;
2767 
2768 	if (old->mnt_cifs_serverino_autodisabled)
2769 		newflags &= ~CIFS_MOUNT_SERVER_INUM;
2770 
2771 	if (oldflags != newflags)
2772 		return 0;
2773 
2774 	/*
2775 	 * We want to share sb only if we don't specify an r/wsize or
2776 	 * specified r/wsize is greater than or equal to existing one.
2777 	 */
2778 	if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
2779 		return 0;
2780 
2781 	if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
2782 		return 0;
2783 
2784 	if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
2785 	    !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
2786 		return 0;
2787 
2788 	if (old->ctx->file_mode != new->ctx->file_mode ||
2789 	    old->ctx->dir_mode != new->ctx->dir_mode)
2790 		return 0;
2791 
2792 	if (strcmp(old->local_nls->charset, new->local_nls->charset))
2793 		return 0;
2794 
2795 	if (old->ctx->acregmax != new->ctx->acregmax)
2796 		return 0;
2797 	if (old->ctx->acdirmax != new->ctx->acdirmax)
2798 		return 0;
2799 	if (old->ctx->closetimeo != new->ctx->closetimeo)
2800 		return 0;
2801 
2802 	return 1;
2803 }
2804 
2805 static int match_prepath(struct super_block *sb,
2806 			 struct cifs_tcon *tcon,
2807 			 struct cifs_mnt_data *mnt_data)
2808 {
2809 	struct smb3_fs_context *ctx = mnt_data->ctx;
2810 	struct cifs_sb_info *old = CIFS_SB(sb);
2811 	struct cifs_sb_info *new = mnt_data->cifs_sb;
2812 	bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2813 		old->prepath;
2814 	bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
2815 		new->prepath;
2816 
2817 	if (tcon->origin_fullpath &&
2818 	    dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source))
2819 		return 1;
2820 
2821 	if (old_set && new_set && !strcmp(new->prepath, old->prepath))
2822 		return 1;
2823 	else if (!old_set && !new_set)
2824 		return 1;
2825 
2826 	return 0;
2827 }
2828 
/*
 * sget() callback: decide whether the existing superblock @sb can be
 * reused for the mount described by @data (a struct cifs_mnt_data).
 * Returns 1 to share @sb, 0 to allocate a new superblock.
 */
int
cifs_match_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	struct smb3_fs_context *ctx;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *tcp_srv;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	int rc = 0;

	spin_lock(&cifs_tcp_ses_lock);
	cifs_sb = CIFS_SB(sb);

	/* We do not want to use a superblock that has been shutdown */
	if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
		spin_unlock(&cifs_tcp_ses_lock);
		return 0;
	}

	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
	if (IS_ERR_OR_NULL(tlink)) {
		pr_warn_once("%s: skip super matching due to bad tlink(%p)\n",
			     __func__, tlink);
		spin_unlock(&cifs_tcp_ses_lock);
		return 0;
	}
	tcon = tlink_tcon(tlink);
	ses = tcon->ses;
	tcp_srv = ses->server;

	ctx = mnt_data->ctx;

	/* lock order: srv_lock -> ses_lock -> chan_lock -> tc_lock */
	spin_lock(&tcp_srv->srv_lock);
	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	spin_lock(&tcon->tc_lock);
	/* server, session, tcon and prefix path must all match */
	if (!match_server(tcp_srv, ctx, true) ||
	    !match_session(ses, ctx) ||
	    !match_tcon(tcon, ctx) ||
	    !match_prepath(sb, tcon, mnt_data)) {
		rc = 0;
		goto out;
	}

	rc = compare_mount_options(sb, mnt_data);
out:
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);
	spin_unlock(&tcp_srv->srv_lock);

	spin_unlock(&cifs_tcp_ses_lock);
	cifs_put_tlink(tlink);
	return rc;
}
2886 
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* separate lockdep classes for IPv4 and IPv6 CIFS sockets */
static struct lock_class_key cifs_key[2];
static struct lock_class_key cifs_slock_key[2];

/*
 * Give a freshly-created IPv4 socket its own lockdep class so CIFS's
 * socket locking is tracked separately from generic AF_INET sockets.
 */
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(!sock_allow_reclassification(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
		&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
}

/* IPv6 counterpart of cifs_reclassify_socket4() */
static inline void
cifs_reclassify_socket6(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(!sock_allow_reclassification(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
		&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
}
#else
/* no-op stubs when lockdep class tracking is compiled out */
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
}

static inline void
cifs_reclassify_socket6(struct socket *sock)
{
}
#endif
2919 
/*
 * Encode @length bytes of @source into @target using RFC1001 section 14
 * "half-ASCII" NetBIOS name encoding: each nibble of every input byte
 * becomes one letter in 'A'..'P', so @target receives 2 * @length bytes.
 * The caller must provide a large-enough @target; no terminator is added.
 */
static void rfc1002mangle(char *target, const char *source,
			  unsigned int length)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < length; i++) {
		/* mask a nibble at a time and encode */
		target[j] = 'A' + (0x0F & (source[i] >> 4));
		target[j + 1] = 'A' + (0x0F & source[i]);
		j += 2;
	}
}
2933 
2934 static int
2935 bind_socket(struct TCP_Server_Info *server)
2936 {
2937 	int rc = 0;
2938 	if (server->srcaddr.ss_family != AF_UNSPEC) {
2939 		/* Bind to the specified local IP address */
2940 		struct socket *socket = server->ssocket;
2941 		rc = kernel_bind(socket,
2942 				 (struct sockaddr *) &server->srcaddr,
2943 				 sizeof(server->srcaddr));
2944 		if (rc < 0) {
2945 			struct sockaddr_in *saddr4;
2946 			struct sockaddr_in6 *saddr6;
2947 			saddr4 = (struct sockaddr_in *)&server->srcaddr;
2948 			saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
2949 			if (saddr6->sin6_family == AF_INET6)
2950 				cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
2951 					 &saddr6->sin6_addr, rc);
2952 			else
2953 				cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
2954 					 &saddr4->sin_addr.s_addr, rc);
2955 		}
2956 	}
2957 	return rc;
2958 }
2959 
/*
 * Send an RFC1001 session request (NetBIOS session setup) on a freshly
 * connected socket.  Required by servers (typically on port 139) that
 * expect a session initialization before SMB negprot.  Returns the
 * result of smb_send().
 */
static int
ip_rfc1001_connect(struct TCP_Server_Info *server)
{
	int rc = 0;
	/*
	 * some servers require RFC1001 sessinit before sending
	 * negprot - BB check reconnection in case where second
	 * sessinit is sent but no second negprot
	 */
	struct rfc1002_session_packet req = {};
	struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
	unsigned int len;

	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);

	/* encode the server (called) name, falling back to "*SMBSERVER" */
	if (server->server_RFC1001_name[0] != 0)
		rfc1002mangle(req.trailer.session_req.called_name,
			      server->server_RFC1001_name,
			      RFC1001_NAME_LEN_WITH_NULL);
	else
		rfc1002mangle(req.trailer.session_req.called_name,
			      DEFAULT_CIFS_CALLED_NAME,
			      RFC1001_NAME_LEN_WITH_NULL);

	req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);

	/* calling name ends in null (byte 16) from old smb convention */
	if (server->workstation_RFC1001_name[0] != 0)
		rfc1002mangle(req.trailer.session_req.calling_name,
			      server->workstation_RFC1001_name,
			      RFC1001_NAME_LEN_WITH_NULL);
	else
		rfc1002mangle(req.trailer.session_req.calling_name,
			      "LINUX_CIFS_CLNT",
			      RFC1001_NAME_LEN_WITH_NULL);

	/*
	 * As per rfc1002, @len must be the number of bytes that follows the
	 * length field of a rfc1002 session request payload.
	 */
	len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);

	/* type byte (SESSION_REQUEST) goes in the top octet of the header */
	smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
	rc = smb_send(server, smb_buf, len);
	/*
	 * RFC1001 layer in at least one server requires very short break before
	 * negprot presumably because not expecting negprot to follow so fast.
	 * This is a simple solution that works without complicating the code
	 * and causes no significant slowing down on mount for everyone else
	 */
	usleep_range(1000, 2000);

	return rc;
}
3014 
/*
 * Create (if needed), bind, and connect the TCP socket for @server->dstaddr.
 * On the RFC1001 port (139) also sends the NetBIOS session request.
 * Returns 0 on success or a negative errno; on connect failure the socket
 * is released and server->ssocket is cleared so a retry starts fresh.
 */
static int
generic_ip_connect(struct TCP_Server_Info *server)
{
	struct sockaddr *saddr;
	struct socket *socket;
	int slen, sfamily;
	__be16 sport;
	int rc = 0;

	saddr = (struct sockaddr *) &server->dstaddr;

	/* derive port, sockaddr length and family from the address family */
	if (server->dstaddr.ss_family == AF_INET6) {
		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;

		sport = ipv6->sin6_port;
		slen = sizeof(struct sockaddr_in6);
		sfamily = AF_INET6;
		cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
				ntohs(sport));
	} else {
		struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;

		sport = ipv4->sin_port;
		slen = sizeof(struct sockaddr_in);
		sfamily = AF_INET;
		cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
				ntohs(sport));
	}

	/* reuse an existing socket (reconnect), else create one in the
	 * server's network namespace */
	if (server->ssocket) {
		socket = server->ssocket;
	} else {
		rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
				   IPPROTO_TCP, &server->ssocket, 1);
		if (rc < 0) {
			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
			return rc;
		}

		/* BB other socket options to set KEEPALIVE, NODELAY? */
		cifs_dbg(FYI, "Socket created\n");
		socket = server->ssocket;
		/* GFP_NOFS: socket allocations must not recurse into the fs */
		socket->sk->sk_allocation = GFP_NOFS;
		socket->sk->sk_use_task_frag = false;
		if (sfamily == AF_INET6)
			cifs_reclassify_socket6(socket);
		else
			cifs_reclassify_socket4(socket);
	}

	/* bind to the requested local interface/port, if any */
	rc = bind_socket(server);
	if (rc < 0)
		return rc;

	/*
	 * Eventually check for other socket options to change from
	 * the default. sock_setsockopt not used because it expects
	 * user space buffer
	 */
	socket->sk->sk_rcvtimeo = 7 * HZ;
	socket->sk->sk_sndtimeo = 5 * HZ;

	/* make the bufsizes depend on wsize/rsize and max requests */
	if (server->noautotune) {
		if (socket->sk->sk_sndbuf < (200 * 1024))
			socket->sk->sk_sndbuf = 200 * 1024;
		if (socket->sk->sk_rcvbuf < (140 * 1024))
			socket->sk->sk_rcvbuf = 140 * 1024;
	}

	if (server->tcp_nodelay)
		tcp_sock_set_nodelay(socket->sk);

	cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
		 socket->sk->sk_sndbuf,
		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);

	rc = kernel_connect(socket, saddr, slen,
			    server->noblockcnt ? O_NONBLOCK : 0);
	/*
	 * When mounting SMB root file systems, we do not want to block in
	 * connect. Otherwise bail out and then let cifs_reconnect() perform
	 * reconnect failover - if possible.
	 */
	if (server->noblockcnt && rc == -EINPROGRESS)
		rc = 0;
	if (rc < 0) {
		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
		trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
		/* drop the half-open socket so the next attempt starts clean */
		sock_release(socket);
		server->ssocket = NULL;
		return rc;
	}
	trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
	/* port 139 requires an RFC1001 session request before negprot */
	if (sport == htons(RFC1001_PORT))
		rc = ip_rfc1001_connect(server);

	return rc;
}
3114 
3115 static int
3116 ip_connect(struct TCP_Server_Info *server)
3117 {
3118 	__be16 *sport;
3119 	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
3120 	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
3121 
3122 	if (server->dstaddr.ss_family == AF_INET6)
3123 		sport = &addr6->sin6_port;
3124 	else
3125 		sport = &addr->sin_port;
3126 
3127 	if (*sport == 0) {
3128 		int rc;
3129 
3130 		/* try with 445 port at first */
3131 		*sport = htons(CIFS_PORT);
3132 
3133 		rc = generic_ip_connect(server);
3134 		if (rc >= 0)
3135 			return rc;
3136 
3137 		/* if it failed, try with 139 port */
3138 		*sport = htons(RFC1001_PORT);
3139 	}
3140 
3141 	return generic_ip_connect(server);
3142 }
3143 
3144 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Query the server's CIFS Unix extension capabilities and negotiate the
 * subset we will use (SetFSUnixInfo). Called at mount time (@ctx != NULL)
 * and again on reconnect (@ctx == NULL), where we try to preserve the
 * behavior negotiated at mount time.
 */
void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
			  struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
	/*
	 * If we are reconnecting then should we check to see if
	 * any requested capabilities changed locally e.g. via
	 * remount but we can not do much about it here
	 * if they have (even if we could detect it by the following)
	 * Perhaps we could add a backpointer to array of sb from tcon
	 * or if we change to make all sb to same share the same
	 * sb as NFS - then we only have one backpointer to sb.
	 * What if we wanted to mount the server share twice once with
	 * and once without posixacls or posix paths?
	 */
	__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);

	if (ctx && ctx->no_linux_ext) {
		tcon->fsUnixInfo.Capability = 0;
		tcon->unix_ext = 0; /* Unix Extensions disabled */
		cifs_dbg(FYI, "Linux protocol extensions disabled\n");
		return;
	} else if (ctx)
		tcon->unix_ext = 1; /* Unix Extensions supported */

	if (!tcon->unix_ext) {
		cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
		return;
	}

	/* CIFSSMBQFSUnixInfo fills tcon->fsUnixInfo on success (returns 0) */
	if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
		__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
		cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
		/*
		 * check for reconnect case in which we do not
		 * want to change the mount behavior if we can avoid it
		 */
		if (ctx == NULL) {
			/*
			 * turn off POSIX ACL and PATHNAMES if not set
			 * originally at mount time
			 */
			if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
				cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
			if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
				if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
					cifs_dbg(VFS, "POSIXPATH support change\n");
				cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
			} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
				/* had POSIX paths at mount time but lost them */
				cifs_dbg(VFS, "possible reconnect error\n");
				cifs_dbg(VFS, "server disabled POSIX path support\n");
			}
		}

		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
			cifs_dbg(VFS, "per-share encryption not supported yet\n");

		/* restrict to the capabilities this client implements */
		cap &= CIFS_UNIX_CAP_MASK;
		if (ctx && ctx->no_psx_acl)
			cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
		else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
			cifs_dbg(FYI, "negotiated posix acl support\n");
			if (cifs_sb)
				cifs_sb->mnt_cifs_flags |=
					CIFS_MOUNT_POSIXACL;
		}

		if (ctx && ctx->posix_paths == 0)
			cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
		else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
			cifs_dbg(FYI, "negotiate posix pathnames\n");
			if (cifs_sb)
				cifs_sb->mnt_cifs_flags |=
					CIFS_MOUNT_POSIX_PATHS;
		}

		cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
#ifdef CONFIG_CIFS_DEBUG2
		if (cap & CIFS_UNIX_FCNTL_CAP)
			cifs_dbg(FYI, "FCNTL cap\n");
		if (cap & CIFS_UNIX_EXTATTR_CAP)
			cifs_dbg(FYI, "EXTATTR cap\n");
		if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
			cifs_dbg(FYI, "POSIX path cap\n");
		if (cap & CIFS_UNIX_XATTR_CAP)
			cifs_dbg(FYI, "XATTR cap\n");
		if (cap & CIFS_UNIX_POSIX_ACL_CAP)
			cifs_dbg(FYI, "POSIX ACL cap\n");
		if (cap & CIFS_UNIX_LARGE_READ_CAP)
			cifs_dbg(FYI, "very large read cap\n");
		if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
			cifs_dbg(FYI, "very large write cap\n");
		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
			cifs_dbg(FYI, "transport encryption cap\n");
		if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
			cifs_dbg(FYI, "mandatory transport encryption cap\n");
#endif /* CIFS_DEBUG2 */
		/* tell the server which of its caps we will actually use */
		if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
			if (ctx == NULL)
				cifs_dbg(FYI, "resetting capabilities failed\n");
			else
				cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");

		}
	}
}
3250 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
3251 
3252 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
3253 {
3254 	struct smb3_fs_context *ctx = cifs_sb->ctx;
3255 
3256 	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
3257 
3258 	spin_lock_init(&cifs_sb->tlink_tree_lock);
3259 	cifs_sb->tlink_tree = RB_ROOT;
3260 
3261 	cifs_dbg(FYI, "file mode: %04ho  dir mode: %04ho\n",
3262 		 ctx->file_mode, ctx->dir_mode);
3263 
3264 	/* this is needed for ASCII cp to Unicode converts */
3265 	if (ctx->iocharset == NULL) {
3266 		/* load_nls_default cannot return null */
3267 		cifs_sb->local_nls = load_nls_default();
3268 	} else {
3269 		cifs_sb->local_nls = load_nls(ctx->iocharset);
3270 		if (cifs_sb->local_nls == NULL) {
3271 			cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
3272 				 ctx->iocharset);
3273 			return -ELIBACC;
3274 		}
3275 	}
3276 	ctx->local_nls = cifs_sb->local_nls;
3277 
3278 	smb3_update_mnt_flags(cifs_sb);
3279 
3280 	if (ctx->direct_io)
3281 		cifs_dbg(FYI, "mounting share using direct i/o\n");
3282 	if (ctx->cache_ro) {
3283 		cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
3284 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
3285 	} else if (ctx->cache_rw) {
3286 		cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
3287 		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
3288 					    CIFS_MOUNT_RW_CACHE);
3289 	}
3290 
3291 	if ((ctx->cifs_acl) && (ctx->dynperm))
3292 		cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
3293 
3294 	if (ctx->prepath) {
3295 		cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
3296 		if (cifs_sb->prepath == NULL)
3297 			return -ENOMEM;
3298 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3299 	}
3300 
3301 	return 0;
3302 }
3303 
3304 /* Release all succeed connections */
3305 void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
3306 {
3307 	int rc = 0;
3308 
3309 	if (mnt_ctx->tcon)
3310 		cifs_put_tcon(mnt_ctx->tcon);
3311 	else if (mnt_ctx->ses)
3312 		cifs_put_smb_ses(mnt_ctx->ses);
3313 	else if (mnt_ctx->server)
3314 		cifs_put_tcp_session(mnt_ctx->server, 0);
3315 	mnt_ctx->ses = NULL;
3316 	mnt_ctx->tcon = NULL;
3317 	mnt_ctx->server = NULL;
3318 	mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
3319 	free_xid(mnt_ctx->xid);
3320 }
3321 
3322 int cifs_mount_get_session(struct cifs_mount_ctx *mnt_ctx)
3323 {
3324 	struct TCP_Server_Info *server = NULL;
3325 	struct smb3_fs_context *ctx;
3326 	struct cifs_ses *ses = NULL;
3327 	unsigned int xid;
3328 	int rc = 0;
3329 
3330 	xid = get_xid();
3331 
3332 	if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->fs_ctx)) {
3333 		rc = -EINVAL;
3334 		goto out;
3335 	}
3336 	ctx = mnt_ctx->fs_ctx;
3337 
3338 	/* get a reference to a tcp session */
3339 	server = cifs_get_tcp_session(ctx, NULL);
3340 	if (IS_ERR(server)) {
3341 		rc = PTR_ERR(server);
3342 		server = NULL;
3343 		goto out;
3344 	}
3345 
3346 	/* get a reference to a SMB session */
3347 	ses = cifs_get_smb_ses(server, ctx);
3348 	if (IS_ERR(ses)) {
3349 		rc = PTR_ERR(ses);
3350 		ses = NULL;
3351 		goto out;
3352 	}
3353 
3354 	if ((ctx->persistent == true) && (!(ses->server->capabilities &
3355 					    SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
3356 		cifs_server_dbg(VFS, "persistent handles not supported by server\n");
3357 		rc = -EOPNOTSUPP;
3358 	}
3359 
3360 out:
3361 	mnt_ctx->xid = xid;
3362 	mnt_ctx->server = server;
3363 	mnt_ctx->ses = ses;
3364 	mnt_ctx->tcon = NULL;
3365 
3366 	return rc;
3367 }
3368 
/*
 * Acquire the tree connection (tcon) for a mount, negotiate Unix/POSIX
 * extensions where applicable, query share info, and clamp rsize/wsize
 * to what the server negotiated. Requires server/ses already set in
 * @mnt_ctx by cifs_mount_get_session(). The tcon reference (or NULL)
 * is stored in mnt_ctx->tcon even on error.
 */
int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
{
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb;
	struct smb3_fs_context *ctx;
	struct cifs_tcon *tcon = NULL;
	int rc = 0;

	if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->server || !mnt_ctx->ses || !mnt_ctx->fs_ctx ||
			 !mnt_ctx->cifs_sb)) {
		rc = -EINVAL;
		goto out;
	}
	server = mnt_ctx->server;
	ctx = mnt_ctx->fs_ctx;
	cifs_sb = mnt_ctx->cifs_sb;

	/* search for existing tcon to this server share */
	tcon = cifs_get_tcon(mnt_ctx->ses, ctx);
	if (IS_ERR(tcon)) {
		rc = PTR_ERR(tcon);
		tcon = NULL;
		goto out;
	}

	/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
	if (tcon->posix_extensions)
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* tell server which Unix caps we support */
	if (cap_unix(tcon->ses)) {
		/*
		 * reset of caps checks mount to see if unix extensions disabled
		 * for just this mount.
		 */
		reset_cifs_unix_caps(mnt_ctx->xid, tcon, cifs_sb, ctx);
		/* srv_lock guards tcpStatus; bail if mandatory per-share
		 * encryption was negotiated and we are mid-reconnect */
		spin_lock(&tcon->ses->server->srv_lock);
		if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
		    (le64_to_cpu(tcon->fsUnixInfo.Capability) &
		     CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
			spin_unlock(&tcon->ses->server->srv_lock);
			rc = -EACCES;
			goto out;
		}
		spin_unlock(&tcon->ses->server->srv_lock);
	} else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		tcon->unix_ext = 0; /* server does not support them */

	/* do not care if a following call succeed - informational */
	if (!tcon->pipe && server->ops->qfs_tcon) {
		server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
			if (tcon->fsDevInfo.DeviceCharacteristics &
			    cpu_to_le32(FILE_READ_ONLY_DEVICE))
				cifs_dbg(VFS, "mounted to read only share\n");
			else if ((cifs_sb->mnt_cifs_flags &
				  CIFS_MOUNT_RW_CACHE) == 0)
				cifs_dbg(VFS, "read only mount of RW share\n");
			/* no need to log a RW mount of a typical RW share */
		}
	}

	/*
	 * Clamp the rsize/wsize mount arguments if they are too big for the server
	 * and set the rsize/wsize to the negotiated values if not passed in by
	 * the user on mount
	 */
	if ((cifs_sb->ctx->wsize == 0) ||
	    (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) {
		cifs_sb->ctx->wsize =
			round_down(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE);
		/*
		 * in the very unlikely event that the server sent a max write size under PAGE_SIZE,
		 * (which would get rounded down to 0) then reset wsize to absolute minimum eg 4096
		 */
		if (cifs_sb->ctx->wsize == 0) {
			cifs_sb->ctx->wsize = PAGE_SIZE;
			cifs_dbg(VFS, "wsize too small, reset to minimum ie PAGE_SIZE, usually 4096\n");
		}
	}
	if ((cifs_sb->ctx->rsize == 0) ||
	    (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
		cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);

	/*
	 * The cookie is initialized from volume info returned above.
	 * Inside cifs_fscache_get_super_cookie it checks
	 * that we do not get super cookie twice.
	 */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		cifs_fscache_get_super_cookie(tcon);

out:
	mnt_ctx->tcon = tcon;
	return rc;
}
3467 
3468 static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
3469 			     struct cifs_tcon *tcon)
3470 {
3471 	struct tcon_link *tlink;
3472 
3473 	/* hang the tcon off of the superblock */
3474 	tlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
3475 	if (tlink == NULL)
3476 		return -ENOMEM;
3477 
3478 	tlink->tl_uid = ses->linux_uid;
3479 	tlink->tl_tcon = tcon;
3480 	tlink->tl_time = jiffies;
3481 	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
3482 	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3483 
3484 	cifs_sb->master_tlink = tlink;
3485 	spin_lock(&cifs_sb->tlink_tree_lock);
3486 	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
3487 	spin_unlock(&cifs_sb->tlink_tree_lock);
3488 
3489 	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
3490 				TLINK_IDLE_EXPIRE);
3491 	return 0;
3492 }
3493 
/*
 * Walk @full_path one component at a time and probe each prefix with
 * ops->is_path_accessible, so a mount of //srv/share/a/b/c fails early
 * if an intermediate directory is not reachable. @added_treename tells
 * us to skip the first component (the share name prepended to the path).
 * Returns 0 if every prefix is accessible, else the first probe's error.
 * NOTE: temporarily writes into @full_path, so it must be writable.
 */
static int
cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
					unsigned int xid,
					struct cifs_tcon *tcon,
					struct cifs_sb_info *cifs_sb,
					char *full_path,
					int added_treename)
{
	int rc;
	char *s;
	char sep, tmp;
	int skip = added_treename ? 1 : 0;

	sep = CIFS_DIR_SEP(cifs_sb);
	s = full_path;

	/* probe the share root first; loop continues while rc == 0 */
	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
	while (rc == 0) {
		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		/* next separator */
		while (*s && *s != sep)
			s++;
		/*
		 * if the treename is added, we then have to skip the first
		 * part within the separators
		 */
		if (skip) {
			skip = 0;
			continue;
		}
		/*
		 * temporarily null-terminate the path at the end of
		 * the current component
		 */
		tmp = *s;
		*s = 0;
		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
						     full_path);
		*s = tmp;
	}
	return rc;
}
3540 
3541 /*
3542  * Check if path is remote (i.e. a DFS share).
3543  *
3544  * Return -EREMOTE if it is, otherwise 0 or -errno.
3545  */
3546 int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx)
3547 {
3548 	int rc;
3549 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3550 	struct TCP_Server_Info *server = mnt_ctx->server;
3551 	unsigned int xid = mnt_ctx->xid;
3552 	struct cifs_tcon *tcon = mnt_ctx->tcon;
3553 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3554 	char *full_path;
3555 
3556 	if (!server->ops->is_path_accessible)
3557 		return -EOPNOTSUPP;
3558 
3559 	/*
3560 	 * cifs_build_path_to_root works only when we have a valid tcon
3561 	 */
3562 	full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
3563 					    tcon->Flags & SMB_SHARE_IS_IN_DFS);
3564 	if (full_path == NULL)
3565 		return -ENOMEM;
3566 
3567 	cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
3568 
3569 	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3570 					     full_path);
3571 	if (rc != 0 && rc != -EREMOTE)
3572 		goto out;
3573 
3574 	if (rc != -EREMOTE) {
3575 		rc = cifs_are_all_path_components_accessible(server, xid, tcon,
3576 			cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
3577 		if (rc != 0) {
3578 			cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
3579 			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3580 			rc = 0;
3581 		}
3582 	}
3583 
3584 out:
3585 	kfree(full_path);
3586 	return rc;
3587 }
3588 
3589 #ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * Mount entry point when DFS support is compiled in. Resolves the share
 * (possibly chasing DFS referrals), then hooks the resulting tcon onto
 * the superblock. On any failure all acquired connections are released.
 */
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
	struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
	bool isdfs;
	int rc;

	/* connects and tells us whether the path turned out to be DFS */
	rc = dfs_mount_share(&mnt_ctx, &isdfs);
	if (rc)
		goto error;
	if (!isdfs)
		goto out;

	/*
	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
	 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
	 */
	cifs_autodisable_serverino(cifs_sb);
	/*
	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
	 * that have different prefix paths.
	 */
	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
	/* take ownership of ctx->prepath (freed later with the superblock) */
	kfree(cifs_sb->prepath);
	cifs_sb->prepath = ctx->prepath;
	ctx->prepath = NULL;

out:
	cifs_try_adding_channels(mnt_ctx.ses);
	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
	if (rc)
		goto error;

	free_xid(mnt_ctx.xid);
	return rc;

error:
	/* releases tcon/ses/server refs and the xid */
	cifs_mount_put_conns(&mnt_ctx);
	return rc;
}
3629 #else
/*
 * Mount entry point when DFS support is compiled out. Gets the session
 * and tcon, refuses DFS paths (-EREMOTE mapped to -EOPNOTSUPP) and hooks
 * the tcon onto the superblock. On failure all connections are released.
 */
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
	int rc = 0;
	struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };

	rc = cifs_mount_get_session(&mnt_ctx);
	if (rc)
		goto error;

	rc = cifs_mount_get_tcon(&mnt_ctx);
	if (!rc) {
		/*
		 * Prevent superblock from being created with any missing
		 * connections.
		 */
		if (WARN_ON(!mnt_ctx.server))
			rc = -EHOSTDOWN;
		else if (WARN_ON(!mnt_ctx.ses))
			rc = -EACCES;
		else if (WARN_ON(!mnt_ctx.tcon))
			rc = -ENOENT;
	}
	if (rc)
		goto error;

	/* a DFS (remote) path cannot be followed without DFS upcall support */
	rc = cifs_is_path_remote(&mnt_ctx);
	if (rc == -EREMOTE)
		rc = -EOPNOTSUPP;
	if (rc)
		goto error;

	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
	if (rc)
		goto error;

	free_xid(mnt_ctx.xid);
	return rc;

error:
	/* releases tcon/ses/server refs and the xid */
	cifs_mount_put_conns(&mnt_ctx);
	return rc;
}
3672 #endif
3673 
3674 /*
3675  * Issue a TREE_CONNECT request.
3676  */
int
CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
	 const char *tree, struct cifs_tcon *tcon,
	 const struct nls_table *nls_codepage)
{
	struct smb_hdr *smb_buffer;
	struct smb_hdr *smb_buffer_response;
	TCONX_REQ *pSMB;
	TCONX_RSP *pSMBr;
	unsigned char *bcc_ptr;
	int rc = 0;
	int length;
	__u16 bytes_left, count;

	if (ses == NULL)
		return -EIO;

	smb_buffer = cifs_buf_get();
	if (smb_buffer == NULL)
		return -ENOMEM;

	/* request and response share the same buffer */
	smb_buffer_response = smb_buffer;

	header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
			NULL /*no tid */ , 4 /*wct */ );

	smb_buffer->Mid = get_next_mid(ses->server);
	smb_buffer->Uid = ses->Suid;
	pSMB = (TCONX_REQ *) smb_buffer;
	pSMBr = (TCONX_RSP *) smb_buffer_response;

	pSMB->AndXCommand = 0xFF;	/* no chained AndX command follows */
	pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
	/* bcc_ptr walks the variable-length byte area being assembled */
	bcc_ptr = &pSMB->Password[0];

	pSMB->PasswordLength = cpu_to_le16(1);	/* minimum */
	*bcc_ptr = 0; /* password is null byte */
	bcc_ptr++;              /* skip password */
	/* already aligned so no need to do it below */

	if (ses->server->sign)
		smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	if (ses->capabilities & CAP_STATUS32) {
		smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
	}
	if (ses->capabilities & CAP_DFS) {
		smb_buffer->Flags2 |= SMBFLG2_DFS;
	}
	/* append the tree name (UNC path) in UTF-16 or local codepage */
	if (ses->capabilities & CAP_UNICODE) {
		smb_buffer->Flags2 |= SMBFLG2_UNICODE;
		length =
		    cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
			6 /* max utf8 char length in bytes */ *
			(/* server len*/ + 256 /* share len */), nls_codepage);
		bcc_ptr += 2 * length;	/* convert num 16 bit words to bytes */
		bcc_ptr += 2;	/* skip trailing null */
	} else {		/* ASCII */
		strcpy(bcc_ptr, tree);
		bcc_ptr += strlen(tree) + 1;
	}
	/* service type "?????" lets the server pick (disk, printer, IPC) */
	strcpy(bcc_ptr, "?????");
	bcc_ptr += strlen("?????");
	bcc_ptr += 1;
	count = bcc_ptr - &pSMB->Password[0];
	be32_add_cpu(&pSMB->hdr.smb_buf_length, count);
	pSMB->ByteCount = cpu_to_le16(count);

	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
			 0);

	/* above now done in SendReceive */
	if (rc == 0) {
		bool is_unicode;

		tcon->tid = smb_buffer_response->Tid;
		bcc_ptr = pByteArea(smb_buffer_response);
		bytes_left = get_bcc(smb_buffer_response);
		length = strnlen(bcc_ptr, bytes_left - 2);
		if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
			is_unicode = true;
		else
			is_unicode = false;


		/* skip service field (NB: this field is always ASCII) */
		if (length == 3) {
			if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
			    (bcc_ptr[2] == 'C')) {
				cifs_dbg(FYI, "IPC connection\n");
				tcon->ipc = true;
				tcon->pipe = true;
			}
		} else if (length == 2) {
			if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
				/* the most common case */
				cifs_dbg(FYI, "disk share connection\n");
			}
		}
		bcc_ptr += length + 1;
		bytes_left -= (length + 1);
		strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));

		/* mostly informational -- no need to fail on error here */
		kfree(tcon->nativeFileSystem);
		tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
						      bytes_left, is_unicode,
						      nls_codepage);

		cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);

		if ((smb_buffer_response->WordCount == 3) ||
			 (smb_buffer_response->WordCount == 7))
			/* field is in same location */
			tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
		else
			tcon->Flags = 0;
		cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
	}

	cifs_buf_release(smb_buffer);
	return rc;
}
3800 
3801 static void delayed_free(struct rcu_head *p)
3802 {
3803 	struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
3804 
3805 	unload_nls(cifs_sb->local_nls);
3806 	smb3_cleanup_fs_context(cifs_sb->ctx);
3807 	kfree(cifs_sb);
3808 }
3809 
/*
 * Unmount teardown: cancel tlink pruning, drop every tcon_link from the
 * tlink tree, then free the superblock info via RCU (delayed_free).
 */
void
cifs_umount(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct tcon_link *tlink;

	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);

	spin_lock(&cifs_sb->tlink_tree_lock);
	while ((node = rb_first(root))) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		/* take a ref so the tlink survives until our put below */
		cifs_get_tlink(tlink);
		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
		rb_erase(node, root);

		/* drop the lock: cifs_put_tlink may sleep */
		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	kfree(cifs_sb->prepath);
	call_rcu(&cifs_sb->rcu, delayed_free);
}
3835 
/*
 * Negotiate the SMB dialect with @server if it still needs negotiation.
 * Transitions tcpStatus CifsNew/CifsNeedNegotiate -> CifsInNegotiate ->
 * CifsGood (or back to CifsNeedNegotiate on failure) under srv_lock.
 * Returns 0 on success, -EHOSTDOWN if the connection state changed
 * underneath us, -ENOSYS if the dialect has no negotiate ops.
 */
int
cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
			struct TCP_Server_Info *server)
{
	int rc = 0;

	if (!server->ops->need_neg || !server->ops->negotiate)
		return -ENOSYS;

	/* only send once per connect */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus != CifsGood &&
	    server->tcpStatus != CifsNew &&
	    server->tcpStatus != CifsNeedNegotiate) {
		spin_unlock(&server->srv_lock);
		return -EHOSTDOWN;
	}

	/* already negotiated and still good: nothing to do */
	if (!server->ops->need_neg(server) &&
	    server->tcpStatus == CifsGood) {
		spin_unlock(&server->srv_lock);
		return 0;
	}

	server->tcpStatus = CifsInNegotiate;
	spin_unlock(&server->srv_lock);

	/* the wire exchange happens without srv_lock held */
	rc = server->ops->negotiate(xid, ses, server);
	if (rc == 0) {
		spin_lock(&server->srv_lock);
		/* only promote to CifsGood if no reconnect raced with us */
		if (server->tcpStatus == CifsInNegotiate)
			server->tcpStatus = CifsGood;
		else
			rc = -EHOSTDOWN;
		spin_unlock(&server->srv_lock);
	} else {
		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsInNegotiate)
			server->tcpStatus = CifsNeedNegotiate;
		spin_unlock(&server->srv_lock);
	}

	return rc;
}
3880 
/*
 * Establish (or rebind a channel of) an SMB session on @server.
 * "Binding" means attaching an extra multichannel connection to an
 * already-established session; a full setup resets capabilities and any
 * cached auth key. Session and channel state are updated under the
 * nested ses_lock/chan_lock. Returns 0 or a negative errno.
 */
int
cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   struct nls_table *nls_info)
{
	int rc = -ENOSYS;
	/* channels record the session's address from the primary server */
	struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
	bool is_binding = false;

	spin_lock(&ses->ses_lock);
	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
		 __func__, ses->chans_need_reconnect);

	if (ses->ses_status != SES_GOOD &&
	    ses->ses_status != SES_NEW &&
	    ses->ses_status != SES_NEED_RECON) {
		spin_unlock(&ses->ses_lock);
		return -EHOSTDOWN;
	}

	/* only send once per connect */
	spin_lock(&ses->chan_lock);
	if (CIFS_ALL_CHANS_GOOD(ses)) {
		/* all channels recovered: the session as a whole is good */
		if (ses->ses_status == SES_NEED_RECON)
			ses->ses_status = SES_GOOD;
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
		return 0;
	}

	cifs_chan_set_in_reconnect(ses, server);
	/* binding = some other channel of this session is already up */
	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
	spin_unlock(&ses->chan_lock);

	if (!is_binding) {
		ses->ses_status = SES_IN_SETUP;

		/* force iface_list refresh */
		ses->iface_last_update = 0;
	}
	spin_unlock(&ses->ses_lock);

	/* update ses ip_addr only for primary chan */
	if (server == pserver) {
		if (server->dstaddr.ss_family == AF_INET6)
			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
		else
			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
	}

	if (!is_binding) {
		ses->capabilities = server->capabilities;
		if (!linuxExtEnabled)
			ses->capabilities &= (~server->vals->cap_unix);

		/* a stale auth key must not leak into the new session */
		if (ses->auth_key.response) {
			cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
				 ses->auth_key.response);
			kfree_sensitive(ses->auth_key.response);
			ses->auth_key.response = NULL;
			ses->auth_key.len = 0;
		}
	}

	cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
		 server->sec_mode, server->capabilities, server->timeAdj);

	if (server->ops->sess_setup)
		rc = server->ops->sess_setup(xid, ses, server, nls_info);

	if (rc) {
		cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_IN_SETUP)
			ses->ses_status = SES_NEED_RECON;
		spin_lock(&ses->chan_lock);
		cifs_chan_clear_in_reconnect(ses, server);
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
	} else {
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_IN_SETUP)
			ses->ses_status = SES_GOOD;
		spin_lock(&ses->chan_lock);
		cifs_chan_clear_in_reconnect(ses, server);
		cifs_chan_clear_need_reconnect(ses, server);
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
	}

	return rc;
}
3975 
3976 static int
3977 cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
3978 {
3979 	ctx->sectype = ses->sectype;
3980 
3981 	/* krb5 is special, since we don't need username or pw */
3982 	if (ctx->sectype == Kerberos)
3983 		return 0;
3984 
3985 	return cifs_set_cifscreds(ctx, ses);
3986 }
3987 
/*
 * Build a brand-new tcon (plus the SMB session backing it) for @fsuid,
 * reusing the master tcon's TCP connection and mount options.  Used by
 * multiuser mounts so that each local user authenticates separately.
 *
 * Caller must hold the mount lock (see cifs_construct_tcon()).
 *
 * Returns a tcon pointer on success or an ERR_PTR() value on failure.
 */
static struct cifs_tcon *
__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
	int rc;
	struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
	struct cifs_ses *ses;
	struct cifs_tcon *tcon = NULL;
	struct smb3_fs_context *ctx;
	char *origin_fullpath = NULL;

	/* Temporary fs context cloning the master tcon's settings */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ctx->local_nls = cifs_sb->local_nls;
	/* Credentials/ownership belong to the user we build the tcon for */
	ctx->linux_uid = fsuid;
	ctx->cred_uid = fsuid;
	ctx->UNC = master_tcon->tree_name;
	ctx->retry = master_tcon->retry;
	ctx->nocase = master_tcon->nocase;
	ctx->nohandlecache = master_tcon->nohandlecache;
	ctx->local_lease = master_tcon->local_lease;
	ctx->no_lease = master_tcon->no_lease;
	ctx->resilient = master_tcon->use_resilient;
	ctx->persistent = master_tcon->use_persistent;
	ctx->handle_timeout = master_tcon->handle_timeout;
	ctx->no_linux_ext = !master_tcon->unix_ext;
	ctx->linux_ext = master_tcon->posix_extensions;
	ctx->sectype = master_tcon->ses->sectype;
	ctx->sign = master_tcon->ses->sign;
	ctx->seal = master_tcon->seal;
	ctx->witness = master_tcon->use_witness;
	ctx->dfs_root_ses = master_tcon->ses->dfs_root_ses;

	/* Copy auth type (and creds, unless krb5) from the master session */
	rc = cifs_set_vol_auth(ctx, master_tcon->ses);
	if (rc) {
		tcon = ERR_PTR(rc);
		goto out;
	}

	/* get a reference for the same TCP session */
	spin_lock(&cifs_tcp_ses_lock);
	++master_tcon->ses->server->srv_count;
	spin_unlock(&cifs_tcp_ses_lock);

	ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
	if (IS_ERR(ses)) {
		tcon = (struct cifs_tcon *)ses;
		/* drop the TCP session reference we took just above */
		cifs_put_tcp_session(master_tcon->ses->server, 0);
		goto out;
	}

#ifdef CONFIG_CIFS_DFS_UPCALL
	/*
	 * If the master tcon came from a DFS referral, look up the full
	 * DFS path of the mount source so the new tcon can carry it too
	 * (needed below to enable referral-cache refresh for this tcon).
	 */
	spin_lock(&master_tcon->tc_lock);
	if (master_tcon->origin_fullpath) {
		spin_unlock(&master_tcon->tc_lock);
		origin_fullpath = dfs_get_path(cifs_sb, cifs_sb->ctx->source);
		if (IS_ERR(origin_fullpath)) {
			tcon = ERR_CAST(origin_fullpath);
			origin_fullpath = NULL;
			cifs_put_smb_ses(ses);
			goto out;
		}
	} else {
		spin_unlock(&master_tcon->tc_lock);
	}
#endif

	/* On success the tcon owns our reference to @ses */
	tcon = cifs_get_tcon(ses, ctx);
	if (IS_ERR(tcon)) {
		cifs_put_smb_ses(ses);
		goto out;
	}

#ifdef CONFIG_CIFS_DFS_UPCALL
	if (origin_fullpath) {
		/* transfer path ownership to the tcon ... */
		spin_lock(&tcon->tc_lock);
		tcon->origin_fullpath = origin_fullpath;
		spin_unlock(&tcon->tc_lock);
		origin_fullpath = NULL;
		/* ... and start periodic DFS referral-cache refresh */
		queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
				   dfs_cache_get_ttl() * HZ);
	}
#endif

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(ses))
		reset_cifs_unix_caps(0, tcon, NULL, ctx);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

out:
	/* username/password were allocated by cifs_set_cifscreds() */
	kfree(ctx->username);
	kfree_sensitive(ctx->password);
	kfree(origin_fullpath);
	kfree(ctx);

	return tcon;
}
4086 
4087 static struct cifs_tcon *
4088 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
4089 {
4090 	struct cifs_tcon *ret;
4091 
4092 	cifs_mount_lock();
4093 	ret = __cifs_construct_tcon(cifs_sb, fsuid);
4094 	cifs_mount_unlock();
4095 	return ret;
4096 }
4097 
/* Return the tcon behind the superblock's master tlink. */
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
	struct tcon_link *master = cifs_sb_master_tlink(cifs_sb);

	return tlink_tcon(master);
}
4103 
4104 /* find and return a tlink with given uid */
4105 static struct tcon_link *
4106 tlink_rb_search(struct rb_root *root, kuid_t uid)
4107 {
4108 	struct rb_node *node = root->rb_node;
4109 	struct tcon_link *tlink;
4110 
4111 	while (node) {
4112 		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
4113 
4114 		if (uid_gt(tlink->tl_uid, uid))
4115 			node = node->rb_left;
4116 		else if (uid_lt(tlink->tl_uid, uid))
4117 			node = node->rb_right;
4118 		else
4119 			return tlink;
4120 	}
4121 	return NULL;
4122 }
4123 
4124 /* insert a tcon_link into the tree */
4125 static void
4126 tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
4127 {
4128 	struct rb_node **new = &(root->rb_node), *parent = NULL;
4129 	struct tcon_link *tlink;
4130 
4131 	while (*new) {
4132 		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
4133 		parent = *new;
4134 
4135 		if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
4136 			new = &((*new)->rb_left);
4137 		else
4138 			new = &((*new)->rb_right);
4139 	}
4140 
4141 	rb_link_node(&new_tlink->tl_rbnode, parent, new);
4142 	rb_insert_color(&new_tlink->tl_rbnode, root);
4143 }
4144 
4145 /*
4146  * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
4147  * current task.
4148  *
4149  * If the superblock doesn't refer to a multiuser mount, then just return
4150  * the master tcon for the mount.
4151  *
4152  * First, search the rbtree for an existing tcon for this fsuid. If one
4153  * exists, then check to see if it's pending construction. If it is then wait
4154  * for construction to complete. Once it's no longer pending, check to see if
4155  * it failed and either return an error or retry construction, depending on
4156  * the timeout.
4157  *
4158  * If one doesn't exist then insert a new tcon_link struct into the tree and
4159  * try to construct a new one.
4160  */
4161 struct tcon_link *
4162 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
4163 {
4164 	int ret;
4165 	kuid_t fsuid = current_fsuid();
4166 	struct tcon_link *tlink, *newtlink;
4167 
4168 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
4169 		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
4170 
4171 	spin_lock(&cifs_sb->tlink_tree_lock);
4172 	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4173 	if (tlink)
4174 		cifs_get_tlink(tlink);
4175 	spin_unlock(&cifs_sb->tlink_tree_lock);
4176 
4177 	if (tlink == NULL) {
4178 		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
4179 		if (newtlink == NULL)
4180 			return ERR_PTR(-ENOMEM);
4181 		newtlink->tl_uid = fsuid;
4182 		newtlink->tl_tcon = ERR_PTR(-EACCES);
4183 		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
4184 		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
4185 		cifs_get_tlink(newtlink);
4186 
4187 		spin_lock(&cifs_sb->tlink_tree_lock);
4188 		/* was one inserted after previous search? */
4189 		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4190 		if (tlink) {
4191 			cifs_get_tlink(tlink);
4192 			spin_unlock(&cifs_sb->tlink_tree_lock);
4193 			kfree(newtlink);
4194 			goto wait_for_construction;
4195 		}
4196 		tlink = newtlink;
4197 		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
4198 		spin_unlock(&cifs_sb->tlink_tree_lock);
4199 	} else {
4200 wait_for_construction:
4201 		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
4202 				  TASK_INTERRUPTIBLE);
4203 		if (ret) {
4204 			cifs_put_tlink(tlink);
4205 			return ERR_PTR(-ERESTARTSYS);
4206 		}
4207 
4208 		/* if it's good, return it */
4209 		if (!IS_ERR(tlink->tl_tcon))
4210 			return tlink;
4211 
4212 		/* return error if we tried this already recently */
4213 		if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
4214 			cifs_put_tlink(tlink);
4215 			return ERR_PTR(-EACCES);
4216 		}
4217 
4218 		if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
4219 			goto wait_for_construction;
4220 	}
4221 
4222 	tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
4223 	clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
4224 	wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
4225 
4226 	if (IS_ERR(tlink->tl_tcon)) {
4227 		cifs_put_tlink(tlink);
4228 		return ERR_PTR(-EACCES);
4229 	}
4230 
4231 	return tlink;
4232 }
4233 
4234 /*
4235  * periodic workqueue job that scans tcon_tree for a superblock and closes
4236  * out tcons.
4237  */
4238 static void
4239 cifs_prune_tlinks(struct work_struct *work)
4240 {
4241 	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
4242 						    prune_tlinks.work);
4243 	struct rb_root *root = &cifs_sb->tlink_tree;
4244 	struct rb_node *node;
4245 	struct rb_node *tmp;
4246 	struct tcon_link *tlink;
4247 
4248 	/*
4249 	 * Because we drop the spinlock in the loop in order to put the tlink
4250 	 * it's not guarded against removal of links from the tree. The only
4251 	 * places that remove entries from the tree are this function and
4252 	 * umounts. Because this function is non-reentrant and is canceled
4253 	 * before umount can proceed, this is safe.
4254 	 */
4255 	spin_lock(&cifs_sb->tlink_tree_lock);
4256 	node = rb_first(root);
4257 	while (node != NULL) {
4258 		tmp = node;
4259 		node = rb_next(tmp);
4260 		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
4261 
4262 		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
4263 		    atomic_read(&tlink->tl_count) != 0 ||
4264 		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
4265 			continue;
4266 
4267 		cifs_get_tlink(tlink);
4268 		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
4269 		rb_erase(tmp, root);
4270 
4271 		spin_unlock(&cifs_sb->tlink_tree_lock);
4272 		cifs_put_tlink(tlink);
4273 		spin_lock(&cifs_sb->tlink_tree_lock);
4274 	}
4275 	spin_unlock(&cifs_sb->tlink_tree_lock);
4276 
4277 	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
4278 				TLINK_IDLE_EXPIRE);
4279 }
4280 
4281 #ifndef CONFIG_CIFS_DFS_UPCALL
4282 int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
4283 {
4284 	int rc;
4285 	const struct smb_version_operations *ops = tcon->ses->server->ops;
4286 
4287 	/* only send once per connect */
4288 	spin_lock(&tcon->tc_lock);
4289 
4290 	/* if tcon is marked for needing reconnect, update state */
4291 	if (tcon->need_reconnect)
4292 		tcon->status = TID_NEED_TCON;
4293 
4294 	if (tcon->status == TID_GOOD) {
4295 		spin_unlock(&tcon->tc_lock);
4296 		return 0;
4297 	}
4298 
4299 	if (tcon->status != TID_NEW &&
4300 	    tcon->status != TID_NEED_TCON) {
4301 		spin_unlock(&tcon->tc_lock);
4302 		return -EHOSTDOWN;
4303 	}
4304 
4305 	tcon->status = TID_IN_TCON;
4306 	spin_unlock(&tcon->tc_lock);
4307 
4308 	rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc);
4309 	if (rc) {
4310 		spin_lock(&tcon->tc_lock);
4311 		if (tcon->status == TID_IN_TCON)
4312 			tcon->status = TID_NEED_TCON;
4313 		spin_unlock(&tcon->tc_lock);
4314 	} else {
4315 		spin_lock(&tcon->tc_lock);
4316 		if (tcon->status == TID_IN_TCON)
4317 			tcon->status = TID_GOOD;
4318 		tcon->need_reconnect = false;
4319 		spin_unlock(&tcon->tc_lock);
4320 	}
4321 
4322 	return rc;
4323 }
4324 #endif
4325