xref: /openbmc/linux/fs/smb/client/misc.c (revision a395b8d1)
// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
#include "cifsfs.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
#include "dfs_cache.h"
#include "dfs.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/*
 * The xid serves as a useful identifier for each incoming vfs request,
 * much as the mid is used to track each SMB that is sent; GlobalCurrentXid
 * also provides a running counter (which will eventually wrap past zero)
 * of the total vfs operations handled since the cifs fs was mounted.
 */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
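
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally go through the get_xid()/free_xid() wrappers rather than
 * calling _get_xid()/_free_xid() directly, pairing them around each
 * vfs operation:
 *
 *	unsigned int xid = get_xid();
 *
 *	rc = ...perform the vfs operation...;
 *	free_xid(xid);
 */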

struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		spin_lock_init(&ret_buf->ses_lock);
		ret_buf->ses_status = SES_NEW;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
		INIT_LIST_HEAD(&ret_buf->iface_list);
		spin_lock_init(&ret_buf->chan_lock);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	struct cifs_server_iface *iface = NULL, *niface = NULL;

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kfree_sensitive(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree_sensitive(buf_to_free->auth_key.response);
	spin_lock(&buf_to_free->iface_lock);
	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
				 iface_head)
		kref_put(&iface->refcount, release_iface);
	spin_unlock(&buf_to_free->iface_lock);
	kfree_sensitive(buf_to_free);
}

struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	ret_buf->cfids = init_cached_dirs();
	if (!ret_buf->cfids) {
		kfree(ret_buf);
		return NULL;
	}

	atomic_inc(&tconInfoAllocCount);
	ret_buf->status = TID_NEW;
	++ret_buf->tc_count;
	spin_lock_init(&ret_buf->tc_lock);
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);
#ifdef CONFIG_CIFS_DFS_UPCALL
	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
#endif

	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *tcon)
{
	if (tcon == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	free_cached_dirs(tcon->cfids);
	atomic_dec(&tconInfoAllocCount);
	kfree(tcon->nativeFileSystem);
	kfree_sensitive(tcon->password);
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
#endif
	kfree(tcon);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * The SMB2 header is bigger than the CIFS one, so it is no problem
	 * to clear a few more bytes than CIFS strictly needs.
	 */
	size_t buf_size = sizeof(struct smb2_hdr);

	/*
	 * We could use the negotiated size instead of max_msgsize, but it
	 * may be more efficient to always allocate the same size, albeit
	 * slightly larger than necessary; maxbuffersize defaults to this
	 * and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&buf_alloc_count);
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/*
	 * We could use the negotiated size instead of max_msgsize, but it
	 * may be more efficient to always allocate the same size, albeit
	 * slightly larger than necessary; maxbuffersize defaults to this
	 * and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_small_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&small_buf_alloc_count);
}

void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}
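
/*
 * Usage sketch (illustrative, not part of the original file): callers track
 * which pool a response came from in a resp_buftype variable (initialized
 * to CIFS_NO_BUFFER) and hand both back to free_rsp_buf() when done:
 *
 *	struct kvec rsp_iov;
 *	int resp_buftype = CIFS_NO_BUFFER;
 *
 *	rc = ...send request, receive response into rsp_iov...;
 *	...parse rsp_iov.iov_base...
 *	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 */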

/*
 * NB: MID can not be set if treeCon is not passed in - in that case it is
 * the responsibility of the caller to set the mid.
 */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units  */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /*  RFC 1001 length field does not count */  +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			if (treeCon->ses->server)
				buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags  |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

	/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
}

static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
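	/* (0x424d53ff is the bytes 0xFF 'S' 'M' 'B' read as a little-endian __le32) */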
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */

	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/*
				 * Some servers return only half of bcc on
				 * simple responses (wct, bcc both zero); in
				 * particular this has been seen on ulogoffX
				 * and FindClose. This leaves one byte of bcc
				 * potentially uninitialized.
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
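			/*
			 * Hypothetical example: a large read response with
			 * rfclen = 126000 gives 4 + rfclen = 126004, while
			 * the server's wrapped 16-bit arithmetic yields
			 * clc_len = 60468; both equal 60468 mod 65536, so
			 * the frame is accepted.
			 */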
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "Invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/*
		 * No sense logging an error on an invalid handle in an oplock
		 * break response: a harmless race between a close request and
		 * an oplock break is expected from time to time when writing
		 * out large dirty files cached on the client.
		 */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "Invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* If server is a channel, select the primary channel */
	pserver = CIFS_SERVER_IS_CHAN(srv) ? srv->primary_server : srv;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each_entry(netfile, &tcon->openFileList, tlist) {
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
			 tcon ? tcon->tree_name : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
	}
}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else {
		cinode->oplock = 0;
	}
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}
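
/*
 * Usage sketch (illustrative, not part of the original file): write paths
 * bracket the actual write with the writer count so that the oplock break
 * handler can wait for in-flight writes to drain:
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	...issue the write...
 *	cifs_put_writer(cinode);
 */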

/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 * @cfile: The file to break the oplock on
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->ctx->backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(tlink)->open_file_lock);
}
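
/*
 * Usage sketch (illustrative, not part of the original file): open paths
 * register a pending open before issuing the request so that a lease break
 * arriving before the open completes can still be matched, then remove it:
 *
 *	struct cifs_pending_open open;
 *
 *	cifs_add_pending_open(&fid, tlink, &open);
 *	rc = ...send the open to the server...;
 *	cifs_del_pending_open(&open);
 */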

/*
 * Critical section: must be called with deferred_lock held. As there is
 * no reference count on cifs_deferred_close, pdclose should not be used
 * outside deferred_lock.
 */
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
	struct cifs_deferred_close *dclose;

	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
		if ((dclose->netfid == cfile->fid.netfid) &&
			(dclose->persistent_fid == cfile->fid.persistent_fid) &&
			(dclose->volatile_fid == cfile->fid.volatile_fid)) {
			*pdclose = dclose;
			return true;
		}
	}
	return false;
}

/*
 * Critical section: must be called with deferred_lock held.
 */
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
	bool is_deferred = false;
	struct cifs_deferred_close *pdclose;

	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
	if (is_deferred) {
		kfree(dclose);
		return;
	}

	dclose->tlink = cfile->tlink;
	dclose->netfid = cfile->fid.netfid;
	dclose->persistent_fid = cfile->fid.persistent_fid;
	dclose->volatile_fid = cfile->fid.volatile_fid;
	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}

/*
 * Critical section: must be called with deferred_lock held.
 */
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	if (!is_deferred)
		return;
	list_del(&dclose->dlist);
	kfree(dclose);
}
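
/*
 * Usage sketch (illustrative, not part of the original file): callers wrap
 * the three helpers above in the inode's deferred_lock, e.g.:
 *
 *	spin_lock(&cinode->deferred_lock);
 *	cifs_del_deferred_close(cfile);
 *	spin_unlock(&cinode->deferred_lock);
 */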

void
cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *cfile = NULL;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	if (cifs_inode == NULL)
		return;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&cifs_inode->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&cifs_inode->deferred_lock);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, false, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void
cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;
	void *page;
	const char *full_path;

	INIT_LIST_HEAD(&file_head);
	page = alloc_dentry_path();
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		full_path = build_path_from_dentry(cfile->dentry, page);
		if (strstr(full_path, path)) {
			if (delayed_work_pending(&cfile->deferred)) {
				if (cancel_delayed_work(&cfile->deferred)) {
					spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
					cifs_del_deferred_close(cfile);
					spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
					if (tmp_list == NULL)
						break;
					tmp_list->cfile = cfile;
					list_add_tail(&tmp_list->list, &file_head);
				}
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
	free_dentry_path(page);
}

/*
 * Parses a DFS referral V3 structure.
 * The caller is responsible for freeing target_nodes.
 *
 * Returns:
 * - on success: 0
 * - on failure: errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals must be at least 1, but we got num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
						GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	/*
	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
	 * to false so that we know when we have to unreference pages within
	 * cifs_aio_ctx_release()
	 */
	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}

void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);

	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was called
	 * successfully, which means that iov_iter_extract_pages() was a
	 * success and thus that we may have references or pins on pages
	 * that we need to release.
	 */
	if (ctx->bv) {
		if (ctx->should_dirty || ctx->bv_need_unpin) {
			unsigned int i;

			for (i = 0; i < ctx->nr_pinned_pages; i++) {
				struct page *page = ctx->bv[i].bv_page;

				if (ctx->should_dirty)
					set_page_dirty(page);
				if (ctx->bv_need_unpin)
					unpin_user_page(page);
			}
		}
		kvfree(ctx->bv);
	}

	kfree(ctx);
}

/**
 * cifs_alloc_hash - allocate hash and hash context together
 * @name: The name of the crypto hash algo
 * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. It can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
{
	int rc = 0;
	struct crypto_shash *alg = NULL;

	if (*sdesc)
		return 0;

	alg = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(alg)) {
		cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
		rc = PTR_ERR(alg);
		*sdesc = NULL;
		return rc;
	}

	*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
		crypto_free_shash(alg);
		return -ENOMEM;
	}

	(*sdesc)->tfm = alg;
	return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
 * @sdesc: Where to find the pointer to the hash TFM
 *
 * Freeing a NULL descriptor is safe.
 */
void
cifs_free_hash(struct shash_desc **sdesc)
{
	if (unlikely(!sdesc) || !*sdesc)
		return;

	if ((*sdesc)->tfm) {
		crypto_free_shash((*sdesc)->tfm);
		(*sdesc)->tfm = NULL;
	}

	kfree_sensitive(*sdesc);
	*sdesc = NULL;
}
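
/*
 * Usage sketch (illustrative, not part of the original file): computing a
 * digest with the pair above; the algorithm name and buffer names here are
 * just placeholders:
 *
 *	struct shash_desc *sdesc = NULL;
 *	u8 hash[16];
 *	int rc;
 *
 *	rc = cifs_alloc_hash("md5", &sdesc);
 *	if (rc)
 *		return rc;
 *	rc = crypto_shash_digest(sdesc, data, len, hash);
 *	cifs_free_hash(&sdesc);
 */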

void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;
}
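
/*
 * Example (illustrative, not part of the original file): for the UNC
 * "\\srv1\share", *h ends up pointing at "srv1\share" and *len is 4,
 * so the hostname is the first *len bytes at *h.
 */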

/**
 * copy_path_name - copy src path to dst, possibly truncating
 * @dst: The destination buffer
 * @src: The source name
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}
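
/*
 * Example (illustrative, not part of the original file):
 * copy_path_name(dst, "\dir\file") copies nine name bytes plus the
 * trailing nul and returns 10.
 */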

struct super_cb_data {
	void *data;
	struct super_block *sb;
};

static void tcp_super_cb(struct super_block *sb, void *arg)
{
	struct super_cb_data *sd = arg;
	struct TCP_Server_Info *server = sd->data;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;

	if (sd->sb)
		return;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);
	if (tcon->ses->server == server)
		sd->sb = sb;
}

static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
					    void *data)
{
	struct super_cb_data sd = {
		.data = data,
		.sb = NULL,
	};
	struct file_system_type **fs_type = (struct file_system_type *[]) {
		&cifs_fs_type, &smb3_fs_type, NULL,
	};

	for (; *fs_type; fs_type++) {
		iterate_supers_type(*fs_type, f, &sd);
		if (sd.sb) {
			/*
			 * Grab an active reference in order to prevent automounts (DFS links)
			 * from expiring and then freeing up our cifs superblock pointer while
			 * we're doing failover.
			 */
			cifs_sb_active(sd.sb);
			return sd.sb;
		}
	}
	return ERR_PTR(-EINVAL);
}

static void __cifs_put_super(struct super_block *sb)
{
	if (!IS_ERR_OR_NULL(sb))
		cifs_sb_deactive(sb);
}

struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
{
	return __cifs_get_super(tcp_super_cb, server);
}

void cifs_put_tcp_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}
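
/*
 * Usage sketch (illustrative, not part of the original file): pair the
 * getter and putter so the active superblock reference is always dropped:
 *
 *	struct super_block *sb = cifs_get_tcp_super(server);
 *
 *	if (!IS_ERR(sb)) {
 *		...use sb...
 *		cifs_put_tcp_super(sb);
 *	}
 */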

#ifdef CONFIG_CIFS_DFS_UPCALL
int match_target_ip(struct TCP_Server_Info *server,
		    const char *share, size_t share_len,
		    bool *result)
{
	int rc;
	char *target;
	struct sockaddr_storage ss;

	*result = false;

	target = kzalloc(share_len + 3, GFP_KERNEL);
	if (!target)
		return -ENOMEM;

	scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);

	rc = dns_resolve_server_name_to_ip(target, (struct sockaddr *)&ss, NULL);
	kfree(target);

	if (rc < 0)
		return rc;

	spin_lock(&server->srv_lock);
	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	spin_unlock(&server->srv_lock);
	cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
	return 0;
}

int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
{
	kfree(cifs_sb->prepath);

	if (prefix && *prefix) {
		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
		if (!cifs_sb->prepath)
			return -ENOMEM;

		convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	} else {
		cifs_sb->prepath = NULL;
	}

	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
	return 0;
}

/*
 * Handle weird Windows SMB server behaviour. It responds with the
 * STATUS_OBJECT_NAME_INVALID code to an SMB2 QUERY_INFO request for a
 * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
 * non-ASCII unicode symbols.
 */
int cifs_inval_name_dfs_link_error(const unsigned int xid,
				   struct cifs_tcon *tcon,
				   struct cifs_sb_info *cifs_sb,
				   const char *full_path,
				   bool *islink)
{
	struct cifs_ses *ses = tcon->ses;
	size_t len;
	char *path;
	char *ref_path;

	*islink = false;

	/*
	 * Fast path - skip check when @full_path doesn't have a prefix path to
	 * look up or tcon is not DFS.
	 */
	if (strlen(full_path) < 2 || !cifs_sb ||
	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
	    !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
		return 0;

	/*
	 * Slow path - tcon is DFS and @full_path has a prefix path, so attempt
	 * to get a referral to figure out whether it is a DFS link.
	 */
	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
	path = kmalloc(len, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
					    cifs_remap(cifs_sb));
	kfree(path);

	if (IS_ERR(ref_path)) {
		if (PTR_ERR(ref_path) != -EINVAL)
			return PTR_ERR(ref_path);
	} else {
		struct dfs_info3_param *refs = NULL;
		int num_refs = 0;

		/*
		 * XXX: we are not using dfs_cache_find() here because we might
		 * end up filling all the DFS cache and thus potentially
		 * removing cached DFS targets that the client would eventually
		 * need during failover.
		 */
		ses = CIFS_DFS_ROOT_SES(ses);
		if (ses->server->ops->get_dfs_refer &&
		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
						     &num_refs, cifs_sb->local_nls,
						     cifs_remap(cifs_sb)))
			*islink = refs[0].server_type == DFS_TYPE_LINK;
		free_dfs_info_array(refs, num_refs);
		kfree(ref_path);
	}
	return 0;
}
#endif

int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
{
	int timeout = 10;
	int rc;

	spin_lock(&server->srv_lock);
	if (server->tcpStatus != CifsNeedReconnect) {
		spin_unlock(&server->srv_lock);
		return 0;
	}
	timeout *= server->nr_targets;
	spin_unlock(&server->srv_lock);

	/*
	 * Give the demultiplex thread up to 10 seconds for each target
	 * available for reconnect -- this should be greater than the cifs
	 * socket timeout, which is 7 seconds.
	 *
	 * On "soft" mounts we wait once. Hard mounts keep retrying until
	 * the process is killed or the server comes back on-line.
	 */
	do {
		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      timeout * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			return 0;
		}
		spin_unlock(&server->srv_lock);
	} while (retry);

	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
	return -EHOSTDOWN;
}
1327