// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/nfs/nfs4session.c
 *
 * Copyright (c) 2012 Trond Myklebust <Trond.Myklebust@netapp.com>
 *
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "nfs4session.h"
#include "callback.h"

#define NFSDBG_FACILITY		NFSDBG_STATE

static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
{
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
	init_waitqueue_head(&tbl->slot_waitq);
	init_completion(&tbl->complete);
}

/*
 * nfs4_shrink_slot_table - free retired slots from the slot table
 */
static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)
{
	struct nfs4_slot **p;
	if (newsize >= tbl->max_slots)
		return;

	p = &tbl->slots;
	while (newsize--)
		p = &(*p)->next;
	while (*p) {
		struct nfs4_slot *slot = *p;

		*p = slot->next;
		kfree(slot);
		tbl->max_slots--;
	}
}
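
/*
 * Note on nfs4_shrink_slot_table(): tbl->slots is a singly linked list
 * kept in slot_nr order, so the first loop simply advances p past the
 * first newsize entries that survive, and the second loop unlinks and
 * kfree()s everything that remains, decrementing tbl->max_slots as it
 * goes.  For example (illustrative only), shrinking a 4-entry table to
 * newsize == 2 keeps slots 0-1 and frees slots 2-3.
 */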

/**
 * nfs4_slot_tbl_drain_complete - wake waiters when drain is complete
 * @tbl: controlling slot table
 *
 */
void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
{
	if (nfs4_slot_tbl_draining(tbl))
		complete(&tbl->complete);
}

/*
 * nfs4_free_slot - free a slot and efficiently update the slot table.
 *
 * Freeing a slot is trivially done by clearing its bit in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server can size down the slot table if needed;
 * otherwise we know that highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap,
 * so we need to scan down from highest_used_slotid to 0 looking for the
 * now-highest slotid in use.
 * If none is found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock.
 */
void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
	u32 slotid = slot->slot_nr;

	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		u32 new_max = find_last_bit(tbl->used_slots, slotid);
		if (new_max < slotid)
			tbl->highest_used_slotid = new_max;
		else {
			tbl->highest_used_slotid = NFS4_NO_SLOT;
			nfs4_slot_tbl_drain_complete(tbl);
		}
	}
	dprintk("%s: slotid %u highest_used_slotid %u\n", __func__,
		slotid, tbl->highest_used_slotid);
}
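
/*
 * Worked example (illustrative only): with used_slots = 0b1011 and
 * highest_used_slotid == 3, freeing slot 3 makes find_last_bit() return
 * 1, so highest_used_slotid drops to 1.  If slots 0 and 1 were also
 * free, find_last_bit() would return the search size (slotid) itself,
 * the "new_max < slotid" test would fail, and highest_used_slotid would
 * become NFS4_NO_SLOT, which in turn may complete a pending drain via
 * nfs4_slot_tbl_drain_complete().
 */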

static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot *slot;

	slot = kzalloc(sizeof(*slot), gfp_mask);
	if (slot) {
		slot->table = tbl;
		slot->slot_nr = slotid;
		slot->seq_nr = seq_init;
		slot->seq_nr_highest_sent = seq_init;
		slot->seq_nr_last_acked = seq_init - 1;
	}
	return slot;
}

static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot **p, *slot;

	p = &tbl->slots;
	for (;;) {
		if (*p == NULL) {
			*p = nfs4_new_slot(tbl, tbl->max_slots,
					seq_init, gfp_mask);
			if (*p == NULL)
				break;
			tbl->max_slots++;
		}
		slot = *p;
		if (slot->slot_nr == slotid)
			return slot;
		p = &slot->next;
	}
	return ERR_PTR(-ENOMEM);
}
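
/*
 * Note on nfs4_find_or_create_slot(): the slot list is grown strictly at
 * the tail, and each new entry is created with slot_nr == tbl->max_slots,
 * so the list stays sorted by slot_nr with no gaps.  Asking for a slotid
 * beyond the current end therefore allocates every missing slot up to and
 * including the one requested, or fails with -ENOMEM part-way through
 * (any slots already added are kept and accounted for in max_slots).
 */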

static void nfs4_lock_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	u32 slotid = slot->slot_nr;

	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
	    tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	slot->generation = tbl->generation;
}

/*
 * nfs4_try_to_lock_slot - Given a slot try to allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
	if (nfs4_test_locked_slot(tbl, slot->slot_nr))
		return false;
	nfs4_lock_slot(tbl, slot);
	return true;
}

/*
 * nfs4_lookup_slot - Find a slot but don't allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
	if (slotid <= tbl->max_slotid)
		return nfs4_find_or_create_slot(tbl, slotid, 0, GFP_NOWAIT);
	return ERR_PTR(-E2BIG);
}
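
/*
 * Note on nfs4_lookup_slot(): despite the name, the lookup may still have
 * to allocate the nfs4_slot structure itself if it has not been created
 * yet; GFP_NOWAIT is used because the caller holds the slot_tbl_lock
 * spinlock.  "Don't allocate" in the comment above refers to the slot not
 * being marked as used in the bitmap.
 */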

static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
		u32 *seq_nr)
	__must_hold(&tbl->slot_tbl_lock)
{
	struct nfs4_slot *slot;
	int ret;

	slot = nfs4_lookup_slot(tbl, slotid);
	ret = PTR_ERR_OR_ZERO(slot);
	if (!ret)
		*seq_nr = slot->seq_nr;

	return ret;
}

/*
 * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
 *
 * Given a slot table, slot id and sequence number, determine if the
 * RPC call in question is still in flight. This function is mainly
 * intended for use by the callback channel.
 */
static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_nr)
{
	u32 cur_seq = 0;
	bool ret = false;

	spin_lock(&tbl->slot_tbl_lock);
	if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
	    cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
		ret = true;
	spin_unlock(&tbl->slot_tbl_lock);
	return ret;
}

/*
 * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
 *
 * Given a slot table, slot id and sequence number, wait until the
 * corresponding RPC call completes. This function is mainly
 * intended for use by the callback channel.
 */
int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_nr,
		unsigned long timeout)
{
	if (wait_event_timeout(tbl->slot_waitq,
			!nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
			timeout) == 0)
		return -ETIMEDOUT;
	return 0;
}
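
/*
 * Usage sketch (hypothetical caller, for illustration only): callback
 * processing that needs a referred-to forechannel call to have finished
 * before it answers could wait for a bounded time, e.g.:
 *
 *	if (nfs4_slot_wait_on_seqid(tbl, slotid, seq_nr, HZ >> 1) < 0)
 *		return false;	// request still appeared to be in flight
 *
 * The timeout is in jiffies, as with any wait_event_timeout() user, and
 * the return value is 0 or -ETIMEDOUT.
 */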

/*
 * nfs4_alloc_slot - efficiently look for a free slot
 *
 * nfs4_alloc_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and return the slot so that the caller can set up the sequence
 * operation args.
 *
 * Note: must be called while holding the slot_tbl_lock.
 */
struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *ret = ERR_PTR(-EBUSY);
	u32 slotid;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slotid + 1);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
	if (slotid <= tbl->max_slotid) {
		ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
		if (!IS_ERR(ret))
			nfs4_lock_slot(tbl, ret);
	}
	dprintk("<-- %s used_slots=%04lx highest_used=%u slotid=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		!IS_ERR(ret) ? ret->slot_nr : NFS4_NO_SLOT);
	return ret;
}
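
/*
 * Note on nfs4_alloc_slot(): find_first_zero_bit() is asked to search
 * only the first (max_slotid + 1) bits, so when every permitted slot is
 * busy it returns max_slotid + 1 and the function falls through to
 * return ERR_PTR(-EBUSY).  Newly created slots are seeded with
 * seq_init == 1, matching the initial sequence id a fresh forechannel
 * slot is expected to carry.
 */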

static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl,
		 u32 max_reqs, u32 ivalue)
{
	if (max_reqs <= tbl->max_slots)
		return 0;
	if (!IS_ERR(nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS)))
		return 0;
	return -ENOMEM;
}

static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl,
		u32 server_highest_slotid,
		u32 ivalue)
{
	struct nfs4_slot **p;

	nfs4_shrink_slot_table(tbl, server_highest_slotid + 1);
	p = &tbl->slots;
	while (*p) {
		(*p)->seq_nr = ivalue;
		(*p)->seq_nr_highest_sent = ivalue;
		(*p)->seq_nr_last_acked = ivalue - 1;
		p = &(*p)->next;
	}
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	tbl->target_highest_slotid = server_highest_slotid;
	tbl->server_highest_slotid = server_highest_slotid;
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	tbl->max_slotid = server_highest_slotid;
}
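
/*
 * Note on nfs4_reset_slot_table(): every surviving slot is re-seeded so
 * that seq_nr == seq_nr_highest_sent == ivalue and seq_nr_last_acked ==
 * ivalue - 1, i.e. "nothing sent on this slot yet".  The target, server
 * and max slotid values all collapse to server_highest_slotid, and the
 * derivative history used by the outlier filter below is cleared.
 */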

/*
 * (re)Initialise a slot table
 */
static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl,
		u32 max_reqs, u32 ivalue)
{
	int ret;

	dprintk("--> %s: max_reqs=%u, tbl->max_slots %u\n", __func__,
		max_reqs, tbl->max_slots);

	if (max_reqs > NFS4_MAX_SLOT_TABLE)
		max_reqs = NFS4_MAX_SLOT_TABLE;

	ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue);
	if (ret)
		goto out;

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue);
	spin_unlock(&tbl->slot_tbl_lock);

	dprintk("%s: tbl=%p slots=%p max_slots=%u\n", __func__,
		tbl, tbl->slots, tbl->max_slots);
out:
	dprintk("<-- %s: return %d\n", __func__, ret);
	return ret;
}

/*
 * nfs4_release_slot_table - release all slot table entries
 */
static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_shrink_slot_table(tbl, 0);
}

/**
 * nfs4_shutdown_slot_table - release resources attached to a slot table
 * @tbl: slot table to shut down
 *
 */
void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_release_slot_table(tbl);
	rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
}

/**
 * nfs4_setup_slot_table - prepare a stand-alone slot table for use
 * @tbl: slot table to set up
 * @max_reqs: maximum number of requests allowed
 * @queue: name to give RPC wait queue
 *
 * Returns zero on success, or a negative errno.
 */
int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs,
		const char *queue)
{
	nfs4_init_slot_table(tbl, queue);
	return nfs4_realloc_slot_table(tbl, max_reqs, 0);
}
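
/*
 * Usage sketch (illustrative only, not a real in-tree caller): a
 * stand-alone table that is not tied to an NFSv4.1 session could be
 * managed like this:
 *
 *	struct nfs4_slot_table tbl;	// hypothetical embedded table
 *	int err;
 *
 *	err = nfs4_setup_slot_table(&tbl, 16, "Example Slot table");
 *	if (err)
 *		return err;
 *	...
 *	nfs4_shutdown_slot_table(&tbl);
 *
 * The NFSv4.0 client's per-transport slot table is, for example, managed
 * with this same pair of calls.
 */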

static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
{
	struct nfs4_sequence_args *args = task->tk_msg.rpc_argp;
	struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
	struct nfs4_slot *slot = pslot;
	struct nfs4_slot_table *tbl = slot->table;

	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		return false;
	slot->generation = tbl->generation;
	args->sa_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_slot = slot;
	res->sr_status_flags = 0;
	res->sr_status = 1;
	return true;
}
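
/*
 * Note on nfs41_assign_slot(): this runs as the rpc_wake_up_first()
 * callback below, handing a freshly freed slot directly to the next
 * queued task instead of releasing it and re-allocating.  sr_status is
 * preset to 1, a value no decoded SEQUENCE reply will leave behind, so
 * later code can tell "never got a reply" apart from a real status.  A
 * draining table only accepts privileged (state-recovery) requests.
 */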

static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot))
		return true;
	return false;
}

bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (slot->slot_nr > tbl->max_slotid)
		return false;
	return __nfs41_wake_and_assign_slot(tbl, slot);
}

static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *slot = nfs4_alloc_slot(tbl);
	if (!IS_ERR(slot)) {
		bool ret = __nfs41_wake_and_assign_slot(tbl, slot);
		if (ret)
			return ret;
		nfs4_free_slot(tbl, slot);
	}
	return false;
}

void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
{
	for (;;) {
		if (!nfs41_try_wake_next_slot_table_entry(tbl))
			break;
	}
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	u32 max_slotid;

	max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, target_highest_slotid);
	if (max_slotid > tbl->server_highest_slotid)
		max_slotid = tbl->server_highest_slotid;
	if (max_slotid > tbl->target_highest_slotid)
		max_slotid = tbl->target_highest_slotid;
	tbl->max_slotid = max_slotid;
	nfs41_wake_slot_table(tbl);
}

/* Update the client's idea of target_highest_slotid */
static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	if (tbl->target_highest_slotid == target_highest_slotid)
		return;
	tbl->target_highest_slotid = target_highest_slotid;
	tbl->generation++;
}

void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	spin_lock(&tbl->slot_tbl_lock);
	nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	nfs41_set_max_slotid_locked(tbl, target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}

static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl,
		u32 highest_slotid)
{
	if (tbl->server_highest_slotid == highest_slotid)
		return;
	if (tbl->highest_used_slotid > highest_slotid)
		return;
	/* Deallocate slots */
	nfs4_shrink_slot_table(tbl, highest_slotid + 1);
	tbl->server_highest_slotid = highest_slotid;
}

static s32 nfs41_derivative_target_slotid(s32 s1, s32 s2)
{
	s1 -= s2;
	if (s1 == 0)
		return 0;
	if (s1 < 0)
		return (s1 - 1) >> 1;
	return (s1 + 1) >> 1;
}
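
/*
 * Worked example (illustrative only): nfs41_derivative_target_slotid()
 * returns roughly half of (s1 - s2), biased away from zero.  For s1 = 10,
 * s2 = 3 the difference is 7 and the result is (7 + 1) >> 1 = 4; for
 * s1 = 3, s2 = 10 the difference is -7 and the result is
 * (-7 - 1) >> 1 = -4.  This damped "derivative" feeds the outlier test
 * below.
 */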

static int nfs41_sign_s32(s32 s1)
{
	if (s1 > 0)
		return 1;
	if (s1 < 0)
		return -1;
	return 0;
}

static bool nfs41_same_sign_or_zero_s32(s32 s1, s32 s2)
{
	if (!s1 || !s2)
		return true;
	return nfs41_sign_s32(s1) == nfs41_sign_s32(s2);
}

/* Try to eliminate outliers by checking for sharp changes in the
 * first and second derivatives of the target_highest_slotid
 */
static bool nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl,
		u32 new_target)
{
	s32 d_target, d2_target;
	bool ret = true;

	d_target = nfs41_derivative_target_slotid(new_target,
			tbl->target_highest_slotid);
	d2_target = nfs41_derivative_target_slotid(d_target,
			tbl->d_target_highest_slotid);
	/* Is first derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d_target, tbl->d_target_highest_slotid))
		ret = false;
	/* Is second derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d2_target, tbl->d2_target_highest_slotid))
		ret = false;
	tbl->d_target_highest_slotid = d_target;
	tbl->d2_target_highest_slotid = d2_target;
	return ret;
}
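
/*
 * Note on nfs41_is_outlier_target_slotid(): a new target is treated as an
 * outlier only if both the first and the second derivative flip sign
 * relative to the stored history (a zero on either side counts as "same
 * sign").  For example (illustrative only), if the target has been
 * growing steadily and the server suddenly advertises a much smaller
 * value before resuming growth, that single dip can be ignored rather
 * than immediately shrinking the client's target.  The history is
 * updated unconditionally, so a change that persists will be accepted on
 * a subsequent reply.
 */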

void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot,
		struct nfs4_sequence_res *res)
{
	u32 target_highest_slotid = min(res->sr_target_highest_slotid,
					NFS4_MAX_SLOTID);
	u32 highest_slotid = min(res->sr_highest_slotid, NFS4_MAX_SLOTID);

	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_is_outlier_target_slotid(tbl, target_highest_slotid))
		nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
	if (tbl->generation == slot->generation)
		nfs41_set_server_slotid_locked(tbl, highest_slotid);
	nfs41_set_max_slotid_locked(tbl, target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}
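
/*
 * Note on nfs41_update_target_slotid(): both values from the SEQUENCE
 * reply are clamped to NFS4_MAX_SLOTID before use.  The server's
 * sr_highest_slotid is only applied if the slot's generation still
 * matches the table's, i.e. the reply was not overtaken by a target
 * change that bumped tbl->generation in the meantime, so that a stale
 * reply does not shrink the table.
 */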

static void nfs4_release_session_slot_tables(struct nfs4_session *session)
{
	nfs4_release_slot_table(&session->fc_slot_table);
	nfs4_release_slot_table(&session->bc_slot_table);
}

/*
 * Initialize or reset the forechannel and backchannel tables
 */
int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
	struct nfs4_slot_table *tbl;
	int status;

	dprintk("--> %s\n", __func__);
	/* Fore channel */
	tbl = &ses->fc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
	if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
		return status;
	/* Back channel */
	tbl = &ses->bc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
	if (status && tbl->slots == NULL)
		/* Fore and back channel share a connection so get
		 * both slot tables or neither */
		nfs4_release_session_slot_tables(ses);
	return status;
}
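
/*
 * Note on nfs4_setup_session_slot_tables(): the forechannel table is
 * seeded with ivalue == 1, matching the initial slot sequence id a new
 * session starts from, while the backchannel table is seeded with 0,
 * apparently so that the server's first callback request on a slot
 * (seqid 1) lines up with the callback code's replay checks.  The
 * backchannel table is only set up when the session flags include
 * SESSION4_BACK_CHAN.
 */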

struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
	struct nfs4_session *session;

	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
	if (!session)
		return NULL;

	nfs4_init_slot_table(&session->fc_slot_table, "ForeChannel Slot table");
	nfs4_init_slot_table(&session->bc_slot_table, "BackChannel Slot table");
	session->session_state = 1<<NFS4_SESSION_INITING;

	session->clp = clp;
	return session;
}

static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
{
	nfs4_shutdown_slot_table(&session->fc_slot_table);
	nfs4_shutdown_slot_table(&session->bc_slot_table);
}

void nfs4_destroy_session(struct nfs4_session *session)
{
	struct rpc_xprt *xprt;
	const struct cred *cred;

	cred = nfs4_get_clid_cred(session->clp);
	nfs4_proc_destroy_session(session, cred);
	put_cred(cred);

	rcu_read_lock();
	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
	rcu_read_unlock();
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, xprt);
	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_session_slot_tables(session);
	kfree(session);
}

/*
 * With sessions, the client is not marked ready until after a
 * successful EXCHANGE_ID and CREATE_SESSION.
 *
 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
 * other versions of NFS can be tried.
 */
static int nfs41_check_session_ready(struct nfs_client *clp)
{
	int ret;

	if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
		ret = nfs4_client_recover_expired_lease(clp);
		if (ret)
			return ret;
	}
	if (clp->cl_cons_state < NFS_CS_READY)
		return -EPROTONOSUPPORT;
	smp_rmb();
	return 0;
}

int nfs4_init_session(struct nfs_client *clp)
{
	if (!nfs4_has_session(clp))
		return 0;

	clear_bit(NFS4_SESSION_INITING, &clp->cl_session->session_state);
	return nfs41_check_session_ready(clp);
}

int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
	struct nfs4_session *session = clp->cl_session;
	int ret;

	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
		/*
		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
		 * DS lease equal to the MDS lease.
		 */
		clp->cl_lease_time = lease_time;
		clp->cl_last_renewal = jiffies;
	}
	spin_unlock(&clp->cl_lock);

	ret = nfs41_check_session_ready(clp);
	if (ret)
		return ret;
	/* Test for the DS role */
	if (!is_ds_client(clp))
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);

#endif	/* defined(CONFIG_NFS_V4_1) */