xref: /openbmc/linux/fs/nfs/nfs4session.c (revision 54f9c4d0)
/*
 * fs/nfs/nfs4session.c
 *
 * Copyright (c) 2012 Trond Myklebust <Trond.Myklebust@netapp.com>
 *
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "nfs4session.h"
#include "callback.h"

#define NFSDBG_FACILITY		NFSDBG_STATE

static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
{
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
	init_waitqueue_head(&tbl->slot_waitq);
	init_completion(&tbl->complete);
}

/*
 * nfs4_shrink_slot_table - free retired slots from the slot table
 */
static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)
{
	struct nfs4_slot **p;
	if (newsize >= tbl->max_slots)
		return;

	p = &tbl->slots;
	while (newsize--)
		p = &(*p)->next;
	while (*p) {
		struct nfs4_slot *slot = *p;

		*p = slot->next;
		kfree(slot);
		tbl->max_slots--;
	}
}
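
/*
 * The walk above keeps a pointer to the link field itself, so the list head
 * and interior links are handled identically: for example, shrinking a
 * five-slot table to newsize = 2 advances p past slots 0 and 1, then unlinks
 * and frees slots 2, 3 and 4, decrementing max_slots each time.
 */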

/**
 * nfs4_slot_tbl_drain_complete - wake waiters when drain is complete
 * @tbl: controlling slot table
 *
 */
void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
{
	if (nfs4_slot_tbl_draining(tbl))
		complete(&tbl->complete);
}

/*
 * nfs4_free_slot - free a slot and efficiently update the slot table.
 *
 * Freeing a slot is trivially done by clearing its bit in the bitmap.
 * If the freed slotid equals highest_used_slotid we update the latter
 * so that the server can size down the slot table if needed; otherwise
 * we know that highest_used_slotid is still in use.
 * Because the bitmap may contain "holes", updating highest_used_slotid
 * means scanning down from the freed slotid towards 0 for the highest
 * slotid still in use.
 * If none is found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock.
 */
void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
	u32 slotid = slot->slot_nr;

	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		u32 new_max = find_last_bit(tbl->used_slots, slotid);
		if (new_max < slotid)
			tbl->highest_used_slotid = new_max;
		else {
			tbl->highest_used_slotid = NFS4_NO_SLOT;
			nfs4_slot_tbl_drain_complete(tbl);
		}
	}
	dprintk("%s: slotid %u highest_used_slotid %u\n", __func__,
		slotid, tbl->highest_used_slotid);
}
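
/*
 * Worked example: with used_slots = 0b10011 (slots 0, 1 and 4 in use) and
 * highest_used_slotid = 4, freeing slot 4 clears bit 4 and find_last_bit()
 * relocates the highest used slot at slot 1.  Freeing slots 1 and 0 in turn
 * empties the bitmap, highest_used_slotid becomes NFS4_NO_SLOT, and any
 * pending drain of the table is completed.
 */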

static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot *slot;

	slot = kzalloc(sizeof(*slot), gfp_mask);
	if (slot) {
		slot->table = tbl;
		slot->slot_nr = slotid;
		slot->seq_nr = seq_init;
	}
	return slot;
}

static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot **p, *slot;

	p = &tbl->slots;
	for (;;) {
		if (*p == NULL) {
			*p = nfs4_new_slot(tbl, tbl->max_slots,
					seq_init, gfp_mask);
			if (*p == NULL)
				break;
			tbl->max_slots++;
		}
		slot = *p;
		if (slot->slot_nr == slotid)
			return slot;
		p = &slot->next;
	}
	return ERR_PTR(-ENOMEM);
}
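
/*
 * The slot list is kept ordered by slot_nr and is only ever extended at the
 * tail, so a newly created slot always gets slot_nr == tbl->max_slots.  The
 * loop keeps appending until the requested slotid exists, and only returns
 * ERR_PTR(-ENOMEM) if an allocation fails before that point.
 */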

static void nfs4_lock_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	u32 slotid = slot->slot_nr;

	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
	    tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	slot->generation = tbl->generation;
}

/*
 * nfs4_try_to_lock_slot - given a slot, try to allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
	if (nfs4_test_locked_slot(tbl, slot->slot_nr))
		return false;
	nfs4_lock_slot(tbl, slot);
	return true;
}

/*
 * nfs4_lookup_slot - Find a slot but don't allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
	if (slotid <= tbl->max_slotid)
		return nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
	return ERR_PTR(-E2BIG);
}

static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
		u32 *seq_nr)
	__must_hold(&tbl->slot_tbl_lock)
{
	struct nfs4_slot *slot;

	slot = nfs4_lookup_slot(tbl, slotid);
	if (IS_ERR(slot))
		return PTR_ERR(slot);
	*seq_nr = slot->seq_nr;
	return 0;
}

/*
 * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
 *
 * Given a slot table, slot id and sequence number, determine if the
 * RPC call in question is still in flight. This function is mainly
 * intended for use by the callback channel.
 */
static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_nr)
{
	u32 cur_seq;
	bool ret = false;

	spin_lock(&tbl->slot_tbl_lock);
	if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
	    cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
		ret = true;
	spin_unlock(&tbl->slot_tbl_lock);
	return ret;
}

/*
 * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
 *
 * Given a slot table, slot id and sequence number, wait until the
 * corresponding RPC call completes. This function is mainly
 * intended for use by the callback channel.
 */
int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_nr,
		unsigned long timeout)
{
	if (wait_event_timeout(tbl->slot_waitq,
			!nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
			timeout) == 0)
		return -ETIMEDOUT;
	return 0;
}
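
/*
 * Illustrative use only (the real caller lives in the callback code): a
 * backchannel handler that must not race with an in-flight forechannel
 * request could wait for up to a second before asking the server to retry:
 *
 *	if (nfs4_slot_wait_on_seqid(tbl, slotid, seq_nr,
 *				    msecs_to_jiffies(1000)) == -ETIMEDOUT)
 *		return htonl(NFS4ERR_DELAY);
 */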

/*
 * nfs4_alloc_slot - efficiently look for a free slot
 *
 * nfs4_alloc_slot looks for an unset bit in the used_slots bitmap.
 * If one is found, we mark the slot as used and update highest_used_slotid
 * so that the sequence operation args can be set up accordingly.
 *
 * Note: must be called with the slot_tbl_lock held.
 */
struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *ret = ERR_PTR(-EBUSY);
	u32 slotid;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slotid + 1);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
	if (slotid <= tbl->max_slotid) {
		ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
		if (!IS_ERR(ret))
			nfs4_lock_slot(tbl, ret);
	}
	dprintk("<-- %s used_slots=%04lx highest_used=%u slotid=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		!IS_ERR(ret) ? ret->slot_nr : NFS4_NO_SLOT);
	return ret;
}
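
/*
 * Both nfs4_lookup_slot() and nfs4_alloc_slot() run under the slot_tbl_lock
 * spinlock, so any slot that has to be created on the fly is allocated with
 * GFP_NOWAIT; a failed atomic allocation is simply propagated to the caller
 * as an ERR_PTR() rather than retried here.
 */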

static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl,
		 u32 max_reqs, u32 ivalue)
{
	if (max_reqs <= tbl->max_slots)
		return 0;
	if (!IS_ERR(nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS)))
		return 0;
	return -ENOMEM;
}

static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl,
		u32 server_highest_slotid,
		u32 ivalue)
{
	struct nfs4_slot **p;

	nfs4_shrink_slot_table(tbl, server_highest_slotid + 1);
	p = &tbl->slots;
	while (*p) {
		(*p)->seq_nr = ivalue;
		(*p)->interrupted = 0;
		p = &(*p)->next;
	}
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	tbl->target_highest_slotid = server_highest_slotid;
	tbl->server_highest_slotid = server_highest_slotid;
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	tbl->max_slotid = server_highest_slotid;
}

/*
 * (re)Initialise a slot table
 */
static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl,
		u32 max_reqs, u32 ivalue)
{
	int ret;

	dprintk("--> %s: max_reqs=%u, tbl->max_slots %u\n", __func__,
		max_reqs, tbl->max_slots);

	if (max_reqs > NFS4_MAX_SLOT_TABLE)
		max_reqs = NFS4_MAX_SLOT_TABLE;

	ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue);
	if (ret)
		goto out;

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue);
	spin_unlock(&tbl->slot_tbl_lock);

	dprintk("%s: tbl=%p slots=%p max_slots=%u\n", __func__,
		tbl, tbl->slots, tbl->max_slots);
out:
	dprintk("<-- %s: return %d\n", __func__, ret);
	return ret;
}

/*
 * nfs4_release_slot_table - release all slot table entries
 */
static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_shrink_slot_table(tbl, 0);
}

/**
 * nfs4_shutdown_slot_table - release resources attached to a slot table
 * @tbl: slot table to shut down
 *
 */
void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_release_slot_table(tbl);
	rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
}

/**
 * nfs4_setup_slot_table - prepare a stand-alone slot table for use
 * @tbl: slot table to set up
 * @max_reqs: maximum number of requests allowed
 * @queue: name to give RPC wait queue
 *
 * Returns zero on success, or a negative errno.
 */
int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs,
		const char *queue)
{
	nfs4_init_slot_table(tbl, queue);
	return nfs4_realloc_slot_table(tbl, max_reqs, 0);
}
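
/*
 * Example (illustrative only, not an in-tree caller): a stand-alone table
 * that is not attached to a session could be managed like this:
 *
 *	struct nfs4_slot_table tbl;
 *	int err;
 *
 *	err = nfs4_setup_slot_table(&tbl, 16, "example Slot table");
 *	if (err)
 *		return err;
 *	...
 *	nfs4_shutdown_slot_table(&tbl);
 */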

static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
{
	struct nfs4_sequence_args *args = task->tk_msg.rpc_argp;
	struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
	struct nfs4_slot *slot = pslot;
	struct nfs4_slot_table *tbl = slot->table;

	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		return false;
	slot->generation = tbl->generation;
	args->sa_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_slot = slot;
	res->sr_status_flags = 0;
	res->sr_status = 1;
	return true;
}

static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot))
		return true;
	return false;
}

bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (slot->slot_nr > tbl->max_slotid)
		return false;
	return __nfs41_wake_and_assign_slot(tbl, slot);
}

static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *slot = nfs4_alloc_slot(tbl);
	if (!IS_ERR(slot)) {
		bool ret = __nfs41_wake_and_assign_slot(tbl, slot);
		if (ret)
			return ret;
		nfs4_free_slot(tbl, slot);
	}
	return false;
}

void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
{
	for (;;) {
		if (!nfs41_try_wake_next_slot_table_entry(tbl))
			break;
	}
}
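
/*
 * nfs41_wake_slot_table() keeps allocating a free slot and handing it to the
 * next task queued on slot_tbl_waitq.  The loop stops as soon as either no
 * free slot is left or no task is waiting; in the latter case the slot that
 * was allocated for the final attempt is returned to the table.
 */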

#if defined(CONFIG_NFS_V4_1)

static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	u32 max_slotid;

	max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, target_highest_slotid);
	if (max_slotid > tbl->server_highest_slotid)
		max_slotid = tbl->server_highest_slotid;
	if (max_slotid > tbl->target_highest_slotid)
		max_slotid = tbl->target_highest_slotid;
	tbl->max_slotid = max_slotid;
	nfs41_wake_slot_table(tbl);
}

/* Update the client's idea of target_highest_slotid */
static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	if (tbl->target_highest_slotid == target_highest_slotid)
		return;
	tbl->target_highest_slotid = target_highest_slotid;
	tbl->generation++;
}

void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	spin_lock(&tbl->slot_tbl_lock);
	nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	nfs41_set_max_slotid_locked(tbl, target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}

static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl,
		u32 highest_slotid)
{
	if (tbl->server_highest_slotid == highest_slotid)
		return;
	if (tbl->highest_used_slotid > highest_slotid)
		return;
	/* Deallocate slots */
	nfs4_shrink_slot_table(tbl, highest_slotid + 1);
	tbl->server_highest_slotid = highest_slotid;
}

static s32 nfs41_derivative_target_slotid(s32 s1, s32 s2)
{
	s1 -= s2;
	if (s1 == 0)
		return 0;
	if (s1 < 0)
		return (s1 - 1) >> 1;
	return (s1 + 1) >> 1;
}
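
/*
 * The difference is roughly halved: e.g. nfs41_derivative_target_slotid(9, 4)
 * is (5 + 1) >> 1 = 3, while nfs41_derivative_target_slotid(4, 9) is
 * (-5 - 1) >> 1 = -3 with an arithmetic shift.  Halving shrinks the magnitude
 * while preserving the sign of the trend for the outlier test below.
 */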

static int nfs41_sign_s32(s32 s1)
{
	if (s1 > 0)
		return 1;
	if (s1 < 0)
		return -1;
	return 0;
}

static bool nfs41_same_sign_or_zero_s32(s32 s1, s32 s2)
{
	if (!s1 || !s2)
		return true;
	return nfs41_sign_s32(s1) == nfs41_sign_s32(s2);
}

/* Try to eliminate outliers by checking for sharp changes in the
 * first and second derivatives of the target_highest_slotid
 */
static bool nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl,
		u32 new_target)
{
	s32 d_target, d2_target;
	bool ret = true;

	d_target = nfs41_derivative_target_slotid(new_target,
			tbl->target_highest_slotid);
	d2_target = nfs41_derivative_target_slotid(d_target,
			tbl->d_target_highest_slotid);
	/* Is first derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d_target, tbl->d_target_highest_slotid))
		ret = false;
	/* Is second derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d2_target, tbl->d2_target_highest_slotid))
		ret = false;
	tbl->d_target_highest_slotid = d_target;
	tbl->d2_target_highest_slotid = d2_target;
	return ret;
}
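
/*
 * A new target is therefore only treated as an outlier when both derivatives
 * flip sign relative to the stored history.  For instance, if the target has
 * been rising (d_target and d2_target positive) and the server suddenly
 * advertises a much smaller value, both new derivatives come out negative and
 * the value is ignored; if the same small value is seen again on the next
 * reply, the derivatives now agree with the recorded ones and it is accepted.
 */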

void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot,
		struct nfs4_sequence_res *res)
{
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_is_outlier_target_slotid(tbl, res->sr_target_highest_slotid))
		nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid);
	if (tbl->generation == slot->generation)
		nfs41_set_server_slotid_locked(tbl, res->sr_highest_slotid);
	nfs41_set_max_slotid_locked(tbl, res->sr_target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}
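
/*
 * The update above happens in three steps: the client's target is moved first
 * (unless the reply looks like an outlier), the server's highest slotid is
 * only honoured if the slot was issued in the current generation of the
 * table, and finally max_slotid is clamped against both values and any tasks
 * waiting for a slot are woken.
 */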

static void nfs4_release_session_slot_tables(struct nfs4_session *session)
{
	nfs4_release_slot_table(&session->fc_slot_table);
	nfs4_release_slot_table(&session->bc_slot_table);
}

/*
 * Initialize or reset the forechannel and backchannel tables
 */
int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
	struct nfs4_slot_table *tbl;
	int status;

	dprintk("--> %s\n", __func__);
	/* Fore channel */
	tbl = &ses->fc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
	if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
		return status;
	/* Back channel */
	tbl = &ses->bc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
	if (status && tbl->slots == NULL)
		/* Fore and back channel share a connection so get
		 * both slot tables or neither */
		nfs4_release_session_slot_tables(ses);
	return status;
}

struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
	struct nfs4_session *session;

	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
	if (!session)
		return NULL;

	nfs4_init_slot_table(&session->fc_slot_table, "ForeChannel Slot table");
	nfs4_init_slot_table(&session->bc_slot_table, "BackChannel Slot table");
	session->session_state = 1<<NFS4_SESSION_INITING;

	session->clp = clp;
	return session;
}

static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
{
	nfs4_shutdown_slot_table(&session->fc_slot_table);
	nfs4_shutdown_slot_table(&session->bc_slot_table);
}

void nfs4_destroy_session(struct nfs4_session *session)
{
	struct rpc_xprt *xprt;
	struct rpc_cred *cred;

	cred = nfs4_get_clid_cred(session->clp);
	nfs4_proc_destroy_session(session, cred);
	if (cred)
		put_rpccred(cred);

	rcu_read_lock();
	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
	rcu_read_unlock();
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, xprt);
	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_session_slot_tables(session);
	kfree(session);
}

/*
 * With sessions, the client is not marked ready until after a
 * successful EXCHANGE_ID and CREATE_SESSION.
 *
 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
 * other versions of NFS can be tried.
 */
static int nfs41_check_session_ready(struct nfs_client *clp)
{
	int ret;

	if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
		ret = nfs4_client_recover_expired_lease(clp);
		if (ret)
			return ret;
	}
	if (clp->cl_cons_state < NFS_CS_READY)
		return -EPROTONOSUPPORT;
	smp_rmb();
	return 0;
}

int nfs4_init_session(struct nfs_client *clp)
{
	if (!nfs4_has_session(clp))
		return 0;

	clear_bit(NFS4_SESSION_INITING, &clp->cl_session->session_state);
	return nfs41_check_session_ready(clp);
}

int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
	struct nfs4_session *session = clp->cl_session;
	int ret;

	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
		/*
		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead make the
		 * DS lease equal to the MDS lease.
		 */
		clp->cl_lease_time = lease_time;
		clp->cl_last_renewal = jiffies;
	}
	spin_unlock(&clp->cl_lock);

	ret = nfs41_check_session_ready(clp);
	if (ret)
		return ret;
	/* Test for the DS role */
	if (!is_ds_client(clp))
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);

#endif	/* defined(CONFIG_NFS_V4_1) */