xref: /openbmc/linux/fs/nfsd/nfs4layouts.c (revision 5927145e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2014 Christoph Hellwig.
4  */
5 #include <linux/blkdev.h>
6 #include <linux/kmod.h>
7 #include <linux/file.h>
8 #include <linux/jhash.h>
9 #include <linux/sched.h>
10 #include <linux/sunrpc/addr.h>
11 
12 #include "pnfs.h"
13 #include "netns.h"
14 #include "trace.h"
15 
16 #define NFSDDBG_FACILITY                NFSDDBG_PNFS
17 
18 struct nfs4_layout {
19 	struct list_head		lo_perstate;
20 	struct nfs4_layout_stateid	*lo_state;
21 	struct nfsd4_layout_seg		lo_seg;
22 };
23 
24 static struct kmem_cache *nfs4_layout_cache;
25 static struct kmem_cache *nfs4_layout_stateid_cache;
26 
27 static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
28 static const struct lock_manager_operations nfsd4_layouts_lm_ops;
29 
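/*
 * Table of pNFS layout implementations, indexed by the protocol's LAYOUT_*
 * constants.  Only the types enabled in the kernel configuration get a
 * non-NULL entry; nfsd4_setup_layout_type() below ensures that only
 * configured types are ever advertised to (and thus requested by) clients.
 */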
30 const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] =  {
31 #ifdef CONFIG_NFSD_FLEXFILELAYOUT
32 	[LAYOUT_FLEX_FILES]	= &ff_layout_ops,
33 #endif
34 #ifdef CONFIG_NFSD_BLOCKLAYOUT
35 	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
36 #endif
37 #ifdef CONFIG_NFSD_SCSILAYOUT
38 	[LAYOUT_SCSI]		= &scsi_layout_ops,
39 #endif
40 };
41 
42 /* pNFS device ID to export fsid mapping */
43 #define DEVID_HASH_BITS	8
44 #define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
45 #define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
46 static u64 nfsd_devid_seq = 1;
47 static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
48 static DEFINE_SPINLOCK(nfsd_devid_lock);
49 
50 static inline u32 devid_hashfn(u64 idx)
51 {
52 	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
53 }
54 
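/*
 * Allocate (or reuse) the fsid -> device index mapping for an export.  The
 * index becomes the fsid_idx part of every device ID handed out for this
 * export.  Entries are hashed by index, shared between exports with the
 * same fsid, and only freed at module exit (nfsd4_exit_pnfs()), so
 * ex_devid_map remains valid for the lifetime of the export.  Allocation
 * failure is handled by the caller, nfsd4_set_deviceid(), which returns
 * -ENOMEM if the map is still missing afterwards.
 */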
55 static void
56 nfsd4_alloc_devid_map(const struct svc_fh *fhp)
57 {
58 	const struct knfsd_fh *fh = &fhp->fh_handle;
59 	size_t fsid_len = key_len(fh->fh_fsid_type);
60 	struct nfsd4_deviceid_map *map, *old;
61 	int i;
62 
63 	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
64 	if (!map)
65 		return;
66 
67 	map->fsid_type = fh->fh_fsid_type;
68 	memcpy(&map->fsid, fh->fh_fsid, fsid_len);
69 
70 	spin_lock(&nfsd_devid_lock);
71 	if (fhp->fh_export->ex_devid_map)
72 		goto out_unlock;
73 
74 	for (i = 0; i < DEVID_HASH_SIZE; i++) {
75 		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
76 			if (old->fsid_type != fh->fh_fsid_type)
77 				continue;
78 			if (memcmp(old->fsid, fh->fh_fsid,
79 					key_len(old->fsid_type)))
80 				continue;
81 
82 			fhp->fh_export->ex_devid_map = old;
83 			goto out_unlock;
84 		}
85 	}
86 
87 	map->idx = nfsd_devid_seq++;
88 	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
89 	fhp->fh_export->ex_devid_map = map;
90 	map = NULL;
91 
92 out_unlock:
93 	spin_unlock(&nfsd_devid_lock);
94 	kfree(map);
95 }
96 
97 struct nfsd4_deviceid_map *
98 nfsd4_find_devid_map(int idx)
99 {
100 	struct nfsd4_deviceid_map *map, *ret = NULL;
101 
102 	rcu_read_lock();
103 	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
104 		if (map->idx == idx)
105 			ret = map;
106 	rcu_read_unlock();
107 
108 	return ret;
109 }
110 
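/*
 * Build the device ID that is handed out together with a layout: the fsid
 * index allocated above plus the generation supplied by the layout driver,
 * with the padding zeroed.  Clients treat the ID as opaque; the split into
 * fsid_idx/generation only matters to nfsd when it looks the device up
 * again via nfsd4_find_devid_map().
 */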
111 int
112 nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
113 		u32 device_generation)
114 {
115 	if (!fhp->fh_export->ex_devid_map) {
116 		nfsd4_alloc_devid_map(fhp);
117 		if (!fhp->fh_export->ex_devid_map)
118 			return -ENOMEM;
119 	}
120 
121 	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
122 	id->generation = device_generation;
123 	id->pad = 0;
124 	return 0;
125 }
126 
127 void nfsd4_setup_layout_type(struct svc_export *exp)
128 {
129 #if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
130 	struct super_block *sb = exp->ex_path.mnt->mnt_sb;
131 #endif
132 
133 	if (!(exp->ex_flags & NFSEXP_PNFS))
134 		return;
135 
136 	/*
137 	 * If the flex file layout is configured, advertise it by default.
138 	 * Also check whether the file system supports exporting a block-like
139 	 * layout; prefer the SCSI layout when the block device supports
140 	 * persistent reservations, otherwise advertise the block layout.
141 	 */
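	/*
	 * Note that the checks below only add bits to ex_layout_types, so an
	 * export may advertise several layout types at once; the client then
	 * chooses whichever of the advertised types it wants to use.
	 */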
142 #ifdef CONFIG_NFSD_FLEXFILELAYOUT
143 	exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
144 #endif
145 #ifdef CONFIG_NFSD_BLOCKLAYOUT
146 	/* overwrite flex file layout selection if needed */
147 	if (sb->s_export_op->get_uuid &&
148 	    sb->s_export_op->map_blocks &&
149 	    sb->s_export_op->commit_blocks)
150 		exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
151 #endif
152 #ifdef CONFIG_NFSD_SCSILAYOUT
153 	/* overwrite block layout selection if needed */
154 	if (sb->s_export_op->map_blocks &&
155 	    sb->s_export_op->commit_blocks &&
156 	    sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops)
157 		exp->ex_layout_types |= 1 << LAYOUT_SCSI;
158 #endif
159 }
160 
161 static void
162 nfsd4_free_layout_stateid(struct nfs4_stid *stid)
163 {
164 	struct nfs4_layout_stateid *ls = layoutstateid(stid);
165 	struct nfs4_client *clp = ls->ls_stid.sc_client;
166 	struct nfs4_file *fp = ls->ls_stid.sc_file;
167 
168 	trace_layoutstate_free(&ls->ls_stid.sc_stateid);
169 
170 	spin_lock(&clp->cl_lock);
171 	list_del_init(&ls->ls_perclnt);
172 	spin_unlock(&clp->cl_lock);
173 
174 	spin_lock(&fp->fi_lock);
175 	list_del_init(&ls->ls_perfile);
176 	spin_unlock(&fp->fi_lock);
177 
178 	if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
179 		vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
180 	fput(ls->ls_file);
181 
182 	if (ls->ls_recalled)
183 		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);
184 
185 	kmem_cache_free(nfs4_layout_stateid_cache, ls);
186 }
187 
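/*
 * Install an FL_LAYOUT read lease on the file backing a layout stateid.
 * Conflicting access breaks the lease and lands in nfsd4_layout_lm_break(),
 * which starts a layout recall; layout types that set ->disable_recalls
 * skip the lease entirely.  On success the lock is expected to have been
 * consumed by the lease code, which the BUG_ON() below asserts.
 */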
188 static int
189 nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
190 {
191 	struct file_lock *fl;
192 	int status;
193 
194 	if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
195 		return 0;
196 
197 	fl = locks_alloc_lock();
198 	if (!fl)
199 		return -ENOMEM;
200 	locks_init_lock(fl);
201 	fl->fl_lmops = &nfsd4_layouts_lm_ops;
202 	fl->fl_flags = FL_LAYOUT;
203 	fl->fl_type = F_RDLCK;
204 	fl->fl_end = OFFSET_MAX;
205 	fl->fl_owner = ls;
206 	fl->fl_pid = current->tgid;
207 	fl->fl_file = ls->ls_file;
208 
209 	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
210 	if (status) {
211 		locks_free_lock(fl);
212 		return status;
213 	}
214 	BUG_ON(fl != NULL);	/* the lease code must have consumed the lock */
215 	return 0;
216 }
217 
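/*
 * Create a layout stateid for this client/file pair, derived from an open,
 * lock or delegation stateid.  A reference on the backing struct file is
 * held for the lifetime of the stateid and an FL_LAYOUT lease is installed
 * so that conflicting access can trigger a recall.  Returns NULL if the
 * allocation or the lease setup fails.
 */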
218 static struct nfs4_layout_stateid *
219 nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
220 		struct nfs4_stid *parent, u32 layout_type)
221 {
222 	struct nfs4_client *clp = cstate->clp;
223 	struct nfs4_file *fp = parent->sc_file;
224 	struct nfs4_layout_stateid *ls;
225 	struct nfs4_stid *stp;
226 
227 	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
228 					nfsd4_free_layout_stateid);
229 	if (!stp)
230 		return NULL;
231 
232 	get_nfs4_file(fp);
233 	stp->sc_file = fp;
234 
235 	ls = layoutstateid(stp);
236 	INIT_LIST_HEAD(&ls->ls_perclnt);
237 	INIT_LIST_HEAD(&ls->ls_perfile);
238 	spin_lock_init(&ls->ls_lock);
239 	INIT_LIST_HEAD(&ls->ls_layouts);
240 	mutex_init(&ls->ls_mutex);
241 	ls->ls_layout_type = layout_type;
242 	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
243 			NFSPROC4_CLNT_CB_LAYOUT);
244 
245 	if (parent->sc_type == NFS4_DELEG_STID)
246 		ls->ls_file = get_file(fp->fi_deleg_file);
247 	else
248 		ls->ls_file = find_any_file(fp);
249 	BUG_ON(!ls->ls_file);
250 
251 	if (nfsd4_layout_setlease(ls)) {
252 		fput(ls->ls_file);
253 		put_nfs4_file(fp);
254 		kmem_cache_free(nfs4_layout_stateid_cache, ls);
255 		return NULL;
256 	}
257 
258 	spin_lock(&clp->cl_lock);
259 	stp->sc_type = NFS4_LAYOUT_STID;
260 	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
261 	spin_unlock(&clp->cl_lock);
262 
263 	spin_lock(&fp->fi_lock);
264 	list_add(&ls->ls_perfile, &fp->fi_lo_states);
265 	spin_unlock(&fp->fi_lock);
266 
267 	trace_layoutstate_alloc(&ls->ls_stid.sc_stateid);
268 	return ls;
269 }
270 
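/*
 * Look up and validate the stateid presented with a layout operation.
 * When @create is set (a new layout may be granted), an open, lock or
 * delegation stateid is also accepted and promoted to a fresh layout
 * stateid; otherwise only an existing layout stateid with a current
 * generation and matching layout type passes.  On success *lsp is returned
 * with ls_mutex held and a reference on the stateid; both are dropped by
 * the caller once the operation is done.
 */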
271 __be32
272 nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
273 		struct nfsd4_compound_state *cstate, stateid_t *stateid,
274 		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
275 {
276 	struct nfs4_layout_stateid *ls;
277 	struct nfs4_stid *stid;
278 	unsigned char typemask = NFS4_LAYOUT_STID;
279 	__be32 status;
280 
281 	if (create)
282 		typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);
283 
284 	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
285 			net_generic(SVC_NET(rqstp), nfsd_net_id));
286 	if (status)
287 		goto out;
288 
289 	if (!fh_match(&cstate->current_fh.fh_handle,
290 		      &stid->sc_file->fi_fhandle)) {
291 		status = nfserr_bad_stateid;
292 		goto out_put_stid;
293 	}
294 
295 	if (stid->sc_type != NFS4_LAYOUT_STID) {
296 		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
297 		nfs4_put_stid(stid);
298 
299 		status = nfserr_jukebox;
300 		if (!ls)
301 			goto out;
302 		mutex_lock(&ls->ls_mutex);
303 	} else {
304 		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);
305 
306 		status = nfserr_bad_stateid;
307 		mutex_lock(&ls->ls_mutex);
308 		if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
309 			goto out_unlock_stid;
310 		if (layout_type != ls->ls_layout_type)
311 			goto out_unlock_stid;
312 	}
313 
314 	*lsp = ls;
315 	return 0;
316 
317 out_unlock_stid:
318 	mutex_unlock(&ls->ls_mutex);
319 out_put_stid:
320 	nfs4_put_stid(stid);
321 out:
322 	return status;
323 }
324 
325 static void
326 nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
327 {
328 	spin_lock(&ls->ls_lock);
329 	if (ls->ls_recalled)
330 		goto out_unlock;
331 
332 	ls->ls_recalled = true;
333 	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
334 	if (list_empty(&ls->ls_layouts))
335 		goto out_unlock;
336 
337 	trace_layout_recall(&ls->ls_stid.sc_stateid);
338 
339 	refcount_inc(&ls->ls_stid.sc_count);
340 	nfsd4_run_cb(&ls->ls_recall);
341 
342 out_unlock:
343 	spin_unlock(&ls->ls_lock);
344 }
345 
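/*
 * End offset of a layout segment.  A length of NFS4_MAX_UINT64 means "to
 * the end of the file", so the addition below saturates at NFS4_MAX_UINT64
 * instead of wrapping around.
 */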
346 static inline u64
347 layout_end(struct nfsd4_layout_seg *seg)
348 {
349 	u64 end = seg->offset + seg->length;
350 	return end >= seg->offset ? end : NFS4_MAX_UINT64;
351 }
352 
353 static void
354 layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
355 {
356 	if (end == NFS4_MAX_UINT64)
357 		lo->length = NFS4_MAX_UINT64;
358 	else
359 		lo->length = end - lo->offset;
360 }
361 
362 static bool
363 layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
364 {
365 	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
366 		return false;
367 	if (layout_end(&lo->lo_seg) <= s->offset)
368 		return false;
369 	if (layout_end(s) <= lo->lo_seg.offset)
370 		return false;
371 	return true;
372 }
373 
374 static bool
375 layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
376 {
377 	if (lo->iomode != new->iomode)
378 		return false;
379 	if (layout_end(new) < lo->offset)
380 		return false;
381 	if (layout_end(lo) < new->offset)
382 		return false;
383 
384 	lo->offset = min(lo->offset, new->offset);
385 	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
386 	return true;
387 }
388 
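/*
 * Only one layout stateid is allowed to hold layouts on a file at a time:
 * before granting a layout to @ls, recall the layouts of every other
 * stateid on the same file and report NFS4ERR_RECALLCONFLICT so the client
 * retries once they have been returned.  Called with fi_lock held.
 */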
389 static __be32
390 nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
391 {
392 	struct nfs4_file *fp = ls->ls_stid.sc_file;
393 	struct nfs4_layout_stateid *l, *n;
394 	__be32 nfserr = nfs_ok;
395 
396 	assert_spin_locked(&fp->fi_lock);
397 
398 	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
399 		if (l != ls) {
400 			nfsd4_recall_file_layout(l);
401 			nfserr = nfserr_recallconflict;
402 		}
403 	}
404 
405 	return nfserr;
406 }
407 
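/*
 * Record a freshly granted layout segment on its stateid.  The fast path
 * tries to merge the segment into an existing one under fi_lock/ls_lock.
 * If that fails, both locks are dropped so a new nfs4_layout can be
 * allocated with GFP_KERNEL, and the conflict and merge checks are then
 * repeated under the locks before the new entry is linked in.  The stateid
 * generation is bumped and copied into the LAYOUTGET reply on success.
 */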
408 __be32
409 nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
410 {
411 	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
412 	struct nfs4_file *fp = ls->ls_stid.sc_file;
413 	struct nfs4_layout *lp, *new = NULL;
414 	__be32 nfserr;
415 
416 	spin_lock(&fp->fi_lock);
417 	nfserr = nfsd4_recall_conflict(ls);
418 	if (nfserr)
419 		goto out;
420 	spin_lock(&ls->ls_lock);
421 	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
422 		if (layouts_try_merge(&lp->lo_seg, seg))
423 			goto done;
424 	}
425 	spin_unlock(&ls->ls_lock);
426 	spin_unlock(&fp->fi_lock);
427 
428 	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
429 	if (!new)
430 		return nfserr_jukebox;
431 	memcpy(&new->lo_seg, seg, sizeof(new->lo_seg));
432 	new->lo_state = ls;
433 
434 	spin_lock(&fp->fi_lock);
435 	nfserr = nfsd4_recall_conflict(ls);
436 	if (nfserr)
437 		goto out;
438 	spin_lock(&ls->ls_lock);
439 	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
440 		if (layouts_try_merge(&lp->lo_seg, seg))
441 			goto done;
442 	}
443 
444 	refcount_inc(&ls->ls_stid.sc_count);
445 	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
446 	new = NULL;
447 done:
448 	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
449 	spin_unlock(&ls->ls_lock);
450 out:
451 	spin_unlock(&fp->fi_lock);
452 	if (new)
453 		kmem_cache_free(nfs4_layout_cache, new);
454 	return nfserr;
455 }
456 
457 static void
458 nfsd4_free_layouts(struct list_head *reaplist)
459 {
460 	while (!list_empty(reaplist)) {
461 		struct nfs4_layout *lp = list_first_entry(reaplist,
462 				struct nfs4_layout, lo_perstate);
463 
464 		list_del(&lp->lo_perstate);
465 		nfs4_put_stid(&lp->lo_state->ls_stid);
466 		kmem_cache_free(nfs4_layout_cache, lp);
467 	}
468 }
469 
470 static void
471 nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
472 		struct list_head *reaplist)
473 {
474 	struct nfsd4_layout_seg *lo = &lp->lo_seg;
475 	u64 end = layout_end(lo);
476 
477 	if (seg->offset <= lo->offset) {
478 		if (layout_end(seg) >= end) {
479 			list_move_tail(&lp->lo_perstate, reaplist);
480 			return;
481 		}
482 		lo->offset = layout_end(seg);
483 	} else {
484 		/* retain the whole layout segment on a split. */
485 		if (layout_end(seg) < end) {
486 			dprintk("%s: split not supported\n", __func__);
487 			return;
488 		}
489 		end = seg->offset;
490 	}
491 
492 	layout_update_len(lo, end);
493 }
494 
495 __be32
496 nfsd4_return_file_layouts(struct svc_rqst *rqstp,
497 		struct nfsd4_compound_state *cstate,
498 		struct nfsd4_layoutreturn *lrp)
499 {
500 	struct nfs4_layout_stateid *ls;
501 	struct nfs4_layout *lp, *n;
502 	LIST_HEAD(reaplist);
503 	__be32 nfserr;
504 	int found = 0;
505 
506 	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
507 						false, lrp->lr_layout_type,
508 						&ls);
509 	if (nfserr) {
510 		trace_layout_return_lookup_fail(&lrp->lr_sid);
511 		return nfserr;
512 	}
513 
514 	spin_lock(&ls->ls_lock);
515 	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
516 		if (layouts_overlapping(lp, &lrp->lr_seg)) {
517 			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
518 			found++;
519 		}
520 	}
521 	if (!list_empty(&ls->ls_layouts)) {
522 		if (found)
523 			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
524 		lrp->lrs_present = 1;
525 	} else {
526 		trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
527 		nfs4_unhash_stid(&ls->ls_stid);
528 		lrp->lrs_present = 0;
529 	}
530 	spin_unlock(&ls->ls_lock);
531 
532 	mutex_unlock(&ls->ls_mutex);
533 	nfs4_put_stid(&ls->ls_stid);
534 	nfsd4_free_layouts(&reaplist);
535 	return nfs_ok;
536 }
537 
538 __be32
539 nfsd4_return_client_layouts(struct svc_rqst *rqstp,
540 		struct nfsd4_compound_state *cstate,
541 		struct nfsd4_layoutreturn *lrp)
542 {
543 	struct nfs4_layout_stateid *ls, *n;
544 	struct nfs4_client *clp = cstate->clp;
545 	struct nfs4_layout *lp, *t;
546 	LIST_HEAD(reaplist);
547 
548 	lrp->lrs_present = 0;
549 
550 	spin_lock(&clp->cl_lock);
551 	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
552 		if (ls->ls_layout_type != lrp->lr_layout_type)
553 			continue;
554 
555 		if (lrp->lr_return_type == RETURN_FSID &&
556 		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
557 				   &cstate->current_fh.fh_handle))
558 			continue;
559 
560 		spin_lock(&ls->ls_lock);
561 		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
562 			if (lrp->lr_seg.iomode == IOMODE_ANY ||
563 			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
564 				list_move_tail(&lp->lo_perstate, &reaplist);
565 		}
566 		spin_unlock(&ls->ls_lock);
567 	}
568 	spin_unlock(&clp->cl_lock);
569 
570 	nfsd4_free_layouts(&reaplist);
571 	return 0;
572 }
573 
574 static void
575 nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
576 		struct list_head *reaplist)
577 {
578 	spin_lock(&ls->ls_lock);
579 	list_splice_init(&ls->ls_layouts, reaplist);
580 	spin_unlock(&ls->ls_lock);
581 }
582 
583 void
584 nfsd4_return_all_client_layouts(struct nfs4_client *clp)
585 {
586 	struct nfs4_layout_stateid *ls, *n;
587 	LIST_HEAD(reaplist);
588 
589 	spin_lock(&clp->cl_lock);
590 	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
591 		nfsd4_return_all_layouts(ls, &reaplist);
592 	spin_unlock(&clp->cl_lock);
593 
594 	nfsd4_free_layouts(&reaplist);
595 }
596 
597 void
598 nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
599 {
600 	struct nfs4_layout_stateid *ls, *n;
601 	LIST_HEAD(reaplist);
602 
603 	spin_lock(&fp->fi_lock);
604 	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
605 		if (ls->ls_stid.sc_client == clp)
606 			nfsd4_return_all_layouts(ls, &reaplist);
607 	}
608 	spin_unlock(&fp->fi_lock);
609 
610 	nfsd4_free_layouts(&reaplist);
611 }
612 
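/*
 * Last-resort fencing of a client that ignored a layout recall: hand the
 * problem to user space.  The helper is invoked roughly as shown below
 * (the address and device name are purely illustrative):
 *
 *	/sbin/nfsd-recall-failed <client address> <superblock s_id>
 *	e.g.: /sbin/nfsd-recall-failed 192.0.2.1 sda1
 *
 * and is expected to cut the client off from the storage by whatever
 * mechanism the administrator has configured.
 */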
613 static void
614 nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
615 {
616 	struct nfs4_client *clp = ls->ls_stid.sc_client;
617 	char addr_str[INET6_ADDRSTRLEN];
618 	static char const nfsd_recall_failed[] = "/sbin/nfsd-recall-failed";
619 	static char *envp[] = {
620 		"HOME=/",
621 		"TERM=linux",
622 		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
623 		NULL
624 	};
625 	char *argv[8];
626 	int error;
627 
628 	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
629 
630 	printk(KERN_WARNING
631 		"nfsd: client %s failed to respond to layout recall. "
632 		"Fencing...\n", addr_str);
633 
634 	argv[0] = (char *)nfsd_recall_failed;
635 	argv[1] = addr_str;
636 	argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
637 	argv[3] = NULL;
638 
639 	error = call_usermodehelper(nfsd_recall_failed, argv, envp,
640 				    UMH_WAIT_PROC);
641 	if (error) {
642 		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
643 			addr_str, error);
644 	}
645 }
646 
647 static void
648 nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
649 {
650 	struct nfs4_layout_stateid *ls =
651 		container_of(cb, struct nfs4_layout_stateid, ls_recall);
652 
653 	mutex_lock(&ls->ls_mutex);
654 	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
655 	mutex_unlock(&ls->ls_mutex);
656 }
657 
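/*
 * Completion handler for the CB_LAYOUTRECALL callback.  Returning 0 keeps
 * the callback pending so it is retried (paired with rpc_delay() below to
 * poll the client), while a non-zero return completes it and lets
 * ->release clean up.  A client that neither returns the layout nor
 * answers NFS4ERR_NOMATCHING_LAYOUT within two lease periods gets fenced.
 */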
658 static int
659 nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
660 {
661 	struct nfs4_layout_stateid *ls =
662 		container_of(cb, struct nfs4_layout_stateid, ls_recall);
663 	struct nfsd_net *nn;
664 	ktime_t now, cutoff;
665 	const struct nfsd4_layout_ops *ops;
666 	LIST_HEAD(reaplist);
667 
668 
669 	switch (task->tk_status) {
670 	case 0:
671 	case -NFS4ERR_DELAY:
672 		/*
673 		 * Anything left? If not, then call it done. Note that we don't
674 		 * take the spinlock since this is an optimization and nothing
675 		 * should get added until the cb counter goes to zero.
676 		 */
677 		if (list_empty(&ls->ls_layouts))
678 			return 1;
679 
680 		/* Poll the client until it's done with the layout */
681 		now = ktime_get();
682 		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);
683 
684 		/* Client gets 2 lease periods to return it */
685 		cutoff = ktime_add_ns(task->tk_start,
686 					 nn->nfsd4_lease * NSEC_PER_SEC * 2);
687 
688 		if (ktime_before(now, cutoff)) {
689 			rpc_delay(task, HZ/100); /* 10 milliseconds */
690 			return 0;
691 		}
692 		/* Fallthrough */
693 	default:
694 		/*
695 		 * Unknown error or non-responding client; we'll need to fence.
696 		 */
697 		trace_layout_recall_fail(&ls->ls_stid.sc_stateid);
698 
699 		ops = nfsd4_layout_ops[ls->ls_layout_type];
700 		if (ops->fence_client)
701 			ops->fence_client(ls);
702 		else
703 			nfsd4_cb_layout_fail(ls);
704 		return -1;
705 	case -NFS4ERR_NOMATCHING_LAYOUT:
706 		trace_layout_recall_done(&ls->ls_stid.sc_stateid);
707 		task->tk_status = 0;
708 		return 1;
709 	}
710 }
711 
712 static void
713 nfsd4_cb_layout_release(struct nfsd4_callback *cb)
714 {
715 	struct nfs4_layout_stateid *ls =
716 		container_of(cb, struct nfs4_layout_stateid, ls_recall);
717 	LIST_HEAD(reaplist);
718 
719 	trace_layout_recall_release(&ls->ls_stid.sc_stateid);
720 
721 	nfsd4_return_all_layouts(ls, &reaplist);
722 	nfsd4_free_layouts(&reaplist);
723 	nfs4_put_stid(&ls->ls_stid);
724 }
725 
726 static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
727 	.prepare	= nfsd4_cb_layout_prepare,
728 	.done		= nfsd4_cb_layout_done,
729 	.release	= nfsd4_cb_layout_release,
730 };
731 
732 static bool
733 nfsd4_layout_lm_break(struct file_lock *fl)
734 {
735 	/*
736 	 * We don't want the locks code to time out the lease for us;
737 	 * we'll remove it ourselves if a layout isn't returned
738 	 * in time.
739 	 */
740 	fl->fl_break_time = 0;
741 	nfsd4_recall_file_layout(fl->fl_owner);
742 	return false;
743 }
744 
745 static int
746 nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
747 		struct list_head *dispose)
748 {
749 	BUG_ON(!(arg & F_UNLCK));
750 	return lease_modify(onlist, arg, dispose);
751 }
752 
753 static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
754 	.lm_break	= nfsd4_layout_lm_break,
755 	.lm_change	= nfsd4_layout_lm_change,
756 };
757 
758 int
759 nfsd4_init_pnfs(void)
760 {
761 	int i;
762 
763 	for (i = 0; i < DEVID_HASH_SIZE; i++)
764 		INIT_LIST_HEAD(&nfsd_devid_hash[i]);
765 
766 	nfs4_layout_cache = kmem_cache_create("nfs4_layout",
767 			sizeof(struct nfs4_layout), 0, 0, NULL);
768 	if (!nfs4_layout_cache)
769 		return -ENOMEM;
770 
771 	nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
772 			sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
773 	if (!nfs4_layout_stateid_cache) {
774 		kmem_cache_destroy(nfs4_layout_cache);
775 		return -ENOMEM;
776 	}
777 	return 0;
778 }
779 
780 void
781 nfsd4_exit_pnfs(void)
782 {
783 	int i;
784 
785 	kmem_cache_destroy(nfs4_layout_cache);
786 	kmem_cache_destroy(nfs4_layout_stateid_cache);
787 
788 	for (i = 0; i < DEVID_HASH_SIZE; i++) {
789 		struct nfsd4_deviceid_map *map, *n;
790 
791 		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
792 			kfree(map);
793 	}
794 }
795