xref: /openbmc/linux/fs/nfsd/nfs4layouts.c (revision 4f3db074)
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY                NFSDDBG_PNFS

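/*
 * A layout segment handed out to a client.  Each entry lives on the
 * ls_layouts list of the layout stateid (lo_state) it was granted under.
 */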
struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;

const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
};

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32 devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}

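/*
 * Allocate a device ID to fsid mapping for the export behind @fhp, or reuse
 * an existing mapping with the same fsid.  The result is stored in the
 * export's ex_devid_map; on allocation failure it is simply left NULL.
 */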
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;

out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}

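/*
 * Look up a device ID to fsid mapping by its index, walking the hash chain
 * under rcu_read_lock().
 */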
struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();

	return ret;
}

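/*
 * Fill in a device ID for the export behind @fhp, creating the fsid mapping
 * on first use.  Returns -ENOMEM if the mapping cannot be allocated.
 */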
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -ENOMEM;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}

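/*
 * Pick the layout type offered for an export: block layouts are advertised
 * only if the export is marked pNFS-capable and the filesystem implements
 * the get_uuid, map_blocks and commit_blocks export operations.
 */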
void nfsd4_setup_layout_type(struct svc_export *exp)
{
	struct super_block *sb = exp->ex_path.mnt->mnt_sb;

	if (!(exp->ex_flags & NFSEXP_PNFS))
		return;

	if (sb->s_export_op->get_uuid &&
	    sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks)
		exp->ex_layout_type = LAYOUT_BLOCK_VOLUME;
}

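/*
 * sc_free callback for layout stateids: unhash the stateid from its client
 * and file, drop the layout lease and the file reference, and free it.
 */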
static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
	fput(ls->ls_file);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}

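/*
 * Take an FL_LAYOUT lease on the file backing a layout stateid, so that
 * conflicting access triggers ->lm_break and with it a layout recall.
 */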
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lock *fl;
	int status;

	fl = locks_alloc_lock();
	if (!fl)
		return -ENOMEM;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->fl_flags = FL_LAYOUT;
	fl->fl_type = F_RDLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = ls;
	fl->fl_pid = current->tgid;
	fl->fl_file = ls->ls_file;

	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
	if (status) {
		locks_free_lock(fl);
		return status;
	}
	BUG_ON(fl != NULL);
	return 0;
}

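/*
 * Allocate and initialize a layout stateid derived from an open, lock or
 * delegation stateid, take the layout lease, and hash the new stateid into
 * the per-client and per-file lists.
 */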
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
	if (!stp)
		return NULL;
	stp->sc_free = nfsd4_free_layout_stateid;
	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == NFS4_DELEG_STID)
		ls->ls_file = get_file(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = NFS4_LAYOUT_STID;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}

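/*
 * Look up the layout stateid presented by the client.  When @create is set,
 * an open, lock or delegation stateid is also accepted and a fresh layout
 * stateid is derived from it; an existing layout stateid is checked for a
 * matching layout type and a valid generation.
 */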
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned char typemask = NFS4_LAYOUT_STID;
	__be32 status;

	if (create)
		typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
			net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != NFS4_LAYOUT_STID) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		if (stateid->si_generation > stid->sc_stateid.si_generation)
			goto out_put_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_put_stid;
	}

	*lsp = ls;
	return 0;

out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}

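/*
 * Recall the layouts held on a layout stateid.  The first call marks the
 * stateid as recalled and, if any layouts are outstanding, bumps the stateid
 * generation and sends the layout recall callback to the client.
 */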
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	trace_layout_recall(&ls->ls_stid.sc_stateid);

	atomic_inc(&ls->ls_stid.sc_count);
	update_stateid(&ls->ls_stid.sc_stateid);
	memcpy(&ls->ls_recall_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
	nfsd4_run_cb(&ls->ls_recall);

out_unlock:
	spin_unlock(&ls->ls_lock);
}

static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;
	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;
	return true;
}

static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}

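/*
 * Initiate a recall on every other layout stateid attached to the same file
 * and return nfserr_recallconflict if there were any.  Must be called with
 * fi_lock held.
 */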
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}

	return nfserr;
}

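/*
 * Record a freshly granted layout segment on its stateid: merge it into an
 * existing segment where possible, otherwise allocate a new nfs4_layout
 * entry.  Because fi_lock must be dropped for the allocation, the recall
 * conflict and merge checks are repeated afterwards.
 */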
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	atomic_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	update_stateid(&ls->ls_stid.sc_stateid);
	memcpy(&lgp->lg_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}

static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}

static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		end = seg->offset;
	}

	layout_update_len(lo, end);
}

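/*
 * Return layouts on a single layout stateid: trim or remove every segment
 * that overlaps the requested range.  If no layouts remain the stateid is
 * unhashed and lrs_present is cleared, otherwise lrs_present is set and,
 * when anything was returned, the bumped stateid is copied back to the
 * client.
 */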
__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found) {
			update_stateid(&ls->ls_stid.sc_stateid);
			memcpy(&lrp->lr_sid, &ls->ls_stid.sc_stateid,
				sizeof(stateid_t));
		}
		lrp->lrs_present = 1;
	} else {
		trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		nfs4_unhash_stid(&ls->ls_stid);
		lrp->lrs_present = 0;
	}
	spin_unlock(&ls->ls_lock);

	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (ls->ls_layout_type != lrp->lr_layout_type)
			continue;

		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return 0;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}

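/*
 * Fence a client that failed to respond to a layout recall: log a warning
 * and run the /sbin/nfsd-recall-failed helper with the client address and
 * the id of the superblock backing the layout file.
 */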
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	trace_layout_recall_fail(&ls->ls_stid.sc_stateid);

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"  Fencing..\n", addr_str);

	argv[0] = "/sbin/nfsd-recall-failed";
	argv[1] = addr_str;
	argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
	}
}

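/*
 * Completion handler for the layout recall callback: NFS4ERR_DELAY makes the
 * rpc_task retry, NFS4ERR_NOMATCHING_LAYOUT counts as success, and any other
 * error is treated as an unresponsive client and leads to fencing.
 */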
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	switch (task->tk_status) {
	case 0:
		return 1;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	case -NFS4ERR_DELAY:
		/* Poll the client until it's done with the layout */
		/* FIXME: cap the number of retries.
		 * The pNFS standard states that we need to only expire
		 * the client after at least "lease time", e.g. lease-time * 2,
		 * when failing to communicate a recall.
		 */
		rpc_delay(task, HZ/100); /* 10 milliseconds */
		return 0;
	default:
		/*
		 * Unknown error or non-responding client, we'll need to fence.
		 */
		nfsd4_cb_layout_fail(ls);
		return -1;
	}
}

static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
};

static bool
nfsd4_layout_lm_break(struct file_lock *fl)
{
	/*
	 * We don't want the locks code to time out the lease for us;
	 * we'll remove it ourselves if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->fl_owner);
	return false;
}

static int
nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};

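/*
 * Set up the device ID hash table and create the slab caches for layouts
 * and layout stateids; nfsd4_exit_pnfs below tears them down again.
 */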
int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = kmem_cache_create("nfs4_layout",
			sizeof(struct nfs4_layout), 0, 0, NULL);
	if (!nfs4_layout_cache)
		return -ENOMEM;

	nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
			sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}