/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
#define FF_LAYOUTRETURN_MAXERR 20


static struct group_info	*ff_zero_group;

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_mirror *mirror);

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		return &ffl->generic_hdr;
	}
	return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}
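/*
 * Decode an XDR opaque<> file handle: a 4-byte length followed by the
 * handle bytes (the XDR layer pads the body to a 4-byte boundary).
 */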
static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	/* fh->data is NFS_MAXFHSIZE bytes; a larger size would overflow it */
	if (fh->size > NFS_MAXFHSIZE) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					&m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

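/*
 * Mirrors with the same deviceid and file handle list are shared between
 * layout segments: reuse an existing entry (taking a reference) when one
 * matches, otherwise link the new mirror into the layout's list.
 */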
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct rpc_cred	*cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	if (cred)
		put_rpccred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	if (cred)
		put_rpccred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for the .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

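/*
 * Ordering predicate for inserting layout segments: READ segments are
 * kept ahead of RW segments, and segments of the same iomode are kept
 * in ascending offset order.
 */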
static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

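/*
 * Decode the layout body returned by LAYOUTGET for the flexible file
 * layout (RFC 8435): stripe unit and mirror count, then for each mirror
 * the data server count, deviceid, efficiency, stateid, file handle
 * array and user/group, followed by optional flags and a layoutstats
 * report interval.
 */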
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct auth_cred acred = { .group_info = ff_zero_group };
		struct rpc_cred	__rcu *cred;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
		if (rc)
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		/* use kcalloc so an absurd fh_count cannot overflow the size */
		fls->mirror_array[i]->fh_versions =
			kcalloc(fh_count, sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.gid = make_kgid(&init_user_ns, id);

		/* find the cred for it */
		rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags));
		if (IS_ERR(cred)) {
			rc = PTR_ERR(cred);
			goto out_err_free;
		}

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, acred.uid),
			from_kgid(&init_user_ns, acred.gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

/* Return 1 until we support multiple lsegs */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}

static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	WARN_ON_ONCE(atomic_dec_return(&timer->n_ops) < 0);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

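/*
 * Start the busy timer for this request and decide whether it is time to
 * send a LAYOUTSTATS report: returns true once the report interval (in
 * milliseconds; per-mirror value, module parameter, or the built-in
 * default) has elapsed since the last report.
 */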
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
			report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
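	/*
	 * Unstable writes are not counted as completed here; their bytes
	 * are credited when the matching COMMIT succeeds (see
	 * ff_layout_commit_record_layoutstats_done()).
	 */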
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;
	int i;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lsegs per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;

	spin_lock(&cinfo->inode->i_lock);
	if (cinfo->ds->nbuckets != 0)
		kfree(buckets);
	else {
		cinfo->ds->buckets = buckets;
		cinfo->ds->nbuckets = size;
		for (i = 0; i < size; i++) {
			INIT_LIST_HEAD(&buckets[i].written);
			INIT_LIST_HEAD(&buckets[i].committing);
			/* mark direct verifier as unset */
			buckets[i].direct_verf.committed =
				NFS_INVALID_STABLE_HOW;
		}
	}
	spin_unlock(&cinfo->inode->i_lock);
	return 0;
}

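/*
 * Pick the first data server that can be connected to, walking the
 * mirror array (already sorted by efficiency) from start_idx. Only the
 * last candidate passes fail_return to nfs4_ff_layout_prepare_ds(), so
 * earlier failures fall through quietly to the next mirror.
 */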
static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  int start_idx,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	int idx;

	/* mirrors are sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx + 1 == fls->mirror_array_cnt)
			fail_return = true;
		ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
retry_strict:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   0,
					   NFS4_MAX_UINT64,
					   IOMODE_READ,
					   strict_iomode,
					   GFP_KERNEL);
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}

	/* If we asked for a non-strict lookup, got back an IOMODE_RW
	 * segment, and the server wants to avoid READs on it, then
	 * retry with strict_iomode set!
	 */
	if (pgio->pg_lseg && !strict_iomode &&
	    ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		strict_iomode = true;
		goto retry_strict;
	}
}


static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

retry:
	pnfs_generic_pg_check_layout(pgio);
	/* Use full layout for now */
	if (!pgio->pg_lseg)
		ff_layout_pg_get_read(pgio, req, false);
	else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
		ff_layout_pg_get_read(pgio, req, true);

	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

retry:
	pnfs_generic_pg_check_layout(pgio);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_put_lseg(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;

out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

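/*
 * Redirect a failed pNFS write: either reschedule it through pNFS
 * (retry_pnfs) or mark the header for resend through the MDS.
 */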
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);

	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode, NULL);
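		/* Fall through */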
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_avoid_mds_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
out:
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	goto out;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}


static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	if (task->tk_status >= 0)
		return 0;

	/* Handle the case of an invalid layout segment */
	if (!pnfs_is_valid_lseg(lseg))
		return -NFS4ERR_RESET_TO_PNFS;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

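/*
 * Record a DS I/O error for later reporting to the MDS via LAYOUTRETURN
 * and mark the layout for return. Local transport errnos (status == 0)
 * are first mapped onto the closest NFS4ERR code; DELAY and GRACE are
 * transient and deliberately not recorded.
 */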
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
		return;
	default:
		break;
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
					hdr->pgio_mirror_idx + 1,
					&hdr->pgio_mirror_idx))
			goto out_eagain;
		ff_layout_read_record_layoutstats_done(task, hdr);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}


/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * The flexfiles client should treat a DS-replied FILE_SYNC as DATA_SYNC,
 * so following http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send a layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static bool
ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	pnfs_generic_rw_release(data);
}

static int ff_layout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	loff_t end_offs = 0;
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		ff_layout_reset_write(hdr, true);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_write(hdr, false);
		return task->tk_status;
	case -EAGAIN:
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;

	/* Note: if the write is unstable, don't set end_offs until commit */
	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);

	/* zero out fattr since we don't care about DS attributes at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}

static int ff_layout_commit_done_cb(struct rpc_task *task,
				     struct nfs_commit_data *data)
{
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    data->res.op_status, OP_COMMIT,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}


static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_write_record_layoutstats_start(task, hdr);
	return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_write_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
	pnfs_generic_rw_release(data);
}

static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	struct nfs_page *req;
	__u64 count = 0;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

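	/*
	 * On success, credit the bytes of every committed request as
	 * stable (FILE_SYNC) writes; this is the other half of the
	 * NFS_UNSTABLE accounting in nfs4_ff_layout_stat_io_end_write().
	 */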
	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	ff_layout_commit_record_layoutstats_start(task, cdata);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (nfs4_setup_sequence(wdata->ds_clp,
				&wdata->args.seq_args,
				&wdata->res.seq_res,
				task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(task, cdata);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static void ff_layout_commit_release(void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
	pnfs_generic_commit_release(data);
}

static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

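/*
 * Set up and fire an asynchronous READ directly to the chosen data
 * server; on failure the caller either retries through pNFS or falls
 * back to the MDS, depending on the layout's fallback policy.
 */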
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);

	hdr->pgio_done_cb = ff_layout_read_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;
	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}


/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}

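/*
 * There is exactly one data server per mirror, so a commit bucket index
 * maps 1:1 onto the mirror (and hence DS) index.
 */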
1852 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1853 {
1854 	return i;
1855 }
1856 
1857 static struct nfs_fh *
1858 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1859 {
1860 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1861 
1862 	/* FIXME: Assume that there is only one NFS version available
1863 	 * for the DS.
1864 	 */
1865 	return &flseg->mirror_array[i]->fh_versions[0];
1866 }
1867 
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	u32 idx;
	int vers, ret;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (!ds_cred)
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
					       &ff_layout_commit_call_ops_v4,
				   how, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

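/*
 * Thin wrapper handing the commit list to the generic pNFS commit
 * machinery, with ff_layout_initiate_commit driving each DS COMMIT.
 */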
static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			   int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

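/* Return the commit info embedded in the flexfile layout header, if any. */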
static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

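/* Encode the error count followed by the recorded DS I/O errors. */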
static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args,
				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *start;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	*start = cpu_to_be32(ff_args->num_errors);
	/* This assumes we always return _ALL_ layouts */
	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}

static void
encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
{
	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
}

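/*
 * Encode the fixed part of an ff_iostats4 entry: offset, length,
 * stateid, the four read/write counters, and the device ID.
 */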
static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 8);
	p = xdr_encode_hyper(p, devinfo->offset);
	p = xdr_encode_hyper(p, devinfo->length);
	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	p = xdr_reserve_space(xdr, 4*8);
	p = xdr_encode_hyper(p, devinfo->read_count);
	p = xdr_encode_hyper(p, devinfo->read_bytes);
	p = xdr_encode_hyper(p, devinfo->write_count);
	p = xdr_encode_hyper(p, devinfo->write_bytes);
	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}

static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
			devinfo->ld_private.data);
}

/* Encode the iostats array gathered for this layout return */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
		const struct nfs4_layoutreturn_args *args,
		struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *p;
	int i;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(ff_args->num_dev);
	for (i = 0; i < ff_args->num_dev; i++)
		ff_layout_encode_ff_iostat(xdr,
				&args->layout->plh_stateid,
				&ff_args->devinfo[i]);
}

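/* Drop the ld_private data attached to each devinfo entry, if any. */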
static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
		unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		if (!devinfo[i].ld_private.ops)
			continue;
		if (!devinfo[i].ld_private.ops->free)
			continue;
		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}
}

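/* Allocate a flexfile layout DS and return its embedded deviceid node. */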
static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

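/*
 * Encode the flexfile-specific LAYOUTRETURN payload: the ioerr and
 * iostats arrays are XDR-encoded into a preallocated scratch page,
 * then emitted as one length-prefixed blob.
 */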
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
		const void *voidargs,
		const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}

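/* Release everything gathered by ff_layout_prepare_layoutreturn(). */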
static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}

static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};

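/*
 * Collect the DS I/O errors and per-mirror statistics to be encoded
 * in the LAYOUTRETURN, and hang them off args->ld_private.
 */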
static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
	if (!ff_args)
		goto out_nomem;
	ff_args->pages[0] = alloc_page(GFP_KERNEL);
	if (!ff_args->pages[0])
		goto out_nomem_free;

	INIT_LIST_HEAD(&ff_args->errors);
	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
			&args->range, &ff_args->errors,
			FF_LAYOUTRETURN_MAXERR);

	spin_lock(&args->inode->i_lock);
	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
	spin_unlock(&args->inode->i_lock);

	args->ld_private->ops = &layoutreturn_ops;
	args->ld_private->data = ff_args;
	return 0;
out_nomem_free:
	kfree(ff_args);
out_nomem:
	return -ENOMEM;
}

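/* Print an IPv4 address in presentation format into buf. */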
static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

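/*
 * Print an IPv6 address in presentation format into buf, applying
 * the RFC 4291 shorthand rules and omitting any scope ID.
 */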
static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
					&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

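/* Encode a ktime_t as an nfstime4: 64-bit seconds, 32-bit nanoseconds. */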
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

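/* Encode one ff_io_latency4: five op/byte counters plus two nfstime4s. */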
static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

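/*
 * Encode an ff_layoutupdate4 for one mirror: DS netaddr and filehandle,
 * read and write latency stats, the time since the mirror was set up,
 * and a trailing boolean that is always encoded as false.
 */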
static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);
}

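/*
 * Encode the layoutupdate4 body for LAYOUTSTATS, then backfill the
 * 4-byte opaque length once the payload size is known.
 */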
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
			     const struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
			struct nfs42_layoutstat_devinfo, ld_private);
	__be32 *start;

	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs4_ff_layout_mirror *mirror = opaque->data;

	ff_layout_put_mirror(mirror);
}

static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
	.encode = ff_layout_encode_layoutstats,
	.free	= ff_layout_free_layoutstats,
};

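/*
 * Fill in up to dev_limit devinfo entries for mirrors with fresh
 * statistics.  Each selected mirror gains a reference that is dropped
 * again in ff_layout_free_layoutstats().  Both callers hold the
 * inode's i_lock across this walk.
 */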
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (IS_ERR_OR_NULL(mirror->mirror_ds))
			continue;
		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
			continue;
		/* mirror refcount put in ff_layout_free_layoutstats */
		if (!atomic_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		spin_lock(&mirror->lock);
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		spin_unlock(&mirror->lock);
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->ld_private.ops = &layoutstat_ops;
		devinfo->ld_private.data = mirror;

		devinfo++;
		i++;
	}
	return i;
}

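/*
 * Allocate and populate the devinfo array for a LAYOUTSTATS call;
 * returns -ENOENT when no mirror has anything new to report.
 */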
static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&args->devinfo[0], dev_count);
	spin_unlock(&args->inode->i_lock);
	if (!args->num_dev) {
		kfree(args->devinfo);
		args->devinfo = NULL;
		return -ENOENT;
	}

	return 0;
}

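/* Advertise LAYOUTSTATS support when the client is built with NFSv4.2. */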
static int
ff_layout_set_layoutdriver(struct nfs_server *server,
		const struct nfs_fh *dummy)
{
#if IS_ENABLED(CONFIG_NFS_V4_2)
	server->caps |= NFS_CAP_LAYOUTSTATS;
#endif
	return 0;
}

static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.set_layoutdriver	= ff_layout_set_layoutdriver,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	if (!ff_zero_group) {
		ff_zero_group = groups_alloc(0);
		if (!ff_zero_group)
			return -ENOMEM;
	}
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
	if (ff_zero_group) {
		put_group_info(ff_zero_group);
		ff_zero_group = NULL;
	}
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);