1 /*
2  * Module for pnfs flexfile layout driver.
3  *
4  * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
5  *
6  * Tao Peng <bergwolf@primarydata.com>
7  */
8 
9 #include <linux/nfs_fs.h>
10 #include <linux/nfs_page.h>
11 #include <linux/module.h>
12 #include <linux/sched/mm.h>
13 
14 #include <linux/sunrpc/metrics.h>
15 
16 #include "flexfilelayout.h"
17 #include "../nfs4session.h"
18 #include "../nfs4idmap.h"
19 #include "../internal.h"
20 #include "../delegation.h"
21 #include "../nfs4trace.h"
22 #include "../iostat.h"
23 #include "../nfs.h"
24 #include "../nfs42.h"
25 
26 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
27 
28 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
29 #define FF_LAYOUTRETURN_MAXERR 20
30 
31 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
32 		struct nfs_pgio_header *hdr);
33 static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
34 			       struct nfs42_layoutstat_devinfo *devinfo,
35 			       int dev_limit);
36 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
37 			      const struct nfs42_layoutstat_devinfo *devinfo,
38 			      struct nfs4_ff_layout_mirror *mirror);
39 
40 static struct pnfs_layout_hdr *
41 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
42 {
43 	struct nfs4_flexfile_layout *ffl;
44 
	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (!ffl)
		return NULL;
	INIT_LIST_HEAD(&ffl->error_list);
	INIT_LIST_HEAD(&ffl->mirrors);
	ffl->last_report_time = ktime_get();
	return &ffl->generic_hdr;
53 }
54 
55 static void
56 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
57 {
58 	struct nfs4_ff_layout_ds_err *err, *n;
59 
60 	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
61 				 list) {
62 		list_del(&err->list);
63 		kfree(err);
64 	}
65 	kfree(FF_LAYOUT_FROM_HDR(lo));
66 }
67 
68 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
69 {
70 	__be32 *p;
71 
72 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
73 	if (unlikely(p == NULL))
74 		return -ENOBUFS;
75 	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
76 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
77 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
78 		p[0], p[1], p[2], p[3]);
79 	return 0;
80 }
81 
82 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
83 {
84 	__be32 *p;
85 
86 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
87 	if (unlikely(!p))
88 		return -ENOBUFS;
89 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
90 	nfs4_print_deviceid(devid);
91 	return 0;
92 }
93 
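/*
 * Decode an opaque NFS file handle (4-byte length followed by the
 * handle bytes) from the XDR stream, rejecting handles larger than
 * the client's nfs_fh buffer.
 */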
94 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
95 {
96 	__be32 *p;
97 
98 	p = xdr_inline_decode(xdr, 4);
99 	if (unlikely(!p))
100 		return -ENOBUFS;
101 	fh->size = be32_to_cpup(p++);
102 	if (fh->size > sizeof(struct nfs_fh)) {
103 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
104 		       fh->size);
105 		return -EOVERFLOW;
106 	}
107 	/* fh.data */
108 	p = xdr_inline_decode(xdr, fh->size);
109 	if (unlikely(!p))
110 		return -ENOBUFS;
111 	memcpy(&fh->data, p, fh->size);
112 	dprintk("%s: fh len %d\n", __func__, fh->size);
113 
114 	return 0;
115 }
116 
/*
 * Currently only stringified uids and gids are accepted.
 * I.e., Kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
125 static int
126 decode_name(struct xdr_stream *xdr, u32 *id)
127 {
128 	__be32 *p;
129 	int len;
130 
131 	/* opaque_length(4)*/
132 	p = xdr_inline_decode(xdr, 4);
133 	if (unlikely(!p))
134 		return -ENOBUFS;
135 	len = be32_to_cpup(p++);
136 	if (len < 0)
137 		return -EINVAL;
138 
139 	dprintk("%s: len %u\n", __func__, len);
140 
141 	/* opaque body */
142 	p = xdr_inline_decode(xdr, len);
143 	if (unlikely(!p))
144 		return -ENOBUFS;
145 
146 	if (!nfs_map_string_to_numeric((char *)p, len, id))
147 		return -EINVAL;
148 
149 	return 0;
150 }
151 
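/*
 * Two mirrors match if they advertise the same set of file handle
 * versions, compared without regard to ordering.
 */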
152 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
153 		const struct nfs4_ff_layout_mirror *m2)
154 {
155 	int i, j;
156 
157 	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
158 		return false;
159 	for (i = 0; i < m1->fh_versions_cnt; i++) {
160 		bool found_fh = false;
161 		for (j = 0; j < m2->fh_versions_cnt; j++) {
162 			if (nfs_compare_fh(&m1->fh_versions[i],
163 					&m2->fh_versions[j]) == 0) {
164 				found_fh = true;
165 				break;
166 			}
167 		}
168 		if (!found_fh)
169 			return false;
170 	}
171 	return true;
172 }
173 
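/*
 * Register a mirror with the layout. If an equivalent mirror (same
 * deviceid and file handles) is already on the list and can still be
 * referenced, return that entry instead so the caller can drop its
 * duplicate.
 */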
174 static struct nfs4_ff_layout_mirror *
175 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
176 		struct nfs4_ff_layout_mirror *mirror)
177 {
178 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
179 	struct nfs4_ff_layout_mirror *pos;
180 	struct inode *inode = lo->plh_inode;
181 
182 	spin_lock(&inode->i_lock);
183 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
184 		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
185 			continue;
186 		if (!ff_mirror_match_fh(mirror, pos))
187 			continue;
188 		if (refcount_inc_not_zero(&pos->ref)) {
189 			spin_unlock(&inode->i_lock);
190 			return pos;
191 		}
192 	}
193 	list_add(&mirror->mirrors, &ff_layout->mirrors);
194 	mirror->layout = lo;
195 	spin_unlock(&inode->i_lock);
196 	return mirror;
197 }
198 
199 static void
200 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
201 {
202 	struct inode *inode;
203 	if (mirror->layout == NULL)
204 		return;
205 	inode = mirror->layout->plh_inode;
206 	spin_lock(&inode->i_lock);
207 	list_del(&mirror->mirrors);
208 	spin_unlock(&inode->i_lock);
209 	mirror->layout = NULL;
210 }
211 
212 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
213 {
214 	struct nfs4_ff_layout_mirror *mirror;
215 
216 	mirror = kzalloc(sizeof(*mirror), gfp_flags);
217 	if (mirror != NULL) {
218 		spin_lock_init(&mirror->lock);
219 		refcount_set(&mirror->ref, 1);
220 		INIT_LIST_HEAD(&mirror->mirrors);
221 	}
222 	return mirror;
223 }
224 
225 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
226 {
227 	const struct cred	*cred;
228 
229 	ff_layout_remove_mirror(mirror);
230 	kfree(mirror->fh_versions);
231 	cred = rcu_access_pointer(mirror->ro_cred);
232 	put_cred(cred);
233 	cred = rcu_access_pointer(mirror->rw_cred);
234 	put_cred(cred);
235 	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
236 	kfree(mirror);
237 }
238 
239 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
240 {
241 	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
242 		ff_layout_free_mirror(mirror);
243 }
244 
245 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
246 {
247 	int i;
248 
249 	if (fls->mirror_array) {
250 		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node, but we still do it
			 * here for the .alloc_lseg error path */
254 			ff_layout_put_mirror(fls->mirror_array[i]);
255 		}
256 		kfree(fls->mirror_array);
257 		fls->mirror_array = NULL;
258 	}
259 }
260 
261 static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
262 {
263 	int ret = 0;
264 
265 	dprintk("--> %s\n", __func__);
266 
267 	/* FIXME: remove this check when layout segment support is added */
268 	if (lgr->range.offset != 0 ||
269 	    lgr->range.length != NFS4_MAX_UINT64) {
270 		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
271 			__func__);
272 		ret = -EINVAL;
273 	}
274 
275 	dprintk("--> %s returns %d\n", __func__, ret);
276 	return ret;
277 }
278 
279 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
280 {
281 	if (fls) {
282 		ff_layout_free_mirror_array(fls);
283 		kfree(fls);
284 	}
285 }
286 
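/*
 * Ordering and merge helpers passed to pnfs_generic_layout_insert_lseg()
 * by ff_layout_add_lseg(): segments are ordered by iomode and offset,
 * and overlapping segments with the same iomode are merged into one.
 */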
287 static bool
288 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
289 		const struct pnfs_layout_range *l2)
290 {
291 	u64 end1, end2;
292 
293 	if (l1->iomode != l2->iomode)
294 		return l1->iomode != IOMODE_READ;
295 	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
296 	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
297 	if (end1 < l2->offset)
298 		return false;
299 	if (end2 < l1->offset)
300 		return true;
301 	return l2->offset <= l1->offset;
302 }
303 
304 static bool
305 ff_lseg_merge(struct pnfs_layout_segment *new,
306 		struct pnfs_layout_segment *old)
307 {
308 	u64 new_end, old_end;
309 
310 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
311 		return false;
312 	if (new->pls_range.iomode != old->pls_range.iomode)
313 		return false;
314 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
315 			old->pls_range.length);
316 	if (old_end < new->pls_range.offset)
317 		return false;
318 	new_end = pnfs_calc_offset_end(new->pls_range.offset,
319 			new->pls_range.length);
320 	if (new_end < old->pls_range.offset)
321 		return false;
322 
323 	/* Mergeable: copy info from 'old' to 'new' */
324 	if (new_end < old_end)
325 		new_end = old_end;
326 	if (new->pls_range.offset < old->pls_range.offset)
327 		new->pls_range.offset = old->pls_range.offset;
328 	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
329 			new_end);
330 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
331 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
332 	return true;
333 }
334 
335 static void
336 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
337 		struct pnfs_layout_segment *lseg,
338 		struct list_head *free_me)
339 {
340 	pnfs_generic_layout_insert_lseg(lo, lseg,
341 			ff_lseg_range_is_after,
342 			ff_lseg_merge,
343 			free_me);
344 }
345 
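/* Simple selection sort: order mirrors by descending efficiency. */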
346 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
347 {
348 	int i, j;
349 
350 	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
351 		for (j = i + 1; j < fls->mirror_array_cnt; j++)
352 			if (fls->mirror_array[i]->efficiency <
353 			    fls->mirror_array[j]->efficiency)
354 				swap(fls->mirror_array[i],
355 				     fls->mirror_array[j]);
356 	}
357 }
358 
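/*
 * Decode the flexfiles layout body returned by LAYOUTGET. A rough
 * sketch of the wire format decoded below (field names as used in
 * RFC 8435, ff_layout4), shown for reference only:
 *
 *	ffl_stripe_unit			8 bytes
 *	mirror count			4 bytes
 *	per mirror:
 *		data server count	4 bytes (only 1 is supported)
 *		ffds_deviceid		NFS4_DEVICEID4_SIZE bytes
 *		ffds_efficiency		4 bytes
 *		ffds_stateid		NFS4_STATEID_SIZE bytes
 *		fh count + fhs		variable
 *		ffds_user		opaque (stringified uid)
 *		ffds_group		opaque (stringified gid)
 *	ffl_flags			4 bytes (optional)
 *	ffl_stats_collect_hint		4 bytes (optional)
 */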
359 static struct pnfs_layout_segment *
360 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
361 		     struct nfs4_layoutget_res *lgr,
362 		     gfp_t gfp_flags)
363 {
364 	struct pnfs_layout_segment *ret;
365 	struct nfs4_ff_layout_segment *fls = NULL;
366 	struct xdr_stream stream;
367 	struct xdr_buf buf;
368 	struct page *scratch;
369 	u64 stripe_unit;
370 	u32 mirror_array_cnt;
371 	__be32 *p;
372 	int i, rc;
373 
374 	dprintk("--> %s\n", __func__);
375 	scratch = alloc_page(gfp_flags);
376 	if (!scratch)
377 		return ERR_PTR(-ENOMEM);
378 
379 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
380 			      lgr->layoutp->len);
381 	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
382 
383 	/* stripe unit and mirror_array_cnt */
384 	rc = -EIO;
385 	p = xdr_inline_decode(&stream, 8 + 4);
386 	if (!p)
387 		goto out_err_free;
388 
389 	p = xdr_decode_hyper(p, &stripe_unit);
390 	mirror_array_cnt = be32_to_cpup(p++);
391 	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
392 		stripe_unit, mirror_array_cnt);
393 
394 	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
395 	    mirror_array_cnt == 0)
396 		goto out_err_free;
397 
398 	rc = -ENOMEM;
399 	fls = kzalloc(sizeof(*fls), gfp_flags);
400 	if (!fls)
401 		goto out_err_free;
402 
403 	fls->mirror_array_cnt = mirror_array_cnt;
404 	fls->stripe_unit = stripe_unit;
405 	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
406 				    sizeof(fls->mirror_array[0]), gfp_flags);
407 	if (fls->mirror_array == NULL)
408 		goto out_err_free;
409 
410 	for (i = 0; i < fls->mirror_array_cnt; i++) {
411 		struct nfs4_ff_layout_mirror *mirror;
412 		struct cred *kcred;
413 		const struct cred __rcu *cred;
414 		kuid_t uid;
415 		kgid_t gid;
416 		u32 ds_count, fh_count, id;
417 		int j;
418 
419 		rc = -EIO;
420 		p = xdr_inline_decode(&stream, 4);
421 		if (!p)
422 			goto out_err_free;
423 		ds_count = be32_to_cpup(p);
424 
425 		/* FIXME: allow for striping? */
426 		if (ds_count != 1)
427 			goto out_err_free;
428 
429 		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
430 		if (fls->mirror_array[i] == NULL) {
431 			rc = -ENOMEM;
432 			goto out_err_free;
433 		}
434 
435 		fls->mirror_array[i]->ds_count = ds_count;
436 
437 		/* deviceid */
438 		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
439 		if (rc)
440 			goto out_err_free;
441 
442 		/* efficiency */
443 		rc = -EIO;
444 		p = xdr_inline_decode(&stream, 4);
445 		if (!p)
446 			goto out_err_free;
447 		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
448 
449 		/* stateid */
450 		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
451 		if (rc)
452 			goto out_err_free;
453 
454 		/* fh */
455 		rc = -EIO;
456 		p = xdr_inline_decode(&stream, 4);
457 		if (!p)
458 			goto out_err_free;
459 		fh_count = be32_to_cpup(p);
460 
461 		fls->mirror_array[i]->fh_versions =
462 			kcalloc(fh_count, sizeof(struct nfs_fh),
463 				gfp_flags);
464 		if (fls->mirror_array[i]->fh_versions == NULL) {
465 			rc = -ENOMEM;
466 			goto out_err_free;
467 		}
468 
469 		for (j = 0; j < fh_count; j++) {
470 			rc = decode_nfs_fh(&stream,
471 					   &fls->mirror_array[i]->fh_versions[j]);
472 			if (rc)
473 				goto out_err_free;
474 		}
475 
476 		fls->mirror_array[i]->fh_versions_cnt = fh_count;
477 
478 		/* user */
479 		rc = decode_name(&stream, &id);
480 		if (rc)
481 			goto out_err_free;
482 
483 		uid = make_kuid(&init_user_ns, id);
484 
485 		/* group */
486 		rc = decode_name(&stream, &id);
487 		if (rc)
488 			goto out_err_free;
489 
490 		gid = make_kgid(&init_user_ns, id);
491 
492 		if (gfp_flags & __GFP_FS)
493 			kcred = prepare_kernel_cred(NULL);
494 		else {
495 			unsigned int nofs_flags = memalloc_nofs_save();
496 			kcred = prepare_kernel_cred(NULL);
497 			memalloc_nofs_restore(nofs_flags);
498 		}
499 		rc = -ENOMEM;
500 		if (!kcred)
501 			goto out_err_free;
502 		kcred->fsuid = uid;
503 		kcred->fsgid = gid;
504 		cred = RCU_INITIALIZER(kcred);
505 
506 		if (lgr->range.iomode == IOMODE_READ)
507 			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
508 		else
509 			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
510 
511 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
512 		if (mirror != fls->mirror_array[i]) {
513 			/* swap cred ptrs so free_mirror will clean up old */
514 			if (lgr->range.iomode == IOMODE_READ) {
515 				cred = xchg(&mirror->ro_cred, cred);
516 				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
517 			} else {
518 				cred = xchg(&mirror->rw_cred, cred);
519 				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
520 			}
521 			ff_layout_free_mirror(fls->mirror_array[i]);
522 			fls->mirror_array[i] = mirror;
523 		}
524 
525 		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
526 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
527 			from_kuid(&init_user_ns, uid),
528 			from_kgid(&init_user_ns, gid));
529 	}
530 
531 	p = xdr_inline_decode(&stream, 4);
532 	if (!p)
533 		goto out_sort_mirrors;
534 	fls->flags = be32_to_cpup(p);
535 
536 	p = xdr_inline_decode(&stream, 4);
537 	if (!p)
538 		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
540 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
541 
542 out_sort_mirrors:
543 	ff_layout_sort_mirrors(fls);
544 	rc = ff_layout_check_layout(lgr);
545 	if (rc)
546 		goto out_err_free;
547 	ret = &fls->generic_hdr;
548 	dprintk("<-- %s (success)\n", __func__);
549 out_free_page:
550 	__free_page(scratch);
551 	return ret;
552 out_err_free:
553 	_ff_layout_free_lseg(fls);
554 	ret = ERR_PTR(rc);
555 	dprintk("<-- %s (%d)\n", __func__, rc);
556 	goto out_free_page;
557 }
558 
559 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
560 {
561 	struct pnfs_layout_segment *lseg;
562 
563 	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
564 		if (lseg->pls_range.iomode == IOMODE_RW)
565 			return true;
566 
567 	return false;
568 }
569 
570 static void
571 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
572 {
573 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
574 
575 	dprintk("--> %s\n", __func__);
576 
577 	if (lseg->pls_range.iomode == IOMODE_RW) {
578 		struct nfs4_flexfile_layout *ffl;
579 		struct inode *inode;
580 
581 		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
582 		inode = ffl->generic_hdr.plh_inode;
583 		spin_lock(&inode->i_lock);
584 		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
585 			ffl->commit_info.nbuckets = 0;
586 			kfree(ffl->commit_info.buckets);
587 			ffl->commit_info.buckets = NULL;
588 		}
589 		spin_unlock(&inode->i_lock);
590 	}
591 	_ff_layout_free_lseg(fls);
592 }
593 
/* Return 1 until we have support for multiple lsegs */
595 static int
596 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
597 {
598 	return 1;
599 }
600 
601 static void
602 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
603 {
604 	/* first IO request? */
605 	if (atomic_inc_return(&timer->n_ops) == 1) {
606 		timer->start_time = now;
607 	}
608 }
609 
610 static ktime_t
611 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
612 {
613 	ktime_t start;
614 
615 	if (atomic_dec_return(&timer->n_ops) < 0)
616 		WARN_ON_ONCE(1);
617 
618 	start = timer->start_time;
619 	timer->start_time = now;
620 	return ktime_sub(now, start);
621 }
622 
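/*
 * Record the start of an I/O for layoutstats purposes and return true
 * if the per-mirror (or module-wide) reporting interval has expired,
 * in which case the caller should schedule a LAYOUTSTATS report.
 */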
623 static bool
624 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
625 			    struct nfs4_ff_layoutstat *layoutstat,
626 			    ktime_t now)
627 {
628 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
629 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
630 
631 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
632 	if (!mirror->start_time)
633 		mirror->start_time = now;
634 	if (mirror->report_interval != 0)
635 		report_interval = (s64)mirror->report_interval * 1000LL;
636 	else if (layoutstats_timer != 0)
637 		report_interval = (s64)layoutstats_timer * 1000LL;
638 	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
639 			report_interval) {
640 		ffl->last_report_time = now;
641 		return true;
642 	}
643 
644 	return false;
645 }
646 
647 static void
648 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
649 		__u64 requested)
650 {
651 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
652 
653 	iostat->ops_requested++;
654 	iostat->bytes_requested += requested;
655 }
656 
657 static void
658 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
659 		__u64 requested,
660 		__u64 completed,
661 		ktime_t time_completed,
662 		ktime_t time_started)
663 {
664 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
665 	ktime_t completion_time = ktime_sub(time_completed, time_started);
666 	ktime_t timer;
667 
668 	iostat->ops_completed++;
669 	iostat->bytes_completed += completed;
670 	iostat->bytes_not_delivered += requested - completed;
671 
672 	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
673 	iostat->total_busy_time =
674 			ktime_add(iostat->total_busy_time, timer);
675 	iostat->aggregate_completion_time =
676 			ktime_add(iostat->aggregate_completion_time,
677 					completion_time);
678 }
679 
680 static void
681 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
682 		struct nfs4_ff_layout_mirror *mirror,
683 		__u64 requested, ktime_t now)
684 {
685 	bool report;
686 
687 	spin_lock(&mirror->lock);
688 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
689 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
690 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
691 	spin_unlock(&mirror->lock);
692 
693 	if (report)
694 		pnfs_report_layoutstat(inode, GFP_KERNEL);
695 }
696 
697 static void
698 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
699 		struct nfs4_ff_layout_mirror *mirror,
700 		__u64 requested,
701 		__u64 completed)
702 {
703 	spin_lock(&mirror->lock);
704 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
705 			requested, completed,
706 			ktime_get(), task->tk_start);
707 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
708 	spin_unlock(&mirror->lock);
709 }
710 
711 static void
712 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
713 		struct nfs4_ff_layout_mirror *mirror,
714 		__u64 requested, ktime_t now)
715 {
716 	bool report;
717 
718 	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
720 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
721 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
722 	spin_unlock(&mirror->lock);
723 
724 	if (report)
725 		pnfs_report_layoutstat(inode, GFP_NOIO);
726 }
727 
728 static void
729 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
730 		struct nfs4_ff_layout_mirror *mirror,
731 		__u64 requested,
732 		__u64 completed,
733 		enum nfs3_stable_how committed)
734 {
735 	if (committed == NFS_UNSTABLE)
736 		requested = completed = 0;
737 
738 	spin_lock(&mirror->lock);
739 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
740 			requested, completed, ktime_get(), task->tk_start);
741 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
742 	spin_unlock(&mirror->lock);
743 }
744 
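/*
 * Lazily allocate one commit bucket per mirror for the RW layout
 * segment. Racing allocations are resolved under the inode lock,
 * with the loser freeing its buckets.
 */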
745 static int
746 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
747 			    struct nfs_commit_info *cinfo,
748 			    gfp_t gfp_flags)
749 {
750 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
751 	struct pnfs_commit_bucket *buckets;
752 	int size;
753 
754 	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lsegs per file, we need to
		 * change struct pnfs_commit_bucket to allow
		 * dynamically increasing nbuckets.
		 */
760 		return 0;
761 	}
762 
763 	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
764 
765 	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
766 			  gfp_flags);
767 	if (!buckets)
768 		return -ENOMEM;
769 	else {
770 		int i;
771 
772 		spin_lock(&cinfo->inode->i_lock);
773 		if (cinfo->ds->nbuckets != 0)
774 			kfree(buckets);
775 		else {
776 			cinfo->ds->buckets = buckets;
777 			cinfo->ds->nbuckets = size;
778 			for (i = 0; i < size; i++) {
779 				INIT_LIST_HEAD(&buckets[i].written);
780 				INIT_LIST_HEAD(&buckets[i].committing);
781 				/* mark direct verifier as unset */
782 				buckets[i].direct_verf.committed =
783 					NFS_INVALID_STABLE_HOW;
784 			}
785 		}
786 		spin_unlock(&cinfo->inode->i_lock);
787 		return 0;
788 	}
789 }
790 
791 static void
792 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx)
793 {
794 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
795 
796 	if (devid)
797 		nfs4_mark_deviceid_unavailable(devid);
798 }
799 
800 static void
801 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx)
802 {
803 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
804 
805 	if (devid)
806 		nfs4_mark_deviceid_available(devid);
807 }
808 
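/*
 * Walk the mirrors (initially sorted by efficiency) starting at
 * start_idx and return the first data server that can be prepared for
 * a read, optionally skipping devices already marked unavailable.
 */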
809 static struct nfs4_pnfs_ds *
810 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
811 			     int start_idx, int *best_idx,
812 			     bool check_device)
813 {
814 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
815 	struct nfs4_ff_layout_mirror *mirror;
816 	struct nfs4_pnfs_ds *ds;
817 	bool fail_return = false;
818 	int idx;
819 
820 	/* mirrors are initially sorted by efficiency */
821 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
822 		if (idx+1 == fls->mirror_array_cnt)
823 			fail_return = !check_device;
824 
825 		mirror = FF_LAYOUT_COMP(lseg, idx);
826 		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
827 		if (!ds)
828 			continue;
829 
830 		if (check_device &&
831 		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
832 			continue;
833 
834 		*best_idx = idx;
835 		return ds;
836 	}
837 
838 	return NULL;
839 }
840 
841 static struct nfs4_pnfs_ds *
842 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
843 				 int start_idx, int *best_idx)
844 {
845 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
846 }
847 
848 static struct nfs4_pnfs_ds *
849 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
850 				   int start_idx, int *best_idx)
851 {
852 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
853 }
854 
855 static struct nfs4_pnfs_ds *
856 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
857 				  int start_idx, int *best_idx)
858 {
859 	struct nfs4_pnfs_ds *ds;
860 
861 	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
862 	if (ds)
863 		return ds;
864 	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
865 }
866 
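/*
 * Replace the descriptor's current layout segment with a whole-file
 * IOMODE_READ segment, optionally requiring strict iomode matching.
 */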
867 static void
868 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
869 		      struct nfs_page *req,
870 		      bool strict_iomode)
871 {
872 	pnfs_put_lseg(pgio->pg_lseg);
873 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
874 					   req->wb_context,
875 					   0,
876 					   NFS4_MAX_UINT64,
877 					   IOMODE_READ,
878 					   strict_iomode,
879 					   GFP_KERNEL);
880 	if (IS_ERR(pgio->pg_lseg)) {
881 		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
882 		pgio->pg_lseg = NULL;
883 	}
884 }
885 
886 static void
887 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
888 			struct nfs_page *req)
889 {
890 	struct nfs_pgio_mirror *pgm;
891 	struct nfs4_ff_layout_mirror *mirror;
892 	struct nfs4_pnfs_ds *ds;
893 	int ds_idx;
894 
895 retry:
896 	pnfs_generic_pg_check_layout(pgio);
897 	/* Use full layout for now */
898 	if (!pgio->pg_lseg) {
899 		ff_layout_pg_get_read(pgio, req, false);
900 		if (!pgio->pg_lseg)
901 			goto out_nolseg;
902 	}
903 	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
904 		ff_layout_pg_get_read(pgio, req, true);
905 		if (!pgio->pg_lseg)
906 			goto out_nolseg;
907 	}
908 
909 	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
910 	if (!ds) {
911 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
912 			goto out_mds;
913 		pnfs_put_lseg(pgio->pg_lseg);
914 		pgio->pg_lseg = NULL;
915 		/* Sleep for 1 second before retrying */
916 		ssleep(1);
917 		goto retry;
918 	}
919 
920 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
921 
922 	pgio->pg_mirror_idx = ds_idx;
923 
924 	/* read always uses only one mirror - idx 0 for pgio layer */
925 	pgm = &pgio->pg_mirrors[0];
926 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
927 
928 	return;
929 out_nolseg:
930 	if (pgio->pg_error < 0)
931 		return;
932 out_mds:
933 	pnfs_put_lseg(pgio->pg_lseg);
934 	pgio->pg_lseg = NULL;
935 	nfs_pageio_reset_read_mds(pgio);
936 }
937 
938 static void
939 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
940 			struct nfs_page *req)
941 {
942 	struct nfs4_ff_layout_mirror *mirror;
943 	struct nfs_pgio_mirror *pgm;
944 	struct nfs_commit_info cinfo;
945 	struct nfs4_pnfs_ds *ds;
946 	int i;
947 	int status;
948 
949 retry:
950 	pnfs_generic_pg_check_layout(pgio);
951 	if (!pgio->pg_lseg) {
952 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
953 						   req->wb_context,
954 						   0,
955 						   NFS4_MAX_UINT64,
956 						   IOMODE_RW,
957 						   false,
958 						   GFP_NOFS);
959 		if (IS_ERR(pgio->pg_lseg)) {
960 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
961 			pgio->pg_lseg = NULL;
962 			return;
963 		}
964 	}
965 	/* If no lseg, fall back to write through mds */
966 	if (pgio->pg_lseg == NULL)
967 		goto out_mds;
968 
969 	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
970 	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
971 	if (status < 0)
972 		goto out_mds;
973 
974 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
975 	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
976 	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
977 		goto out_mds;
978 
979 	for (i = 0; i < pgio->pg_mirror_count; i++) {
980 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
981 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
982 		if (!ds) {
983 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
984 				goto out_mds;
985 			pnfs_put_lseg(pgio->pg_lseg);
986 			pgio->pg_lseg = NULL;
987 			/* Sleep for 1 second before retrying */
988 			ssleep(1);
989 			goto retry;
990 		}
991 		pgm = &pgio->pg_mirrors[i];
992 		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
993 	}
994 
995 	return;
996 
997 out_mds:
998 	pnfs_put_lseg(pgio->pg_lseg);
999 	pgio->pg_lseg = NULL;
1000 	nfs_pageio_reset_write_mds(pgio);
1001 }
1002 
1003 static unsigned int
1004 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
1005 				    struct nfs_page *req)
1006 {
1007 	if (!pgio->pg_lseg) {
1008 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1009 						   req->wb_context,
1010 						   0,
1011 						   NFS4_MAX_UINT64,
1012 						   IOMODE_RW,
1013 						   false,
1014 						   GFP_NOFS);
1015 		if (IS_ERR(pgio->pg_lseg)) {
1016 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
1017 			pgio->pg_lseg = NULL;
1018 			goto out;
1019 		}
1020 	}
1021 	if (pgio->pg_lseg)
1022 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
1023 
1024 	/* no lseg means that pnfs is not in use, so no mirroring here */
1025 	nfs_pageio_reset_write_mds(pgio);
1026 out:
1027 	return 1;
1028 }
1029 
1030 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1031 	.pg_init = ff_layout_pg_init_read,
1032 	.pg_test = pnfs_generic_pg_test,
1033 	.pg_doio = pnfs_generic_pg_readpages,
1034 	.pg_cleanup = pnfs_generic_pg_cleanup,
1035 };
1036 
1037 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1038 	.pg_init = ff_layout_pg_init_write,
1039 	.pg_test = pnfs_generic_pg_test,
1040 	.pg_doio = pnfs_generic_pg_writepages,
1041 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1042 	.pg_cleanup = pnfs_generic_pg_cleanup,
1043 };
1044 
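/*
 * Reset a failed WRITE: schedule a layoutcommit and then either
 * reschedule the I/O through pNFS or resend it through the MDS.
 */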
1045 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1046 {
1047 	struct rpc_task *task = &hdr->task;
1048 
1049 	pnfs_layoutcommit_inode(hdr->inode, false);
1050 
1051 	if (retry_pnfs) {
1052 		dprintk("%s Reset task %5u for i/o through pNFS "
1053 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1054 			hdr->task.tk_pid,
1055 			hdr->inode->i_sb->s_id,
1056 			(unsigned long long)NFS_FILEID(hdr->inode),
1057 			hdr->args.count,
1058 			(unsigned long long)hdr->args.offset);
1059 
1060 		hdr->completion_ops->reschedule_io(hdr);
1061 		return;
1062 	}
1063 
1064 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1065 		dprintk("%s Reset task %5u for i/o through MDS "
1066 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1067 			hdr->task.tk_pid,
1068 			hdr->inode->i_sb->s_id,
1069 			(unsigned long long)NFS_FILEID(hdr->inode),
1070 			hdr->args.count,
1071 			(unsigned long long)hdr->args.offset);
1072 
1073 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1074 	}
1075 }
1076 
1077 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1078 {
1079 	struct rpc_task *task = &hdr->task;
1080 
1081 	pnfs_layoutcommit_inode(hdr->inode, false);
1082 
1083 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1084 		dprintk("%s Reset task %5u for i/o through MDS "
1085 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1086 			hdr->task.tk_pid,
1087 			hdr->inode->i_sb->s_id,
1088 			(unsigned long long)NFS_FILEID(hdr->inode),
1089 			hdr->args.count,
1090 			(unsigned long long)hdr->args.offset);
1091 
1092 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1093 	}
1094 }
1095 
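/*
 * Classify an NFSv4 error returned by a DS and decide how to recover:
 * retry against the DS (-EAGAIN), resend through pNFS on another
 * mirror (-NFS4ERR_RESET_TO_PNFS), or resend through the MDS
 * (-NFS4ERR_RESET_TO_MDS).
 */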
1096 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1097 					   struct nfs4_state *state,
1098 					   struct nfs_client *clp,
1099 					   struct pnfs_layout_segment *lseg,
1100 					   int idx)
1101 {
1102 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1103 	struct inode *inode = lo->plh_inode;
1104 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1105 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1106 
1107 	switch (task->tk_status) {
1108 	case -NFS4ERR_BADSESSION:
1109 	case -NFS4ERR_BADSLOT:
1110 	case -NFS4ERR_BAD_HIGH_SLOT:
1111 	case -NFS4ERR_DEADSESSION:
1112 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1113 	case -NFS4ERR_SEQ_FALSE_RETRY:
1114 	case -NFS4ERR_SEQ_MISORDERED:
1115 		dprintk("%s ERROR %d, Reset session. Exchangeid "
1116 			"flags 0x%x\n", __func__, task->tk_status,
1117 			clp->cl_exchange_flags);
1118 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1119 		break;
1120 	case -NFS4ERR_DELAY:
1121 	case -NFS4ERR_GRACE:
1122 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1123 		break;
1124 	case -NFS4ERR_RETRY_UNCACHED_REP:
1125 		break;
1126 	case -EAGAIN:
1127 		return -NFS4ERR_RESET_TO_PNFS;
1128 	/* Invalidate Layout errors */
1129 	case -NFS4ERR_PNFS_NO_LAYOUT:
1130 	case -ESTALE:           /* mapped NFS4ERR_STALE */
1131 	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
1132 	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
1133 	case -NFS4ERR_FHEXPIRED:
1134 	case -NFS4ERR_WRONG_TYPE:
1135 		dprintk("%s Invalid layout error %d\n", __func__,
1136 			task->tk_status);
1137 		/*
1138 		 * Destroy layout so new i/o will get a new layout.
1139 		 * Layout will not be destroyed until all current lseg
1140 		 * references are put. Mark layout as invalid to resend failed
1141 		 * i/o and all i/o waiting on the slot table to the MDS until
1142 		 * layout is destroyed and a new valid layout is obtained.
1143 		 */
1144 		pnfs_destroy_layout(NFS_I(inode));
1145 		rpc_wake_up(&tbl->slot_tbl_waitq);
1146 		goto reset;
1147 	/* RPC connection errors */
1148 	case -ECONNREFUSED:
1149 	case -EHOSTDOWN:
1150 	case -EHOSTUNREACH:
1151 	case -ENETUNREACH:
1152 	case -EIO:
1153 	case -ETIMEDOUT:
1154 	case -EPIPE:
1155 		dprintk("%s DS connection error %d\n", __func__,
1156 			task->tk_status);
1157 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1158 				&devid->deviceid);
1159 		rpc_wake_up(&tbl->slot_tbl_waitq);
1160 		/* fall through */
1161 	default:
1162 		if (ff_layout_avoid_mds_available_ds(lseg))
1163 			return -NFS4ERR_RESET_TO_PNFS;
1164 reset:
1165 		dprintk("%s Retry through MDS. Error %d\n", __func__,
1166 			task->tk_status);
1167 		return -NFS4ERR_RESET_TO_MDS;
1168 	}
1169 	task->tk_status = 0;
1170 	return -EAGAIN;
1171 }
1172 
1173 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1174 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1175 					   struct pnfs_layout_segment *lseg,
1176 					   int idx)
1177 {
1178 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1179 
1180 	switch (task->tk_status) {
1181 	/* File access problems. Don't mark the device as unavailable */
1182 	case -EACCES:
1183 	case -ESTALE:
1184 	case -EISDIR:
1185 	case -EBADHANDLE:
1186 	case -ELOOP:
1187 	case -ENOSPC:
1188 	case -EAGAIN:
1189 		break;
1190 	case -EJUKEBOX:
1191 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1192 		goto out_retry;
1193 	default:
1194 		dprintk("%s DS connection error %d\n", __func__,
1195 			task->tk_status);
1196 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1197 				&devid->deviceid);
1198 	}
1199 	/* FIXME: Need to prevent infinite looping here. */
1200 	return -NFS4ERR_RESET_TO_PNFS;
1201 out_retry:
1202 	task->tk_status = 0;
1203 	rpc_restart_call_prepare(task);
1204 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1205 	return -EAGAIN;
1206 }
1207 
1208 static int ff_layout_async_handle_error(struct rpc_task *task,
1209 					struct nfs4_state *state,
1210 					struct nfs_client *clp,
1211 					struct pnfs_layout_segment *lseg,
1212 					int idx)
1213 {
1214 	int vers = clp->cl_nfs_mod->rpc_vers->number;
1215 
1216 	if (task->tk_status >= 0) {
1217 		ff_layout_mark_ds_reachable(lseg, idx);
1218 		return 0;
1219 	}
1220 
1221 	/* Handle the case of an invalid layout segment */
1222 	if (!pnfs_is_valid_lseg(lseg))
1223 		return -NFS4ERR_RESET_TO_PNFS;
1224 
1225 	switch (vers) {
1226 	case 3:
1227 		return ff_layout_async_handle_error_v3(task, lseg, idx);
1228 	case 4:
1229 		return ff_layout_async_handle_error_v4(task, state, clp,
1230 						       lseg, idx);
1231 	default:
1232 		/* should never happen */
1233 		WARN_ON_ONCE(1);
1234 		return 0;
1235 	}
1236 }
1237 
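/*
 * Record a DS I/O error in the layout's error list so it can be
 * reported to the MDS via LAYOUTRETURN. Local transport errors are
 * mapped to NFS4ERR_NXIO, which also marks the device unreachable;
 * the layout is then marked for return.
 */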
1238 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1239 					int idx, u64 offset, u64 length,
1240 					u32 status, int opnum, int error)
1241 {
1242 	struct nfs4_ff_layout_mirror *mirror;
1243 	int err;
1244 
1245 	if (status == 0) {
1246 		switch (error) {
1247 		case -ETIMEDOUT:
1248 		case -EPFNOSUPPORT:
1249 		case -EPROTONOSUPPORT:
1250 		case -EOPNOTSUPP:
1251 		case -ECONNREFUSED:
1252 		case -ECONNRESET:
1253 		case -EHOSTDOWN:
1254 		case -EHOSTUNREACH:
1255 		case -ENETUNREACH:
1256 		case -EADDRINUSE:
1257 		case -ENOBUFS:
1258 		case -EPIPE:
1259 		case -EPERM:
1260 			status = NFS4ERR_NXIO;
1261 			break;
1262 		case -EACCES:
1263 			status = NFS4ERR_ACCESS;
1264 			break;
1265 		default:
1266 			return;
1267 		}
1268 	}
1269 
1270 	switch (status) {
1271 	case NFS4ERR_DELAY:
1272 	case NFS4ERR_GRACE:
1273 		return;
1274 	default:
1275 		break;
1276 	}
1277 
1278 	mirror = FF_LAYOUT_COMP(lseg, idx);
1279 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1280 				       mirror, offset, length, status, opnum,
1281 				       GFP_NOIO);
1282 	if (status == NFS4ERR_NXIO)
1283 		ff_layout_mark_ds_unreachable(lseg, idx);
1284 	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
1285 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1286 }
1287 
1288 /* NFS_PROTO call done callback routines */
1289 static int ff_layout_read_done_cb(struct rpc_task *task,
1290 				struct nfs_pgio_header *hdr)
1291 {
1292 	int err;
1293 
1294 	trace_nfs4_pnfs_read(hdr, task->tk_status);
1295 	if (task->tk_status < 0)
1296 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1297 					    hdr->args.offset, hdr->args.count,
1298 					    hdr->res.op_status, OP_READ,
1299 					    task->tk_status);
1300 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1301 					   hdr->ds_clp, hdr->lseg,
1302 					   hdr->pgio_mirror_idx);
1303 
1304 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1305 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1306 	switch (err) {
1307 	case -NFS4ERR_RESET_TO_PNFS:
1308 		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
1309 					hdr->pgio_mirror_idx + 1,
1310 					&hdr->pgio_mirror_idx))
1311 			goto out_layouterror;
1312 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1313 		return task->tk_status;
1314 	case -NFS4ERR_RESET_TO_MDS:
1315 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1316 		return task->tk_status;
1317 	case -EAGAIN:
1318 		goto out_eagain;
1319 	}
1320 
1321 	return 0;
1322 out_layouterror:
1323 	ff_layout_send_layouterror(hdr->lseg);
1324 out_eagain:
1325 	rpc_restart_call_prepare(task);
1326 	return -EAGAIN;
1327 }
1328 
1329 static bool
1330 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1331 {
1332 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1333 }
1334 
/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * RFC 5661 is not clear about which credential should be used.
 *
 * The flexfiles client should treat a FILE_SYNC reply from the DS as
 * DATA_SYNC, so to follow
 * http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send a layoutcommit after DS writes.
 */
1344 static void
1345 ff_layout_set_layoutcommit(struct inode *inode,
1346 		struct pnfs_layout_segment *lseg,
1347 		loff_t end_offset)
1348 {
1349 	if (!ff_layout_need_layoutcommit(lseg))
1350 		return;
1351 
1352 	pnfs_set_layoutcommit(inode, lseg, end_offset);
1353 	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1354 		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1355 }
1356 
1357 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1358 		struct nfs_pgio_header *hdr)
1359 {
1360 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1361 		return;
1362 	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1363 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1364 			hdr->args.count,
1365 			task->tk_start);
1366 }
1367 
1368 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1369 		struct nfs_pgio_header *hdr)
1370 {
1371 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1372 		return;
1373 	nfs4_ff_layout_stat_io_end_read(task,
1374 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1375 			hdr->args.count,
1376 			hdr->res.count);
1377 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1378 }
1379 
1380 static int ff_layout_read_prepare_common(struct rpc_task *task,
1381 					 struct nfs_pgio_header *hdr)
1382 {
1383 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1384 		rpc_exit(task, -EIO);
1385 		return -EIO;
1386 	}
1387 
1388 	ff_layout_read_record_layoutstats_start(task, hdr);
1389 	return 0;
1390 }
1391 
1392 /*
1393  * Call ops for the async read/write cases
1394  * In the case of dense layouts, the offset needs to be reset to its
1395  * original value.
1396  */
1397 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1398 {
1399 	struct nfs_pgio_header *hdr = data;
1400 
1401 	if (ff_layout_read_prepare_common(task, hdr))
1402 		return;
1403 
1404 	rpc_call_start(task);
1405 }
1406 
1407 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1408 {
1409 	struct nfs_pgio_header *hdr = data;
1410 
1411 	if (nfs4_setup_sequence(hdr->ds_clp,
1412 				&hdr->args.seq_args,
1413 				&hdr->res.seq_res,
1414 				task))
1415 		return;
1416 
1417 	ff_layout_read_prepare_common(task, hdr);
1418 }
1419 
1420 static void
1421 ff_layout_io_prepare_transmit(struct rpc_task *task,
1422 		void *data)
1423 {
1424 	struct nfs_pgio_header *hdr = data;
1425 
1426 	if (!pnfs_is_valid_lseg(hdr->lseg))
1427 		rpc_exit(task, -EAGAIN);
1428 }
1429 
1430 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1431 {
1432 	struct nfs_pgio_header *hdr = data;
1433 
1434 	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1435 
1436 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1437 	    task->tk_status == 0) {
1438 		nfs4_sequence_done(task, &hdr->res.seq_res);
1439 		return;
1440 	}
1441 
1442 	/* Note this may cause RPC to be resent */
1443 	hdr->mds_ops->rpc_call_done(task, hdr);
1444 }
1445 
1446 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1447 {
1448 	struct nfs_pgio_header *hdr = data;
1449 
1450 	ff_layout_read_record_layoutstats_done(task, hdr);
1451 	rpc_count_iostats_metrics(task,
1452 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1453 }
1454 
1455 static void ff_layout_read_release(void *data)
1456 {
1457 	struct nfs_pgio_header *hdr = data;
1458 
1459 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1460 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1461 		ff_layout_send_layouterror(hdr->lseg);
1462 		pnfs_read_resend_pnfs(hdr);
1463 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1464 		ff_layout_reset_read(hdr);
1465 	pnfs_generic_rw_release(data);
1466 }
1467 
1468 
1469 static int ff_layout_write_done_cb(struct rpc_task *task,
1470 				struct nfs_pgio_header *hdr)
1471 {
1472 	loff_t end_offs = 0;
1473 	int err;
1474 
1475 	trace_nfs4_pnfs_write(hdr, task->tk_status);
1476 	if (task->tk_status < 0)
1477 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1478 					    hdr->args.offset, hdr->args.count,
1479 					    hdr->res.op_status, OP_WRITE,
1480 					    task->tk_status);
1481 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1482 					   hdr->ds_clp, hdr->lseg,
1483 					   hdr->pgio_mirror_idx);
1484 
1485 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1486 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1487 	switch (err) {
1488 	case -NFS4ERR_RESET_TO_PNFS:
1489 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1490 		return task->tk_status;
1491 	case -NFS4ERR_RESET_TO_MDS:
1492 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1493 		return task->tk_status;
1494 	case -EAGAIN:
1495 		return -EAGAIN;
1496 	}
1497 
1498 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1499 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1500 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1501 
1502 	/* Note: if the write is unstable, don't set end_offs until commit */
1503 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1504 
	/* zero out fattr since we don't care about DS attrs at all */
1506 	hdr->fattr.valid = 0;
1507 	if (task->tk_status >= 0)
1508 		nfs_writeback_update_inode(hdr);
1509 
1510 	return 0;
1511 }
1512 
1513 static int ff_layout_commit_done_cb(struct rpc_task *task,
1514 				     struct nfs_commit_data *data)
1515 {
1516 	int err;
1517 
1518 	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1519 	if (task->tk_status < 0)
1520 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1521 					    data->args.offset, data->args.count,
1522 					    data->res.op_status, OP_COMMIT,
1523 					    task->tk_status);
1524 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1525 					   data->lseg, data->ds_commit_index);
1526 
1527 	switch (err) {
1528 	case -NFS4ERR_RESET_TO_PNFS:
1529 		pnfs_generic_prepare_to_resend_writes(data);
1530 		return -EAGAIN;
1531 	case -NFS4ERR_RESET_TO_MDS:
1532 		pnfs_generic_prepare_to_resend_writes(data);
1533 		return -EAGAIN;
1534 	case -EAGAIN:
1535 		rpc_restart_call_prepare(task);
1536 		return -EAGAIN;
1537 	}
1538 
1539 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1540 
1541 	return 0;
1542 }
1543 
1544 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1545 		struct nfs_pgio_header *hdr)
1546 {
1547 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1548 		return;
1549 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1550 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1551 			hdr->args.count,
1552 			task->tk_start);
1553 }
1554 
1555 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1556 		struct nfs_pgio_header *hdr)
1557 {
1558 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1559 		return;
1560 	nfs4_ff_layout_stat_io_end_write(task,
1561 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1562 			hdr->args.count, hdr->res.count,
1563 			hdr->res.verf->committed);
1564 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1565 }
1566 
1567 static int ff_layout_write_prepare_common(struct rpc_task *task,
1568 					  struct nfs_pgio_header *hdr)
1569 {
1570 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1571 		rpc_exit(task, -EIO);
1572 		return -EIO;
1573 	}
1574 
1575 	ff_layout_write_record_layoutstats_start(task, hdr);
1576 	return 0;
1577 }
1578 
1579 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1580 {
1581 	struct nfs_pgio_header *hdr = data;
1582 
1583 	if (ff_layout_write_prepare_common(task, hdr))
1584 		return;
1585 
1586 	rpc_call_start(task);
1587 }
1588 
1589 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1590 {
1591 	struct nfs_pgio_header *hdr = data;
1592 
1593 	if (nfs4_setup_sequence(hdr->ds_clp,
1594 				&hdr->args.seq_args,
1595 				&hdr->res.seq_res,
1596 				task))
1597 		return;
1598 
1599 	ff_layout_write_prepare_common(task, hdr);
1600 }
1601 
1602 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1603 {
1604 	struct nfs_pgio_header *hdr = data;
1605 
1606 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1607 	    task->tk_status == 0) {
1608 		nfs4_sequence_done(task, &hdr->res.seq_res);
1609 		return;
1610 	}
1611 
1612 	/* Note this may cause RPC to be resent */
1613 	hdr->mds_ops->rpc_call_done(task, hdr);
1614 }
1615 
1616 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1617 {
1618 	struct nfs_pgio_header *hdr = data;
1619 
1620 	ff_layout_write_record_layoutstats_done(task, hdr);
1621 	rpc_count_iostats_metrics(task,
1622 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1623 }
1624 
1625 static void ff_layout_write_release(void *data)
1626 {
1627 	struct nfs_pgio_header *hdr = data;
1628 
1629 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1630 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1631 		ff_layout_send_layouterror(hdr->lseg);
1632 		ff_layout_reset_write(hdr, true);
1633 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1634 		ff_layout_reset_write(hdr, false);
1635 	pnfs_generic_rw_release(data);
1636 }
1637 
1638 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1639 		struct nfs_commit_data *cdata)
1640 {
1641 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1642 		return;
1643 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1644 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1645 			0, task->tk_start);
1646 }
1647 
1648 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1649 		struct nfs_commit_data *cdata)
1650 {
1651 	struct nfs_page *req;
1652 	__u64 count = 0;
1653 
1654 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1655 		return;
1656 
1657 	if (task->tk_status == 0) {
1658 		list_for_each_entry(req, &cdata->pages, wb_list)
1659 			count += req->wb_bytes;
1660 	}
1661 	nfs4_ff_layout_stat_io_end_write(task,
1662 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1663 			count, count, NFS_FILE_SYNC);
1664 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1665 }
1666 
1667 static void ff_layout_commit_prepare_common(struct rpc_task *task,
1668 		struct nfs_commit_data *cdata)
1669 {
1670 	ff_layout_commit_record_layoutstats_start(task, cdata);
1671 }
1672 
1673 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1674 {
1675 	ff_layout_commit_prepare_common(task, data);
1676 	rpc_call_start(task);
1677 }
1678 
1679 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1680 {
1681 	struct nfs_commit_data *wdata = data;
1682 
1683 	if (nfs4_setup_sequence(wdata->ds_clp,
1684 				&wdata->args.seq_args,
1685 				&wdata->res.seq_res,
1686 				task))
1687 		return;
1688 	ff_layout_commit_prepare_common(task, data);
1689 }
1690 
1691 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1692 {
1693 	pnfs_generic_write_commit_done(task, data);
1694 }
1695 
1696 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1697 {
1698 	struct nfs_commit_data *cdata = data;
1699 
1700 	ff_layout_commit_record_layoutstats_done(task, cdata);
1701 	rpc_count_iostats_metrics(task,
1702 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1703 }
1704 
1705 static void ff_layout_commit_release(void *data)
1706 {
1707 	struct nfs_commit_data *cdata = data;
1708 
1709 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1710 	pnfs_generic_commit_release(data);
1711 }
1712 
1713 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1714 	.rpc_call_prepare = ff_layout_read_prepare_v3,
1715 	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1716 	.rpc_call_done = ff_layout_read_call_done,
1717 	.rpc_count_stats = ff_layout_read_count_stats,
1718 	.rpc_release = ff_layout_read_release,
1719 };
1720 
1721 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1722 	.rpc_call_prepare = ff_layout_read_prepare_v4,
1723 	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1724 	.rpc_call_done = ff_layout_read_call_done,
1725 	.rpc_count_stats = ff_layout_read_count_stats,
1726 	.rpc_release = ff_layout_read_release,
1727 };
1728 
1729 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1730 	.rpc_call_prepare = ff_layout_write_prepare_v3,
1731 	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1732 	.rpc_call_done = ff_layout_write_call_done,
1733 	.rpc_count_stats = ff_layout_write_count_stats,
1734 	.rpc_release = ff_layout_write_release,
1735 };
1736 
1737 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1738 	.rpc_call_prepare = ff_layout_write_prepare_v4,
1739 	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1740 	.rpc_call_done = ff_layout_write_call_done,
1741 	.rpc_count_stats = ff_layout_write_count_stats,
1742 	.rpc_release = ff_layout_write_release,
1743 };
1744 
1745 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1746 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1747 	.rpc_call_done = ff_layout_commit_done,
1748 	.rpc_count_stats = ff_layout_commit_count_stats,
1749 	.rpc_release = ff_layout_commit_release,
1750 };
1751 
1752 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1753 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1754 	.rpc_call_done = ff_layout_commit_done,
1755 	.rpc_count_stats = ff_layout_commit_count_stats,
1756 	.rpc_release = ff_layout_commit_release,
1757 };
1758 
1759 static enum pnfs_try_status
1760 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1761 {
1762 	struct pnfs_layout_segment *lseg = hdr->lseg;
1763 	struct nfs4_pnfs_ds *ds;
1764 	struct rpc_clnt *ds_clnt;
1765 	struct nfs4_ff_layout_mirror *mirror;
1766 	const struct cred *ds_cred;
1767 	loff_t offset = hdr->args.offset;
1768 	u32 idx = hdr->pgio_mirror_idx;
1769 	int vers;
1770 	struct nfs_fh *fh;
1771 
1772 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1773 		__func__, hdr->inode->i_ino,
1774 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1775 
1776 	mirror = FF_LAYOUT_COMP(lseg, idx);
1777 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1778 	if (!ds)
1779 		goto out_failed;
1780 
1781 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1782 						   hdr->inode);
1783 	if (IS_ERR(ds_clnt))
1784 		goto out_failed;
1785 
1786 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1787 	if (!ds_cred)
1788 		goto out_failed;
1789 
1790 	vers = nfs4_ff_layout_ds_version(mirror);
1791 
1792 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1793 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1794 
1795 	hdr->pgio_done_cb = ff_layout_read_done_cb;
1796 	refcount_inc(&ds->ds_clp->cl_count);
1797 	hdr->ds_clp = ds->ds_clp;
1798 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1799 	if (fh)
1800 		hdr->args.fh = fh;
1801 
1802 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1803 
1804 	/*
1805 	 * Note that if we ever decide to split across DSes,
1806 	 * then we may need to handle dense-like offsets.
1807 	 */
1808 	hdr->args.offset = offset;
1809 	hdr->mds_offset = offset;
1810 
1811 	/* Perform an asynchronous read to ds */
1812 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1813 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1814 				      &ff_layout_read_call_ops_v4,
1815 			  0, RPC_TASK_SOFTCONN);
1816 	put_cred(ds_cred);
1817 	return PNFS_ATTEMPTED;
1818 
1819 out_failed:
1820 	if (ff_layout_avoid_mds_available_ds(lseg))
1821 		return PNFS_TRY_AGAIN;
1822 	return PNFS_NOT_ATTEMPTED;
1823 }
1824 
1825 /* Perform async writes. */
1826 static enum pnfs_try_status
1827 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1828 {
1829 	struct pnfs_layout_segment *lseg = hdr->lseg;
1830 	struct nfs4_pnfs_ds *ds;
1831 	struct rpc_clnt *ds_clnt;
1832 	struct nfs4_ff_layout_mirror *mirror;
1833 	const struct cred *ds_cred;
1834 	loff_t offset = hdr->args.offset;
1835 	int vers;
1836 	struct nfs_fh *fh;
1837 	int idx = hdr->pgio_mirror_idx;
1838 
1839 	mirror = FF_LAYOUT_COMP(lseg, idx);
1840 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1841 	if (!ds)
1842 		goto out_failed;
1843 
1844 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1845 						   hdr->inode);
1846 	if (IS_ERR(ds_clnt))
1847 		goto out_failed;
1848 
1849 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1850 	if (!ds_cred)
1851 		goto out_failed;
1852 
1853 	vers = nfs4_ff_layout_ds_version(mirror);
1854 
1855 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1856 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1857 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1858 		vers);
1859 
1860 	hdr->pgio_done_cb = ff_layout_write_done_cb;
1861 	refcount_inc(&ds->ds_clp->cl_count);
1862 	hdr->ds_clp = ds->ds_clp;
1863 	hdr->ds_commit_idx = idx;
1864 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1865 	if (fh)
1866 		hdr->args.fh = fh;
1867 
1868 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1869 
1870 	/*
1871 	 * Note that if we ever decide to split across DSes,
1872 	 * then we may need to handle dense-like offsets.
1873 	 */
1874 	hdr->args.offset = offset;
1875 
1876 	/* Perform an asynchronous write */
1877 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1878 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1879 				      &ff_layout_write_call_ops_v4,
1880 			  sync, RPC_TASK_SOFTCONN);
1881 	put_cred(ds_cred);
1882 	return PNFS_ATTEMPTED;
1883 
1884 out_failed:
1885 	if (ff_layout_avoid_mds_available_ds(lseg))
1886 		return PNFS_TRY_AGAIN;
1887 	return PNFS_NOT_ATTEMPTED;
1888 }
1889 
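/*
 * The flexfiles layout keeps one commit bucket per mirror, so the
 * data-server index is simply the commit bucket index.
 */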
1890 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1891 {
1892 	return i;
1893 }
1894 
1895 static struct nfs_fh *
1896 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1897 {
1898 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1899 
1900 	/* FIXME: Assume that there is only one NFS version available
1901 	 * for the DS.
1902 	 */
1903 	return &flseg->mirror_array[i]->fh_versions[0];
1904 }
1905 
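/*
 * Send a COMMIT to the data server holding the writes for this commit
 * bucket, using the layout segment's DS credential and filehandle.
 * On any setup failure, hand the requests back to the generic pNFS
 * code so that they are resent through the MDS.
 */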
1906 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1907 {
1908 	struct pnfs_layout_segment *lseg = data->lseg;
1909 	struct nfs4_pnfs_ds *ds;
1910 	struct rpc_clnt *ds_clnt;
1911 	struct nfs4_ff_layout_mirror *mirror;
1912 	const struct cred *ds_cred;
1913 	u32 idx;
1914 	int vers, ret;
1915 	struct nfs_fh *fh;
1916 
1917 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1918 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1919 		goto out_err;
1920 
1921 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1922 	mirror = FF_LAYOUT_COMP(lseg, idx);
1923 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1924 	if (!ds)
1925 		goto out_err;
1926 
1927 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1928 						   data->inode);
1929 	if (IS_ERR(ds_clnt))
1930 		goto out_err;
1931 
1932 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1933 	if (!ds_cred)
1934 		goto out_err;
1935 
1936 	vers = nfs4_ff_layout_ds_version(mirror);
1937 
1938 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1939 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1940 		vers);
1941 	data->commit_done_cb = ff_layout_commit_done_cb;
1942 	data->cred = ds_cred;
1943 	refcount_inc(&ds->ds_clp->cl_count);
1944 	data->ds_clp = ds->ds_clp;
1945 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1946 	if (fh)
1947 		data->args.fh = fh;
1948 
1949 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1950 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
1951 					       &ff_layout_commit_call_ops_v4,
1952 				   how, RPC_TASK_SOFTCONN);
1953 	put_cred(ds_cred);
1954 	return ret;
1955 out_err:
1956 	pnfs_generic_prepare_to_resend_writes(data);
1957 	pnfs_generic_commit_release(data);
1958 	return -EAGAIN;
1959 }
1960 
1961 static int
1962 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1963 			   int how, struct nfs_commit_info *cinfo)
1964 {
1965 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1966 					    ff_layout_initiate_commit);
1967 }
1968 
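/*
 * Return the layout's commit information, or NULL if no layout has
 * been allocated for this inode.
 */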
1969 static struct pnfs_ds_commit_info *
1970 ff_layout_get_ds_info(struct inode *inode)
1971 {
1972 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1973 
1974 	if (layout == NULL)
1975 		return NULL;
1976 
1977 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1978 }
1979 
1980 static void
1981 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1982 {
1983 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1984 						  id_node));
1985 }
1986 
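/*
 * Encode the I/O error portion of the LAYOUTRETURN body: the error
 * count followed by one entry per recorded data-server error.
 */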
1987 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
1988 				  const struct nfs4_layoutreturn_args *args,
1989 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
1990 {
1991 	__be32 *start;
1992 
1993 	start = xdr_reserve_space(xdr, 4);
1994 	if (unlikely(!start))
1995 		return -E2BIG;
1996 
1997 	*start = cpu_to_be32(ff_args->num_errors);
1998 	/* This assumes that we always return _ALL_ layouts */
1999 	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2000 }
2001 
2002 static void
2003 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2004 {
2005 	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2006 }
2007 
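/*
 * Encode the fixed-size head of one iostats entry: offset, length,
 * stateid, the read/write operation and byte counts, and the device ID.
 */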
2008 static void
2009 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2010 			    const nfs4_stateid *stateid,
2011 			    const struct nfs42_layoutstat_devinfo *devinfo)
2012 {
2013 	__be32 *p;
2014 
2015 	p = xdr_reserve_space(xdr, 8 + 8);
2016 	p = xdr_encode_hyper(p, devinfo->offset);
2017 	p = xdr_encode_hyper(p, devinfo->length);
2018 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2019 	p = xdr_reserve_space(xdr, 4*8);
2020 	p = xdr_encode_hyper(p, devinfo->read_count);
2021 	p = xdr_encode_hyper(p, devinfo->read_bytes);
2022 	p = xdr_encode_hyper(p, devinfo->write_count);
2023 	p = xdr_encode_hyper(p, devinfo->write_bytes);
2024 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2025 }
2026 
2027 static void
2028 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2029 			    const nfs4_stateid *stateid,
2030 			    const struct nfs42_layoutstat_devinfo *devinfo)
2031 {
2032 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2033 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2034 			devinfo->ld_private.data);
2035 }
2036 
2037 /* Encode the per-device iostats that accompany this layoutreturn */
2038 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2039 		const struct nfs4_layoutreturn_args *args,
2040 		struct nfs4_flexfile_layoutreturn_args *ff_args)
2041 {
2042 	__be32 *p;
2043 	int i;
2044 
2045 	p = xdr_reserve_space(xdr, 4);
2046 	*p = cpu_to_be32(ff_args->num_dev);
2047 	for (i = 0; i < ff_args->num_dev; i++)
2048 		ff_layout_encode_ff_iostat(xdr,
2049 				&args->layout->plh_stateid,
2050 				&ff_args->devinfo[i]);
2051 }
2052 
2053 static void
2054 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2055 		unsigned int num_entries)
2056 {
2057 	unsigned int i;
2058 
2059 	for (i = 0; i < num_entries; i++) {
2060 		if (!devinfo[i].ld_private.ops)
2061 			continue;
2062 		if (!devinfo[i].ld_private.ops->free)
2063 			continue;
2064 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2065 	}
2066 }
2067 
2068 static struct nfs4_deviceid_node *
2069 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2070 			      struct pnfs_device *pdev, gfp_t gfp_flags)
2071 {
2072 	struct nfs4_ff_layout_ds *dsaddr;
2073 
2074 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2075 	if (!dsaddr)
2076 		return NULL;
2077 	return &dsaddr->id_node;
2078 }
2079 
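/*
 * Encode the flexfiles LAYOUTRETURN body.  The ioerr and iostats
 * arrays are first encoded into a scratch page so that their total
 * length is known, then that page is appended to the XDR stream as an
 * opaque body preceded by its length.
 */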
2080 static void
2081 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2082 		const void *voidargs,
2083 		const struct nfs4_xdr_opaque_data *ff_opaque)
2084 {
2085 	const struct nfs4_layoutreturn_args *args = voidargs;
2086 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2087 	struct xdr_buf tmp_buf = {
2088 		.head = {
2089 			[0] = {
2090 				.iov_base = page_address(ff_args->pages[0]),
2091 			},
2092 		},
2093 		.buflen = PAGE_SIZE,
2094 	};
2095 	struct xdr_stream tmp_xdr;
2096 	__be32 *start;
2097 
2098 	dprintk("%s: Begin\n", __func__);
2099 
2100 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2101 
2102 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2103 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2104 
2105 	start = xdr_reserve_space(xdr, 4);
2106 	*start = cpu_to_be32(tmp_buf.len);
2107 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2108 
2109 	dprintk("%s: Return\n", __func__);
2110 }
2111 
2112 static void
2113 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2114 {
2115 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2116 
2117 	if (!args->data)
2118 		return;
2119 	ff_args = args->data;
2120 	args->data = NULL;
2121 
2122 	ff_layout_free_ds_ioerr(&ff_args->errors);
2123 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2124 
2125 	put_page(ff_args->pages[0]);
2126 	kfree(ff_args);
2127 }
2128 
2129 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2130 	.encode = ff_layout_encode_layoutreturn,
2131 	.free = ff_layout_free_layoutreturn,
2132 };
2133 
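/*
 * Gather the data-server errors and per-mirror statistics that will
 * accompany this LAYOUTRETURN, and attach them to the request as
 * layout-driver private data.
 */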
2134 static int
2135 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2136 {
2137 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2138 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2139 
2140 	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
2141 	if (!ff_args)
2142 		goto out_nomem;
2143 	ff_args->pages[0] = alloc_page(GFP_KERNEL);
2144 	if (!ff_args->pages[0])
2145 		goto out_nomem_free;
2146 
2147 	INIT_LIST_HEAD(&ff_args->errors);
2148 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2149 			&args->range, &ff_args->errors,
2150 			FF_LAYOUTRETURN_MAXERR);
2151 
2152 	spin_lock(&args->inode->i_lock);
2153 	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2154 			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
2155 	spin_unlock(&args->inode->i_lock);
2156 
2157 	args->ld_private->ops = &layoutreturn_ops;
2158 	args->ld_private->data = ff_args;
2159 	return 0;
2160 out_nomem_free:
2161 	kfree(ff_args);
2162 out_nomem:
2163 	return -ENOMEM;
2164 }
2165 
2166 #ifdef CONFIG_NFS_V4_2
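/*
 * Report recorded data-server I/O errors to the MDS via LAYOUTERROR,
 * batching at most NFS42_LAYOUTERROR_MAX errors per call.
 */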
2167 void
2168 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2169 {
2170 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2171 	struct nfs42_layout_error *errors;
2172 	LIST_HEAD(head);
2173 
2174 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2175 		return;
2176 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2177 	if (list_empty(&head))
2178 		return;
2179 
2180 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
2181 			sizeof(*errors), GFP_NOFS);
2182 	if (errors != NULL) {
2183 		const struct nfs4_ff_layout_ds_err *pos;
2184 		size_t n = 0;
2185 
2186 		list_for_each_entry(pos, &head, list) {
2187 			errors[n].offset = pos->offset;
2188 			errors[n].length = pos->length;
2189 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2190 			errors[n].errors[0].dev_id = pos->deviceid;
2191 			errors[n].errors[0].status = pos->status;
2192 			errors[n].errors[0].opnum = pos->opnum;
2193 			n++;
2194 			if (!list_is_last(&pos->list, &head) &&
2195 			    n < NFS42_LAYOUTERROR_MAX)
2196 				continue;
2197 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2198 				break;
2199 			n = 0;
2200 		}
2201 		kfree(errors);
2202 	}
2203 	ff_layout_free_ds_ioerr(&head);
2204 }
2205 #else
2206 void
2207 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2208 {
2209 }
2210 #endif
2211 
2212 static int
2213 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2214 {
2215 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2216 
2217 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2218 }
2219 
2220 static size_t
2221 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2222 			  const int buflen)
2223 {
2224 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2225 	const struct in6_addr *addr = &sin6->sin6_addr;
2226 
2227 	/*
2228 	 * RFC 4291, Section 2.2.2
2229 	 *
2230 	 * Shorthanded ANY address
2231 	 */
2232 	if (ipv6_addr_any(addr))
2233 		return snprintf(buf, buflen, "::");
2234 
2235 	/*
2236 	 * RFC 4291, Section 2.2.2
2237 	 *
2238 	 * Shorthanded loopback address
2239 	 */
2240 	if (ipv6_addr_loopback(addr))
2241 		return snprintf(buf, buflen, "::1");
2242 
2243 	/*
2244 	 * RFC 4291, Section 2.2.3
2245 	 *
2246 	 * Special presentation address format for mapped v4
2247 	 * addresses.
2248 	 */
2249 	if (ipv6_addr_v4mapped(addr))
2250 		return snprintf(buf, buflen, "::ffff:%pI4",
2251 					&addr->s6_addr32[3]);
2252 
2253 	/*
2254 	 * RFC 4291, Section 2.2.1
2255 	 */
2256 	return snprintf(buf, buflen, "%pI6c", addr);
2257 }
2258 
2259 /* Derived from rpc_sockaddr2uaddr */
2260 static void
2261 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2262 {
2263 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2264 	char portbuf[RPCBIND_MAXUADDRPLEN];
2265 	char addrbuf[RPCBIND_MAXUADDRLEN];
2266 	char *netid;
2267 	unsigned short port;
2268 	int len, netid_len;
2269 	__be32 *p;
2270 
2271 	switch (sap->sa_family) {
2272 	case AF_INET:
2273 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2274 			return;
2275 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2276 		netid = "tcp";
2277 		netid_len = 3;
2278 		break;
2279 	case AF_INET6:
2280 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2281 			return;
2282 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2283 		netid = "tcp6";
2284 		netid_len = 4;
2285 		break;
2286 	default:
2287 		/* we only support tcp and tcp6 */
2288 		WARN_ON_ONCE(1);
2289 		return;
2290 	}
2291 
2292 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2293 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2294 
2295 	p = xdr_reserve_space(xdr, 4 + netid_len);
2296 	xdr_encode_opaque(p, netid, netid_len);
2297 
2298 	p = xdr_reserve_space(xdr, 4 + len);
2299 	xdr_encode_opaque(p, addrbuf, len);
2300 }
2301 
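/* Encode a ktime_t as an nfstime4: 64-bit seconds plus 32-bit nanoseconds. */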
2302 static void
2303 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2304 			 ktime_t t)
2305 {
2306 	struct timespec64 ts;
2307 	__be32 *p;
2308 
2309 	p = xdr_reserve_space(xdr, 12);
2310 	ts = ktime_to_timespec64(t);
2311 	p = xdr_encode_hyper(p, ts.tv_sec);
2312 	*p++ = cpu_to_be32(ts.tv_nsec);
2313 }
2314 
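/*
 * Encode one set of I/O latency statistics: five 64-bit counters
 * followed by the busy time and aggregate completion time.
 */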
2315 static void
2316 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2317 			    struct nfs4_ff_io_stat *stat)
2318 {
2319 	__be32 *p;
2320 
2321 	p = xdr_reserve_space(xdr, 5 * 8);
2322 	p = xdr_encode_hyper(p, stat->ops_requested);
2323 	p = xdr_encode_hyper(p, stat->bytes_requested);
2324 	p = xdr_encode_hyper(p, stat->ops_completed);
2325 	p = xdr_encode_hyper(p, stat->bytes_completed);
2326 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2327 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2328 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2329 }
2330 
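/*
 * Encode the layoutupdate body for one mirror: the data server's
 * netaddr and filehandle, its read and write latency statistics, the
 * time elapsed since the mirror's start_time, and a trailing boolean
 * that is always encoded as false.
 */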
2331 static void
2332 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2333 			      const struct nfs42_layoutstat_devinfo *devinfo,
2334 			      struct nfs4_ff_layout_mirror *mirror)
2335 {
2336 	struct nfs4_pnfs_ds_addr *da;
2337 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2338 	struct nfs_fh *fh = &mirror->fh_versions[0];
2339 	__be32 *p;
2340 
2341 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2342 	dprintk("%s: DS %s: encoding address %s\n",
2343 		__func__, ds->ds_remotestr, da->da_remotestr);
2344 	/* netaddr4 */
2345 	ff_layout_encode_netaddr(xdr, da);
2346 	/* nfs_fh4 */
2347 	p = xdr_reserve_space(xdr, 4 + fh->size);
2348 	xdr_encode_opaque(p, fh->data, fh->size);
2349 	/* ff_io_latency4 read */
2350 	spin_lock(&mirror->lock);
2351 	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2352 	/* ff_io_latency4 write */
2353 	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2354 	spin_unlock(&mirror->lock);
2355 	/* nfstime4 */
2356 	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2357 	/* bool */
2358 	p = xdr_reserve_space(xdr, 4);
2359 	*p = cpu_to_be32(false);
2360 }
2361 
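/*
 * Encode the opaque layoutupdate blob for one LAYOUTSTATS devinfo
 * entry, backfilling its length once the body has been encoded.
 */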
2362 static void
2363 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2364 			     const struct nfs4_xdr_opaque_data *opaque)
2365 {
2366 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2367 			struct nfs42_layoutstat_devinfo, ld_private);
2368 	__be32 *start;
2369 
2370 	/* layoutupdate length */
2371 	start = xdr_reserve_space(xdr, 4);
2372 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2373 
2374 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2375 }
2376 
2377 static void
2378 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2379 {
2380 	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2381 
2382 	ff_layout_put_mirror(mirror);
2383 }
2384 
2385 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2386 	.encode = ff_layout_encode_layoutstats,
2387 	.free	= ff_layout_free_layoutstats,
2388 };
2389 
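/*
 * Walk the layout's mirrors and fill in a layoutstats devinfo entry
 * for each one that has fresh statistics, up to dev_limit entries.
 * Each entry takes a reference on its mirror; the reference is dropped
 * when the entry's ld_private data is freed.  Returns the number of
 * entries filled in.
 */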
2390 static int
2391 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2392 			       struct nfs42_layoutstat_devinfo *devinfo,
2393 			       int dev_limit)
2394 {
2395 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2396 	struct nfs4_ff_layout_mirror *mirror;
2397 	struct nfs4_deviceid_node *dev;
2398 	int i = 0;
2399 
2400 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2401 		if (i >= dev_limit)
2402 			break;
2403 		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2404 			continue;
2405 		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
2406 			continue;
2407 		/* The mirror reference is dropped in ff_layout_free_layoutstats() */
2408 		if (!refcount_inc_not_zero(&mirror->ref))
2409 			continue;
2410 		dev = &mirror->mirror_ds->id_node;
2411 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2412 		devinfo->offset = 0;
2413 		devinfo->length = NFS4_MAX_UINT64;
2414 		spin_lock(&mirror->lock);
2415 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2416 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2417 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2418 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2419 		spin_unlock(&mirror->lock);
2420 		devinfo->layout_type = LAYOUT_FLEX_FILES;
2421 		devinfo->ld_private.ops = &layoutstat_ops;
2422 		devinfo->ld_private.data = mirror;
2423 
2424 		devinfo++;
2425 		i++;
2426 	}
2427 	return i;
2428 }
2429 
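/*
 * Allocate the devinfo array for a LAYOUTSTATS call and populate it
 * from the layout's mirrors.  Returns -ENOENT if there is nothing new
 * to report.
 */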
2430 static int
2431 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2432 {
2433 	struct nfs4_flexfile_layout *ff_layout;
2434 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2435 
2436 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2437 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2438 	if (!args->devinfo)
2439 		return -ENOMEM;
2440 
2441 	spin_lock(&args->inode->i_lock);
2442 	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2443 	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2444 			&args->devinfo[0], dev_count);
2445 	spin_unlock(&args->inode->i_lock);
2446 	if (!args->num_dev) {
2447 		kfree(args->devinfo);
2448 		args->devinfo = NULL;
2449 		return -ENOENT;
2450 	}
2451 
2452 	return 0;
2453 }
2454 
2455 static int
2456 ff_layout_set_layoutdriver(struct nfs_server *server,
2457 		const struct nfs_fh *dummy)
2458 {
2459 #if IS_ENABLED(CONFIG_NFS_V4_2)
2460 	server->caps |= NFS_CAP_LAYOUTSTATS;
2461 #endif
2462 	return 0;
2463 }
2464 
2465 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2466 	.id			= LAYOUT_FLEX_FILES,
2467 	.name			= "LAYOUT_FLEX_FILES",
2468 	.owner			= THIS_MODULE,
2469 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2470 	.max_layoutget_response	= 4096, /* 1 page or so... */
2471 	.set_layoutdriver	= ff_layout_set_layoutdriver,
2472 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2473 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2474 	.alloc_lseg		= ff_layout_alloc_lseg,
2475 	.free_lseg		= ff_layout_free_lseg,
2476 	.add_lseg		= ff_layout_add_lseg,
2477 	.pg_read_ops		= &ff_layout_pg_read_ops,
2478 	.pg_write_ops		= &ff_layout_pg_write_ops,
2479 	.get_ds_info		= ff_layout_get_ds_info,
2480 	.free_deviceid_node	= ff_layout_free_deviceid_node,
2481 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2482 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2483 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2484 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2485 	.commit_pagelist	= ff_layout_commit_pagelist,
2486 	.read_pagelist		= ff_layout_read_pagelist,
2487 	.write_pagelist		= ff_layout_write_pagelist,
2488 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2489 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2490 	.sync			= pnfs_nfs_generic_sync,
2491 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2492 };
2493 
2494 static int __init nfs4flexfilelayout_init(void)
2495 {
2496 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2497 	       __func__);
2498 	return pnfs_register_layoutdriver(&flexfilelayout_type);
2499 }
2500 
2501 static void __exit nfs4flexfilelayout_exit(void)
2502 {
2503 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2504 	       __func__);
2505 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2506 }
2507 
2508 MODULE_ALIAS("nfs-layouttype4-4");
2509 
2510 MODULE_LICENSE("GPL");
2511 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2512 
2513 module_init(nfs4flexfilelayout_init);
2514 module_exit(nfs4flexfilelayout_exit);
2515