1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Module for pnfs flexfile layout driver.
4  *
5  * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6  *
7  * Tao Peng <bergwolf@primarydata.com>
8  */
9 
10 #include <linux/nfs_fs.h>
11 #include <linux/nfs_page.h>
12 #include <linux/module.h>
13 #include <linux/sched/mm.h>
14 
15 #include <linux/sunrpc/metrics.h>
16 
17 #include "flexfilelayout.h"
18 #include "../nfs4session.h"
19 #include "../nfs4idmap.h"
20 #include "../internal.h"
21 #include "../delegation.h"
22 #include "../nfs4trace.h"
23 #include "../iostat.h"
24 #include "../nfs.h"
25 #include "../nfs42.h"
26 
27 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
28 
29 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
30 #define FF_LAYOUTRETURN_MAXERR 20
31 
32 static unsigned short io_maxretrans;
33 
34 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
35 		struct nfs_pgio_header *hdr);
36 static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
37 			       struct nfs42_layoutstat_devinfo *devinfo,
38 			       int dev_limit);
39 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
40 			      const struct nfs42_layoutstat_devinfo *devinfo,
41 			      struct nfs4_ff_layout_mirror *mirror);
42 
43 static struct pnfs_layout_hdr *
44 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
45 {
46 	struct nfs4_flexfile_layout *ffl;
47 
48 	ffl = kzalloc(sizeof(*ffl), gfp_flags);
49 	if (ffl) {
50 		INIT_LIST_HEAD(&ffl->error_list);
51 		INIT_LIST_HEAD(&ffl->mirrors);
52 		ffl->last_report_time = ktime_get();
53 		return &ffl->generic_hdr;
54 	} else
55 		return NULL;
56 }
57 
58 static void
59 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
60 {
61 	struct nfs4_ff_layout_ds_err *err, *n;
62 
63 	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
64 				 list) {
65 		list_del(&err->list);
66 		kfree(err);
67 	}
68 	kfree(FF_LAYOUT_FROM_HDR(lo));
69 }
70 
71 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
72 {
73 	__be32 *p;
74 
75 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
76 	if (unlikely(p == NULL))
77 		return -ENOBUFS;
78 	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
79 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
80 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
81 		p[0], p[1], p[2], p[3]);
82 	return 0;
83 }
84 
85 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
86 {
87 	__be32 *p;
88 
89 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
90 	if (unlikely(!p))
91 		return -ENOBUFS;
92 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
93 	nfs4_print_deviceid(devid);
94 	return 0;
95 }
96 
97 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
98 {
99 	__be32 *p;
100 
101 	p = xdr_inline_decode(xdr, 4);
102 	if (unlikely(!p))
103 		return -ENOBUFS;
104 	fh->size = be32_to_cpup(p++);
105 	if (fh->size > sizeof(struct nfs_fh)) {
106 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
107 		       fh->size);
108 		return -EOVERFLOW;
109 	}
110 	/* fh.data */
111 	p = xdr_inline_decode(xdr, fh->size);
112 	if (unlikely(!p))
113 		return -ENOBUFS;
114 	memcpy(&fh->data, p, fh->size);
115 	dprintk("%s: fh len %d\n", __func__, fh->size);
116 
117 	return 0;
118 }
119 
120 /*
121  * Currently only stringified uids and gids are accepted.
122  * I.e., kerberos is not supported to the DSes, so no pricipals.
123  *
124  * That means that one common function will suffice, but when
125  * principals are added, this should be split to accomodate
126  * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
127  */
128 static int
129 decode_name(struct xdr_stream *xdr, u32 *id)
130 {
131 	__be32 *p;
132 	int len;
133 
134 	/* opaque_length(4)*/
135 	p = xdr_inline_decode(xdr, 4);
136 	if (unlikely(!p))
137 		return -ENOBUFS;
138 	len = be32_to_cpup(p++);
139 	if (len < 0)
140 		return -EINVAL;
141 
142 	dprintk("%s: len %u\n", __func__, len);
143 
144 	/* opaque body */
145 	p = xdr_inline_decode(xdr, len);
146 	if (unlikely(!p))
147 		return -ENOBUFS;
148 
149 	if (!nfs_map_string_to_numeric((char *)p, len, id))
150 		return -EINVAL;
151 
152 	return 0;
153 }
154 
155 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
156 		const struct nfs4_ff_layout_mirror *m2)
157 {
158 	int i, j;
159 
160 	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
161 		return false;
162 	for (i = 0; i < m1->fh_versions_cnt; i++) {
163 		bool found_fh = false;
164 		for (j = 0; j < m2->fh_versions_cnt; j++) {
165 			if (nfs_compare_fh(&m1->fh_versions[i],
166 					&m2->fh_versions[j]) == 0) {
167 				found_fh = true;
168 				break;
169 			}
170 		}
171 		if (!found_fh)
172 			return false;
173 	}
174 	return true;
175 }
176 
177 static struct nfs4_ff_layout_mirror *
178 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
179 		struct nfs4_ff_layout_mirror *mirror)
180 {
181 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
182 	struct nfs4_ff_layout_mirror *pos;
183 	struct inode *inode = lo->plh_inode;
184 
185 	spin_lock(&inode->i_lock);
186 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
187 		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
188 			continue;
189 		if (!ff_mirror_match_fh(mirror, pos))
190 			continue;
191 		if (refcount_inc_not_zero(&pos->ref)) {
192 			spin_unlock(&inode->i_lock);
193 			return pos;
194 		}
195 	}
196 	list_add(&mirror->mirrors, &ff_layout->mirrors);
197 	mirror->layout = lo;
198 	spin_unlock(&inode->i_lock);
199 	return mirror;
200 }
201 
202 static void
203 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
204 {
205 	struct inode *inode;
206 	if (mirror->layout == NULL)
207 		return;
208 	inode = mirror->layout->plh_inode;
209 	spin_lock(&inode->i_lock);
210 	list_del(&mirror->mirrors);
211 	spin_unlock(&inode->i_lock);
212 	mirror->layout = NULL;
213 }
214 
215 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
216 {
217 	struct nfs4_ff_layout_mirror *mirror;
218 
219 	mirror = kzalloc(sizeof(*mirror), gfp_flags);
220 	if (mirror != NULL) {
221 		spin_lock_init(&mirror->lock);
222 		refcount_set(&mirror->ref, 1);
223 		INIT_LIST_HEAD(&mirror->mirrors);
224 	}
225 	return mirror;
226 }
227 
228 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
229 {
230 	const struct cred	*cred;
231 
232 	ff_layout_remove_mirror(mirror);
233 	kfree(mirror->fh_versions);
234 	cred = rcu_access_pointer(mirror->ro_cred);
235 	put_cred(cred);
236 	cred = rcu_access_pointer(mirror->rw_cred);
237 	put_cred(cred);
238 	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
239 	kfree(mirror);
240 }
241 
242 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
243 {
244 	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
245 		ff_layout_free_mirror(mirror);
246 }
247 
248 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
249 {
250 	int i;
251 
252 	if (fls->mirror_array) {
253 		for (i = 0; i < fls->mirror_array_cnt; i++) {
254 			/* normally mirror_ds is freed in
255 			 * .free_deviceid_node but we still do it here
256 			 * for .alloc_lseg error path */
257 			ff_layout_put_mirror(fls->mirror_array[i]);
258 		}
259 		kfree(fls->mirror_array);
260 		fls->mirror_array = NULL;
261 	}
262 }
263 
264 static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
265 {
266 	int ret = 0;
267 
268 	dprintk("--> %s\n", __func__);
269 
270 	/* FIXME: remove this check when layout segment support is added */
271 	if (lgr->range.offset != 0 ||
272 	    lgr->range.length != NFS4_MAX_UINT64) {
273 		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
274 			__func__);
275 		ret = -EINVAL;
276 	}
277 
278 	dprintk("--> %s returns %d\n", __func__, ret);
279 	return ret;
280 }
281 
282 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
283 {
284 	if (fls) {
285 		ff_layout_free_mirror_array(fls);
286 		kfree(fls);
287 	}
288 }
289 
290 static bool
291 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
292 		const struct pnfs_layout_range *l2)
293 {
294 	u64 end1, end2;
295 
296 	if (l1->iomode != l2->iomode)
297 		return l1->iomode != IOMODE_READ;
298 	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
299 	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
300 	if (end1 < l2->offset)
301 		return false;
302 	if (end2 < l1->offset)
303 		return true;
304 	return l2->offset <= l1->offset;
305 }
306 
307 static bool
308 ff_lseg_merge(struct pnfs_layout_segment *new,
309 		struct pnfs_layout_segment *old)
310 {
311 	u64 new_end, old_end;
312 
313 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
314 		return false;
315 	if (new->pls_range.iomode != old->pls_range.iomode)
316 		return false;
317 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
318 			old->pls_range.length);
319 	if (old_end < new->pls_range.offset)
320 		return false;
321 	new_end = pnfs_calc_offset_end(new->pls_range.offset,
322 			new->pls_range.length);
323 	if (new_end < old->pls_range.offset)
324 		return false;
325 
326 	/* Mergeable: copy info from 'old' to 'new' */
327 	if (new_end < old_end)
328 		new_end = old_end;
329 	if (new->pls_range.offset < old->pls_range.offset)
330 		new->pls_range.offset = old->pls_range.offset;
331 	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
332 			new_end);
333 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
334 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
335 	return true;
336 }
337 
338 static void
339 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
340 		struct pnfs_layout_segment *lseg,
341 		struct list_head *free_me)
342 {
343 	pnfs_generic_layout_insert_lseg(lo, lseg,
344 			ff_lseg_range_is_after,
345 			ff_lseg_merge,
346 			free_me);
347 }
348 
349 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
350 {
351 	int i, j;
352 
353 	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
354 		for (j = i + 1; j < fls->mirror_array_cnt; j++)
355 			if (fls->mirror_array[i]->efficiency <
356 			    fls->mirror_array[j]->efficiency)
357 				swap(fls->mirror_array[i],
358 				     fls->mirror_array[j]);
359 	}
360 }
361 
362 static struct pnfs_layout_segment *
363 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
364 		     struct nfs4_layoutget_res *lgr,
365 		     gfp_t gfp_flags)
366 {
367 	struct pnfs_layout_segment *ret;
368 	struct nfs4_ff_layout_segment *fls = NULL;
369 	struct xdr_stream stream;
370 	struct xdr_buf buf;
371 	struct page *scratch;
372 	u64 stripe_unit;
373 	u32 mirror_array_cnt;
374 	__be32 *p;
375 	int i, rc;
376 
377 	dprintk("--> %s\n", __func__);
378 	scratch = alloc_page(gfp_flags);
379 	if (!scratch)
380 		return ERR_PTR(-ENOMEM);
381 
382 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
383 			      lgr->layoutp->len);
384 	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
385 
386 	/* stripe unit and mirror_array_cnt */
387 	rc = -EIO;
388 	p = xdr_inline_decode(&stream, 8 + 4);
389 	if (!p)
390 		goto out_err_free;
391 
392 	p = xdr_decode_hyper(p, &stripe_unit);
393 	mirror_array_cnt = be32_to_cpup(p++);
394 	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
395 		stripe_unit, mirror_array_cnt);
396 
397 	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
398 	    mirror_array_cnt == 0)
399 		goto out_err_free;
400 
401 	rc = -ENOMEM;
402 	fls = kzalloc(sizeof(*fls), gfp_flags);
403 	if (!fls)
404 		goto out_err_free;
405 
406 	fls->mirror_array_cnt = mirror_array_cnt;
407 	fls->stripe_unit = stripe_unit;
408 	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
409 				    sizeof(fls->mirror_array[0]), gfp_flags);
410 	if (fls->mirror_array == NULL)
411 		goto out_err_free;
412 
413 	for (i = 0; i < fls->mirror_array_cnt; i++) {
414 		struct nfs4_ff_layout_mirror *mirror;
415 		struct cred *kcred;
416 		const struct cred __rcu *cred;
417 		kuid_t uid;
418 		kgid_t gid;
419 		u32 ds_count, fh_count, id;
420 		int j;
421 
422 		rc = -EIO;
423 		p = xdr_inline_decode(&stream, 4);
424 		if (!p)
425 			goto out_err_free;
426 		ds_count = be32_to_cpup(p);
427 
428 		/* FIXME: allow for striping? */
429 		if (ds_count != 1)
430 			goto out_err_free;
431 
432 		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
433 		if (fls->mirror_array[i] == NULL) {
434 			rc = -ENOMEM;
435 			goto out_err_free;
436 		}
437 
438 		fls->mirror_array[i]->ds_count = ds_count;
439 
440 		/* deviceid */
441 		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
442 		if (rc)
443 			goto out_err_free;
444 
445 		/* efficiency */
446 		rc = -EIO;
447 		p = xdr_inline_decode(&stream, 4);
448 		if (!p)
449 			goto out_err_free;
450 		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
451 
452 		/* stateid */
453 		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
454 		if (rc)
455 			goto out_err_free;
456 
457 		/* fh */
458 		rc = -EIO;
459 		p = xdr_inline_decode(&stream, 4);
460 		if (!p)
461 			goto out_err_free;
462 		fh_count = be32_to_cpup(p);
463 
464 		fls->mirror_array[i]->fh_versions =
465 			kcalloc(fh_count, sizeof(struct nfs_fh),
466 				gfp_flags);
467 		if (fls->mirror_array[i]->fh_versions == NULL) {
468 			rc = -ENOMEM;
469 			goto out_err_free;
470 		}
471 
472 		for (j = 0; j < fh_count; j++) {
473 			rc = decode_nfs_fh(&stream,
474 					   &fls->mirror_array[i]->fh_versions[j]);
475 			if (rc)
476 				goto out_err_free;
477 		}
478 
479 		fls->mirror_array[i]->fh_versions_cnt = fh_count;
480 
481 		/* user */
482 		rc = decode_name(&stream, &id);
483 		if (rc)
484 			goto out_err_free;
485 
486 		uid = make_kuid(&init_user_ns, id);
487 
488 		/* group */
489 		rc = decode_name(&stream, &id);
490 		if (rc)
491 			goto out_err_free;
492 
493 		gid = make_kgid(&init_user_ns, id);
494 
495 		if (gfp_flags & __GFP_FS)
496 			kcred = prepare_kernel_cred(NULL);
497 		else {
498 			unsigned int nofs_flags = memalloc_nofs_save();
499 			kcred = prepare_kernel_cred(NULL);
500 			memalloc_nofs_restore(nofs_flags);
501 		}
502 		rc = -ENOMEM;
503 		if (!kcred)
504 			goto out_err_free;
505 		kcred->fsuid = uid;
506 		kcred->fsgid = gid;
507 		cred = RCU_INITIALIZER(kcred);
508 
509 		if (lgr->range.iomode == IOMODE_READ)
510 			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
511 		else
512 			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
513 
514 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
515 		if (mirror != fls->mirror_array[i]) {
516 			/* swap cred ptrs so free_mirror will clean up old */
517 			if (lgr->range.iomode == IOMODE_READ) {
518 				cred = xchg(&mirror->ro_cred, cred);
519 				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
520 			} else {
521 				cred = xchg(&mirror->rw_cred, cred);
522 				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
523 			}
524 			ff_layout_free_mirror(fls->mirror_array[i]);
525 			fls->mirror_array[i] = mirror;
526 		}
527 
528 		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
529 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
530 			from_kuid(&init_user_ns, uid),
531 			from_kgid(&init_user_ns, gid));
532 	}
533 
534 	p = xdr_inline_decode(&stream, 4);
535 	if (!p)
536 		goto out_sort_mirrors;
537 	fls->flags = be32_to_cpup(p);
538 
539 	p = xdr_inline_decode(&stream, 4);
540 	if (!p)
541 		goto out_sort_mirrors;
542 	for (i=0; i < fls->mirror_array_cnt; i++)
543 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
544 
545 out_sort_mirrors:
546 	ff_layout_sort_mirrors(fls);
547 	rc = ff_layout_check_layout(lgr);
548 	if (rc)
549 		goto out_err_free;
550 	ret = &fls->generic_hdr;
551 	dprintk("<-- %s (success)\n", __func__);
552 out_free_page:
553 	__free_page(scratch);
554 	return ret;
555 out_err_free:
556 	_ff_layout_free_lseg(fls);
557 	ret = ERR_PTR(rc);
558 	dprintk("<-- %s (%d)\n", __func__, rc);
559 	goto out_free_page;
560 }
561 
562 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
563 {
564 	struct pnfs_layout_segment *lseg;
565 
566 	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
567 		if (lseg->pls_range.iomode == IOMODE_RW)
568 			return true;
569 
570 	return false;
571 }
572 
573 static void
574 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
575 {
576 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
577 
578 	dprintk("--> %s\n", __func__);
579 
580 	if (lseg->pls_range.iomode == IOMODE_RW) {
581 		struct nfs4_flexfile_layout *ffl;
582 		struct inode *inode;
583 
584 		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
585 		inode = ffl->generic_hdr.plh_inode;
586 		spin_lock(&inode->i_lock);
587 		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
588 			ffl->commit_info.nbuckets = 0;
589 			kfree(ffl->commit_info.buckets);
590 			ffl->commit_info.buckets = NULL;
591 		}
592 		spin_unlock(&inode->i_lock);
593 	}
594 	_ff_layout_free_lseg(fls);
595 }
596 
597 /* Return 1 until we have multiple lsegs support */
598 static int
599 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
600 {
601 	return 1;
602 }
603 
604 static void
605 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
606 {
607 	/* first IO request? */
608 	if (atomic_inc_return(&timer->n_ops) == 1) {
609 		timer->start_time = now;
610 	}
611 }
612 
613 static ktime_t
614 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
615 {
616 	ktime_t start;
617 
618 	if (atomic_dec_return(&timer->n_ops) < 0)
619 		WARN_ON_ONCE(1);
620 
621 	start = timer->start_time;
622 	timer->start_time = now;
623 	return ktime_sub(now, start);
624 }
625 
626 static bool
627 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
628 			    struct nfs4_ff_layoutstat *layoutstat,
629 			    ktime_t now)
630 {
631 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
632 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
633 
634 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
635 	if (!mirror->start_time)
636 		mirror->start_time = now;
637 	if (mirror->report_interval != 0)
638 		report_interval = (s64)mirror->report_interval * 1000LL;
639 	else if (layoutstats_timer != 0)
640 		report_interval = (s64)layoutstats_timer * 1000LL;
641 	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
642 			report_interval) {
643 		ffl->last_report_time = now;
644 		return true;
645 	}
646 
647 	return false;
648 }
649 
650 static void
651 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
652 		__u64 requested)
653 {
654 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
655 
656 	iostat->ops_requested++;
657 	iostat->bytes_requested += requested;
658 }
659 
660 static void
661 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
662 		__u64 requested,
663 		__u64 completed,
664 		ktime_t time_completed,
665 		ktime_t time_started)
666 {
667 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
668 	ktime_t completion_time = ktime_sub(time_completed, time_started);
669 	ktime_t timer;
670 
671 	iostat->ops_completed++;
672 	iostat->bytes_completed += completed;
673 	iostat->bytes_not_delivered += requested - completed;
674 
675 	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
676 	iostat->total_busy_time =
677 			ktime_add(iostat->total_busy_time, timer);
678 	iostat->aggregate_completion_time =
679 			ktime_add(iostat->aggregate_completion_time,
680 					completion_time);
681 }
682 
683 static void
684 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
685 		struct nfs4_ff_layout_mirror *mirror,
686 		__u64 requested, ktime_t now)
687 {
688 	bool report;
689 
690 	spin_lock(&mirror->lock);
691 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
692 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
693 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
694 	spin_unlock(&mirror->lock);
695 
696 	if (report)
697 		pnfs_report_layoutstat(inode, GFP_KERNEL);
698 }
699 
700 static void
701 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
702 		struct nfs4_ff_layout_mirror *mirror,
703 		__u64 requested,
704 		__u64 completed)
705 {
706 	spin_lock(&mirror->lock);
707 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
708 			requested, completed,
709 			ktime_get(), task->tk_start);
710 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
711 	spin_unlock(&mirror->lock);
712 }
713 
714 static void
715 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
716 		struct nfs4_ff_layout_mirror *mirror,
717 		__u64 requested, ktime_t now)
718 {
719 	bool report;
720 
721 	spin_lock(&mirror->lock);
722 	report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now);
723 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
724 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
725 	spin_unlock(&mirror->lock);
726 
727 	if (report)
728 		pnfs_report_layoutstat(inode, GFP_NOIO);
729 }
730 
731 static void
732 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
733 		struct nfs4_ff_layout_mirror *mirror,
734 		__u64 requested,
735 		__u64 completed,
736 		enum nfs3_stable_how committed)
737 {
738 	if (committed == NFS_UNSTABLE)
739 		requested = completed = 0;
740 
741 	spin_lock(&mirror->lock);
742 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
743 			requested, completed, ktime_get(), task->tk_start);
744 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
745 	spin_unlock(&mirror->lock);
746 }
747 
748 static int
749 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
750 			    struct nfs_commit_info *cinfo,
751 			    gfp_t gfp_flags)
752 {
753 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
754 	struct pnfs_commit_bucket *buckets;
755 	int size;
756 
757 	if (cinfo->ds->nbuckets != 0) {
758 		/* This assumes there is only one RW lseg per file.
759 		 * To support multiple lseg per file, we need to
760 		 * change struct pnfs_commit_bucket to allow dynamic
761 		 * increasing nbuckets.
762 		 */
763 		return 0;
764 	}
765 
766 	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
767 
768 	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
769 			  gfp_flags);
770 	if (!buckets)
771 		return -ENOMEM;
772 	else {
773 		int i;
774 
775 		spin_lock(&cinfo->inode->i_lock);
776 		if (cinfo->ds->nbuckets != 0)
777 			kfree(buckets);
778 		else {
779 			cinfo->ds->buckets = buckets;
780 			cinfo->ds->nbuckets = size;
781 			for (i = 0; i < size; i++) {
782 				INIT_LIST_HEAD(&buckets[i].written);
783 				INIT_LIST_HEAD(&buckets[i].committing);
784 				/* mark direct verifier as unset */
785 				buckets[i].direct_verf.committed =
786 					NFS_INVALID_STABLE_HOW;
787 			}
788 		}
789 		spin_unlock(&cinfo->inode->i_lock);
790 		return 0;
791 	}
792 }
793 
794 static void
795 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx)
796 {
797 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
798 
799 	if (devid)
800 		nfs4_mark_deviceid_unavailable(devid);
801 }
802 
803 static void
804 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx)
805 {
806 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
807 
808 	if (devid)
809 		nfs4_mark_deviceid_available(devid);
810 }
811 
812 static struct nfs4_pnfs_ds *
813 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
814 			     int start_idx, int *best_idx,
815 			     bool check_device)
816 {
817 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
818 	struct nfs4_ff_layout_mirror *mirror;
819 	struct nfs4_pnfs_ds *ds;
820 	bool fail_return = false;
821 	int idx;
822 
823 	/* mirrors are initially sorted by efficiency */
824 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
825 		if (idx+1 == fls->mirror_array_cnt)
826 			fail_return = !check_device;
827 
828 		mirror = FF_LAYOUT_COMP(lseg, idx);
829 		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
830 		if (!ds)
831 			continue;
832 
833 		if (check_device &&
834 		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
835 			continue;
836 
837 		*best_idx = idx;
838 		return ds;
839 	}
840 
841 	return NULL;
842 }
843 
844 static struct nfs4_pnfs_ds *
845 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
846 				 int start_idx, int *best_idx)
847 {
848 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
849 }
850 
851 static struct nfs4_pnfs_ds *
852 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
853 				   int start_idx, int *best_idx)
854 {
855 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
856 }
857 
858 static struct nfs4_pnfs_ds *
859 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
860 				  int start_idx, int *best_idx)
861 {
862 	struct nfs4_pnfs_ds *ds;
863 
864 	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
865 	if (ds)
866 		return ds;
867 	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
868 }
869 
870 static void
871 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
872 		      struct nfs_page *req,
873 		      bool strict_iomode)
874 {
875 	pnfs_put_lseg(pgio->pg_lseg);
876 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
877 					   nfs_req_openctx(req),
878 					   0,
879 					   NFS4_MAX_UINT64,
880 					   IOMODE_READ,
881 					   strict_iomode,
882 					   GFP_KERNEL);
883 	if (IS_ERR(pgio->pg_lseg)) {
884 		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
885 		pgio->pg_lseg = NULL;
886 	}
887 }
888 
889 static void
890 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
891 			struct nfs_page *req)
892 {
893 	struct nfs_pgio_mirror *pgm;
894 	struct nfs4_ff_layout_mirror *mirror;
895 	struct nfs4_pnfs_ds *ds;
896 	int ds_idx;
897 
898 retry:
899 	pnfs_generic_pg_check_layout(pgio);
900 	/* Use full layout for now */
901 	if (!pgio->pg_lseg) {
902 		ff_layout_pg_get_read(pgio, req, false);
903 		if (!pgio->pg_lseg)
904 			goto out_nolseg;
905 	}
906 	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
907 		ff_layout_pg_get_read(pgio, req, true);
908 		if (!pgio->pg_lseg)
909 			goto out_nolseg;
910 	}
911 
912 	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
913 	if (!ds) {
914 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
915 			goto out_mds;
916 		pnfs_put_lseg(pgio->pg_lseg);
917 		pgio->pg_lseg = NULL;
918 		/* Sleep for 1 second before retrying */
919 		ssleep(1);
920 		goto retry;
921 	}
922 
923 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
924 
925 	pgio->pg_mirror_idx = ds_idx;
926 
927 	/* read always uses only one mirror - idx 0 for pgio layer */
928 	pgm = &pgio->pg_mirrors[0];
929 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
930 
931 	pgio->pg_maxretrans = io_maxretrans;
932 	return;
933 out_nolseg:
934 	if (pgio->pg_error < 0)
935 		return;
936 out_mds:
937 	pnfs_put_lseg(pgio->pg_lseg);
938 	pgio->pg_lseg = NULL;
939 	nfs_pageio_reset_read_mds(pgio);
940 }
941 
942 static void
943 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
944 			struct nfs_page *req)
945 {
946 	struct nfs4_ff_layout_mirror *mirror;
947 	struct nfs_pgio_mirror *pgm;
948 	struct nfs_commit_info cinfo;
949 	struct nfs4_pnfs_ds *ds;
950 	int i;
951 	int status;
952 
953 retry:
954 	pnfs_generic_pg_check_layout(pgio);
955 	if (!pgio->pg_lseg) {
956 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
957 						   nfs_req_openctx(req),
958 						   0,
959 						   NFS4_MAX_UINT64,
960 						   IOMODE_RW,
961 						   false,
962 						   GFP_NOFS);
963 		if (IS_ERR(pgio->pg_lseg)) {
964 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
965 			pgio->pg_lseg = NULL;
966 			return;
967 		}
968 	}
969 	/* If no lseg, fall back to write through mds */
970 	if (pgio->pg_lseg == NULL)
971 		goto out_mds;
972 
973 	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
974 	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
975 	if (status < 0)
976 		goto out_mds;
977 
978 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
979 	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
980 	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
981 		goto out_mds;
982 
983 	for (i = 0; i < pgio->pg_mirror_count; i++) {
984 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
985 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
986 		if (!ds) {
987 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
988 				goto out_mds;
989 			pnfs_put_lseg(pgio->pg_lseg);
990 			pgio->pg_lseg = NULL;
991 			/* Sleep for 1 second before retrying */
992 			ssleep(1);
993 			goto retry;
994 		}
995 		pgm = &pgio->pg_mirrors[i];
996 		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
997 	}
998 
999 	pgio->pg_maxretrans = io_maxretrans;
1000 	return;
1001 
1002 out_mds:
1003 	pnfs_put_lseg(pgio->pg_lseg);
1004 	pgio->pg_lseg = NULL;
1005 	nfs_pageio_reset_write_mds(pgio);
1006 }
1007 
1008 static unsigned int
1009 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
1010 				    struct nfs_page *req)
1011 {
1012 	if (!pgio->pg_lseg) {
1013 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1014 						   nfs_req_openctx(req),
1015 						   0,
1016 						   NFS4_MAX_UINT64,
1017 						   IOMODE_RW,
1018 						   false,
1019 						   GFP_NOFS);
1020 		if (IS_ERR(pgio->pg_lseg)) {
1021 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
1022 			pgio->pg_lseg = NULL;
1023 			goto out;
1024 		}
1025 	}
1026 	if (pgio->pg_lseg)
1027 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
1028 
1029 	/* no lseg means that pnfs is not in use, so no mirroring here */
1030 	nfs_pageio_reset_write_mds(pgio);
1031 out:
1032 	return 1;
1033 }
1034 
1035 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1036 	.pg_init = ff_layout_pg_init_read,
1037 	.pg_test = pnfs_generic_pg_test,
1038 	.pg_doio = pnfs_generic_pg_readpages,
1039 	.pg_cleanup = pnfs_generic_pg_cleanup,
1040 };
1041 
1042 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1043 	.pg_init = ff_layout_pg_init_write,
1044 	.pg_test = pnfs_generic_pg_test,
1045 	.pg_doio = pnfs_generic_pg_writepages,
1046 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1047 	.pg_cleanup = pnfs_generic_pg_cleanup,
1048 };
1049 
1050 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1051 {
1052 	struct rpc_task *task = &hdr->task;
1053 
1054 	pnfs_layoutcommit_inode(hdr->inode, false);
1055 
1056 	if (retry_pnfs) {
1057 		dprintk("%s Reset task %5u for i/o through pNFS "
1058 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1059 			hdr->task.tk_pid,
1060 			hdr->inode->i_sb->s_id,
1061 			(unsigned long long)NFS_FILEID(hdr->inode),
1062 			hdr->args.count,
1063 			(unsigned long long)hdr->args.offset);
1064 
1065 		hdr->completion_ops->reschedule_io(hdr);
1066 		return;
1067 	}
1068 
1069 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1070 		dprintk("%s Reset task %5u for i/o through MDS "
1071 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1072 			hdr->task.tk_pid,
1073 			hdr->inode->i_sb->s_id,
1074 			(unsigned long long)NFS_FILEID(hdr->inode),
1075 			hdr->args.count,
1076 			(unsigned long long)hdr->args.offset);
1077 
1078 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1079 	}
1080 }
1081 
1082 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1083 {
1084 	struct rpc_task *task = &hdr->task;
1085 
1086 	pnfs_layoutcommit_inode(hdr->inode, false);
1087 
1088 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1089 		dprintk("%s Reset task %5u for i/o through MDS "
1090 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1091 			hdr->task.tk_pid,
1092 			hdr->inode->i_sb->s_id,
1093 			(unsigned long long)NFS_FILEID(hdr->inode),
1094 			hdr->args.count,
1095 			(unsigned long long)hdr->args.offset);
1096 
1097 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1098 	}
1099 }
1100 
1101 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1102 					   struct nfs4_state *state,
1103 					   struct nfs_client *clp,
1104 					   struct pnfs_layout_segment *lseg,
1105 					   int idx)
1106 {
1107 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1108 	struct inode *inode = lo->plh_inode;
1109 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1110 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1111 
1112 	switch (task->tk_status) {
1113 	case -NFS4ERR_BADSESSION:
1114 	case -NFS4ERR_BADSLOT:
1115 	case -NFS4ERR_BAD_HIGH_SLOT:
1116 	case -NFS4ERR_DEADSESSION:
1117 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1118 	case -NFS4ERR_SEQ_FALSE_RETRY:
1119 	case -NFS4ERR_SEQ_MISORDERED:
1120 		dprintk("%s ERROR %d, Reset session. Exchangeid "
1121 			"flags 0x%x\n", __func__, task->tk_status,
1122 			clp->cl_exchange_flags);
1123 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1124 		break;
1125 	case -NFS4ERR_DELAY:
1126 	case -NFS4ERR_GRACE:
1127 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1128 		break;
1129 	case -NFS4ERR_RETRY_UNCACHED_REP:
1130 		break;
1131 	case -EAGAIN:
1132 		return -NFS4ERR_RESET_TO_PNFS;
1133 	/* Invalidate Layout errors */
1134 	case -NFS4ERR_PNFS_NO_LAYOUT:
1135 	case -ESTALE:           /* mapped NFS4ERR_STALE */
1136 	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
1137 	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
1138 	case -NFS4ERR_FHEXPIRED:
1139 	case -NFS4ERR_WRONG_TYPE:
1140 		dprintk("%s Invalid layout error %d\n", __func__,
1141 			task->tk_status);
1142 		/*
1143 		 * Destroy layout so new i/o will get a new layout.
1144 		 * Layout will not be destroyed until all current lseg
1145 		 * references are put. Mark layout as invalid to resend failed
1146 		 * i/o and all i/o waiting on the slot table to the MDS until
1147 		 * layout is destroyed and a new valid layout is obtained.
1148 		 */
1149 		pnfs_destroy_layout(NFS_I(inode));
1150 		rpc_wake_up(&tbl->slot_tbl_waitq);
1151 		goto reset;
1152 	/* RPC connection errors */
1153 	case -ECONNREFUSED:
1154 	case -EHOSTDOWN:
1155 	case -EHOSTUNREACH:
1156 	case -ENETUNREACH:
1157 	case -EIO:
1158 	case -ETIMEDOUT:
1159 	case -EPIPE:
1160 		dprintk("%s DS connection error %d\n", __func__,
1161 			task->tk_status);
1162 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1163 				&devid->deviceid);
1164 		rpc_wake_up(&tbl->slot_tbl_waitq);
1165 		/* fall through */
1166 	default:
1167 		if (ff_layout_avoid_mds_available_ds(lseg))
1168 			return -NFS4ERR_RESET_TO_PNFS;
1169 reset:
1170 		dprintk("%s Retry through MDS. Error %d\n", __func__,
1171 			task->tk_status);
1172 		return -NFS4ERR_RESET_TO_MDS;
1173 	}
1174 	task->tk_status = 0;
1175 	return -EAGAIN;
1176 }
1177 
1178 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1179 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1180 					   struct pnfs_layout_segment *lseg,
1181 					   int idx)
1182 {
1183 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1184 
1185 	switch (task->tk_status) {
1186 	/* File access problems. Don't mark the device as unavailable */
1187 	case -EACCES:
1188 	case -ESTALE:
1189 	case -EISDIR:
1190 	case -EBADHANDLE:
1191 	case -ELOOP:
1192 	case -ENOSPC:
1193 	case -EAGAIN:
1194 		break;
1195 	case -EJUKEBOX:
1196 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1197 		goto out_retry;
1198 	default:
1199 		dprintk("%s DS connection error %d\n", __func__,
1200 			task->tk_status);
1201 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1202 				&devid->deviceid);
1203 	}
1204 	/* FIXME: Need to prevent infinite looping here. */
1205 	return -NFS4ERR_RESET_TO_PNFS;
1206 out_retry:
1207 	task->tk_status = 0;
1208 	rpc_restart_call_prepare(task);
1209 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1210 	return -EAGAIN;
1211 }
1212 
1213 static int ff_layout_async_handle_error(struct rpc_task *task,
1214 					struct nfs4_state *state,
1215 					struct nfs_client *clp,
1216 					struct pnfs_layout_segment *lseg,
1217 					int idx)
1218 {
1219 	int vers = clp->cl_nfs_mod->rpc_vers->number;
1220 
1221 	if (task->tk_status >= 0) {
1222 		ff_layout_mark_ds_reachable(lseg, idx);
1223 		return 0;
1224 	}
1225 
1226 	/* Handle the case of an invalid layout segment */
1227 	if (!pnfs_is_valid_lseg(lseg))
1228 		return -NFS4ERR_RESET_TO_PNFS;
1229 
1230 	switch (vers) {
1231 	case 3:
1232 		return ff_layout_async_handle_error_v3(task, lseg, idx);
1233 	case 4:
1234 		return ff_layout_async_handle_error_v4(task, state, clp,
1235 						       lseg, idx);
1236 	default:
1237 		/* should never happen */
1238 		WARN_ON_ONCE(1);
1239 		return 0;
1240 	}
1241 }
1242 
1243 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1244 					int idx, u64 offset, u64 length,
1245 					u32 status, int opnum, int error)
1246 {
1247 	struct nfs4_ff_layout_mirror *mirror;
1248 	int err;
1249 
1250 	if (status == 0) {
1251 		switch (error) {
1252 		case -ETIMEDOUT:
1253 		case -EPFNOSUPPORT:
1254 		case -EPROTONOSUPPORT:
1255 		case -EOPNOTSUPP:
1256 		case -ECONNREFUSED:
1257 		case -ECONNRESET:
1258 		case -EHOSTDOWN:
1259 		case -EHOSTUNREACH:
1260 		case -ENETUNREACH:
1261 		case -EADDRINUSE:
1262 		case -ENOBUFS:
1263 		case -EPIPE:
1264 		case -EPERM:
1265 			status = NFS4ERR_NXIO;
1266 			break;
1267 		case -EACCES:
1268 			status = NFS4ERR_ACCESS;
1269 			break;
1270 		default:
1271 			return;
1272 		}
1273 	}
1274 
1275 	switch (status) {
1276 	case NFS4ERR_DELAY:
1277 	case NFS4ERR_GRACE:
1278 		return;
1279 	default:
1280 		break;
1281 	}
1282 
1283 	mirror = FF_LAYOUT_COMP(lseg, idx);
1284 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1285 				       mirror, offset, length, status, opnum,
1286 				       GFP_NOIO);
1287 	if (status == NFS4ERR_NXIO)
1288 		ff_layout_mark_ds_unreachable(lseg, idx);
1289 	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
1290 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1291 }
1292 
1293 /* NFS_PROTO call done callback routines */
1294 static int ff_layout_read_done_cb(struct rpc_task *task,
1295 				struct nfs_pgio_header *hdr)
1296 {
1297 	int new_idx = hdr->pgio_mirror_idx;
1298 	int err;
1299 
1300 	trace_nfs4_pnfs_read(hdr, task->tk_status);
1301 	if (task->tk_status < 0)
1302 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1303 					    hdr->args.offset, hdr->args.count,
1304 					    hdr->res.op_status, OP_READ,
1305 					    task->tk_status);
1306 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1307 					   hdr->ds_clp, hdr->lseg,
1308 					   hdr->pgio_mirror_idx);
1309 
1310 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1311 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1312 	switch (err) {
1313 	case -NFS4ERR_RESET_TO_PNFS:
1314 		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
1315 					hdr->pgio_mirror_idx + 1,
1316 					&new_idx))
1317 			goto out_layouterror;
1318 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1319 		return task->tk_status;
1320 	case -NFS4ERR_RESET_TO_MDS:
1321 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1322 		return task->tk_status;
1323 	case -EAGAIN:
1324 		goto out_eagain;
1325 	}
1326 
1327 	return 0;
1328 out_layouterror:
1329 	ff_layout_read_record_layoutstats_done(task, hdr);
1330 	ff_layout_send_layouterror(hdr->lseg);
1331 	hdr->pgio_mirror_idx = new_idx;
1332 out_eagain:
1333 	rpc_restart_call_prepare(task);
1334 	return -EAGAIN;
1335 }
1336 
1337 static bool
1338 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1339 {
1340 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1341 }
1342 
1343 /*
1344  * We reference the rpc_cred of the first WRITE that triggers the need for
1345  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1346  * rfc5661 is not clear about which credential should be used.
1347  *
1348  * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
1349  * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1350  * we always send layoutcommit after DS writes.
1351  */
1352 static void
1353 ff_layout_set_layoutcommit(struct inode *inode,
1354 		struct pnfs_layout_segment *lseg,
1355 		loff_t end_offset)
1356 {
1357 	if (!ff_layout_need_layoutcommit(lseg))
1358 		return;
1359 
1360 	pnfs_set_layoutcommit(inode, lseg, end_offset);
1361 	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1362 		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1363 }
1364 
1365 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1366 		struct nfs_pgio_header *hdr)
1367 {
1368 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1369 		return;
1370 	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1371 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1372 			hdr->args.count,
1373 			task->tk_start);
1374 }
1375 
1376 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1377 		struct nfs_pgio_header *hdr)
1378 {
1379 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1380 		return;
1381 	nfs4_ff_layout_stat_io_end_read(task,
1382 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1383 			hdr->args.count,
1384 			hdr->res.count);
1385 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1386 }
1387 
1388 static int ff_layout_read_prepare_common(struct rpc_task *task,
1389 					 struct nfs_pgio_header *hdr)
1390 {
1391 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1392 		rpc_exit(task, -EIO);
1393 		return -EIO;
1394 	}
1395 
1396 	ff_layout_read_record_layoutstats_start(task, hdr);
1397 	return 0;
1398 }
1399 
1400 /*
1401  * Call ops for the async read/write cases
1402  * In the case of dense layouts, the offset needs to be reset to its
1403  * original value.
1404  */
1405 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1406 {
1407 	struct nfs_pgio_header *hdr = data;
1408 
1409 	if (ff_layout_read_prepare_common(task, hdr))
1410 		return;
1411 
1412 	rpc_call_start(task);
1413 }
1414 
1415 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1416 {
1417 	struct nfs_pgio_header *hdr = data;
1418 
1419 	if (nfs4_setup_sequence(hdr->ds_clp,
1420 				&hdr->args.seq_args,
1421 				&hdr->res.seq_res,
1422 				task))
1423 		return;
1424 
1425 	ff_layout_read_prepare_common(task, hdr);
1426 }
1427 
1428 static void
1429 ff_layout_io_prepare_transmit(struct rpc_task *task,
1430 		void *data)
1431 {
1432 	struct nfs_pgio_header *hdr = data;
1433 
1434 	if (!pnfs_is_valid_lseg(hdr->lseg))
1435 		rpc_exit(task, -EAGAIN);
1436 }
1437 
1438 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1439 {
1440 	struct nfs_pgio_header *hdr = data;
1441 
1442 	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1443 
1444 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1445 	    task->tk_status == 0) {
1446 		nfs4_sequence_done(task, &hdr->res.seq_res);
1447 		return;
1448 	}
1449 
1450 	/* Note this may cause RPC to be resent */
1451 	hdr->mds_ops->rpc_call_done(task, hdr);
1452 }
1453 
1454 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1455 {
1456 	struct nfs_pgio_header *hdr = data;
1457 
1458 	ff_layout_read_record_layoutstats_done(task, hdr);
1459 	rpc_count_iostats_metrics(task,
1460 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1461 }
1462 
1463 static void ff_layout_read_release(void *data)
1464 {
1465 	struct nfs_pgio_header *hdr = data;
1466 
1467 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1468 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1469 		ff_layout_send_layouterror(hdr->lseg);
1470 		pnfs_read_resend_pnfs(hdr);
1471 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1472 		ff_layout_reset_read(hdr);
1473 	pnfs_generic_rw_release(data);
1474 }
1475 
1476 
1477 static int ff_layout_write_done_cb(struct rpc_task *task,
1478 				struct nfs_pgio_header *hdr)
1479 {
1480 	loff_t end_offs = 0;
1481 	int err;
1482 
1483 	trace_nfs4_pnfs_write(hdr, task->tk_status);
1484 	if (task->tk_status < 0)
1485 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1486 					    hdr->args.offset, hdr->args.count,
1487 					    hdr->res.op_status, OP_WRITE,
1488 					    task->tk_status);
1489 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1490 					   hdr->ds_clp, hdr->lseg,
1491 					   hdr->pgio_mirror_idx);
1492 
1493 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1494 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1495 	switch (err) {
1496 	case -NFS4ERR_RESET_TO_PNFS:
1497 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1498 		return task->tk_status;
1499 	case -NFS4ERR_RESET_TO_MDS:
1500 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1501 		return task->tk_status;
1502 	case -EAGAIN:
1503 		return -EAGAIN;
1504 	}
1505 
1506 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1507 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1508 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1509 
1510 	/* Note: if the write is unstable, don't set end_offs until commit */
1511 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1512 
1513 	/* zero out fattr since we don't care DS attr at all */
1514 	hdr->fattr.valid = 0;
1515 	if (task->tk_status >= 0)
1516 		nfs_writeback_update_inode(hdr);
1517 
1518 	return 0;
1519 }
1520 
1521 static int ff_layout_commit_done_cb(struct rpc_task *task,
1522 				     struct nfs_commit_data *data)
1523 {
1524 	int err;
1525 
1526 	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1527 	if (task->tk_status < 0)
1528 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1529 					    data->args.offset, data->args.count,
1530 					    data->res.op_status, OP_COMMIT,
1531 					    task->tk_status);
1532 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1533 					   data->lseg, data->ds_commit_index);
1534 
1535 	switch (err) {
1536 	case -NFS4ERR_RESET_TO_PNFS:
1537 		pnfs_generic_prepare_to_resend_writes(data);
1538 		return -EAGAIN;
1539 	case -NFS4ERR_RESET_TO_MDS:
1540 		pnfs_generic_prepare_to_resend_writes(data);
1541 		return -EAGAIN;
1542 	case -EAGAIN:
1543 		rpc_restart_call_prepare(task);
1544 		return -EAGAIN;
1545 	}
1546 
1547 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1548 
1549 	return 0;
1550 }
1551 
1552 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1553 		struct nfs_pgio_header *hdr)
1554 {
1555 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1556 		return;
1557 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1558 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1559 			hdr->args.count,
1560 			task->tk_start);
1561 }
1562 
1563 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1564 		struct nfs_pgio_header *hdr)
1565 {
1566 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1567 		return;
1568 	nfs4_ff_layout_stat_io_end_write(task,
1569 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1570 			hdr->args.count, hdr->res.count,
1571 			hdr->res.verf->committed);
1572 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1573 }
1574 
1575 static int ff_layout_write_prepare_common(struct rpc_task *task,
1576 					  struct nfs_pgio_header *hdr)
1577 {
1578 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1579 		rpc_exit(task, -EIO);
1580 		return -EIO;
1581 	}
1582 
1583 	ff_layout_write_record_layoutstats_start(task, hdr);
1584 	return 0;
1585 }
1586 
1587 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1588 {
1589 	struct nfs_pgio_header *hdr = data;
1590 
1591 	if (ff_layout_write_prepare_common(task, hdr))
1592 		return;
1593 
1594 	rpc_call_start(task);
1595 }
1596 
1597 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1598 {
1599 	struct nfs_pgio_header *hdr = data;
1600 
1601 	if (nfs4_setup_sequence(hdr->ds_clp,
1602 				&hdr->args.seq_args,
1603 				&hdr->res.seq_res,
1604 				task))
1605 		return;
1606 
1607 	ff_layout_write_prepare_common(task, hdr);
1608 }
1609 
1610 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1611 {
1612 	struct nfs_pgio_header *hdr = data;
1613 
1614 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1615 	    task->tk_status == 0) {
1616 		nfs4_sequence_done(task, &hdr->res.seq_res);
1617 		return;
1618 	}
1619 
1620 	/* Note this may cause RPC to be resent */
1621 	hdr->mds_ops->rpc_call_done(task, hdr);
1622 }
1623 
1624 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1625 {
1626 	struct nfs_pgio_header *hdr = data;
1627 
1628 	ff_layout_write_record_layoutstats_done(task, hdr);
1629 	rpc_count_iostats_metrics(task,
1630 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1631 }
1632 
1633 static void ff_layout_write_release(void *data)
1634 {
1635 	struct nfs_pgio_header *hdr = data;
1636 
1637 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1638 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1639 		ff_layout_send_layouterror(hdr->lseg);
1640 		ff_layout_reset_write(hdr, true);
1641 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1642 		ff_layout_reset_write(hdr, false);
1643 	pnfs_generic_rw_release(data);
1644 }
1645 
1646 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1647 		struct nfs_commit_data *cdata)
1648 {
1649 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1650 		return;
1651 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1652 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1653 			0, task->tk_start);
1654 }
1655 
1656 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1657 		struct nfs_commit_data *cdata)
1658 {
1659 	struct nfs_page *req;
1660 	__u64 count = 0;
1661 
1662 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1663 		return;
1664 
1665 	if (task->tk_status == 0) {
1666 		list_for_each_entry(req, &cdata->pages, wb_list)
1667 			count += req->wb_bytes;
1668 	}
1669 	nfs4_ff_layout_stat_io_end_write(task,
1670 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1671 			count, count, NFS_FILE_SYNC);
1672 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1673 }
1674 
1675 static void ff_layout_commit_prepare_common(struct rpc_task *task,
1676 		struct nfs_commit_data *cdata)
1677 {
1678 	ff_layout_commit_record_layoutstats_start(task, cdata);
1679 }
1680 
1681 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1682 {
1683 	ff_layout_commit_prepare_common(task, data);
1684 	rpc_call_start(task);
1685 }
1686 
1687 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1688 {
1689 	struct nfs_commit_data *wdata = data;
1690 
1691 	if (nfs4_setup_sequence(wdata->ds_clp,
1692 				&wdata->args.seq_args,
1693 				&wdata->res.seq_res,
1694 				task))
1695 		return;
1696 	ff_layout_commit_prepare_common(task, data);
1697 }
1698 
1699 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1700 {
1701 	pnfs_generic_write_commit_done(task, data);
1702 }
1703 
1704 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1705 {
1706 	struct nfs_commit_data *cdata = data;
1707 
1708 	ff_layout_commit_record_layoutstats_done(task, cdata);
1709 	rpc_count_iostats_metrics(task,
1710 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1711 }
1712 
1713 static void ff_layout_commit_release(void *data)
1714 {
1715 	struct nfs_commit_data *cdata = data;
1716 
1717 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1718 	pnfs_generic_commit_release(data);
1719 }
1720 
1721 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1722 	.rpc_call_prepare = ff_layout_read_prepare_v3,
1723 	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1724 	.rpc_call_done = ff_layout_read_call_done,
1725 	.rpc_count_stats = ff_layout_read_count_stats,
1726 	.rpc_release = ff_layout_read_release,
1727 };
1728 
1729 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1730 	.rpc_call_prepare = ff_layout_read_prepare_v4,
1731 	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1732 	.rpc_call_done = ff_layout_read_call_done,
1733 	.rpc_count_stats = ff_layout_read_count_stats,
1734 	.rpc_release = ff_layout_read_release,
1735 };
1736 
1737 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1738 	.rpc_call_prepare = ff_layout_write_prepare_v3,
1739 	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1740 	.rpc_call_done = ff_layout_write_call_done,
1741 	.rpc_count_stats = ff_layout_write_count_stats,
1742 	.rpc_release = ff_layout_write_release,
1743 };
1744 
1745 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1746 	.rpc_call_prepare = ff_layout_write_prepare_v4,
1747 	.rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1748 	.rpc_call_done = ff_layout_write_call_done,
1749 	.rpc_count_stats = ff_layout_write_count_stats,
1750 	.rpc_release = ff_layout_write_release,
1751 };
1752 
1753 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1754 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1755 	.rpc_call_done = ff_layout_commit_done,
1756 	.rpc_count_stats = ff_layout_commit_count_stats,
1757 	.rpc_release = ff_layout_commit_release,
1758 };
1759 
1760 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1761 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1762 	.rpc_call_done = ff_layout_commit_done,
1763 	.rpc_count_stats = ff_layout_commit_count_stats,
1764 	.rpc_release = ff_layout_commit_release,
1765 };
1766 
1767 static enum pnfs_try_status
1768 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1769 {
1770 	struct pnfs_layout_segment *lseg = hdr->lseg;
1771 	struct nfs4_pnfs_ds *ds;
1772 	struct rpc_clnt *ds_clnt;
1773 	struct nfs4_ff_layout_mirror *mirror;
1774 	const struct cred *ds_cred;
1775 	loff_t offset = hdr->args.offset;
1776 	u32 idx = hdr->pgio_mirror_idx;
1777 	int vers;
1778 	struct nfs_fh *fh;
1779 
1780 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1781 		__func__, hdr->inode->i_ino,
1782 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1783 
1784 	mirror = FF_LAYOUT_COMP(lseg, idx);
1785 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1786 	if (!ds)
1787 		goto out_failed;
1788 
1789 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1790 						   hdr->inode);
1791 	if (IS_ERR(ds_clnt))
1792 		goto out_failed;
1793 
1794 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1795 	if (!ds_cred)
1796 		goto out_failed;
1797 
1798 	vers = nfs4_ff_layout_ds_version(mirror);
1799 
1800 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1801 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1802 
1803 	hdr->pgio_done_cb = ff_layout_read_done_cb;
1804 	refcount_inc(&ds->ds_clp->cl_count);
1805 	hdr->ds_clp = ds->ds_clp;
1806 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1807 	if (fh)
1808 		hdr->args.fh = fh;
1809 
1810 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1811 
1812 	/*
1813 	 * Note that if we ever decide to split across DSes,
1814 	 * then we may need to handle dense-like offsets.
1815 	 */
1816 	hdr->args.offset = offset;
1817 	hdr->mds_offset = offset;
1818 
1819 	/* Perform an asynchronous read to ds */
1820 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1821 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1822 				      &ff_layout_read_call_ops_v4,
1823 			  0, RPC_TASK_SOFTCONN);
1824 	put_cred(ds_cred);
1825 	return PNFS_ATTEMPTED;
1826 
1827 out_failed:
1828 	if (ff_layout_avoid_mds_available_ds(lseg))
1829 		return PNFS_TRY_AGAIN;
1830 	return PNFS_NOT_ATTEMPTED;
1831 }
1832 
1833 /* Perform async writes. */
1834 static enum pnfs_try_status
1835 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1836 {
1837 	struct pnfs_layout_segment *lseg = hdr->lseg;
1838 	struct nfs4_pnfs_ds *ds;
1839 	struct rpc_clnt *ds_clnt;
1840 	struct nfs4_ff_layout_mirror *mirror;
1841 	const struct cred *ds_cred;
1842 	loff_t offset = hdr->args.offset;
1843 	int vers;
1844 	struct nfs_fh *fh;
1845 	int idx = hdr->pgio_mirror_idx;
1846 
1847 	mirror = FF_LAYOUT_COMP(lseg, idx);
1848 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1849 	if (!ds)
1850 		goto out_failed;
1851 
1852 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1853 						   hdr->inode);
1854 	if (IS_ERR(ds_clnt))
1855 		goto out_failed;
1856 
1857 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1858 	if (!ds_cred)
1859 		goto out_failed;
1860 
1861 	vers = nfs4_ff_layout_ds_version(mirror);
1862 
1863 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1864 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1865 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1866 		vers);
1867 
1868 	hdr->pgio_done_cb = ff_layout_write_done_cb;
1869 	refcount_inc(&ds->ds_clp->cl_count);
1870 	hdr->ds_clp = ds->ds_clp;
1871 	hdr->ds_commit_idx = idx;
1872 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1873 	if (fh)
1874 		hdr->args.fh = fh;
1875 
1876 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1877 
1878 	/*
1879 	 * Note that if we ever decide to split across DSes,
1880 	 * then we may need to handle dense-like offsets.
1881 	 */
1882 	hdr->args.offset = offset;
1883 
1884 	/* Perform an asynchronous write */
1885 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1886 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1887 				      &ff_layout_write_call_ops_v4,
1888 			  sync, RPC_TASK_SOFTCONN);
1889 	put_cred(ds_cred);
1890 	return PNFS_ATTEMPTED;
1891 
1892 out_failed:
1893 	if (ff_layout_avoid_mds_available_ds(lseg))
1894 		return PNFS_TRY_AGAIN;
1895 	return PNFS_NOT_ATTEMPTED;
1896 }
1897 
1898 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1899 {
1900 	return i;
1901 }
1902 
1903 static struct nfs_fh *
1904 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1905 {
1906 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1907 
1908 	/* FIXME: Assume that there is only one NFS version available
1909 	 * for the DS.
1910 	 */
1911 	return &flseg->mirror_array[i]->fh_versions[0];
1912 }
1913 
1914 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1915 {
1916 	struct pnfs_layout_segment *lseg = data->lseg;
1917 	struct nfs4_pnfs_ds *ds;
1918 	struct rpc_clnt *ds_clnt;
1919 	struct nfs4_ff_layout_mirror *mirror;
1920 	const struct cred *ds_cred;
1921 	u32 idx;
1922 	int vers, ret;
1923 	struct nfs_fh *fh;
1924 
1925 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1926 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1927 		goto out_err;
1928 
1929 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1930 	mirror = FF_LAYOUT_COMP(lseg, idx);
1931 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1932 	if (!ds)
1933 		goto out_err;
1934 
1935 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1936 						   data->inode);
1937 	if (IS_ERR(ds_clnt))
1938 		goto out_err;
1939 
1940 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1941 	if (!ds_cred)
1942 		goto out_err;
1943 
1944 	vers = nfs4_ff_layout_ds_version(mirror);
1945 
1946 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1947 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1948 		vers);
1949 	data->commit_done_cb = ff_layout_commit_done_cb;
1950 	data->cred = ds_cred;
1951 	refcount_inc(&ds->ds_clp->cl_count);
1952 	data->ds_clp = ds->ds_clp;
1953 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1954 	if (fh)
1955 		data->args.fh = fh;
1956 
1957 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1958 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
1959 					       &ff_layout_commit_call_ops_v4,
1960 				   how, RPC_TASK_SOFTCONN);
1961 	put_cred(ds_cred);
1962 	return ret;
1963 out_err:
1964 	pnfs_generic_prepare_to_resend_writes(data);
1965 	pnfs_generic_commit_release(data);
1966 	return -EAGAIN;
1967 }
1968 
1969 static int
1970 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1971 			   int how, struct nfs_commit_info *cinfo)
1972 {
1973 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1974 					    ff_layout_initiate_commit);
1975 }
1976 
1977 static struct pnfs_ds_commit_info *
1978 ff_layout_get_ds_info(struct inode *inode)
1979 {
1980 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1981 
1982 	if (layout == NULL)
1983 		return NULL;
1984 
1985 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1986 }
1987 
1988 static void
1989 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1990 {
1991 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1992 						  id_node));
1993 }
1994 
1995 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
1996 				  const struct nfs4_layoutreturn_args *args,
1997 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
1998 {
1999 	__be32 *start;
2000 
2001 	start = xdr_reserve_space(xdr, 4);
2002 	if (unlikely(!start))
2003 		return -E2BIG;
2004 
2005 	*start = cpu_to_be32(ff_args->num_errors);
2006 	/* This assume we always return _ALL_ layouts */
2007 	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2008 }
2009 
2010 static void
2011 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2012 {
2013 	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2014 }
2015 
2016 static void
2017 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2018 			    const nfs4_stateid *stateid,
2019 			    const struct nfs42_layoutstat_devinfo *devinfo)
2020 {
2021 	__be32 *p;
2022 
2023 	p = xdr_reserve_space(xdr, 8 + 8);
2024 	p = xdr_encode_hyper(p, devinfo->offset);
2025 	p = xdr_encode_hyper(p, devinfo->length);
2026 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2027 	p = xdr_reserve_space(xdr, 4*8);
2028 	p = xdr_encode_hyper(p, devinfo->read_count);
2029 	p = xdr_encode_hyper(p, devinfo->read_bytes);
2030 	p = xdr_encode_hyper(p, devinfo->write_count);
2031 	p = xdr_encode_hyper(p, devinfo->write_bytes);
2032 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2033 }
2034 
2035 static void
2036 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2037 			    const nfs4_stateid *stateid,
2038 			    const struct nfs42_layoutstat_devinfo *devinfo)
2039 {
2040 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2041 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2042 			devinfo->ld_private.data);
2043 }
2044 
2045 /* report nothing for now */
2046 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2047 		const struct nfs4_layoutreturn_args *args,
2048 		struct nfs4_flexfile_layoutreturn_args *ff_args)
2049 {
2050 	__be32 *p;
2051 	int i;
2052 
2053 	p = xdr_reserve_space(xdr, 4);
2054 	*p = cpu_to_be32(ff_args->num_dev);
2055 	for (i = 0; i < ff_args->num_dev; i++)
2056 		ff_layout_encode_ff_iostat(xdr,
2057 				&args->layout->plh_stateid,
2058 				&ff_args->devinfo[i]);
2059 }
2060 
2061 static void
2062 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2063 		unsigned int num_entries)
2064 {
2065 	unsigned int i;
2066 
2067 	for (i = 0; i < num_entries; i++) {
2068 		if (!devinfo[i].ld_private.ops)
2069 			continue;
2070 		if (!devinfo[i].ld_private.ops->free)
2071 			continue;
2072 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2073 	}
2074 }
2075 
2076 static struct nfs4_deviceid_node *
2077 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2078 			      struct pnfs_device *pdev, gfp_t gfp_flags)
2079 {
2080 	struct nfs4_ff_layout_ds *dsaddr;
2081 
2082 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2083 	if (!dsaddr)
2084 		return NULL;
2085 	return &dsaddr->id_node;
2086 }
2087 
2088 static void
2089 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2090 		const void *voidargs,
2091 		const struct nfs4_xdr_opaque_data *ff_opaque)
2092 {
2093 	const struct nfs4_layoutreturn_args *args = voidargs;
2094 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2095 	struct xdr_buf tmp_buf = {
2096 		.head = {
2097 			[0] = {
2098 				.iov_base = page_address(ff_args->pages[0]),
2099 			},
2100 		},
2101 		.buflen = PAGE_SIZE,
2102 	};
2103 	struct xdr_stream tmp_xdr;
2104 	__be32 *start;
2105 
2106 	dprintk("%s: Begin\n", __func__);
2107 
2108 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2109 
2110 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2111 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2112 
2113 	start = xdr_reserve_space(xdr, 4);
2114 	*start = cpu_to_be32(tmp_buf.len);
2115 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2116 
2117 	dprintk("%s: Return\n", __func__);
2118 }
2119 
2120 static void
2121 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2122 {
2123 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2124 
2125 	if (!args->data)
2126 		return;
2127 	ff_args = args->data;
2128 	args->data = NULL;
2129 
2130 	ff_layout_free_ds_ioerr(&ff_args->errors);
2131 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2132 
2133 	put_page(ff_args->pages[0]);
2134 	kfree(ff_args);
2135 }
2136 
2137 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2138 	.encode = ff_layout_encode_layoutreturn,
2139 	.free = ff_layout_free_layoutreturn,
2140 };
2141 
2142 static int
2143 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2144 {
2145 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2146 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2147 
2148 	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
2149 	if (!ff_args)
2150 		goto out_nomem;
2151 	ff_args->pages[0] = alloc_page(GFP_KERNEL);
2152 	if (!ff_args->pages[0])
2153 		goto out_nomem_free;
2154 
2155 	INIT_LIST_HEAD(&ff_args->errors);
2156 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2157 			&args->range, &ff_args->errors,
2158 			FF_LAYOUTRETURN_MAXERR);
2159 
2160 	spin_lock(&args->inode->i_lock);
2161 	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2162 			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
2163 	spin_unlock(&args->inode->i_lock);
2164 
2165 	args->ld_private->ops = &layoutreturn_ops;
2166 	args->ld_private->data = ff_args;
2167 	return 0;
2168 out_nomem_free:
2169 	kfree(ff_args);
2170 out_nomem:
2171 	return -ENOMEM;
2172 }
2173 
2174 #ifdef CONFIG_NFS_V4_2
2175 void
2176 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2177 {
2178 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2179 	struct nfs42_layout_error *errors;
2180 	LIST_HEAD(head);
2181 
2182 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2183 		return;
2184 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2185 	if (list_empty(&head))
2186 		return;
2187 
2188 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
2189 			sizeof(*errors), GFP_NOFS);
2190 	if (errors != NULL) {
2191 		const struct nfs4_ff_layout_ds_err *pos;
2192 		size_t n = 0;
2193 
2194 		list_for_each_entry(pos, &head, list) {
2195 			errors[n].offset = pos->offset;
2196 			errors[n].length = pos->length;
2197 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2198 			errors[n].errors[0].dev_id = pos->deviceid;
2199 			errors[n].errors[0].status = pos->status;
2200 			errors[n].errors[0].opnum = pos->opnum;
2201 			n++;
2202 			if (!list_is_last(&pos->list, &head) &&
2203 			    n < NFS42_LAYOUTERROR_MAX)
2204 				continue;
2205 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2206 				break;
2207 			n = 0;
2208 		}
2209 		kfree(errors);
2210 	}
2211 	ff_layout_free_ds_ioerr(&head);
2212 }
2213 #else
2214 void
2215 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2216 {
2217 }
2218 #endif
2219 
2220 static int
2221 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2222 {
2223 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2224 
2225 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2226 }
2227 
2228 static size_t
2229 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2230 			  const int buflen)
2231 {
2232 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2233 	const struct in6_addr *addr = &sin6->sin6_addr;
2234 
2235 	/*
2236 	 * RFC 4291, Section 2.2.2
2237 	 *
2238 	 * Shorthanded ANY address
2239 	 */
2240 	if (ipv6_addr_any(addr))
2241 		return snprintf(buf, buflen, "::");
2242 
2243 	/*
2244 	 * RFC 4291, Section 2.2.2
2245 	 *
2246 	 * Shorthanded loopback address
2247 	 */
2248 	if (ipv6_addr_loopback(addr))
2249 		return snprintf(buf, buflen, "::1");
2250 
2251 	/*
2252 	 * RFC 4291, Section 2.2.3
2253 	 *
2254 	 * Special presentation address format for mapped v4
2255 	 * addresses.
2256 	 */
2257 	if (ipv6_addr_v4mapped(addr))
2258 		return snprintf(buf, buflen, "::ffff:%pI4",
2259 					&addr->s6_addr32[3]);
2260 
2261 	/*
2262 	 * RFC 4291, Section 2.2.1
2263 	 */
2264 	return snprintf(buf, buflen, "%pI6c", addr);
2265 }
2266 
2267 /* Derived from rpc_sockaddr2uaddr */
2268 static void
2269 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2270 {
2271 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2272 	char portbuf[RPCBIND_MAXUADDRPLEN];
2273 	char addrbuf[RPCBIND_MAXUADDRLEN];
2274 	char *netid;
2275 	unsigned short port;
2276 	int len, netid_len;
2277 	__be32 *p;
2278 
2279 	switch (sap->sa_family) {
2280 	case AF_INET:
2281 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2282 			return;
2283 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2284 		netid = "tcp";
2285 		netid_len = 3;
2286 		break;
2287 	case AF_INET6:
2288 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2289 			return;
2290 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2291 		netid = "tcp6";
2292 		netid_len = 4;
2293 		break;
2294 	default:
2295 		/* we only support tcp and tcp6 */
2296 		WARN_ON_ONCE(1);
2297 		return;
2298 	}
2299 
2300 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2301 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2302 
2303 	p = xdr_reserve_space(xdr, 4 + netid_len);
2304 	xdr_encode_opaque(p, netid, netid_len);
2305 
2306 	p = xdr_reserve_space(xdr, 4 + len);
2307 	xdr_encode_opaque(p, addrbuf, len);
2308 }
2309 
2310 static void
2311 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2312 			 ktime_t t)
2313 {
2314 	struct timespec64 ts;
2315 	__be32 *p;
2316 
2317 	p = xdr_reserve_space(xdr, 12);
2318 	ts = ktime_to_timespec64(t);
2319 	p = xdr_encode_hyper(p, ts.tv_sec);
2320 	*p++ = cpu_to_be32(ts.tv_nsec);
2321 }
2322 
2323 static void
2324 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2325 			    struct nfs4_ff_io_stat *stat)
2326 {
2327 	__be32 *p;
2328 
2329 	p = xdr_reserve_space(xdr, 5 * 8);
2330 	p = xdr_encode_hyper(p, stat->ops_requested);
2331 	p = xdr_encode_hyper(p, stat->bytes_requested);
2332 	p = xdr_encode_hyper(p, stat->ops_completed);
2333 	p = xdr_encode_hyper(p, stat->bytes_completed);
2334 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2335 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2336 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2337 }
2338 
2339 static void
2340 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2341 			      const struct nfs42_layoutstat_devinfo *devinfo,
2342 			      struct nfs4_ff_layout_mirror *mirror)
2343 {
2344 	struct nfs4_pnfs_ds_addr *da;
2345 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2346 	struct nfs_fh *fh = &mirror->fh_versions[0];
2347 	__be32 *p;
2348 
2349 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2350 	dprintk("%s: DS %s: encoding address %s\n",
2351 		__func__, ds->ds_remotestr, da->da_remotestr);
2352 	/* netaddr4 */
2353 	ff_layout_encode_netaddr(xdr, da);
2354 	/* nfs_fh4 */
2355 	p = xdr_reserve_space(xdr, 4 + fh->size);
2356 	xdr_encode_opaque(p, fh->data, fh->size);
2357 	/* ff_io_latency4 read */
2358 	spin_lock(&mirror->lock);
2359 	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2360 	/* ff_io_latency4 write */
2361 	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2362 	spin_unlock(&mirror->lock);
2363 	/* nfstime4 */
2364 	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2365 	/* bool */
2366 	p = xdr_reserve_space(xdr, 4);
2367 	*p = cpu_to_be32(false);
2368 }
2369 
2370 static void
2371 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2372 			     const struct nfs4_xdr_opaque_data *opaque)
2373 {
2374 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2375 			struct nfs42_layoutstat_devinfo, ld_private);
2376 	__be32 *start;
2377 
2378 	/* layoutupdate length */
2379 	start = xdr_reserve_space(xdr, 4);
2380 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2381 
2382 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2383 }
2384 
2385 static void
2386 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2387 {
2388 	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2389 
2390 	ff_layout_put_mirror(mirror);
2391 }
2392 
2393 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2394 	.encode = ff_layout_encode_layoutstats,
2395 	.free	= ff_layout_free_layoutstats,
2396 };
2397 
2398 static int
2399 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2400 			       struct nfs42_layoutstat_devinfo *devinfo,
2401 			       int dev_limit)
2402 {
2403 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2404 	struct nfs4_ff_layout_mirror *mirror;
2405 	struct nfs4_deviceid_node *dev;
2406 	int i = 0;
2407 
2408 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2409 		if (i >= dev_limit)
2410 			break;
2411 		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2412 			continue;
2413 		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
2414 			continue;
2415 		/* mirror refcount put in cleanup_layoutstats */
2416 		if (!refcount_inc_not_zero(&mirror->ref))
2417 			continue;
2418 		dev = &mirror->mirror_ds->id_node;
2419 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2420 		devinfo->offset = 0;
2421 		devinfo->length = NFS4_MAX_UINT64;
2422 		spin_lock(&mirror->lock);
2423 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2424 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2425 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2426 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2427 		spin_unlock(&mirror->lock);
2428 		devinfo->layout_type = LAYOUT_FLEX_FILES;
2429 		devinfo->ld_private.ops = &layoutstat_ops;
2430 		devinfo->ld_private.data = mirror;
2431 
2432 		devinfo++;
2433 		i++;
2434 	}
2435 	return i;
2436 }
2437 
2438 static int
2439 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2440 {
2441 	struct nfs4_flexfile_layout *ff_layout;
2442 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2443 
2444 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2445 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2446 	if (!args->devinfo)
2447 		return -ENOMEM;
2448 
2449 	spin_lock(&args->inode->i_lock);
2450 	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2451 	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2452 			&args->devinfo[0], dev_count);
2453 	spin_unlock(&args->inode->i_lock);
2454 	if (!args->num_dev) {
2455 		kfree(args->devinfo);
2456 		args->devinfo = NULL;
2457 		return -ENOENT;
2458 	}
2459 
2460 	return 0;
2461 }
2462 
2463 static int
2464 ff_layout_set_layoutdriver(struct nfs_server *server,
2465 		const struct nfs_fh *dummy)
2466 {
2467 #if IS_ENABLED(CONFIG_NFS_V4_2)
2468 	server->caps |= NFS_CAP_LAYOUTSTATS;
2469 #endif
2470 	return 0;
2471 }
2472 
2473 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2474 	.id			= LAYOUT_FLEX_FILES,
2475 	.name			= "LAYOUT_FLEX_FILES",
2476 	.owner			= THIS_MODULE,
2477 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2478 	.max_layoutget_response	= 4096, /* 1 page or so... */
2479 	.set_layoutdriver	= ff_layout_set_layoutdriver,
2480 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2481 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2482 	.alloc_lseg		= ff_layout_alloc_lseg,
2483 	.free_lseg		= ff_layout_free_lseg,
2484 	.add_lseg		= ff_layout_add_lseg,
2485 	.pg_read_ops		= &ff_layout_pg_read_ops,
2486 	.pg_write_ops		= &ff_layout_pg_write_ops,
2487 	.get_ds_info		= ff_layout_get_ds_info,
2488 	.free_deviceid_node	= ff_layout_free_deviceid_node,
2489 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2490 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2491 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2492 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2493 	.commit_pagelist	= ff_layout_commit_pagelist,
2494 	.read_pagelist		= ff_layout_read_pagelist,
2495 	.write_pagelist		= ff_layout_write_pagelist,
2496 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2497 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2498 	.sync			= pnfs_nfs_generic_sync,
2499 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2500 };
2501 
2502 static int __init nfs4flexfilelayout_init(void)
2503 {
2504 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2505 	       __func__);
2506 	return pnfs_register_layoutdriver(&flexfilelayout_type);
2507 }
2508 
2509 static void __exit nfs4flexfilelayout_exit(void)
2510 {
2511 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2512 	       __func__);
2513 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2514 }
2515 
2516 MODULE_ALIAS("nfs-layouttype4-4");
2517 
2518 MODULE_LICENSE("GPL");
2519 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2520 
2521 module_init(nfs4flexfilelayout_init);
2522 module_exit(nfs4flexfilelayout_exit);
2523 
2524 module_param(io_maxretrans, ushort, 0644);
2525 MODULE_PARM_DESC(io_maxretrans, "The  number of times the NFSv4.1 client "
2526 			"retries an I/O request before returning an error. ");
2527