xref: /openbmc/linux/fs/nfs/flexfilelayout/flexfilelayout.c (revision 93b717fd81bf6b9a73c3702e9b079b4de8148b34)
1 /*
2  * Module for pnfs flexfile layout driver.
3  *
4  * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
5  *
6  * Tao Peng <bergwolf@primarydata.com>
7  */
8 
9 #include <linux/nfs_fs.h>
10 #include <linux/nfs_page.h>
11 #include <linux/module.h>
12 
13 #include <linux/sunrpc/metrics.h>
14 
15 #include "flexfilelayout.h"
16 #include "../nfs4session.h"
17 #include "../nfs4idmap.h"
18 #include "../internal.h"
19 #include "../delegation.h"
20 #include "../nfs4trace.h"
21 #include "../iostat.h"
22 #include "../nfs.h"
23 #include "../nfs42.h"
24 
25 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
26 
27 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
28 
29 static struct group_info	*ff_zero_group;
30 
31 static struct pnfs_layout_hdr *
32 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
33 {
34 	struct nfs4_flexfile_layout *ffl;
35 
36 	ffl = kzalloc(sizeof(*ffl), gfp_flags);
37 	if (ffl) {
38 		INIT_LIST_HEAD(&ffl->error_list);
39 		INIT_LIST_HEAD(&ffl->mirrors);
40 		return &ffl->generic_hdr;
41 	}
42 	return NULL;
43 }
44 
45 static void
46 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
47 {
48 	struct nfs4_ff_layout_ds_err *err, *n;
49 
50 	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
51 				 list) {
52 		list_del(&err->list);
53 		kfree(err);
54 	}
55 	kfree(FF_LAYOUT_FROM_HDR(lo));
56 }
57 
58 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
59 {
60 	__be32 *p;
61 
62 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
63 	if (unlikely(p == NULL))
64 		return -ENOBUFS;
65 	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
66 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
67 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
68 		p[0], p[1], p[2], p[3]);
69 	return 0;
70 }
71 
72 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
73 {
74 	__be32 *p;
75 
76 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
77 	if (unlikely(!p))
78 		return -ENOBUFS;
79 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
80 	nfs4_print_deviceid(devid);
81 	return 0;
82 }
83 
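/*
 * decode_nfs_fh() expects the usual XDR opaque encoding of a filehandle:
 * a 4-byte length word followed by that many bytes of filehandle data.
 * Anything larger than the client-side nfs_fh can hold is rejected with
 * -EOVERFLOW below.
 */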
84 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
85 {
86 	__be32 *p;
87 
88 	p = xdr_inline_decode(xdr, 4);
89 	if (unlikely(!p))
90 		return -ENOBUFS;
91 	fh->size = be32_to_cpup(p++);
92 	if (fh->size > NFS_MAXFHSIZE) {
93 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
94 		       fh->size);
95 		return -EOVERFLOW;
96 	}
97 	/* fh.data */
98 	p = xdr_inline_decode(xdr, fh->size);
99 	if (unlikely(!p))
100 		return -ENOBUFS;
101 	memcpy(&fh->data, p, fh->size);
102 	dprintk("%s: fh len %d\n", __func__, fh->size);
103 
104 	return 0;
105 }
106 
107 /*
108  * Currently only stringified uids and gids are accepted.
109  * I.e., kerberos is not supported to the DSes, so no principals.
110  *
111  * That means that one common function will suffice, but when
112  * principals are added, this should be split to accommodate
113  * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
114  */
115 static int
116 decode_name(struct xdr_stream *xdr, u32 *id)
117 {
118 	__be32 *p;
119 	int len;
120 
121 	/* opaque_length(4)*/
122 	p = xdr_inline_decode(xdr, 4);
123 	if (unlikely(!p))
124 		return -ENOBUFS;
125 	len = be32_to_cpup(p++);
126 	if (len < 0)
127 		return -EINVAL;
128 
129 	dprintk("%s: len %u\n", __func__, len);
130 
131 	/* opaque body */
132 	p = xdr_inline_decode(xdr, len);
133 	if (unlikely(!p))
134 		return -ENOBUFS;
135 
136 	if (!nfs_map_string_to_numeric((char *)p, len, id))
137 		return -EINVAL;
138 
139 	return 0;
140 }
141 
142 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
143 		const struct nfs4_ff_layout_mirror *m2)
144 {
145 	int i, j;
146 
147 	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
148 		return false;
149 	for (i = 0; i < m1->fh_versions_cnt; i++) {
150 		bool found_fh = false;
151 		for (j = 0; j < m2->fh_versions_cnt; j++) {
152 			if (nfs_compare_fh(&m1->fh_versions[i],
153 					&m2->fh_versions[j]) == 0) {
154 				found_fh = true;
155 				break;
156 			}
157 		}
158 		if (!found_fh)
159 			return false;
160 	}
161 	return true;
162 }
163 
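/*
 * Try to coalesce the new mirror with an existing one on the layout's
 * mirror list: if a mirror with the same mirror_ds and the same set of
 * filehandles is already present (and its refcount can still be raised),
 * take a reference and return it; otherwise link the new mirror into the
 * list and point it at this layout.
 */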
164 static struct nfs4_ff_layout_mirror *
165 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
166 		struct nfs4_ff_layout_mirror *mirror)
167 {
168 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
169 	struct nfs4_ff_layout_mirror *pos;
170 	struct inode *inode = lo->plh_inode;
171 
172 	spin_lock(&inode->i_lock);
173 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
174 		if (mirror->mirror_ds != pos->mirror_ds)
175 			continue;
176 		if (!ff_mirror_match_fh(mirror, pos))
177 			continue;
178 		if (atomic_inc_not_zero(&pos->ref)) {
179 			spin_unlock(&inode->i_lock);
180 			return pos;
181 		}
182 	}
183 	list_add(&mirror->mirrors, &ff_layout->mirrors);
184 	mirror->layout = lo;
185 	spin_unlock(&inode->i_lock);
186 	return mirror;
187 }
188 
189 static void
190 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
191 {
192 	struct inode *inode;
193 	if (mirror->layout == NULL)
194 		return;
195 	inode = mirror->layout->plh_inode;
196 	spin_lock(&inode->i_lock);
197 	list_del(&mirror->mirrors);
198 	spin_unlock(&inode->i_lock);
199 	mirror->layout = NULL;
200 }
201 
202 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
203 {
204 	struct nfs4_ff_layout_mirror *mirror;
205 
206 	mirror = kzalloc(sizeof(*mirror), gfp_flags);
207 	if (mirror != NULL) {
208 		spin_lock_init(&mirror->lock);
209 		atomic_set(&mirror->ref, 1);
210 		INIT_LIST_HEAD(&mirror->mirrors);
211 	}
212 	return mirror;
213 }
214 
215 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
216 {
217 	struct rpc_cred	*cred;
218 
219 	ff_layout_remove_mirror(mirror);
220 	kfree(mirror->fh_versions);
221 	cred = rcu_access_pointer(mirror->ro_cred);
222 	if (cred)
223 		put_rpccred(cred);
224 	cred = rcu_access_pointer(mirror->rw_cred);
225 	if (cred)
226 		put_rpccred(cred);
227 	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
228 	kfree(mirror);
229 }
230 
231 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
232 {
233 	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
234 		ff_layout_free_mirror(mirror);
235 }
236 
237 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
238 {
239 	int i;
240 
241 	if (fls->mirror_array) {
242 		for (i = 0; i < fls->mirror_array_cnt; i++) {
243 			/* Normally mirror_ds is freed in
244 			 * .free_deviceid_node, but we still do it here
245 			 * for the .alloc_lseg error path. */
246 			ff_layout_put_mirror(fls->mirror_array[i]);
247 		}
248 		kfree(fls->mirror_array);
249 		fls->mirror_array = NULL;
250 	}
251 }
252 
253 static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
254 {
255 	int ret = 0;
256 
257 	dprintk("--> %s\n", __func__);
258 
259 	/* FIXME: remove this check when layout segment support is added */
260 	if (lgr->range.offset != 0 ||
261 	    lgr->range.length != NFS4_MAX_UINT64) {
262 		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
263 			__func__);
264 		ret = -EINVAL;
265 	}
266 
267 	dprintk("--> %s returns %d\n", __func__, ret);
268 	return ret;
269 }
270 
271 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
272 {
273 	if (fls) {
274 		ff_layout_free_mirror_array(fls);
275 		kfree(fls);
276 	}
277 }
278 
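/*
 * Ordering predicate used when inserting a new lseg into the layout:
 * returns true when range @l1 should be placed after @l2.  Ranges of
 * different iomodes are kept apart (a READ range is never considered
 * "after" a range of another iomode), and ranges of the same iomode are
 * ordered by offset.
 */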
279 static bool
280 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
281 		const struct pnfs_layout_range *l2)
282 {
283 	u64 end1, end2;
284 
285 	if (l1->iomode != l2->iomode)
286 		return l1->iomode != IOMODE_READ;
287 	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
288 	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
289 	if (end1 < l2->offset)
290 		return false;
291 	if (end2 < l1->offset)
292 		return true;
293 	return l2->offset <= l1->offset;
294 }
295 
296 static bool
297 ff_lseg_merge(struct pnfs_layout_segment *new,
298 		struct pnfs_layout_segment *old)
299 {
300 	u64 new_end, old_end;
301 
302 	if (new->pls_range.iomode != old->pls_range.iomode)
303 		return false;
304 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
305 			old->pls_range.length);
306 	if (old_end < new->pls_range.offset)
307 		return false;
308 	new_end = pnfs_calc_offset_end(new->pls_range.offset,
309 			new->pls_range.length);
310 	if (new_end < old->pls_range.offset)
311 		return false;
312 
313 	/* Mergeable: copy info from 'old' to 'new' */
314 	if (new_end < old_end)
315 		new_end = old_end;
316 	if (new->pls_range.offset < old->pls_range.offset)
317 		new->pls_range.offset = old->pls_range.offset;
318 	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
319 			new_end);
320 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
321 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
322 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
323 		set_bit(NFS_LSEG_LAYOUTRETURN, &new->pls_flags);
324 	return true;
325 }
326 
327 static void
328 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
329 		struct pnfs_layout_segment *lseg,
330 		struct list_head *free_me)
331 {
332 	pnfs_generic_layout_insert_lseg(lo, lseg,
333 			ff_lseg_range_is_after,
334 			ff_lseg_merge,
335 			free_me);
336 }
337 
338 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
339 {
340 	int i, j;
341 
342 	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
343 		for (j = i + 1; j < fls->mirror_array_cnt; j++)
344 			if (fls->mirror_array[i]->efficiency <
345 			    fls->mirror_array[j]->efficiency)
346 				swap(fls->mirror_array[i],
347 				     fls->mirror_array[j]);
348 	}
349 }
350 
351 static void ff_layout_mark_devices_valid(struct nfs4_ff_layout_segment *fls)
352 {
353 	struct nfs4_deviceid_node *node;
354 	int i;
355 
356 	if (!(fls->flags & FF_FLAGS_NO_IO_THRU_MDS))
357 		return;
358 	for (i = 0; i < fls->mirror_array_cnt; i++) {
359 		node = &fls->mirror_array[i]->mirror_ds->id_node;
360 		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
361 	}
362 }
363 
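/*
 * ff_layout_alloc_lseg() decodes the flexfile layout body returned by
 * LAYOUTGET.  As an informal sketch of what the decode calls below pull
 * off the wire (the flexfiles layout specification is authoritative):
 *
 *	stripe_unit			(64-bit)
 *	mirror_array_cnt		(32-bit)
 *	for each mirror:
 *		ds_count		(32-bit, must be 1 - no striping yet)
 *		deviceid
 *		efficiency		(32-bit)
 *		stateid
 *		fh_count + that many filehandles
 *		user  (stringified uid)
 *		group (stringified gid)
 *	flags				(32-bit, optional)
 *	stats collection hint		(32-bit, optional)
 */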
364 static struct pnfs_layout_segment *
365 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
366 		     struct nfs4_layoutget_res *lgr,
367 		     gfp_t gfp_flags)
368 {
369 	struct pnfs_layout_segment *ret;
370 	struct nfs4_ff_layout_segment *fls = NULL;
371 	struct xdr_stream stream;
372 	struct xdr_buf buf;
373 	struct page *scratch;
374 	u64 stripe_unit;
375 	u32 mirror_array_cnt;
376 	__be32 *p;
377 	int i, rc;
378 
379 	dprintk("--> %s\n", __func__);
380 	scratch = alloc_page(gfp_flags);
381 	if (!scratch)
382 		return ERR_PTR(-ENOMEM);
383 
384 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
385 			      lgr->layoutp->len);
386 	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
387 
388 	/* stripe unit and mirror_array_cnt */
389 	rc = -EIO;
390 	p = xdr_inline_decode(&stream, 8 + 4);
391 	if (!p)
392 		goto out_err_free;
393 
394 	p = xdr_decode_hyper(p, &stripe_unit);
395 	mirror_array_cnt = be32_to_cpup(p++);
396 	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
397 		stripe_unit, mirror_array_cnt);
398 
399 	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
400 	    mirror_array_cnt == 0)
401 		goto out_err_free;
402 
403 	rc = -ENOMEM;
404 	fls = kzalloc(sizeof(*fls), gfp_flags);
405 	if (!fls)
406 		goto out_err_free;
407 
408 	fls->mirror_array_cnt = mirror_array_cnt;
409 	fls->stripe_unit = stripe_unit;
410 	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
411 				    sizeof(fls->mirror_array[0]), gfp_flags);
412 	if (fls->mirror_array == NULL)
413 		goto out_err_free;
414 
415 	for (i = 0; i < fls->mirror_array_cnt; i++) {
416 		struct nfs4_ff_layout_mirror *mirror;
417 		struct nfs4_deviceid devid;
418 		struct nfs4_deviceid_node *idnode;
419 		struct auth_cred acred = { .group_info = ff_zero_group };
420 		struct rpc_cred	__rcu *cred;
421 		u32 ds_count, fh_count, id;
422 		int j;
423 
424 		rc = -EIO;
425 		p = xdr_inline_decode(&stream, 4);
426 		if (!p)
427 			goto out_err_free;
428 		ds_count = be32_to_cpup(p);
429 
430 		/* FIXME: allow for striping? */
431 		if (ds_count != 1)
432 			goto out_err_free;
433 
434 		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
435 		if (fls->mirror_array[i] == NULL) {
436 			rc = -ENOMEM;
437 			goto out_err_free;
438 		}
439 
440 		fls->mirror_array[i]->ds_count = ds_count;
441 
442 		/* deviceid */
443 		rc = decode_deviceid(&stream, &devid);
444 		if (rc)
445 			goto out_err_free;
446 
447 		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
448 						&devid, lh->plh_lc_cred,
449 						gfp_flags);
450 		/*
451 		 * On success, mirror_ds was allocated either by a previous
452 		 * GETDEVICEINFO or freshly by .alloc_deviceid_node; an
453 		 * nfs4_find_get_deviceid() failure is really a GETDEVICEINFO failure.
454 		 */
455 		if (idnode)
456 			fls->mirror_array[i]->mirror_ds =
457 				FF_LAYOUT_MIRROR_DS(idnode);
458 		else
459 			goto out_err_free;
460 
461 		/* efficiency */
462 		rc = -EIO;
463 		p = xdr_inline_decode(&stream, 4);
464 		if (!p)
465 			goto out_err_free;
466 		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
467 
468 		/* stateid */
469 		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
470 		if (rc)
471 			goto out_err_free;
472 
473 		/* fh */
474 		p = xdr_inline_decode(&stream, 4);
475 		if (!p)
476 			goto out_err_free;
477 		fh_count = be32_to_cpup(p);
478 
479 		fls->mirror_array[i]->fh_versions =
480 			kcalloc(fh_count, sizeof(struct nfs_fh),
481 				gfp_flags);
482 		if (fls->mirror_array[i]->fh_versions == NULL) {
483 			rc = -ENOMEM;
484 			goto out_err_free;
485 		}
486 
487 		for (j = 0; j < fh_count; j++) {
488 			rc = decode_nfs_fh(&stream,
489 					   &fls->mirror_array[i]->fh_versions[j]);
490 			if (rc)
491 				goto out_err_free;
492 		}
493 
494 		fls->mirror_array[i]->fh_versions_cnt = fh_count;
495 
496 		/* user */
497 		rc = decode_name(&stream, &id);
498 		if (rc)
499 			goto out_err_free;
500 
501 		acred.uid = make_kuid(&init_user_ns, id);
502 
503 		/* group */
504 		rc = decode_name(&stream, &id);
505 		if (rc)
506 			goto out_err_free;
507 
508 		acred.gid = make_kgid(&init_user_ns, id);
509 
510 		/* find the cred for it */
511 		rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags));
512 		if (IS_ERR(cred)) {
513 			rc = PTR_ERR(cred);
514 			goto out_err_free;
515 		}
516 
517 		if (lgr->range.iomode == IOMODE_READ)
518 			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
519 		else
520 			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
521 
522 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
523 		if (mirror != fls->mirror_array[i]) {
524 			/* swap cred ptrs so free_mirror will clean up old */
525 			if (lgr->range.iomode == IOMODE_READ) {
526 				cred = xchg(&mirror->ro_cred, cred);
527 				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
528 			} else {
529 				cred = xchg(&mirror->rw_cred, cred);
530 				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
531 			}
532 			ff_layout_free_mirror(fls->mirror_array[i]);
533 			fls->mirror_array[i] = mirror;
534 		}
535 
536 		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
537 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
538 			from_kuid(&init_user_ns, acred.uid),
539 			from_kgid(&init_user_ns, acred.gid));
540 	}
541 
542 	p = xdr_inline_decode(&stream, 4);
543 	if (!p)
544 		goto out_sort_mirrors;
545 	fls->flags = be32_to_cpup(p);
546 
547 	p = xdr_inline_decode(&stream, 4);
548 	if (!p)
549 		goto out_sort_mirrors;
550 	for (i = 0; i < fls->mirror_array_cnt; i++)
551 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
552 
553 out_sort_mirrors:
554 	ff_layout_sort_mirrors(fls);
555 	rc = ff_layout_check_layout(lgr);
556 	if (rc)
557 		goto out_err_free;
558 	ff_layout_mark_devices_valid(fls);
559 
560 	ret = &fls->generic_hdr;
561 	dprintk("<-- %s (success)\n", __func__);
562 out_free_page:
563 	__free_page(scratch);
564 	return ret;
565 out_err_free:
566 	_ff_layout_free_lseg(fls);
567 	ret = ERR_PTR(rc);
568 	dprintk("<-- %s (%d)\n", __func__, rc);
569 	goto out_free_page;
570 }
571 
572 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
573 {
574 	struct pnfs_layout_segment *lseg;
575 
576 	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
577 		if (lseg->pls_range.iomode == IOMODE_RW)
578 			return true;
579 
580 	return false;
581 }
582 
583 static void
584 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
585 {
586 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
587 
588 	dprintk("--> %s\n", __func__);
589 
590 	if (lseg->pls_range.iomode == IOMODE_RW) {
591 		struct nfs4_flexfile_layout *ffl;
592 		struct inode *inode;
593 
594 		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
595 		inode = ffl->generic_hdr.plh_inode;
596 		spin_lock(&inode->i_lock);
597 		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
598 			ffl->commit_info.nbuckets = 0;
599 			kfree(ffl->commit_info.buckets);
600 			ffl->commit_info.buckets = NULL;
601 		}
602 		spin_unlock(&inode->i_lock);
603 	}
604 	_ff_layout_free_lseg(fls);
605 }
606 
607 /* Return 1 until we support multiple lsegs */
608 static int
609 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
610 {
611 	return 1;
612 }
613 
614 static void
615 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
616 {
617 	/* first IO request? */
618 	if (atomic_inc_return(&timer->n_ops) == 1) {
619 		timer->start_time = now;
620 	}
621 }
622 
623 static ktime_t
624 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
625 {
626 	ktime_t start;
627 
628 	if (atomic_dec_return(&timer->n_ops) < 0)
629 		WARN_ON_ONCE(1);
630 
631 	start = timer->start_time;
632 	timer->start_time = now;
633 	return ktime_sub(now, start);
634 }
635 
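/*
 * Account the start of one I/O in the mirror's layoutstats and decide
 * whether it is time to send a LAYOUTSTATS report.  The report interval
 * is, in order of preference, the mirror's report_interval (decoded at
 * the end of ff_layout_alloc_lseg()), the layoutstats_timer tunable, or
 * the built-in FF_LAYOUTSTATS_REPORT_INTERVAL.  Returns true when a
 * report should be sent.
 */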
636 static bool
637 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
638 			    struct nfs4_ff_layoutstat *layoutstat,
639 			    ktime_t now)
640 {
641 	static const ktime_t notime = {0};
642 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
643 
644 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
645 	if (ktime_equal(mirror->start_time, notime))
646 		mirror->start_time = now;
647 	if (ktime_equal(mirror->last_report_time, notime))
648 		mirror->last_report_time = now;
649 	if (mirror->report_interval != 0)
650 		report_interval = (s64)mirror->report_interval * 1000LL;
651 	else if (layoutstats_timer != 0)
652 		report_interval = (s64)layoutstats_timer * 1000LL;
653 	if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
654 			report_interval) {
655 		mirror->last_report_time = now;
656 		return true;
657 	}
658 
659 	return false;
660 }
661 
662 static void
663 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
664 		__u64 requested)
665 {
666 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
667 
668 	iostat->ops_requested++;
669 	iostat->bytes_requested += requested;
670 }
671 
672 static void
673 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
674 		__u64 requested,
675 		__u64 completed,
676 		ktime_t time_completed,
677 		ktime_t time_started)
678 {
679 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
680 	ktime_t completion_time = ktime_sub(time_completed, time_started);
681 	ktime_t timer;
682 
683 	iostat->ops_completed++;
684 	iostat->bytes_completed += completed;
685 	iostat->bytes_not_delivered += requested - completed;
686 
687 	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
688 	iostat->total_busy_time =
689 			ktime_add(iostat->total_busy_time, timer);
690 	iostat->aggregate_completion_time =
691 			ktime_add(iostat->aggregate_completion_time,
692 					completion_time);
693 }
694 
695 static void
696 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
697 		struct nfs4_ff_layout_mirror *mirror,
698 		__u64 requested, ktime_t now)
699 {
700 	bool report;
701 
702 	spin_lock(&mirror->lock);
703 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
704 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
705 	spin_unlock(&mirror->lock);
706 
707 	if (report)
708 		pnfs_report_layoutstat(inode, GFP_KERNEL);
709 }
710 
711 static void
712 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
713 		struct nfs4_ff_layout_mirror *mirror,
714 		__u64 requested,
715 		__u64 completed)
716 {
717 	spin_lock(&mirror->lock);
718 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
719 			requested, completed,
720 			ktime_get(), task->tk_start);
721 	spin_unlock(&mirror->lock);
722 }
723 
724 static void
725 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
726 		struct nfs4_ff_layout_mirror *mirror,
727 		__u64 requested, ktime_t now)
728 {
729 	bool report;
730 
731 	spin_lock(&mirror->lock);
732 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
733 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
734 	spin_unlock(&mirror->lock);
735 
736 	if (report)
737 		pnfs_report_layoutstat(inode, GFP_NOIO);
738 }
739 
740 static void
741 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
742 		struct nfs4_ff_layout_mirror *mirror,
743 		__u64 requested,
744 		__u64 completed,
745 		enum nfs3_stable_how committed)
746 {
747 	if (committed == NFS_UNSTABLE)
748 		requested = completed = 0;
749 
750 	spin_lock(&mirror->lock);
751 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
752 			requested, completed, ktime_get(), task->tk_start);
753 	spin_unlock(&mirror->lock);
754 }
755 
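/*
 * Lazily allocate the commit buckets for this layout: one bucket per
 * mirror per lseg (ff_layout_get_lseg_count() currently returns 1).
 * Racing allocations are resolved under the inode lock; the loser frees
 * its array.
 */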
756 static int
757 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
758 			    struct nfs_commit_info *cinfo,
759 			    gfp_t gfp_flags)
760 {
761 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
762 	struct pnfs_commit_bucket *buckets;
763 	int size;
764 
765 	if (cinfo->ds->nbuckets != 0) {
766 		/* This assumes there is only one RW lseg per file.
767 		 * To support multiple lsegs per file, we need to
768 		 * change struct pnfs_commit_bucket to allow nbuckets
769 		 * to grow dynamically.
770 		 */
771 		return 0;
772 	}
773 
774 	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
775 
776 	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
777 			  gfp_flags);
778 	if (!buckets)
779 		return -ENOMEM;
780 	else {
781 		int i;
782 
783 		spin_lock(&cinfo->inode->i_lock);
784 		if (cinfo->ds->nbuckets != 0)
785 			kfree(buckets);
786 		else {
787 			cinfo->ds->buckets = buckets;
788 			cinfo->ds->nbuckets = size;
789 			for (i = 0; i < size; i++) {
790 				INIT_LIST_HEAD(&buckets[i].written);
791 				INIT_LIST_HEAD(&buckets[i].committing);
792 				/* mark direct verifier as unset */
793 				buckets[i].direct_verf.committed =
794 					NFS_INVALID_STABLE_HOW;
795 			}
796 		}
797 		spin_unlock(&cinfo->inode->i_lock);
798 		return 0;
799 	}
800 }
801 
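/*
 * Pick the first usable DS at or after @start_idx.  The mirrors were
 * sorted by descending efficiency in ff_layout_sort_mirrors(), so this
 * returns the "best" data server we can currently reach, if any.
 */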
802 static struct nfs4_pnfs_ds *
803 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
804 				  int start_idx,
805 				  int *best_idx)
806 {
807 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
808 	struct nfs4_pnfs_ds *ds;
809 	int idx;
810 
811 	/* mirrors are sorted by efficiency */
812 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
813 		ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
814 		if (ds) {
815 			*best_idx = idx;
816 			return ds;
817 		}
818 	}
819 
820 	return NULL;
821 }
822 
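/*
 * .pg_init for reads: grab a whole-file IOMODE_READ layout if we do not
 * already hold one, pick a mirror to read from, and size the (single)
 * pgio mirror to that DS's rsize.  Any failure falls back to reading
 * through the MDS.
 */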
823 static void
824 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
825 			struct nfs_page *req)
826 {
827 	struct nfs_pgio_mirror *pgm;
828 	struct nfs4_ff_layout_mirror *mirror;
829 	struct nfs4_pnfs_ds *ds;
830 	int ds_idx;
831 
832 	/* Use full layout for now */
833 	if (!pgio->pg_lseg) {
834 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
835 						   req->wb_context,
836 						   0,
837 						   NFS4_MAX_UINT64,
838 						   IOMODE_READ,
839 						   GFP_KERNEL);
840 		if (IS_ERR(pgio->pg_lseg)) {
841 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
842 			pgio->pg_lseg = NULL;
843 			return;
844 		}
845 	}
846 	/* If no lseg, fall back to read through mds */
847 	if (pgio->pg_lseg == NULL)
848 		goto out_mds;
849 
850 	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
851 	if (!ds)
852 		goto out_mds;
853 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
854 
855 	pgio->pg_mirror_idx = ds_idx;
856 
857 	/* read always uses only one mirror - idx 0 for pgio layer */
858 	pgm = &pgio->pg_mirrors[0];
859 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
860 
861 	return;
862 out_mds:
863 	pnfs_put_lseg(pgio->pg_lseg);
864 	pgio->pg_lseg = NULL;
865 	nfs_pageio_reset_read_mds(pgio);
866 }
867 
868 static void
869 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
870 			struct nfs_page *req)
871 {
872 	struct nfs4_ff_layout_mirror *mirror;
873 	struct nfs_pgio_mirror *pgm;
874 	struct nfs_commit_info cinfo;
875 	struct nfs4_pnfs_ds *ds;
876 	int i;
877 	int status;
878 
879 	if (!pgio->pg_lseg) {
880 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
881 						   req->wb_context,
882 						   0,
883 						   NFS4_MAX_UINT64,
884 						   IOMODE_RW,
885 						   GFP_NOFS);
886 		if (IS_ERR(pgio->pg_lseg)) {
887 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
888 			pgio->pg_lseg = NULL;
889 			return;
890 		}
891 	}
892 	/* If no lseg, fall back to write through mds */
893 	if (pgio->pg_lseg == NULL)
894 		goto out_mds;
895 
896 	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
897 	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
898 	if (status < 0)
899 		goto out_mds;
900 
901 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
902 	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
903 	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
904 		goto out_mds;
905 
906 	for (i = 0; i < pgio->pg_mirror_count; i++) {
907 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
908 		if (!ds)
909 			goto out_mds;
910 		pgm = &pgio->pg_mirrors[i];
911 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
912 		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
913 	}
914 
915 	return;
916 
917 out_mds:
918 	pnfs_put_lseg(pgio->pg_lseg);
919 	pgio->pg_lseg = NULL;
920 	nfs_pageio_reset_write_mds(pgio);
921 }
922 
923 static unsigned int
924 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
925 				    struct nfs_page *req)
926 {
927 	if (!pgio->pg_lseg) {
928 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
929 						   req->wb_context,
930 						   0,
931 						   NFS4_MAX_UINT64,
932 						   IOMODE_RW,
933 						   GFP_NOFS);
934 		if (IS_ERR(pgio->pg_lseg)) {
935 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
936 			pgio->pg_lseg = NULL;
937 			goto out;
938 		}
939 	}
940 	if (pgio->pg_lseg)
941 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
942 
943 	/* no lseg means that pnfs is not in use, so no mirroring here */
944 	nfs_pageio_reset_write_mds(pgio);
945 out:
946 	return 1;
947 }
948 
949 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
950 	.pg_init = ff_layout_pg_init_read,
951 	.pg_test = pnfs_generic_pg_test,
952 	.pg_doio = pnfs_generic_pg_readpages,
953 	.pg_cleanup = pnfs_generic_pg_cleanup,
954 };
955 
956 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
957 	.pg_init = ff_layout_pg_init_write,
958 	.pg_test = pnfs_generic_pg_test,
959 	.pg_doio = pnfs_generic_pg_writepages,
960 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
961 	.pg_cleanup = pnfs_generic_pg_cleanup,
962 };
963 
964 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
965 {
966 	struct rpc_task *task = &hdr->task;
967 
968 	pnfs_layoutcommit_inode(hdr->inode, false);
969 
970 	if (retry_pnfs) {
971 		dprintk("%s Reset task %5u for i/o through pNFS "
972 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
973 			hdr->task.tk_pid,
974 			hdr->inode->i_sb->s_id,
975 			(unsigned long long)NFS_FILEID(hdr->inode),
976 			hdr->args.count,
977 			(unsigned long long)hdr->args.offset);
978 
979 		hdr->completion_ops->reschedule_io(hdr);
980 		return;
981 	}
982 
983 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
984 		dprintk("%s Reset task %5u for i/o through MDS "
985 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
986 			hdr->task.tk_pid,
987 			hdr->inode->i_sb->s_id,
988 			(unsigned long long)NFS_FILEID(hdr->inode),
989 			hdr->args.count,
990 			(unsigned long long)hdr->args.offset);
991 
992 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
993 	}
994 }
995 
996 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
997 {
998 	struct rpc_task *task = &hdr->task;
999 
1000 	pnfs_layoutcommit_inode(hdr->inode, false);
1001 
1002 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1003 		dprintk("%s Reset task %5u for i/o through MDS "
1004 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1005 			hdr->task.tk_pid,
1006 			hdr->inode->i_sb->s_id,
1007 			(unsigned long long)NFS_FILEID(hdr->inode),
1008 			hdr->args.count,
1009 			(unsigned long long)hdr->args.offset);
1010 
1011 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1012 	}
1013 }
1014 
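/*
 * Translate an NFSv4 error from a DS RPC into a recovery action for the
 * caller:
 *   -NFS4ERR_RESET_TO_PNFS - retry the I/O against another mirror
 *   -NFS4ERR_RESET_TO_MDS  - resend the I/O through the MDS
 *   -EAGAIN                - recovery has been scheduled; retry the task
 *   0                      - nothing to do (or a fatal -EIO has been set
 *                            on the task for an unrecoverable stateid)
 */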
1015 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1016 					   struct nfs4_state *state,
1017 					   struct nfs_client *clp,
1018 					   struct pnfs_layout_segment *lseg,
1019 					   int idx)
1020 {
1021 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1022 	struct inode *inode = lo->plh_inode;
1023 	struct nfs_server *mds_server = NFS_SERVER(inode);
1024 
1025 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1026 	struct nfs_client *mds_client = mds_server->nfs_client;
1027 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1028 
1029 	if (task->tk_status >= 0)
1030 		return 0;
1031 
1032 	switch (task->tk_status) {
1033 	/* MDS state errors */
1034 	case -NFS4ERR_DELEG_REVOKED:
1035 	case -NFS4ERR_ADMIN_REVOKED:
1036 	case -NFS4ERR_BAD_STATEID:
1037 		if (state == NULL)
1038 			break;
1039 		nfs_remove_bad_delegation(state->inode);
1040 	case -NFS4ERR_OPENMODE:
1041 		if (state == NULL)
1042 			break;
1043 		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
1044 			goto out_bad_stateid;
1045 		goto wait_on_recovery;
1046 	case -NFS4ERR_EXPIRED:
1047 		if (state != NULL) {
1048 			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
1049 				goto out_bad_stateid;
1050 		}
1051 		nfs4_schedule_lease_recovery(mds_client);
1052 		goto wait_on_recovery;
1053 	/* DS session errors */
1054 	case -NFS4ERR_BADSESSION:
1055 	case -NFS4ERR_BADSLOT:
1056 	case -NFS4ERR_BAD_HIGH_SLOT:
1057 	case -NFS4ERR_DEADSESSION:
1058 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1059 	case -NFS4ERR_SEQ_FALSE_RETRY:
1060 	case -NFS4ERR_SEQ_MISORDERED:
1061 		dprintk("%s ERROR %d, Reset session. Exchangeid "
1062 			"flags 0x%x\n", __func__, task->tk_status,
1063 			clp->cl_exchange_flags);
1064 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1065 		break;
1066 	case -NFS4ERR_DELAY:
1067 	case -NFS4ERR_GRACE:
1068 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1069 		break;
1070 	case -NFS4ERR_RETRY_UNCACHED_REP:
1071 		break;
1072 	/* Invalidate Layout errors */
1073 	case -NFS4ERR_PNFS_NO_LAYOUT:
1074 	case -ESTALE:           /* mapped NFS4ERR_STALE */
1075 	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
1076 	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
1077 	case -NFS4ERR_FHEXPIRED:
1078 	case -NFS4ERR_WRONG_TYPE:
1079 		dprintk("%s Invalid layout error %d\n", __func__,
1080 			task->tk_status);
1081 		/*
1082 		 * Destroy layout so new i/o will get a new layout.
1083 		 * Layout will not be destroyed until all current lseg
1084 		 * references are put. Mark layout as invalid to resend failed
1085 		 * i/o and all i/o waiting on the slot table to the MDS until
1086 		 * layout is destroyed and a new valid layout is obtained.
1087 		 */
1088 		pnfs_destroy_layout(NFS_I(inode));
1089 		rpc_wake_up(&tbl->slot_tbl_waitq);
1090 		goto reset;
1091 	/* RPC connection errors */
1092 	case -ECONNREFUSED:
1093 	case -EHOSTDOWN:
1094 	case -EHOSTUNREACH:
1095 	case -ENETUNREACH:
1096 	case -EIO:
1097 	case -ETIMEDOUT:
1098 	case -EPIPE:
1099 		dprintk("%s DS connection error %d\n", __func__,
1100 			task->tk_status);
1101 		nfs4_mark_deviceid_unavailable(devid);
1102 		rpc_wake_up(&tbl->slot_tbl_waitq);
1103 		/* fall through */
1104 	default:
1105 		if (ff_layout_no_fallback_to_mds(lseg) ||
1106 		    ff_layout_has_available_ds(lseg))
1107 			return -NFS4ERR_RESET_TO_PNFS;
1108 reset:
1109 		dprintk("%s Retry through MDS. Error %d\n", __func__,
1110 			task->tk_status);
1111 		return -NFS4ERR_RESET_TO_MDS;
1112 	}
1113 out:
1114 	task->tk_status = 0;
1115 	return -EAGAIN;
1116 out_bad_stateid:
1117 	task->tk_status = -EIO;
1118 	return 0;
1119 wait_on_recovery:
1120 	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
1121 	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
1122 		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
1123 	goto out;
1124 }
1125 
1126 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1127 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1128 					   struct pnfs_layout_segment *lseg,
1129 					   int idx)
1130 {
1131 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1132 
1133 	if (task->tk_status >= 0)
1134 		return 0;
1135 
1136 	switch (task->tk_status) {
1137 	/* File access problems. Don't mark the device as unavailable */
1138 	case -EACCES:
1139 	case -ESTALE:
1140 	case -EISDIR:
1141 	case -EBADHANDLE:
1142 	case -ELOOP:
1143 	case -ENOSPC:
1144 		break;
1145 	case -EJUKEBOX:
1146 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1147 		goto out_retry;
1148 	default:
1149 		dprintk("%s DS connection error %d\n", __func__,
1150 			task->tk_status);
1151 		nfs4_mark_deviceid_unavailable(devid);
1152 	}
1153 	/* FIXME: Need to prevent infinite looping here. */
1154 	return -NFS4ERR_RESET_TO_PNFS;
1155 out_retry:
1156 	task->tk_status = 0;
1157 	rpc_restart_call_prepare(task);
1158 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1159 	return -EAGAIN;
1160 }
1161 
1162 static int ff_layout_async_handle_error(struct rpc_task *task,
1163 					struct nfs4_state *state,
1164 					struct nfs_client *clp,
1165 					struct pnfs_layout_segment *lseg,
1166 					int idx)
1167 {
1168 	int vers = clp->cl_nfs_mod->rpc_vers->number;
1169 
1170 	switch (vers) {
1171 	case 3:
1172 		return ff_layout_async_handle_error_v3(task, lseg, idx);
1173 	case 4:
1174 		return ff_layout_async_handle_error_v4(task, state, clp,
1175 						       lseg, idx);
1176 	default:
1177 		/* should never happen */
1178 		WARN_ON_ONCE(1);
1179 		return 0;
1180 	}
1181 }
1182 
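/*
 * Record a DS I/O error on the layout's error list so that it can be
 * reported back to the MDS in a later LAYOUTRETURN, and mark the layout
 * for return.  Local transport errors (status == 0) are first mapped to
 * an NFS4ERR_* value; DELAY/GRACE are not tracked.
 */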
1183 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1184 					int idx, u64 offset, u64 length,
1185 					u32 status, int opnum, int error)
1186 {
1187 	struct nfs4_ff_layout_mirror *mirror;
1188 	int err;
1189 
1190 	if (status == 0) {
1191 		switch (error) {
1192 		case -ETIMEDOUT:
1193 		case -EPFNOSUPPORT:
1194 		case -EPROTONOSUPPORT:
1195 		case -EOPNOTSUPP:
1196 		case -ECONNREFUSED:
1197 		case -ECONNRESET:
1198 		case -EHOSTDOWN:
1199 		case -EHOSTUNREACH:
1200 		case -ENETUNREACH:
1201 		case -EADDRINUSE:
1202 		case -ENOBUFS:
1203 		case -EPIPE:
1204 		case -EPERM:
1205 			status = NFS4ERR_NXIO;
1206 			break;
1207 		case -EACCES:
1208 			status = NFS4ERR_ACCESS;
1209 			break;
1210 		default:
1211 			return;
1212 		}
1213 	}
1214 
1215 	switch (status) {
1216 	case NFS4ERR_DELAY:
1217 	case NFS4ERR_GRACE:
1218 		return;
1219 	default:
1220 		break;
1221 	}
1222 
1223 	mirror = FF_LAYOUT_COMP(lseg, idx);
1224 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1225 				       mirror, offset, length, status, opnum,
1226 				       GFP_NOIO);
1227 	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
1228 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1229 }
1230 
1231 /* NFS_PROTO call done callback routines */
1232 static int ff_layout_read_done_cb(struct rpc_task *task,
1233 				struct nfs_pgio_header *hdr)
1234 {
1235 	int err;
1236 
1237 	trace_nfs4_pnfs_read(hdr, task->tk_status);
1238 	if (task->tk_status < 0)
1239 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1240 					    hdr->args.offset, hdr->args.count,
1241 					    hdr->res.op_status, OP_READ,
1242 					    task->tk_status);
1243 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1244 					   hdr->ds_clp, hdr->lseg,
1245 					   hdr->pgio_mirror_idx);
1246 
1247 	switch (err) {
1248 	case -NFS4ERR_RESET_TO_PNFS:
1249 		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
1250 					hdr->pgio_mirror_idx + 1,
1251 					&hdr->pgio_mirror_idx))
1252 			goto out_eagain;
1253 		set_bit(NFS_LAYOUT_RETURN_REQUESTED,
1254 			&hdr->lseg->pls_layout->plh_flags);
1255 		pnfs_read_resend_pnfs(hdr);
1256 		return task->tk_status;
1257 	case -NFS4ERR_RESET_TO_MDS:
1258 		ff_layout_reset_read(hdr);
1259 		return task->tk_status;
1260 	case -EAGAIN:
1261 		goto out_eagain;
1262 	}
1263 
1264 	return 0;
1265 out_eagain:
1266 	rpc_restart_call_prepare(task);
1267 	return -EAGAIN;
1268 }
1269 
1270 static bool
1271 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1272 {
1273 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1274 }
1275 
1276 /*
1277  * We reference the rpc_cred of the first WRITE that triggers the need for
1278  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1279  * rfc5661 is not clear about which credential should be used.
1280  *
1281  * A flexfiles client should treat a FILE_SYNC reply from a DS as DATA_SYNC;
1282  * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1283  * we therefore always send a layoutcommit after DS writes.
1284  */
1285 static void
1286 ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
1287 {
1288 	if (!ff_layout_need_layoutcommit(hdr->lseg))
1289 		return;
1290 
1291 	pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
1292 			hdr->mds_offset + hdr->res.count);
1293 	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
1294 		(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
1295 }
1296 
1297 static bool
1298 ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
1299 {
1300 	/* No mirroring for now */
1301 	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
1302 
1303 	return ff_layout_test_devid_unavailable(node);
1304 }
1305 
1306 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1307 		struct nfs_pgio_header *hdr)
1308 {
1309 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1310 		return;
1311 	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1312 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1313 			hdr->args.count,
1314 			task->tk_start);
1315 }
1316 
1317 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1318 		struct nfs_pgio_header *hdr)
1319 {
1320 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1321 		return;
1322 	nfs4_ff_layout_stat_io_end_read(task,
1323 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1324 			hdr->args.count,
1325 			hdr->res.count);
1326 }
1327 
1328 static int ff_layout_read_prepare_common(struct rpc_task *task,
1329 					 struct nfs_pgio_header *hdr)
1330 {
1331 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1332 		rpc_exit(task, -EIO);
1333 		return -EIO;
1334 	}
1335 	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1336 		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
1337 		if (ff_layout_has_available_ds(hdr->lseg))
1338 			pnfs_read_resend_pnfs(hdr);
1339 		else
1340 			ff_layout_reset_read(hdr);
1341 		rpc_exit(task, 0);
1342 		return -EAGAIN;
1343 	}
1344 	hdr->pgio_done_cb = ff_layout_read_done_cb;
1345 
1346 	ff_layout_read_record_layoutstats_start(task, hdr);
1347 	return 0;
1348 }
1349 
1350 /*
1351  * Call ops for the async read/write cases
1352  * In the case of dense layouts, the offset needs to be reset to its
1353  * original value.
1354  */
1355 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1356 {
1357 	struct nfs_pgio_header *hdr = data;
1358 
1359 	if (ff_layout_read_prepare_common(task, hdr))
1360 		return;
1361 
1362 	rpc_call_start(task);
1363 }
1364 
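/*
 * A DS connection may be NFSv4.0 (per-client slot table, no session) or
 * NFSv4.1+ (session-based); dispatch to the matching SEQUENCE setup
 * helper.
 */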
1365 static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
1366 				    struct nfs4_sequence_args *args,
1367 				    struct nfs4_sequence_res *res,
1368 				    struct rpc_task *task)
1369 {
1370 	if (ds_clp->cl_session)
1371 		return nfs41_setup_sequence(ds_clp->cl_session,
1372 					   args,
1373 					   res,
1374 					   task);
1375 	return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
1376 				   args,
1377 				   res,
1378 				   task);
1379 }
1380 
1381 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1382 {
1383 	struct nfs_pgio_header *hdr = data;
1384 
1385 	if (ff_layout_setup_sequence(hdr->ds_clp,
1386 				     &hdr->args.seq_args,
1387 				     &hdr->res.seq_res,
1388 				     task))
1389 		return;
1390 
1391 	if (ff_layout_read_prepare_common(task, hdr))
1392 		return;
1393 
1394 	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1395 			hdr->args.lock_context, FMODE_READ) == -EIO)
1396 		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1397 }
1398 
1399 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1400 {
1401 	struct nfs_pgio_header *hdr = data;
1402 
1403 	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1404 
1405 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1406 	    task->tk_status == 0) {
1407 		nfs4_sequence_done(task, &hdr->res.seq_res);
1408 		return;
1409 	}
1410 
1411 	/* Note this may cause RPC to be resent */
1412 	hdr->mds_ops->rpc_call_done(task, hdr);
1413 }
1414 
1415 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1416 {
1417 	struct nfs_pgio_header *hdr = data;
1418 
1419 	ff_layout_read_record_layoutstats_done(task, hdr);
1420 	rpc_count_iostats_metrics(task,
1421 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1422 }
1423 
1424 static void ff_layout_read_release(void *data)
1425 {
1426 	struct nfs_pgio_header *hdr = data;
1427 
1428 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1429 	pnfs_generic_rw_release(data);
1430 }
1431 
1432 
1433 static int ff_layout_write_done_cb(struct rpc_task *task,
1434 				struct nfs_pgio_header *hdr)
1435 {
1436 	int err;
1437 
1438 	trace_nfs4_pnfs_write(hdr, task->tk_status);
1439 	if (task->tk_status < 0)
1440 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1441 					    hdr->args.offset, hdr->args.count,
1442 					    hdr->res.op_status, OP_WRITE,
1443 					    task->tk_status);
1444 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1445 					   hdr->ds_clp, hdr->lseg,
1446 					   hdr->pgio_mirror_idx);
1447 
1448 	switch (err) {
1449 	case -NFS4ERR_RESET_TO_PNFS:
1450 		ff_layout_reset_write(hdr, true);
1451 		return task->tk_status;
1452 	case -NFS4ERR_RESET_TO_MDS:
1453 		ff_layout_reset_write(hdr, false);
1454 		return task->tk_status;
1455 	case -EAGAIN:
1456 		return -EAGAIN;
1457 	}
1458 
1459 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1460 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1461 		ff_layout_set_layoutcommit(hdr);
1462 
1463 	/* Zero out the fattr since we don't care about DS attributes at all */
1464 	hdr->fattr.valid = 0;
1465 	if (task->tk_status >= 0)
1466 		nfs_writeback_update_inode(hdr);
1467 
1468 	return 0;
1469 }
1470 
1471 static int ff_layout_commit_done_cb(struct rpc_task *task,
1472 				     struct nfs_commit_data *data)
1473 {
1474 	int err;
1475 
1476 	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1477 	if (task->tk_status < 0)
1478 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1479 					    data->args.offset, data->args.count,
1480 					    data->res.op_status, OP_COMMIT,
1481 					    task->tk_status);
1482 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1483 					   data->lseg, data->ds_commit_index);
1484 
1485 	switch (err) {
1486 	case -NFS4ERR_RESET_TO_PNFS:
1487 		pnfs_generic_prepare_to_resend_writes(data);
1488 		return -EAGAIN;
1489 	case -NFS4ERR_RESET_TO_MDS:
1490 		pnfs_generic_prepare_to_resend_writes(data);
1491 		return -EAGAIN;
1492 	case -EAGAIN:
1493 		rpc_restart_call_prepare(task);
1494 		return -EAGAIN;
1495 	}
1496 
1497 	if (data->verf.committed == NFS_UNSTABLE
1498 	    && ff_layout_need_layoutcommit(data->lseg))
1499 		pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
1500 
1501 	return 0;
1502 }
1503 
1504 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1505 		struct nfs_pgio_header *hdr)
1506 {
1507 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1508 		return;
1509 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1510 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1511 			hdr->args.count,
1512 			task->tk_start);
1513 }
1514 
1515 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1516 		struct nfs_pgio_header *hdr)
1517 {
1518 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1519 		return;
1520 	nfs4_ff_layout_stat_io_end_write(task,
1521 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1522 			hdr->args.count, hdr->res.count,
1523 			hdr->res.verf->committed);
1524 }
1525 
1526 static int ff_layout_write_prepare_common(struct rpc_task *task,
1527 					  struct nfs_pgio_header *hdr)
1528 {
1529 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1530 		rpc_exit(task, -EIO);
1531 		return -EIO;
1532 	}
1533 
1534 	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1535 		bool retry_pnfs;
1536 
1537 		retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
1538 		dprintk("%s task %u reset io to %s\n", __func__,
1539 			task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
1540 		ff_layout_reset_write(hdr, retry_pnfs);
1541 		rpc_exit(task, 0);
1542 		return -EAGAIN;
1543 	}
1544 
1545 	ff_layout_write_record_layoutstats_start(task, hdr);
1546 	return 0;
1547 }
1548 
1549 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1550 {
1551 	struct nfs_pgio_header *hdr = data;
1552 
1553 	if (ff_layout_write_prepare_common(task, hdr))
1554 		return;
1555 
1556 	rpc_call_start(task);
1557 }
1558 
1559 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1560 {
1561 	struct nfs_pgio_header *hdr = data;
1562 
1563 	if (ff_layout_setup_sequence(hdr->ds_clp,
1564 				     &hdr->args.seq_args,
1565 				     &hdr->res.seq_res,
1566 				     task))
1567 		return;
1568 
1569 	if (ff_layout_write_prepare_common(task, hdr))
1570 		return;
1571 
1572 	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1573 			hdr->args.lock_context, FMODE_WRITE) == -EIO)
1574 		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1575 }
1576 
1577 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1578 {
1579 	struct nfs_pgio_header *hdr = data;
1580 
1581 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1582 	    task->tk_status == 0) {
1583 		nfs4_sequence_done(task, &hdr->res.seq_res);
1584 		return;
1585 	}
1586 
1587 	/* Note this may cause RPC to be resent */
1588 	hdr->mds_ops->rpc_call_done(task, hdr);
1589 }
1590 
1591 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1592 {
1593 	struct nfs_pgio_header *hdr = data;
1594 
1595 	ff_layout_write_record_layoutstats_done(task, hdr);
1596 	rpc_count_iostats_metrics(task,
1597 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1598 }
1599 
1600 static void ff_layout_write_release(void *data)
1601 {
1602 	struct nfs_pgio_header *hdr = data;
1603 
1604 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1605 	pnfs_generic_rw_release(data);
1606 }
1607 
1608 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1609 		struct nfs_commit_data *cdata)
1610 {
1611 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1612 		return;
1613 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1614 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1615 			0, task->tk_start);
1616 }
1617 
1618 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1619 		struct nfs_commit_data *cdata)
1620 {
1621 	struct nfs_page *req;
1622 	__u64 count = 0;
1623 
1624 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1625 		return;
1626 
1627 	if (task->tk_status == 0) {
1628 		list_for_each_entry(req, &cdata->pages, wb_list)
1629 			count += req->wb_bytes;
1630 	}
1631 	nfs4_ff_layout_stat_io_end_write(task,
1632 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1633 			count, count, NFS_FILE_SYNC);
1634 }
1635 
1636 static void ff_layout_commit_prepare_common(struct rpc_task *task,
1637 		struct nfs_commit_data *cdata)
1638 {
1639 	ff_layout_commit_record_layoutstats_start(task, cdata);
1640 }
1641 
1642 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1643 {
1644 	ff_layout_commit_prepare_common(task, data);
1645 	rpc_call_start(task);
1646 }
1647 
1648 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1649 {
1650 	struct nfs_commit_data *wdata = data;
1651 
1652 	if (ff_layout_setup_sequence(wdata->ds_clp,
1653 				 &wdata->args.seq_args,
1654 				 &wdata->res.seq_res,
1655 				 task))
1656 		return;
1657 	ff_layout_commit_prepare_common(task, data);
1658 }
1659 
1660 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1661 {
1662 	pnfs_generic_write_commit_done(task, data);
1663 }
1664 
1665 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1666 {
1667 	struct nfs_commit_data *cdata = data;
1668 
1669 	ff_layout_commit_record_layoutstats_done(task, cdata);
1670 	rpc_count_iostats_metrics(task,
1671 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1672 }
1673 
1674 static void ff_layout_commit_release(void *data)
1675 {
1676 	struct nfs_commit_data *cdata = data;
1677 
1678 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1679 	pnfs_generic_commit_release(data);
1680 }
1681 
1682 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1683 	.rpc_call_prepare = ff_layout_read_prepare_v3,
1684 	.rpc_call_done = ff_layout_read_call_done,
1685 	.rpc_count_stats = ff_layout_read_count_stats,
1686 	.rpc_release = ff_layout_read_release,
1687 };
1688 
1689 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1690 	.rpc_call_prepare = ff_layout_read_prepare_v4,
1691 	.rpc_call_done = ff_layout_read_call_done,
1692 	.rpc_count_stats = ff_layout_read_count_stats,
1693 	.rpc_release = ff_layout_read_release,
1694 };
1695 
1696 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1697 	.rpc_call_prepare = ff_layout_write_prepare_v3,
1698 	.rpc_call_done = ff_layout_write_call_done,
1699 	.rpc_count_stats = ff_layout_write_count_stats,
1700 	.rpc_release = ff_layout_write_release,
1701 };
1702 
1703 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1704 	.rpc_call_prepare = ff_layout_write_prepare_v4,
1705 	.rpc_call_done = ff_layout_write_call_done,
1706 	.rpc_count_stats = ff_layout_write_count_stats,
1707 	.rpc_release = ff_layout_write_release,
1708 };
1709 
1710 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1711 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1712 	.rpc_call_done = ff_layout_commit_done,
1713 	.rpc_count_stats = ff_layout_commit_count_stats,
1714 	.rpc_release = ff_layout_commit_release,
1715 };
1716 
1717 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1718 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1719 	.rpc_call_done = ff_layout_commit_done,
1720 	.rpc_count_stats = ff_layout_commit_count_stats,
1721 	.rpc_release = ff_layout_commit_release,
1722 };
1723 
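/*
 * Issue the READ described by @hdr directly to the chosen data server:
 * look up (or create) the DS client and credential, substitute the DS
 * filehandle, and fire off an asynchronous RPC with the v3 or v4 call
 * ops as appropriate.  Returns PNFS_ATTEMPTED on success; otherwise the
 * caller is told to retry or to fall back to the MDS.
 */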
1724 static enum pnfs_try_status
1725 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1726 {
1727 	struct pnfs_layout_segment *lseg = hdr->lseg;
1728 	struct nfs4_pnfs_ds *ds;
1729 	struct rpc_clnt *ds_clnt;
1730 	struct rpc_cred *ds_cred;
1731 	loff_t offset = hdr->args.offset;
1732 	u32 idx = hdr->pgio_mirror_idx;
1733 	int vers;
1734 	struct nfs_fh *fh;
1735 
1736 	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
1737 		__func__, hdr->inode->i_ino,
1738 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1739 
1740 	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
1741 	if (!ds)
1742 		goto out_failed;
1743 
1744 	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1745 						   hdr->inode);
1746 	if (IS_ERR(ds_clnt))
1747 		goto out_failed;
1748 
1749 	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1750 	if (!ds_cred)
1751 		goto out_failed;
1752 
1753 	vers = nfs4_ff_layout_ds_version(lseg, idx);
1754 
1755 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1756 		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
1757 
1758 	atomic_inc(&ds->ds_clp->cl_count);
1759 	hdr->ds_clp = ds->ds_clp;
1760 	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1761 	if (fh)
1762 		hdr->args.fh = fh;
1763 	/*
1764 	 * Note that if we ever decide to split across DSes,
1765 	 * then we may need to handle dense-like offsets.
1766 	 */
1767 	hdr->args.offset = offset;
1768 	hdr->mds_offset = offset;
1769 
1770 	/* Perform an asynchronous read to ds */
1771 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1772 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1773 				      &ff_layout_read_call_ops_v4,
1774 			  0, RPC_TASK_SOFTCONN);
1775 	put_rpccred(ds_cred);
1776 	return PNFS_ATTEMPTED;
1777 
1778 out_failed:
1779 	if (ff_layout_has_available_ds(lseg))
1780 		return PNFS_TRY_AGAIN;
1781 	return PNFS_NOT_ATTEMPTED;
1782 }
1783 
1784 /* Perform async writes. */
1785 static enum pnfs_try_status
1786 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1787 {
1788 	struct pnfs_layout_segment *lseg = hdr->lseg;
1789 	struct nfs4_pnfs_ds *ds;
1790 	struct rpc_clnt *ds_clnt;
1791 	struct rpc_cred *ds_cred;
1792 	loff_t offset = hdr->args.offset;
1793 	int vers;
1794 	struct nfs_fh *fh;
1795 	int idx = hdr->pgio_mirror_idx;
1796 
1797 	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1798 	if (!ds)
1799 		return PNFS_NOT_ATTEMPTED;
1800 
1801 	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1802 						   hdr->inode);
1803 	if (IS_ERR(ds_clnt))
1804 		return PNFS_NOT_ATTEMPTED;
1805 
1806 	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1807 	if (!ds_cred)
1808 		return PNFS_NOT_ATTEMPTED;
1809 
1810 	vers = nfs4_ff_layout_ds_version(lseg, idx);
1811 
1812 	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
1813 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1814 		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
1815 		vers);
1816 
1817 	hdr->pgio_done_cb = ff_layout_write_done_cb;
1818 	atomic_inc(&ds->ds_clp->cl_count);
1819 	hdr->ds_clp = ds->ds_clp;
1820 	hdr->ds_commit_idx = idx;
1821 	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1822 	if (fh)
1823 		hdr->args.fh = fh;
1824 
1825 	/*
1826 	 * Note that if we ever decide to split across DSes,
1827 	 * then we may need to handle dense-like offsets.
1828 	 */
1829 	hdr->args.offset = offset;
1830 
1831 	/* Perform an asynchronous write */
1832 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1833 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1834 				      &ff_layout_write_call_ops_v4,
1835 			  sync, RPC_TASK_SOFTCONN);
1836 	put_rpccred(ds_cred);
1837 	return PNFS_ATTEMPTED;
1838 }
1839 
1840 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1841 {
1842 	return i;
1843 }
1844 
1845 static struct nfs_fh *
1846 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1847 {
1848 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1849 
1850 	/* FIXME: Assume that there is only one NFS version available
1851 	 * for the DS.
1852 	 */
1853 	return &flseg->mirror_array[i]->fh_versions[0];
1854 }
1855 
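/*
 * Send a COMMIT to the data server that owns the given commit bucket.
 * On any setup failure the queued writes are requeued for resending and
 * -EAGAIN is returned so the generic commit code can retry.
 */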
1856 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1857 {
1858 	struct pnfs_layout_segment *lseg = data->lseg;
1859 	struct nfs4_pnfs_ds *ds;
1860 	struct rpc_clnt *ds_clnt;
1861 	struct rpc_cred *ds_cred;
1862 	u32 idx;
1863 	int vers, ret;
1864 	struct nfs_fh *fh;
1865 
1866 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1867 	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1868 	if (!ds)
1869 		goto out_err;
1870 
1871 	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1872 						   data->inode);
1873 	if (IS_ERR(ds_clnt))
1874 		goto out_err;
1875 
1876 	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
1877 	if (!ds_cred)
1878 		goto out_err;
1879 
1880 	vers = nfs4_ff_layout_ds_version(lseg, idx);
1881 
1882 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1883 		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
1884 		vers);
1885 	data->commit_done_cb = ff_layout_commit_done_cb;
1886 	data->cred = ds_cred;
1887 	atomic_inc(&ds->ds_clp->cl_count);
1888 	data->ds_clp = ds->ds_clp;
1889 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1890 	if (fh)
1891 		data->args.fh = fh;
1892 
1893 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1894 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
1895 					       &ff_layout_commit_call_ops_v4,
1896 				   how, RPC_TASK_SOFTCONN);
1897 	put_rpccred(ds_cred);
1898 	return ret;
1899 out_err:
1900 	pnfs_generic_prepare_to_resend_writes(data);
1901 	pnfs_generic_commit_release(data);
1902 	return -EAGAIN;
1903 }
1904 
1905 static int
1906 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1907 			   int how, struct nfs_commit_info *cinfo)
1908 {
1909 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1910 					    ff_layout_initiate_commit);
1911 }
1912 
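/* Return the commit info embedded in the flexfile layout header, if any. */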
1913 static struct pnfs_ds_commit_info *
1914 ff_layout_get_ds_info(struct inode *inode)
1915 {
1916 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1917 
1918 	if (layout == NULL)
1919 		return NULL;
1920 
1921 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1922 }
1923 
1924 static void
1925 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1926 {
1927 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1928 						  id_node));
1929 }
1930 
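/*
 * Encode the array of accumulated DS I/O errors for LAYOUTRETURN.
 * The 4-byte array count is reserved up front and backfilled once
 * ff_layout_encode_ds_ioerr() has written the entries.
 */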
1931 static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
1932 				  struct xdr_stream *xdr,
1933 				  const struct nfs4_layoutreturn_args *args)
1934 {
1935 	struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
1936 	__be32 *start;
1937 	int count = 0, ret = 0;
1938 
1939 	start = xdr_reserve_space(xdr, 4);
1940 	if (unlikely(!start))
1941 		return -E2BIG;
1942 
1943 	/* This assumes we always return _ALL_ layouts */
1944 	spin_lock(&hdr->plh_inode->i_lock);
1945 	ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
1946 	spin_unlock(&hdr->plh_inode->i_lock);
1947 
1948 	*start = cpu_to_be32(count);
1949 
1950 	return ret;
1951 }
1952 
1953 /* report nothing for now */
1954 static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
1955 				     struct xdr_stream *xdr,
1956 				     const struct nfs4_layoutreturn_args *args)
1957 {
1958 	__be32 *p;
1959 
1960 	p = xdr_reserve_space(xdr, 4);
1961 	if (likely(p))
1962 		*p = cpu_to_be32(0);
1963 }
1964 
1965 static struct nfs4_deviceid_node *
1966 ff_layout_alloc_deviceid_node(struct nfs_server *server,
1967 			      struct pnfs_device *pdev, gfp_t gfp_flags)
1968 {
1969 	struct nfs4_ff_layout_ds *dsaddr;
1970 
1971 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
1972 	if (!dsaddr)
1973 		return NULL;
1974 	return &dsaddr->id_node;
1975 }
1976 
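/*
 * Encode the opaque flexfile layoutreturn body: the DS error list
 * followed by the (currently empty) iostats list.  The opaque length
 * is reserved first and backfilled at the end.
 */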
1977 static void
1978 ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
1979 			      struct xdr_stream *xdr,
1980 			      const struct nfs4_layoutreturn_args *args)
1981 {
1982 	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
1983 	__be32 *start;
1984 
1985 	dprintk("%s: Begin\n", __func__);
1986 	start = xdr_reserve_space(xdr, 4);
1987 	BUG_ON(!start);
1988 
1989 	ff_layout_encode_ioerr(flo, xdr, args);
1990 	ff_layout_encode_iostats(flo, xdr, args);
1991 
1992 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
1993 	dprintk("%s: Return\n", __func__);
1994 }
1995 
1996 static int
1997 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
1998 {
1999 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2000 
2001 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2002 }
2003 
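/*
 * Write an IPv6 address in presentation format, without a scope id,
 * using the RFC 4291 shorthand forms where they apply.
 */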
2004 static size_t
2005 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2006 			  const int buflen)
2007 {
2008 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2009 	const struct in6_addr *addr = &sin6->sin6_addr;
2010 
2011 	/*
2012 	 * RFC 4291, Section 2.2.2
2013 	 *
2014 	 * Shorthanded ANY address
2015 	 */
2016 	if (ipv6_addr_any(addr))
2017 		return snprintf(buf, buflen, "::");
2018 
2019 	/*
2020 	 * RFC 4291, Section 2.2.2
2021 	 *
2022 	 * Shorthanded loopback address
2023 	 */
2024 	if (ipv6_addr_loopback(addr))
2025 		return snprintf(buf, buflen, "::1");
2026 
2027 	/*
2028 	 * RFC 4291, Section 2.2.3
2029 	 *
2030 	 * Special presentation address format for mapped v4
2031 	 * addresses.
2032 	 */
2033 	if (ipv6_addr_v4mapped(addr))
2034 		return snprintf(buf, buflen, "::ffff:%pI4",
2035 					&addr->s6_addr32[3]);
2036 
2037 	/*
2038 	 * RFC 4291, Section 2.2.1
2039 	 */
2040 	return snprintf(buf, buflen, "%pI6c", addr);
2041 }
2042 
2043 /* Derived from rpc_sockaddr2uaddr */
2044 static void
2045 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2046 {
2047 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2048 	char portbuf[RPCBIND_MAXUADDRPLEN];
2049 	char addrbuf[RPCBIND_MAXUADDRLEN];
2050 	char *netid;
2051 	unsigned short port;
2052 	int len, netid_len;
2053 	__be32 *p;
2054 
2055 	switch (sap->sa_family) {
2056 	case AF_INET:
2057 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2058 			return;
2059 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2060 		netid = "tcp";
2061 		netid_len = 3;
2062 		break;
2063 	case AF_INET6:
2064 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2065 			return;
2066 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2067 		netid = "tcp6";
2068 		netid_len = 4;
2069 		break;
2070 	default:
2071 		/* we only support tcp and tcp6 */
2072 		WARN_ON_ONCE(1);
2073 		return;
2074 	}
2075 
2076 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2077 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2078 
2079 	p = xdr_reserve_space(xdr, 4 + netid_len);
2080 	xdr_encode_opaque(p, netid, netid_len);
2081 
2082 	p = xdr_reserve_space(xdr, 4 + len);
2083 	xdr_encode_opaque(p, addrbuf, len);
2084 }
2085 
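/* Encode an nfstime4: 64-bit seconds followed by 32-bit nanoseconds. */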
2086 static void
2087 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2088 			 ktime_t t)
2089 {
2090 	struct timespec64 ts;
2091 	__be32 *p;
2092 
2093 	p = xdr_reserve_space(xdr, 12);
2094 	ts = ktime_to_timespec64(t);
2095 	p = xdr_encode_hyper(p, ts.tv_sec);
2096 	*p++ = cpu_to_be32(ts.tv_nsec);
2097 }
2098 
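/*
 * Encode one ff_io_latency4: five 64-bit counters followed by the
 * busy time and the aggregate completion time as nfstime4 values.
 */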
2099 static void
2100 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2101 			    struct nfs4_ff_io_stat *stat)
2102 {
2103 	__be32 *p;
2104 
2105 	p = xdr_reserve_space(xdr, 5 * 8);
2106 	p = xdr_encode_hyper(p, stat->ops_requested);
2107 	p = xdr_encode_hyper(p, stat->bytes_requested);
2108 	p = xdr_encode_hyper(p, stat->ops_completed);
2109 	p = xdr_encode_hyper(p, stat->bytes_completed);
2110 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2111 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2112 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2113 }
2114 
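/*
 * Encode the per-mirror layoutstats body: the DS netaddr, the file
 * handle, the read and write ff_io_latency4 blocks (sampled under
 * mirror->lock), the elapsed layout time, and a boolean that is always
 * encoded as false.  The opaque length is reserved first and
 * backfilled at the end.
 */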
2115 static void
2116 ff_layout_encode_layoutstats(struct xdr_stream *xdr,
2117 			     struct nfs42_layoutstat_args *args,
2118 			     struct nfs42_layoutstat_devinfo *devinfo)
2119 {
2120 	struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
2121 	struct nfs4_pnfs_ds_addr *da;
2122 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2123 	struct nfs_fh *fh = &mirror->fh_versions[0];
2124 	__be32 *p, *start;
2125 
2126 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2127 	dprintk("%s: DS %s: encoding address %s\n",
2128 		__func__, ds->ds_remotestr, da->da_remotestr);
2129 	/* layoutupdate length */
2130 	start = xdr_reserve_space(xdr, 4);
2131 	/* netaddr4 */
2132 	ff_layout_encode_netaddr(xdr, da);
2133 	/* nfs_fh4 */
2134 	p = xdr_reserve_space(xdr, 4 + fh->size);
2135 	xdr_encode_opaque(p, fh->data, fh->size);
2136 	/* ff_io_latency4 read */
2137 	spin_lock(&mirror->lock);
2138 	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2139 	/* ff_io_latency4 write */
2140 	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2141 	spin_unlock(&mirror->lock);
2142 	/* nfstime4 */
2143 	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2144 	/* bool */
2145 	p = xdr_reserve_space(xdr, 4);
2146 	*p = cpu_to_be32(false);
2147 
2148 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2149 }
2150 
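/*
 * Fill args->devinfo with one entry per mirror that has a device,
 * up to dev_limit entries.  Each mirror gains a reference that is
 * dropped in ff_layout_cleanup_layoutstats().  Called with the
 * inode's i_lock held.  Returns the number of entries filled in.
 */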
2151 static int
2152 ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
2153 			       struct pnfs_layout_hdr *lo,
2154 			       int dev_limit)
2155 {
2156 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2157 	struct nfs4_ff_layout_mirror *mirror;
2158 	struct nfs4_deviceid_node *dev;
2159 	struct nfs42_layoutstat_devinfo *devinfo;
2160 	int i = 0;
2161 
2162 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2163 		if (i >= dev_limit)
2164 			break;
2165 		if (!mirror->mirror_ds)
2166 			continue;
2167 		/* mirror refcount put in cleanup_layoutstats */
2168 		if (!atomic_inc_not_zero(&mirror->ref))
2169 			continue;
2170 		dev = &mirror->mirror_ds->id_node;
2171 		devinfo = &args->devinfo[i];
2172 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2173 		devinfo->offset = 0;
2174 		devinfo->length = NFS4_MAX_UINT64;
2175 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2176 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2177 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2178 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2179 		devinfo->layout_type = LAYOUT_FLEX_FILES;
2180 		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
2181 		devinfo->layout_private = mirror;
2182 
2183 		i++;
2184 	}
2185 	return i;
2186 }
2187 
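/*
 * Count the referenced mirrors, cap the count at
 * PNFS_LAYOUTSTATS_MAXDEV, allocate the devinfo array and populate it
 * via ff_layout_mirror_prepare_stats().
 */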
2188 static int
2189 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2190 {
2191 	struct nfs4_flexfile_layout *ff_layout;
2192 	struct nfs4_ff_layout_mirror *mirror;
2193 	int dev_count = 0;
2194 
2195 	spin_lock(&args->inode->i_lock);
2196 	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2197 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2198 		if (atomic_read(&mirror->ref) != 0)
2199 			dev_count++;
2200 	}
2201 	spin_unlock(&args->inode->i_lock);
2202 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2203 	if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
2204 		dprintk("%s: truncating devinfo to limit (%d:%d)\n",
2205 			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
2206 		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2207 	}
2208 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2209 	if (!args->devinfo)
2210 		return -ENOMEM;
2211 
2212 	spin_lock(&args->inode->i_lock);
2213 	args->num_dev = ff_layout_mirror_prepare_stats(args,
2214 			&ff_layout->generic_hdr, dev_count);
2215 	spin_unlock(&args->inode->i_lock);
2216 
2217 	return 0;
2218 }
2219 
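/* Drop the mirror references taken in ff_layout_mirror_prepare_stats(). */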
2220 static void
2221 ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
2222 {
2223 	struct nfs4_ff_layout_mirror *mirror;
2224 	int i;
2225 
2226 	for (i = 0; i < data->args.num_dev; i++) {
2227 		mirror = data->args.devinfo[i].layout_private;
2228 		data->args.devinfo[i].layout_private = NULL;
2229 		ff_layout_put_mirror(mirror);
2230 	}
2231 }
2232 
2233 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2234 	.id			= LAYOUT_FLEX_FILES,
2235 	.name			= "LAYOUT_FLEX_FILES",
2236 	.owner			= THIS_MODULE,
2237 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2238 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2239 	.alloc_lseg		= ff_layout_alloc_lseg,
2240 	.free_lseg		= ff_layout_free_lseg,
2241 	.add_lseg		= ff_layout_add_lseg,
2242 	.pg_read_ops		= &ff_layout_pg_read_ops,
2243 	.pg_write_ops		= &ff_layout_pg_write_ops,
2244 	.get_ds_info		= ff_layout_get_ds_info,
2245 	.free_deviceid_node	= ff_layout_free_deviceid_node,
2246 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2247 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2248 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2249 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2250 	.commit_pagelist	= ff_layout_commit_pagelist,
2251 	.read_pagelist		= ff_layout_read_pagelist,
2252 	.write_pagelist		= ff_layout_write_pagelist,
2253 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2254 	.encode_layoutreturn    = ff_layout_encode_layoutreturn,
2255 	.sync			= pnfs_nfs_generic_sync,
2256 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2257 	.cleanup_layoutstats	= ff_layout_cleanup_layoutstats,
2258 };
2259 
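/*
 * Allocate the module-wide empty group_info (ff_zero_group) and
 * register the flexfile layout driver with the pNFS core.
 */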
2260 static int __init nfs4flexfilelayout_init(void)
2261 {
2262 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2263 	       __func__);
2264 	if (!ff_zero_group) {
2265 		ff_zero_group = groups_alloc(0);
2266 		if (!ff_zero_group)
2267 			return -ENOMEM;
2268 	}
2269 	return pnfs_register_layoutdriver(&flexfilelayout_type);
2270 }
2271 
2272 static void __exit nfs4flexfilelayout_exit(void)
2273 {
2274 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2275 	       __func__);
2276 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2277 	if (ff_zero_group) {
2278 		put_group_info(ff_zero_group);
2279 		ff_zero_group = NULL;
2280 	}
2281 }
2282 
2283 MODULE_ALIAS("nfs-layouttype4-4");
2284 
2285 MODULE_LICENSE("GPL");
2286 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2287 
2288 module_init(nfs4flexfilelayout_init);
2289 module_exit(nfs4flexfilelayout_exit);
2290