/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

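/* Retry ceiling used when a DS returns NFS4ERR_DELAY or NFS4ERR_GRACE */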
#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	memcpy(stateid, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

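/* Decode an opaque filehandle: a 4-byte length followed by the fh data */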
static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

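/*
 * Return true iff every fh version in @m1 also appears in @m2.
 * Used to detect duplicate mirrors when inserting into the layout.
 */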
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					&m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

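/*
 * Insert @mirror into the layout's mirror list, unless an equivalent
 * mirror (same deviceid and filehandles) already exists, in which case
 * take a reference on the existing entry and return it instead.
 */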
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (mirror->mirror_ds != pos->mirror_ds)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	if (mirror->cred)
		put_rpccred(mirror->cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node, but we still do it here
			 * for the .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

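/*
 * Comparison helper for lseg insertion: returns true if @l1 should be
 * placed after @l2 in the layout's segment list.
 */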
static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

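/*
 * Try to merge @new with @old: if the iomodes match and the ranges
 * overlap, extend @new to cover both and propagate @old's ROC and
 * LAYOUTRETURN flags.
 */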
static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		set_bit(NFS_LSEG_LAYOUTRETURN, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

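/* Selection-style sort of the mirror array, descending by efficiency */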
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct nfs4_deviceid devid;
		struct nfs4_deviceid_node *idnode;
		u32 ds_count;
		u32 fh_count;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &devid);
		if (rc)
			goto out_err_free;

		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
						&devid, lh->plh_lc_cred,
						gfp_flags);
		/*
		 * upon success, mirror_ds is allocated by a previous
		 * getdeviceinfo, or newly by .alloc_deviceid_node;
		 * a nfs4_find_get_deviceid() failure is indeed a
		 * getdeviceinfo failure
		 */
		if (idnode)
			fls->mirror_array[i]->mirror_ds =
				FF_LAYOUT_MIRROR_DS(idnode);
		else
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kcalloc(fh_count, sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &fls->mirror_array[i]->uid);
		if (rc)
			goto out_err_free;

		/* group */
		rc = decode_name(&stream, &fls->mirror_array[i]->gid);
		if (rc)
			goto out_err_free;

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: uid %d gid %d\n", __func__,
			fls->mirror_array[i]->uid,
			fls->mirror_array[i]->gid);
	}

	p = xdr_inline_decode(&stream, 4);
	if (p)
		fls->flags = be32_to_cpup(p);

	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;

	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}

static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

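/*
 * Start the busy timer for an I/O and decide whether it is time to send
 * a LAYOUTSTATS report; returns true once the report interval
 * (FF_LAYOUTSTATS_REPORT_INTERVAL, or the layoutstats_timer module
 * parameter if set) has elapsed since the last report.
 */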
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	static const ktime_t notime = {0};
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (ktime_equal(mirror->start_time, notime))
		mirror->start_time = now;
	if (ktime_equal(mirror->last_report_time, notime))
		mirror->last_report_time = now;
	if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
			report_interval) {
		mirror->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	spin_unlock(&mirror->lock);
}

static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lseg per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	else {
		int i;

		spin_lock(cinfo->lock);
		if (cinfo->ds->nbuckets != 0)
			kfree(buckets);
		else {
			cinfo->ds->buckets = buckets;
			cinfo->ds->nbuckets = size;
			for (i = 0; i < size; i++) {
				INIT_LIST_HEAD(&buckets[i].written);
				INIT_LIST_HEAD(&buckets[i].committing);
				/* mark direct verifier as unset */
				buckets[i].direct_verf.committed =
					NFS_INVALID_STABLE_HOW;
			}
		}
		spin_unlock(cinfo->lock);
		return 0;
	}
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls;
	struct nfs4_pnfs_ds *ds;
	int idx;

	fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
	/* mirrors are sorted by efficiency */
	for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

	/* Use full layout for now */
	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_READ,
						   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
	if (!ds)
		goto out_mds;
	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds)
			goto out_mds;
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;

out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   GFP_NOFS);
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

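/*
 * Resend a failed write, either through pNFS (when retry_pnfs is set)
 * or through the MDS.
 */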
static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		if (!hdr->dreq) {
			struct nfs_open_context *ctx;

			ctx = nfs_list_entry(hdr->pages.next)->wb_context;
			set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
			hdr->completion_ops->error_cleanup(&hdr->pages);
		} else {
			nfs_direct_set_resched_writes(hdr->dreq);
			/* fake unstable write to let common nfs resend pages */
			hdr->verf.committed = NFS_UNSTABLE;
			hdr->good_bytes = hdr->args.count;
		}
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

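/*
 * Handle an NFSv4 error from a DS: trigger state or session recovery
 * where needed, and tell the caller whether to retry through pNFS
 * (-NFS4ERR_RESET_TO_PNFS), fall back to the MDS (-NFS4ERR_RESET_TO_MDS),
 * or simply retry the RPC (-EAGAIN).
 */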
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);

	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (state == NULL)
			break;
		nfs_remove_bad_delegation(state->inode);
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_has_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
out:
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	goto out;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

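/*
 * Record a DS I/O error against the layout so that it can be reported
 * back to the server, and mark the layout for return. Local errnos are
 * first mapped to an equivalent NFS4ERR status.
 */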
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */

static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
			&hdr->lseg->pls_layout->plh_flags);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	return 0;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * The flexfiles client should treat a FILE_SYNC reply from the DS as
 * DATA_SYNC, so to follow
 * http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send a layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	if (!ff_layout_need_layoutcommit(hdr->lseg))
		return;

	pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
			hdr->mds_offset + hdr->res.count);
	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
		(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}

static bool
ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
		if (ff_layout_has_available_ds(hdr->lseg))
			pnfs_read_resend_pnfs(hdr);
		else
			ff_layout_reset_read(hdr);
		rpc_exit(task, 0);
		return -EAGAIN;
	}
	hdr->pgio_done_cb = ff_layout_read_done_cb;

	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

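/*
 * Set up the session sequence for the DS RPC: NFSv4.1+ DSes use the
 * session slot table, NFSv4.0 DSes the per-client slot table.
 */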
static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
				    struct nfs4_sequence_args *args,
				    struct nfs4_sequence_res *res,
				    struct rpc_task *task)
{
	if (ds_clp->cl_session)
		return nfs41_setup_sequence(ds_clp->cl_session,
					   args,
					   res,
					   task);
	return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
				   args,
				   res,
				   task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static int ff_layout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
		ff_layout_reset_write(hdr, true);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
		ff_layout_reset_write(hdr, false);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		ff_layout_set_layoutcommit(hdr);

	/* zero out fattr since we don't care about the DS attrs at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}

static int ff_layout_commit_done_cb(struct rpc_task *task,
				     struct nfs_commit_data *data)
{
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    data->res.op_status, OP_COMMIT,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		pnfs_set_retry_layoutget(data->lseg->pls_layout);
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_clear_retry_layoutget(data->lseg->pls_layout);
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	if (data->verf.committed == NFS_UNSTABLE
	    && ff_layout_need_layoutcommit(data->lseg))
		pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
		bool retry_pnfs;

		retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
		dprintk("%s task %u reset io to %s\n", __func__,
			task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
		ff_layout_reset_write(hdr, retry_pnfs);
		rpc_exit(task, 0);
		return -EAGAIN;
	}

	return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_setup_sequence(hdr->ds_clp,
				     &hdr->args.seq_args,
				     &hdr->res.seq_res,
				     task))
		return;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (ff_layout_setup_sequence(wdata->ds_clp,
				 &wdata->args.seq_args,
				 &wdata->res.seq_res,
				 task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;
	struct nfs_page *req;
	__u64 count = 0;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}

	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);

	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};

static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (IS_ERR(ds_cred))
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);

	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;
	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);

	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_has_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	return PNFS_NOT_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
	if (IS_ERR(ds_cred))
		return PNFS_NOT_ATTEMPTED;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
	if (fh)
		hdr->args.fh = fh;

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	return PNFS_ATTEMPTED;
}

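/* With exactly one DS per mirror, commit bucket indices map 1:1 to mirrors */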
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assumes that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct rpc_cred *ds_cred;
	u32 idx;
	int vers;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
	if (IS_ERR(ds_cred))
		goto out_err;

	vers = nfs4_ff_layout_ds_version(lseg, idx);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
					       &ff_layout_commit_call_ops_v4,
				   how, RPC_TASK_SOFTCONN);
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			   int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
				  struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args)
{
	struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
	__be32 *start;
	int count = 0, ret = 0;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	/* This assumes we always return _ALL_ layouts */
	spin_lock(&hdr->plh_inode->i_lock);
	ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
	spin_unlock(&hdr->plh_inode->i_lock);

	*start = cpu_to_be32(count);

	return ret;
}

/* report nothing for now */
static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
				     struct xdr_stream *xdr,
				     const struct nfs4_layoutreturn_args *args)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	if (likely(p))
		*p = cpu_to_be32(0);
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

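/*
 * Encode the opaque lrf_body for LAYOUTRETURN: reserve the length word,
 * encode the DS I/O errors and the (currently empty) iostats, then
 * backfill the total length.
 */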
1830 static void
1831 ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
1832 			      struct xdr_stream *xdr,
1833 			      const struct nfs4_layoutreturn_args *args)
1834 {
1835 	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
1836 	__be32 *start;
1837 
1838 	dprintk("%s: Begin\n", __func__);
1839 	start = xdr_reserve_space(xdr, 4);
1840 	BUG_ON(!start);
1841 
1842 	if (ff_layout_encode_ioerr(flo, xdr, args))
1843 		goto out;
1844 
1845 	ff_layout_encode_iostats(flo, xdr, args);
1846 out:
1847 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
1848 	dprintk("%s: Return\n", __func__);
1849 }
1850 
static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const size_t buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
					&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
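/*
 * The universal address appends the port, split into its high and low
 * bytes, to the presentation address. For example (hypothetical
 * values), address 192.0.2.53 with port 2049 (0x0801) encodes as
 * "192.0.2.53.8.1".
 */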
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

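/*
 * Encode an nfstime4: a 64-bit seconds field followed by a 32-bit
 * nanoseconds field, 12 XDR bytes in total.
 */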
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

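/*
 * Encode one I/O latency block: five 64-bit counters reserved as a
 * single 40-byte run, followed by two nfstime4 fields for total busy
 * time and aggregate completion time.
 */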
static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

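/*
 * Encode the per-mirror layoutstats body: the DS netaddr and
 * filehandle, the read and write latency blocks (sampled under the
 * mirror lock so the two stay consistent with each other), the layout
 * age, and a trailing boolean that is always false here. As in the
 * layoutreturn path, the opaque length is reserved first and
 * backfilled last.
 */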
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr,
			     struct nfs42_layoutstat_args *args,
			     struct nfs42_layoutstat_devinfo *devinfo)
{
	struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p, *start;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

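/*
 * Fill in up to dev_limit devinfo entries from the layout's mirror
 * list. A reference is taken on each mirror reported and dropped in
 * ff_layout_cleanup_layoutstats(). The caller must hold the inode
 * spinlock.
 */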
static int
ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
			       struct pnfs_layout_hdr *lo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	struct nfs42_layoutstat_devinfo *devinfo;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (!mirror->mirror_ds)
			continue;
		/* mirror refcount put in cleanup_layoutstats */
		if (!atomic_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		devinfo = &args->devinfo[i];
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
		devinfo->layout_private = mirror;

		i++;
	}
	return i;
}

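/*
 * Count the live mirrors, allocate a devinfo array for at most
 * PNFS_LAYOUTSTATS_MAXDEV of them, and fill it in. The mirror list can
 * change between the two locked sections, so the count is only a
 * sizing hint; ff_layout_mirror_prepare_stats() rechecks the limit.
 */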
static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	struct nfs4_ff_layout_mirror *mirror;
	int dev_count = 0;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (atomic_read(&mirror->ref) != 0)
			dev_count++;
	}
	spin_unlock(&args->inode->i_lock);
	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
		dprintk("%s: truncating devinfo to limit (%d:%d)\n",
			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
	}
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	args->num_dev = ff_layout_mirror_prepare_stats(args,
			&ff_layout->generic_hdr, dev_count);
	spin_unlock(&args->inode->i_lock);

	return 0;
}

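/*
 * Drop the mirror references taken in ff_layout_mirror_prepare_stats()
 * once the LAYOUTSTATS call has completed.
 */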
static void
ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
{
	struct nfs4_ff_layout_mirror *mirror;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		mirror = data->args.devinfo[i].layout_private;
		data->args.devinfo[i].layout_private = NULL;
		ff_layout_put_mirror(mirror);
	}
}

static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
	.encode_layoutreturn    = ff_layout_encode_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
	.cleanup_layoutstats	= ff_layout_cleanup_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

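/* pNFS layout type 4 is the flexfile layout (LAYOUT_FLEX_FILES) */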
MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);