1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Module for pnfs flexfile layout driver.
4  *
5  * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6  *
7  * Tao Peng <bergwolf@primarydata.com>
8  */
9 
10 #include <linux/nfs_fs.h>
11 #include <linux/nfs_mount.h>
12 #include <linux/nfs_page.h>
13 #include <linux/module.h>
14 #include <linux/sched/mm.h>
15 
16 #include <linux/sunrpc/metrics.h>
17 
18 #include "flexfilelayout.h"
19 #include "../nfs4session.h"
20 #include "../nfs4idmap.h"
21 #include "../internal.h"
22 #include "../delegation.h"
23 #include "../nfs4trace.h"
24 #include "../iostat.h"
25 #include "../nfs.h"
26 #include "../nfs42.h"
27 
28 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
29 
30 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
31 #define FF_LAYOUTRETURN_MAXERR 20
32 
33 enum nfs4_ff_op_type {
34 	NFS4_FF_OP_LAYOUTSTATS,
35 	NFS4_FF_OP_LAYOUTRETURN,
36 };
37 
38 static unsigned short io_maxretrans;
39 
40 static const struct pnfs_commit_ops ff_layout_commit_ops;
41 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
42 		struct nfs_pgio_header *hdr);
43 static int
44 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
45 			       struct nfs42_layoutstat_devinfo *devinfo,
46 			       int dev_limit, enum nfs4_ff_op_type type);
47 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
48 			      const struct nfs42_layoutstat_devinfo *devinfo,
49 			      struct nfs4_ff_layout_mirror *mirror);
50 
51 static struct pnfs_layout_hdr *
52 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
53 {
54 	struct nfs4_flexfile_layout *ffl;
55 
56 	ffl = kzalloc(sizeof(*ffl), gfp_flags);
57 	if (ffl) {
58 		pnfs_init_ds_commit_info(&ffl->commit_info);
59 		INIT_LIST_HEAD(&ffl->error_list);
60 		INIT_LIST_HEAD(&ffl->mirrors);
61 		ffl->last_report_time = ktime_get();
62 		ffl->commit_info.ops = &ff_layout_commit_ops;
63 		return &ffl->generic_hdr;
64 	} else
65 		return NULL;
66 }
67 
68 static void
69 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
70 {
71 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
72 	struct nfs4_ff_layout_ds_err *err, *n;
73 
74 	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
75 		list_del(&err->list);
76 		kfree(err);
77 	}
78 	kfree_rcu(ffl, generic_hdr.plh_rcu);
79 }
80 
81 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
82 {
83 	__be32 *p;
84 
85 	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
86 	if (unlikely(p == NULL))
87 		return -ENOBUFS;
88 	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
89 	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
90 	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
91 		p[0], p[1], p[2], p[3]);
92 	return 0;
93 }
94 
95 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
96 {
97 	__be32 *p;
98 
99 	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
100 	if (unlikely(!p))
101 		return -ENOBUFS;
102 	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
103 	nfs4_print_deviceid(devid);
104 	return 0;
105 }
106 
107 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
108 {
109 	__be32 *p;
110 
111 	p = xdr_inline_decode(xdr, 4);
112 	if (unlikely(!p))
113 		return -ENOBUFS;
114 	fh->size = be32_to_cpup(p++);
115 	if (fh->size > NFS_MAXFHSIZE) {
116 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
117 		       fh->size);
118 		return -EOVERFLOW;
119 	}
120 	/* fh.data */
121 	p = xdr_inline_decode(xdr, fh->size);
122 	if (unlikely(!p))
123 		return -ENOBUFS;
124 	memcpy(&fh->data, p, fh->size);
125 	dprintk("%s: fh len %d\n", __func__, fh->size);
126 
127 	return 0;
128 }
129 
130 /*
131  * Currently only stringified uids and gids are accepted.
132  * I.e., Kerberos is not supported to the DSes, so no principals.
133  *
134  * That means that one common function will suffice, but when
135  * principals are added, this should be split to accommodate
136  * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
137  */
138 static int
139 decode_name(struct xdr_stream *xdr, u32 *id)
140 {
141 	__be32 *p;
142 	int len;
143 
144 	/* opaque_length(4)*/
145 	p = xdr_inline_decode(xdr, 4);
146 	if (unlikely(!p))
147 		return -ENOBUFS;
148 	len = be32_to_cpup(p++);
149 	if (len < 0)
150 		return -EINVAL;
151 
152 	dprintk("%s: len %u\n", __func__, len);
153 
154 	/* opaque body */
155 	p = xdr_inline_decode(xdr, len);
156 	if (unlikely(!p))
157 		return -ENOBUFS;
158 
159 	if (!nfs_map_string_to_numeric((char *)p, len, id))
160 		return -EINVAL;
161 
162 	return 0;
163 }
164 
165 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
166 		const struct nfs4_ff_layout_mirror *m2)
167 {
168 	int i, j;
169 
170 	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
171 		return false;
172 	for (i = 0; i < m1->fh_versions_cnt; i++) {
173 		bool found_fh = false;
174 		for (j = 0; j < m2->fh_versions_cnt; j++) {
175 			if (nfs_compare_fh(&m1->fh_versions[i],
176 					&m2->fh_versions[j]) == 0) {
177 				found_fh = true;
178 				break;
179 			}
180 		}
181 		if (!found_fh)
182 			return false;
183 	}
184 	return true;
185 }
186 
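/*
 * Add @mirror to the layout's mirror list, or return an existing entry
 * that has the same deviceid and filehandle set. The inode's i_lock
 * serialises the lookup against ff_layout_remove_mirror().
 */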
187 static struct nfs4_ff_layout_mirror *
188 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
189 		struct nfs4_ff_layout_mirror *mirror)
190 {
191 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
192 	struct nfs4_ff_layout_mirror *pos;
193 	struct inode *inode = lo->plh_inode;
194 
195 	spin_lock(&inode->i_lock);
196 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
197 		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
198 			continue;
199 		if (!ff_mirror_match_fh(mirror, pos))
200 			continue;
201 		if (refcount_inc_not_zero(&pos->ref)) {
202 			spin_unlock(&inode->i_lock);
203 			return pos;
204 		}
205 	}
206 	list_add(&mirror->mirrors, &ff_layout->mirrors);
207 	mirror->layout = lo;
208 	spin_unlock(&inode->i_lock);
209 	return mirror;
210 }
211 
212 static void
213 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
214 {
215 	struct inode *inode;
216 	if (mirror->layout == NULL)
217 		return;
218 	inode = mirror->layout->plh_inode;
219 	spin_lock(&inode->i_lock);
220 	list_del(&mirror->mirrors);
221 	spin_unlock(&inode->i_lock);
222 	mirror->layout = NULL;
223 }
224 
225 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
226 {
227 	struct nfs4_ff_layout_mirror *mirror;
228 
229 	mirror = kzalloc(sizeof(*mirror), gfp_flags);
230 	if (mirror != NULL) {
231 		spin_lock_init(&mirror->lock);
232 		refcount_set(&mirror->ref, 1);
233 		INIT_LIST_HEAD(&mirror->mirrors);
234 	}
235 	return mirror;
236 }
237 
238 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
239 {
240 	const struct cred	*cred;
241 
242 	ff_layout_remove_mirror(mirror);
243 	kfree(mirror->fh_versions);
244 	cred = rcu_access_pointer(mirror->ro_cred);
245 	put_cred(cred);
246 	cred = rcu_access_pointer(mirror->rw_cred);
247 	put_cred(cred);
248 	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
249 	kfree(mirror);
250 }
251 
252 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
253 {
254 	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
255 		ff_layout_free_mirror(mirror);
256 }
257 
258 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
259 {
260 	u32 i;
261 
262 	for (i = 0; i < fls->mirror_array_cnt; i++)
263 		ff_layout_put_mirror(fls->mirror_array[i]);
264 }
265 
266 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
267 {
268 	if (fls) {
269 		ff_layout_free_mirror_array(fls);
270 		kfree(fls);
271 	}
272 }
273 
274 static bool
275 ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
276 		struct pnfs_layout_segment *l2)
277 {
278 	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
279 	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
280 	u32 i;
281 
282 	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
283 		return false;
284 	for (i = 0; i < fl1->mirror_array_cnt; i++) {
285 		if (fl1->mirror_array[i] != fl2->mirror_array[i])
286 			return false;
287 	}
288 	return true;
289 }
290 
291 static bool
292 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
293 		const struct pnfs_layout_range *l2)
294 {
295 	u64 end1, end2;
296 
297 	if (l1->iomode != l2->iomode)
298 		return l1->iomode != IOMODE_READ;
299 	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
300 	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
301 	if (end1 < l2->offset)
302 		return false;
303 	if (end2 < l1->offset)
304 		return true;
305 	return l2->offset <= l1->offset;
306 }
307 
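/*
 * Decide whether @old can be merged into @new: both must use the same
 * iomode, reference the same mirrors and cover contiguous or overlapping
 * byte ranges. Returns true when @new subsumes @old, letting the caller
 * free @old.
 */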
308 static bool
309 ff_lseg_merge(struct pnfs_layout_segment *new,
310 		struct pnfs_layout_segment *old)
311 {
312 	u64 new_end, old_end;
313 
314 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
315 		return false;
316 	if (new->pls_range.iomode != old->pls_range.iomode)
317 		return false;
318 	old_end = pnfs_calc_offset_end(old->pls_range.offset,
319 			old->pls_range.length);
320 	if (old_end < new->pls_range.offset)
321 		return false;
322 	new_end = pnfs_calc_offset_end(new->pls_range.offset,
323 			new->pls_range.length);
324 	if (new_end < old->pls_range.offset)
325 		return false;
326 	if (!ff_lseg_match_mirrors(new, old))
327 		return false;
328 
329 	/* Mergeable: copy info from 'old' to 'new' */
330 	if (new_end < old_end)
331 		new_end = old_end;
332 	if (new->pls_range.offset < old->pls_range.offset)
333 		new->pls_range.offset = old->pls_range.offset;
334 	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
335 			new_end);
336 	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
337 		set_bit(NFS_LSEG_ROC, &new->pls_flags);
338 	return true;
339 }
340 
341 static void
342 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
343 		struct pnfs_layout_segment *lseg,
344 		struct list_head *free_me)
345 {
346 	pnfs_generic_layout_insert_lseg(lo, lseg,
347 			ff_lseg_range_is_after,
348 			ff_lseg_merge,
349 			free_me);
350 }
351 
352 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
353 {
354 	int i, j;
355 
356 	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
357 		for (j = i + 1; j < fls->mirror_array_cnt; j++)
358 			if (fls->mirror_array[i]->efficiency <
359 			    fls->mirror_array[j]->efficiency)
360 				swap(fls->mirror_array[i],
361 				     fls->mirror_array[j]);
362 	}
363 }
364 
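/*
 * Decode an ff_layout4 body (the flex files layout, see RFC 8435) into a
 * new layout segment. A rough sketch of the wire format decoded below:
 *
 *	stripe unit (8 bytes), mirror count (4)
 *	per mirror:	ds count (4), deviceid, efficiency (4), stateid,
 *			fh count (4) + fh list, user string, group string
 *	optionally:	flags (4), stats collection interval hint (4)
 */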
365 static struct pnfs_layout_segment *
366 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
367 		     struct nfs4_layoutget_res *lgr,
368 		     gfp_t gfp_flags)
369 {
370 	struct pnfs_layout_segment *ret;
371 	struct nfs4_ff_layout_segment *fls = NULL;
372 	struct xdr_stream stream;
373 	struct xdr_buf buf;
374 	struct page *scratch;
375 	u64 stripe_unit;
376 	u32 mirror_array_cnt;
377 	__be32 *p;
378 	int i, rc;
379 
380 	dprintk("--> %s\n", __func__);
381 	scratch = alloc_page(gfp_flags);
382 	if (!scratch)
383 		return ERR_PTR(-ENOMEM);
384 
385 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
386 			      lgr->layoutp->len);
387 	xdr_set_scratch_page(&stream, scratch);
388 
389 	/* stripe unit and mirror_array_cnt */
390 	rc = -EIO;
391 	p = xdr_inline_decode(&stream, 8 + 4);
392 	if (!p)
393 		goto out_err_free;
394 
395 	p = xdr_decode_hyper(p, &stripe_unit);
396 	mirror_array_cnt = be32_to_cpup(p++);
397 	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
398 		stripe_unit, mirror_array_cnt);
399 
400 	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
401 	    mirror_array_cnt == 0)
402 		goto out_err_free;
403 
404 	rc = -ENOMEM;
405 	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
406 			gfp_flags);
407 	if (!fls)
408 		goto out_err_free;
409 
410 	fls->mirror_array_cnt = mirror_array_cnt;
411 	fls->stripe_unit = stripe_unit;
412 
413 	for (i = 0; i < fls->mirror_array_cnt; i++) {
414 		struct nfs4_ff_layout_mirror *mirror;
415 		struct cred *kcred;
416 		const struct cred __rcu *cred;
417 		kuid_t uid;
418 		kgid_t gid;
419 		u32 ds_count, fh_count, id;
420 		int j;
421 
422 		rc = -EIO;
423 		p = xdr_inline_decode(&stream, 4);
424 		if (!p)
425 			goto out_err_free;
426 		ds_count = be32_to_cpup(p);
427 
428 		/* FIXME: allow for striping? */
429 		if (ds_count != 1)
430 			goto out_err_free;
431 
432 		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
433 		if (fls->mirror_array[i] == NULL) {
434 			rc = -ENOMEM;
435 			goto out_err_free;
436 		}
437 
438 		fls->mirror_array[i]->ds_count = ds_count;
439 
440 		/* deviceid */
441 		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
442 		if (rc)
443 			goto out_err_free;
444 
445 		/* efficiency */
446 		rc = -EIO;
447 		p = xdr_inline_decode(&stream, 4);
448 		if (!p)
449 			goto out_err_free;
450 		fls->mirror_array[i]->efficiency = be32_to_cpup(p);
451 
452 		/* stateid */
453 		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
454 		if (rc)
455 			goto out_err_free;
456 
457 		/* fh */
458 		rc = -EIO;
459 		p = xdr_inline_decode(&stream, 4);
460 		if (!p)
461 			goto out_err_free;
462 		fh_count = be32_to_cpup(p);
463 
464 		fls->mirror_array[i]->fh_versions =
465 			kcalloc(fh_count, sizeof(struct nfs_fh),
466 				gfp_flags);
467 		if (fls->mirror_array[i]->fh_versions == NULL) {
468 			rc = -ENOMEM;
469 			goto out_err_free;
470 		}
471 
472 		for (j = 0; j < fh_count; j++) {
473 			rc = decode_nfs_fh(&stream,
474 					   &fls->mirror_array[i]->fh_versions[j]);
475 			if (rc)
476 				goto out_err_free;
477 		}
478 
479 		fls->mirror_array[i]->fh_versions_cnt = fh_count;
480 
481 		/* user */
482 		rc = decode_name(&stream, &id);
483 		if (rc)
484 			goto out_err_free;
485 
486 		uid = make_kuid(&init_user_ns, id);
487 
488 		/* group */
489 		rc = decode_name(&stream, &id);
490 		if (rc)
491 			goto out_err_free;
492 
493 		gid = make_kgid(&init_user_ns, id);
494 
495 		if (gfp_flags & __GFP_FS)
496 			kcred = prepare_kernel_cred(&init_task);
497 		else {
498 			unsigned int nofs_flags = memalloc_nofs_save();
499 			kcred = prepare_kernel_cred(&init_task);
500 			memalloc_nofs_restore(nofs_flags);
501 		}
502 		rc = -ENOMEM;
503 		if (!kcred)
504 			goto out_err_free;
505 		kcred->fsuid = uid;
506 		kcred->fsgid = gid;
507 		cred = RCU_INITIALIZER(kcred);
508 
509 		if (lgr->range.iomode == IOMODE_READ)
510 			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
511 		else
512 			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
513 
514 		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
515 		if (mirror != fls->mirror_array[i]) {
516 			/* swap cred ptrs so free_mirror will clean up old */
517 			if (lgr->range.iomode == IOMODE_READ) {
518 				cred = xchg(&mirror->ro_cred, cred);
519 				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
520 			} else {
521 				cred = xchg(&mirror->rw_cred, cred);
522 				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
523 			}
524 			ff_layout_free_mirror(fls->mirror_array[i]);
525 			fls->mirror_array[i] = mirror;
526 		}
527 
528 		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
529 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
530 			from_kuid(&init_user_ns, uid),
531 			from_kgid(&init_user_ns, gid));
532 	}
533 
534 	p = xdr_inline_decode(&stream, 4);
535 	if (!p)
536 		goto out_sort_mirrors;
537 	fls->flags = be32_to_cpup(p);
538 
539 	p = xdr_inline_decode(&stream, 4);
540 	if (!p)
541 		goto out_sort_mirrors;
542 	for (i = 0; i < fls->mirror_array_cnt; i++)
543 		fls->mirror_array[i]->report_interval = be32_to_cpup(p);
544 
545 out_sort_mirrors:
546 	ff_layout_sort_mirrors(fls);
547 	ret = &fls->generic_hdr;
548 	dprintk("<-- %s (success)\n", __func__);
549 out_free_page:
550 	__free_page(scratch);
551 	return ret;
552 out_err_free:
553 	_ff_layout_free_lseg(fls);
554 	ret = ERR_PTR(rc);
555 	dprintk("<-- %s (%d)\n", __func__, rc);
556 	goto out_free_page;
557 }
558 
559 static void
560 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
561 {
562 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
563 
564 	dprintk("--> %s\n", __func__);
565 
566 	if (lseg->pls_range.iomode == IOMODE_RW) {
567 		struct nfs4_flexfile_layout *ffl;
568 		struct inode *inode;
569 
570 		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
571 		inode = ffl->generic_hdr.plh_inode;
572 		spin_lock(&inode->i_lock);
573 		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
574 		spin_unlock(&inode->i_lock);
575 	}
576 	_ff_layout_free_lseg(fls);
577 }
578 
579 static void
580 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
581 {
582 	/* first IO request? */
583 	if (atomic_inc_return(&timer->n_ops) == 1) {
584 		timer->start_time = now;
585 	}
586 }
587 
588 static ktime_t
589 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
590 {
591 	ktime_t start;
592 
593 	if (atomic_dec_return(&timer->n_ops) < 0)
594 		WARN_ON_ONCE(1);
595 
596 	start = timer->start_time;
597 	timer->start_time = now;
598 	return ktime_sub(now, start);
599 }
600 
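/*
 * Start the per-mirror busy timer for a new I/O and decide whether the
 * layoutstats reporting interval has expired, in which case the caller
 * should schedule a LAYOUTSTATS report.
 */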
601 static bool
602 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
603 			    struct nfs4_ff_layoutstat *layoutstat,
604 			    ktime_t now)
605 {
606 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
607 	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
608 
609 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
610 	if (!mirror->start_time)
611 		mirror->start_time = now;
612 	if (mirror->report_interval != 0)
613 		report_interval = (s64)mirror->report_interval * 1000LL;
614 	else if (layoutstats_timer != 0)
615 		report_interval = (s64)layoutstats_timer * 1000LL;
616 	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
617 			report_interval) {
618 		ffl->last_report_time = now;
619 		return true;
620 	}
621 
622 	return false;
623 }
624 
625 static void
626 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
627 		__u64 requested)
628 {
629 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
630 
631 	iostat->ops_requested++;
632 	iostat->bytes_requested += requested;
633 }
634 
635 static void
636 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
637 		__u64 requested,
638 		__u64 completed,
639 		ktime_t time_completed,
640 		ktime_t time_started)
641 {
642 	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
643 	ktime_t completion_time = ktime_sub(time_completed, time_started);
644 	ktime_t timer;
645 
646 	iostat->ops_completed++;
647 	iostat->bytes_completed += completed;
648 	iostat->bytes_not_delivered += requested - completed;
649 
650 	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
651 	iostat->total_busy_time =
652 			ktime_add(iostat->total_busy_time, timer);
653 	iostat->aggregate_completion_time =
654 			ktime_add(iostat->aggregate_completion_time,
655 					completion_time);
656 }
657 
658 static void
659 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
660 		struct nfs4_ff_layout_mirror *mirror,
661 		__u64 requested, ktime_t now)
662 {
663 	bool report;
664 
665 	spin_lock(&mirror->lock);
666 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
667 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
668 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
669 	spin_unlock(&mirror->lock);
670 
671 	if (report)
672 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
673 }
674 
675 static void
676 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
677 		struct nfs4_ff_layout_mirror *mirror,
678 		__u64 requested,
679 		__u64 completed)
680 {
681 	spin_lock(&mirror->lock);
682 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
683 			requested, completed,
684 			ktime_get(), task->tk_start);
685 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
686 	spin_unlock(&mirror->lock);
687 }
688 
689 static void
690 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
691 		struct nfs4_ff_layout_mirror *mirror,
692 		__u64 requested, ktime_t now)
693 {
694 	bool report;
695 
696 	spin_lock(&mirror->lock);
697 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
698 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
699 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
700 	spin_unlock(&mirror->lock);
701 
702 	if (report)
703 		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
704 }
705 
706 static void
707 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
708 		struct nfs4_ff_layout_mirror *mirror,
709 		__u64 requested,
710 		__u64 completed,
711 		enum nfs3_stable_how committed)
712 {
713 	if (committed == NFS_UNSTABLE)
714 		requested = completed = 0;
715 
716 	spin_lock(&mirror->lock);
717 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
718 			requested, completed, ktime_get(), task->tk_start);
719 	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
720 	spin_unlock(&mirror->lock);
721 }
722 
723 static void
724 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
725 {
726 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
727 
728 	if (devid)
729 		nfs4_mark_deviceid_unavailable(devid);
730 }
731 
732 static void
733 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
734 {
735 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
736 
737 	if (devid)
738 		nfs4_mark_deviceid_available(devid);
739 }
740 
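/*
 * Scan the mirror array from @start_idx (mirrors are sorted by efficiency)
 * and return the first data server we can set up a connection to,
 * optionally skipping devices that are already marked unavailable.
 */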
741 static struct nfs4_pnfs_ds *
742 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
743 			     u32 start_idx, u32 *best_idx,
744 			     bool check_device)
745 {
746 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
747 	struct nfs4_ff_layout_mirror *mirror;
748 	struct nfs4_pnfs_ds *ds;
749 	u32 idx;
750 
751 	/* mirrors are initially sorted by efficiency */
752 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
753 		mirror = FF_LAYOUT_COMP(lseg, idx);
754 		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
755 		if (!ds)
756 			continue;
757 
758 		if (check_device &&
759 		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
760 			continue;
761 
762 		*best_idx = idx;
763 		return ds;
764 	}
765 
766 	return NULL;
767 }
768 
769 static struct nfs4_pnfs_ds *
770 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
771 				 u32 start_idx, u32 *best_idx)
772 {
773 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
774 }
775 
776 static struct nfs4_pnfs_ds *
777 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
778 				   u32 start_idx, u32 *best_idx)
779 {
780 	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
781 }
782 
783 static struct nfs4_pnfs_ds *
784 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
785 				  u32 start_idx, u32 *best_idx)
786 {
787 	struct nfs4_pnfs_ds *ds;
788 
789 	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
790 	if (ds)
791 		return ds;
792 	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
793 }
794 
795 static struct nfs4_pnfs_ds *
796 ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
797 			  u32 *best_idx)
798 {
799 	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
800 	struct nfs4_pnfs_ds *ds;
801 
802 	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
803 					       best_idx);
804 	if (ds || !pgio->pg_mirror_idx)
805 		return ds;
806 	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
807 }
808 
809 static void
810 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
811 		      struct nfs_page *req,
812 		      bool strict_iomode)
813 {
814 	pnfs_put_lseg(pgio->pg_lseg);
815 	pgio->pg_lseg =
816 		pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
817 				   req_offset(req), req->wb_bytes, IOMODE_READ,
818 				   strict_iomode, nfs_io_gfp_mask());
819 	if (IS_ERR(pgio->pg_lseg)) {
820 		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
821 		pgio->pg_lseg = NULL;
822 	}
823 }
824 
825 static void
826 ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
827 			  struct nfs_page *req)
828 {
829 	pnfs_generic_pg_check_layout(pgio);
830 	pnfs_generic_pg_check_range(pgio, req);
831 }
832 
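/*
 * Set up the pageio descriptor for a read: obtain a layout segment, pick a
 * usable data server and point the descriptor at that mirror. Falls back
 * to the MDS, or sleeps and retries, when no data server can be reached.
 */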
833 static void
834 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
835 			struct nfs_page *req)
836 {
837 	struct nfs_pgio_mirror *pgm;
838 	struct nfs4_ff_layout_mirror *mirror;
839 	struct nfs4_pnfs_ds *ds;
840 	u32 ds_idx;
841 
842 	if (NFS_SERVER(pgio->pg_inode)->flags &
843 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
844 		pgio->pg_maxretrans = io_maxretrans;
845 retry:
846 	ff_layout_pg_check_layout(pgio, req);
847 	/* Use full layout for now */
848 	if (!pgio->pg_lseg) {
849 		ff_layout_pg_get_read(pgio, req, false);
850 		if (!pgio->pg_lseg)
851 			goto out_nolseg;
852 	}
853 	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
854 		ff_layout_pg_get_read(pgio, req, true);
855 		if (!pgio->pg_lseg)
856 			goto out_nolseg;
857 	}
858 	/* Reset wb_nio, since getting layout segment was successful */
859 	req->wb_nio = 0;
860 
861 	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
862 	if (!ds) {
863 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
864 			goto out_mds;
865 		pnfs_generic_pg_cleanup(pgio);
866 		/* Sleep for 1 second before retrying */
867 		ssleep(1);
868 		goto retry;
869 	}
870 
871 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
872 	pgm = &pgio->pg_mirrors[0];
873 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
874 
875 	pgio->pg_mirror_idx = ds_idx;
876 	return;
877 out_nolseg:
878 	if (pgio->pg_error < 0) {
879 		if (pgio->pg_error != -EAGAIN)
880 			return;
881 		/* Retry getting layout segment if lower layer returned -EAGAIN */
882 		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
883 			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
884 				pgio->pg_error = -ETIMEDOUT;
885 			else
886 				pgio->pg_error = -EIO;
887 			return;
888 		}
889 		pgio->pg_error = 0;
890 		/* Sleep for 1 second before retrying */
891 		ssleep(1);
892 		goto retry;
893 	}
894 out_mds:
895 	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
896 			0, NFS4_MAX_UINT64, IOMODE_READ,
897 			NFS_I(pgio->pg_inode)->layout,
898 			pgio->pg_lseg);
899 	pgio->pg_maxretrans = 0;
900 	nfs_pageio_reset_read_mds(pgio);
901 }
902 
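/*
 * Set up the pageio descriptor for a write: every mirror in the layout
 * segment must have a usable data server, since writes are sent to all
 * mirrors. If a data server cannot be reached, fall back to the MDS or
 * retry after a short delay.
 */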
903 static void
904 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
905 			struct nfs_page *req)
906 {
907 	struct nfs4_ff_layout_mirror *mirror;
908 	struct nfs_pgio_mirror *pgm;
909 	struct nfs4_pnfs_ds *ds;
910 	u32 i;
911 
912 retry:
913 	ff_layout_pg_check_layout(pgio, req);
914 	if (!pgio->pg_lseg) {
915 		pgio->pg_lseg =
916 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
917 					   req_offset(req), req->wb_bytes,
918 					   IOMODE_RW, false, nfs_io_gfp_mask());
919 		if (IS_ERR(pgio->pg_lseg)) {
920 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
921 			pgio->pg_lseg = NULL;
922 			return;
923 		}
924 	}
925 	/* If no lseg, fall back to write through mds */
926 	if (pgio->pg_lseg == NULL)
927 		goto out_mds;
928 
929 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
930 	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
931 		goto out_eagain;
932 
933 	for (i = 0; i < pgio->pg_mirror_count; i++) {
934 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
935 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
936 		if (!ds) {
937 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
938 				goto out_mds;
939 			pnfs_generic_pg_cleanup(pgio);
940 			/* Sleep for 1 second before retrying */
941 			ssleep(1);
942 			goto retry;
943 		}
944 		pgm = &pgio->pg_mirrors[i];
945 		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
946 	}
947 
948 	if (NFS_SERVER(pgio->pg_inode)->flags &
949 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
950 		pgio->pg_maxretrans = io_maxretrans;
951 	return;
952 out_eagain:
953 	pnfs_generic_pg_cleanup(pgio);
954 	pgio->pg_error = -EAGAIN;
955 	return;
956 out_mds:
957 	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
958 			0, NFS4_MAX_UINT64, IOMODE_RW,
959 			NFS_I(pgio->pg_inode)->layout,
960 			pgio->pg_lseg);
961 	pgio->pg_maxretrans = 0;
962 	nfs_pageio_reset_write_mds(pgio);
963 	pgio->pg_error = -EAGAIN;
964 }
965 
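/*
 * Report how many pgio mirrors a write needs: one per mirror in the layout
 * segment, or a single (non-pNFS) mirror when no segment could be obtained
 * and the write falls back to the MDS.
 */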
966 static unsigned int
967 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
968 				    struct nfs_page *req)
969 {
970 	if (!pgio->pg_lseg) {
971 		pgio->pg_lseg =
972 			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
973 					   req_offset(req), req->wb_bytes,
974 					   IOMODE_RW, false, nfs_io_gfp_mask());
975 		if (IS_ERR(pgio->pg_lseg)) {
976 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
977 			pgio->pg_lseg = NULL;
978 			goto out;
979 		}
980 	}
981 	if (pgio->pg_lseg)
982 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
983 
984 	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
985 			0, NFS4_MAX_UINT64, IOMODE_RW,
986 			NFS_I(pgio->pg_inode)->layout,
987 			pgio->pg_lseg);
988 	/* no lseg means that pnfs is not in use, so no mirroring here */
989 	nfs_pageio_reset_write_mds(pgio);
990 out:
991 	return 1;
992 }
993 
994 static u32
995 ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
996 {
997 	u32 old = desc->pg_mirror_idx;
998 
999 	desc->pg_mirror_idx = idx;
1000 	return old;
1001 }
1002 
1003 static struct nfs_pgio_mirror *
1004 ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
1005 {
1006 	return &desc->pg_mirrors[idx];
1007 }
1008 
1009 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1010 	.pg_init = ff_layout_pg_init_read,
1011 	.pg_test = pnfs_generic_pg_test,
1012 	.pg_doio = pnfs_generic_pg_readpages,
1013 	.pg_cleanup = pnfs_generic_pg_cleanup,
1014 };
1015 
1016 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1017 	.pg_init = ff_layout_pg_init_write,
1018 	.pg_test = pnfs_generic_pg_test,
1019 	.pg_doio = pnfs_generic_pg_writepages,
1020 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1021 	.pg_cleanup = pnfs_generic_pg_cleanup,
1022 	.pg_get_mirror = ff_layout_pg_get_mirror_write,
1023 	.pg_set_mirror = ff_layout_pg_set_mirror_write,
1024 };
1025 
1026 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1027 {
1028 	struct rpc_task *task = &hdr->task;
1029 
1030 	pnfs_layoutcommit_inode(hdr->inode, false);
1031 
1032 	if (retry_pnfs) {
1033 		dprintk("%s Reset task %5u for i/o through pNFS "
1034 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1035 			hdr->task.tk_pid,
1036 			hdr->inode->i_sb->s_id,
1037 			(unsigned long long)NFS_FILEID(hdr->inode),
1038 			hdr->args.count,
1039 			(unsigned long long)hdr->args.offset);
1040 
1041 		hdr->completion_ops->reschedule_io(hdr);
1042 		return;
1043 	}
1044 
1045 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1046 		dprintk("%s Reset task %5u for i/o through MDS "
1047 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1048 			hdr->task.tk_pid,
1049 			hdr->inode->i_sb->s_id,
1050 			(unsigned long long)NFS_FILEID(hdr->inode),
1051 			hdr->args.count,
1052 			(unsigned long long)hdr->args.offset);
1053 
1054 		trace_pnfs_mds_fallback_write_done(hdr->inode,
1055 				hdr->args.offset, hdr->args.count,
1056 				IOMODE_RW, NFS_I(hdr->inode)->layout,
1057 				hdr->lseg);
1058 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1059 	}
1060 }
1061 
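/*
 * Resend a failed read through pNFS on the next usable mirror. If another
 * data server is available, report the failure via LAYOUTERROR; otherwise
 * mark the layout for return so a fresh layout can be fetched.
 */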
1062 static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1063 {
1064 	u32 idx = hdr->pgio_mirror_idx + 1;
1065 	u32 new_idx = 0;
1066 
1067 	if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
1068 		ff_layout_send_layouterror(hdr->lseg);
1069 	else
1070 		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1071 	pnfs_read_resend_pnfs(hdr, new_idx);
1072 }
1073 
1074 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1075 {
1076 	struct rpc_task *task = &hdr->task;
1077 
1078 	pnfs_layoutcommit_inode(hdr->inode, false);
1079 	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1080 
1081 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1082 		dprintk("%s Reset task %5u for i/o through MDS "
1083 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1084 			hdr->task.tk_pid,
1085 			hdr->inode->i_sb->s_id,
1086 			(unsigned long long)NFS_FILEID(hdr->inode),
1087 			hdr->args.count,
1088 			(unsigned long long)hdr->args.offset);
1089 
1090 		trace_pnfs_mds_fallback_read_done(hdr->inode,
1091 				hdr->args.offset, hdr->args.count,
1092 				IOMODE_READ, NFS_I(hdr->inode)->layout,
1093 				hdr->lseg);
1094 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1095 	}
1096 }
1097 
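/*
 * Map an NFSv4 error from a data server onto a recovery action:
 * -NFS4ERR_RESET_TO_PNFS to retry on another mirror, -NFS4ERR_RESET_TO_MDS
 * to resend through the MDS, or -EAGAIN to retry the RPC after any
 * necessary session or state recovery.
 */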
1098 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1099 					   u32 op_status,
1100 					   struct nfs4_state *state,
1101 					   struct nfs_client *clp,
1102 					   struct pnfs_layout_segment *lseg,
1103 					   u32 idx)
1104 {
1105 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
1106 	struct inode *inode = lo->plh_inode;
1107 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1108 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1109 
1110 	switch (op_status) {
1111 	case NFS4_OK:
1112 	case NFS4ERR_NXIO:
1113 		break;
1114 	case NFSERR_PERM:
1115 		if (!task->tk_xprt)
1116 			break;
1117 		xprt_force_disconnect(task->tk_xprt);
1118 		goto out_retry;
1119 	case NFS4ERR_BADSESSION:
1120 	case NFS4ERR_BADSLOT:
1121 	case NFS4ERR_BAD_HIGH_SLOT:
1122 	case NFS4ERR_DEADSESSION:
1123 	case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1124 	case NFS4ERR_SEQ_FALSE_RETRY:
1125 	case NFS4ERR_SEQ_MISORDERED:
1126 		dprintk("%s ERROR %d, Reset session. Exchangeid "
1127 			"flags 0x%x\n", __func__, task->tk_status,
1128 			clp->cl_exchange_flags);
1129 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1130 		goto out_retry;
1131 	case NFS4ERR_DELAY:
1132 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1133 		fallthrough;
1134 	case NFS4ERR_GRACE:
1135 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1136 		goto out_retry;
1137 	case NFS4ERR_RETRY_UNCACHED_REP:
1138 		goto out_retry;
1139 	/* Invalidate Layout errors */
1140 	case NFS4ERR_PNFS_NO_LAYOUT:
1141 	case NFS4ERR_STALE:
1142 	case NFS4ERR_BADHANDLE:
1143 	case NFS4ERR_ISDIR:
1144 	case NFS4ERR_FHEXPIRED:
1145 	case NFS4ERR_WRONG_TYPE:
1146 		dprintk("%s Invalid layout error %d\n", __func__,
1147 			task->tk_status);
1148 		/*
1149 		 * Destroy layout so new i/o will get a new layout.
1150 		 * Layout will not be destroyed until all current lseg
1151 		 * references are put. Mark layout as invalid to resend failed
1152 		 * i/o and all i/o waiting on the slot table to the MDS until
1153 		 * layout is destroyed and a new valid layout is obtained.
1154 		 */
1155 		pnfs_destroy_layout(NFS_I(inode));
1156 		rpc_wake_up(&tbl->slot_tbl_waitq);
1157 		goto reset;
1158 	default:
1159 		break;
1160 	}
1161 
1162 	switch (task->tk_status) {
1163 	/* RPC connection errors */
1164 	case -ECONNREFUSED:
1165 	case -EHOSTDOWN:
1166 	case -EHOSTUNREACH:
1167 	case -ENETUNREACH:
1168 	case -EIO:
1169 	case -ETIMEDOUT:
1170 	case -EPIPE:
1171 	case -EPROTO:
1172 	case -ENODEV:
1173 		dprintk("%s DS connection error %d\n", __func__,
1174 			task->tk_status);
1175 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1176 				&devid->deviceid);
1177 		rpc_wake_up(&tbl->slot_tbl_waitq);
1178 		break;
1179 	default:
1180 		break;
1181 	}
1182 
1183 	if (ff_layout_avoid_mds_available_ds(lseg))
1184 		return -NFS4ERR_RESET_TO_PNFS;
1185 reset:
1186 	dprintk("%s Retry through MDS. Error %d\n", __func__,
1187 		task->tk_status);
1188 	return -NFS4ERR_RESET_TO_MDS;
1189 
1190 out_retry:
1191 	task->tk_status = 0;
1192 	return -EAGAIN;
1193 }
1194 
1195 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1196 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1197 					   u32 op_status,
1198 					   struct nfs_client *clp,
1199 					   struct pnfs_layout_segment *lseg,
1200 					   u32 idx)
1201 {
1202 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1203 
1204 	switch (op_status) {
1205 	case NFS_OK:
1206 	case NFSERR_NXIO:
1207 		break;
1208 	case NFSERR_PERM:
1209 		if (!task->tk_xprt)
1210 			break;
1211 		xprt_force_disconnect(task->tk_xprt);
1212 		goto out_retry;
1213 	case NFSERR_ACCES:
1214 	case NFSERR_BADHANDLE:
1215 	case NFSERR_FBIG:
1216 	case NFSERR_IO:
1217 	case NFSERR_NOSPC:
1218 	case NFSERR_ROFS:
1219 	case NFSERR_STALE:
1220 		goto out_reset_to_pnfs;
1221 	case NFSERR_JUKEBOX:
1222 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1223 		goto out_retry;
1224 	default:
1225 		break;
1226 	}
1227 
1228 	switch (task->tk_status) {
1229 	/* File access problems. Don't mark the device as unavailable */
1230 	case -EACCES:
1231 	case -ESTALE:
1232 	case -EISDIR:
1233 	case -EBADHANDLE:
1234 	case -ELOOP:
1235 	case -ENOSPC:
1236 		break;
1237 	case -EJUKEBOX:
1238 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1239 		goto out_retry;
1240 	default:
1241 		dprintk("%s DS connection error %d\n", __func__,
1242 			task->tk_status);
1243 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1244 				&devid->deviceid);
1245 	}
1246 out_reset_to_pnfs:
1247 	/* FIXME: Need to prevent infinite looping here. */
1248 	return -NFS4ERR_RESET_TO_PNFS;
1249 out_retry:
1250 	task->tk_status = 0;
1251 	rpc_restart_call_prepare(task);
1252 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1253 	return -EAGAIN;
1254 }
1255 
1256 static int ff_layout_async_handle_error(struct rpc_task *task,
1257 					u32 op_status,
1258 					struct nfs4_state *state,
1259 					struct nfs_client *clp,
1260 					struct pnfs_layout_segment *lseg,
1261 					u32 idx)
1262 {
1263 	int vers = clp->cl_nfs_mod->rpc_vers->number;
1264 
1265 	if (task->tk_status >= 0) {
1266 		ff_layout_mark_ds_reachable(lseg, idx);
1267 		return 0;
1268 	}
1269 
1270 	/* Handle the case of an invalid layout segment */
1271 	if (!pnfs_is_valid_lseg(lseg))
1272 		return -NFS4ERR_RESET_TO_PNFS;
1273 
1274 	switch (vers) {
1275 	case 3:
1276 		return ff_layout_async_handle_error_v3(task, op_status, clp,
1277 						       lseg, idx);
1278 	case 4:
1279 		return ff_layout_async_handle_error_v4(task, op_status, state,
1280 						       clp, lseg, idx);
1281 	default:
1282 		/* should never happen */
1283 		WARN_ON_ONCE(1);
1284 		return 0;
1285 	}
1286 }
1287 
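/*
 * Record an I/O error against a mirror so it can be reported to the server
 * in a later LAYOUTRETURN. Local errnos are first mapped onto
 * NFS4ERR_NXIO/NFS4ERR_ACCESS; fatal errors also mark the device
 * unreachable and/or schedule the layout for return.
 */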
1288 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1289 					u32 idx, u64 offset, u64 length,
1290 					u32 *op_status, int opnum, int error)
1291 {
1292 	struct nfs4_ff_layout_mirror *mirror;
1293 	u32 status = *op_status;
1294 	int err;
1295 
1296 	if (status == 0) {
1297 		switch (error) {
1298 		case -ETIMEDOUT:
1299 		case -EPFNOSUPPORT:
1300 		case -EPROTONOSUPPORT:
1301 		case -EOPNOTSUPP:
1302 		case -EINVAL:
1303 		case -ECONNREFUSED:
1304 		case -ECONNRESET:
1305 		case -EHOSTDOWN:
1306 		case -EHOSTUNREACH:
1307 		case -ENETDOWN:
1308 		case -ENETUNREACH:
1309 		case -EADDRINUSE:
1310 		case -ENOBUFS:
1311 		case -EPIPE:
1312 		case -EPERM:
1313 		case -EPROTO:
1314 		case -ENODEV:
1315 			*op_status = status = NFS4ERR_NXIO;
1316 			break;
1317 		case -EACCES:
1318 			*op_status = status = NFS4ERR_ACCESS;
1319 			break;
1320 		default:
1321 			return;
1322 		}
1323 	}
1324 
1325 	mirror = FF_LAYOUT_COMP(lseg, idx);
1326 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1327 				       mirror, offset, length, status, opnum,
1328 				       nfs_io_gfp_mask());
1329 
1330 	switch (status) {
1331 	case NFS4ERR_DELAY:
1332 	case NFS4ERR_GRACE:
1333 	case NFS4ERR_PERM:
1334 		break;
1335 	case NFS4ERR_NXIO:
1336 		ff_layout_mark_ds_unreachable(lseg, idx);
1337 		/*
1338 		 * Don't return the layout if this is a read and we still
1339 		 * have layouts to try
1340 		 */
1341 		if (opnum == OP_READ)
1342 			break;
1343 		fallthrough;
1344 	default:
1345 		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1346 						  lseg);
1347 	}
1348 
1349 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1350 }
1351 
1352 /* NFS_PROTO call done callback routines */
1353 static int ff_layout_read_done_cb(struct rpc_task *task,
1354 				struct nfs_pgio_header *hdr)
1355 {
1356 	int err;
1357 
1358 	if (task->tk_status < 0) {
1359 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1360 					    hdr->args.offset, hdr->args.count,
1361 					    &hdr->res.op_status, OP_READ,
1362 					    task->tk_status);
1363 		trace_ff_layout_read_error(hdr);
1364 	}
1365 
1366 	err = ff_layout_async_handle_error(task, hdr->res.op_status,
1367 					   hdr->args.context->state,
1368 					   hdr->ds_clp, hdr->lseg,
1369 					   hdr->pgio_mirror_idx);
1370 
1371 	trace_nfs4_pnfs_read(hdr, err);
1372 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1373 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1374 	switch (err) {
1375 	case -NFS4ERR_RESET_TO_PNFS:
1376 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1377 		return task->tk_status;
1378 	case -NFS4ERR_RESET_TO_MDS:
1379 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1380 		return task->tk_status;
1381 	case -EAGAIN:
1382 		goto out_eagain;
1383 	}
1384 
1385 	return 0;
1386 out_eagain:
1387 	rpc_restart_call_prepare(task);
1388 	return -EAGAIN;
1389 }
1390 
1391 static bool
1392 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1393 {
1394 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1395 }
1396 
1397 /*
1398  * We reference the rpc_cred of the first WRITE that triggers the need for
1399  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1400  * rfc5661 is not clear about which credential should be used.
1401  *
1402  * The flexfiles client should treat a FILE_SYNC reply from a DS as DATA_SYNC,
1403  * so to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1404  * we always send a layoutcommit after DS writes.
1405  */
1406 static void
1407 ff_layout_set_layoutcommit(struct inode *inode,
1408 		struct pnfs_layout_segment *lseg,
1409 		loff_t end_offset)
1410 {
1411 	if (!ff_layout_need_layoutcommit(lseg))
1412 		return;
1413 
1414 	pnfs_set_layoutcommit(inode, lseg, end_offset);
1415 	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1416 		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
1417 }
1418 
1419 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1420 		struct nfs_pgio_header *hdr)
1421 {
1422 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1423 		return;
1424 	nfs4_ff_layout_stat_io_start_read(hdr->inode,
1425 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1426 			hdr->args.count,
1427 			task->tk_start);
1428 }
1429 
1430 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1431 		struct nfs_pgio_header *hdr)
1432 {
1433 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1434 		return;
1435 	nfs4_ff_layout_stat_io_end_read(task,
1436 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1437 			hdr->args.count,
1438 			hdr->res.count);
1439 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1440 }
1441 
1442 static int ff_layout_read_prepare_common(struct rpc_task *task,
1443 					 struct nfs_pgio_header *hdr)
1444 {
1445 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1446 		rpc_exit(task, -EIO);
1447 		return -EIO;
1448 	}
1449 
1450 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1451 		rpc_exit(task, -EAGAIN);
1452 		return -EAGAIN;
1453 	}
1454 
1455 	ff_layout_read_record_layoutstats_start(task, hdr);
1456 	return 0;
1457 }
1458 
1459 /*
1460  * Call ops for the async read/write cases
1461  * In the case of dense layouts, the offset needs to be reset to its
1462  * original value.
1463  */
1464 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1465 {
1466 	struct nfs_pgio_header *hdr = data;
1467 
1468 	if (ff_layout_read_prepare_common(task, hdr))
1469 		return;
1470 
1471 	rpc_call_start(task);
1472 }
1473 
1474 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1475 {
1476 	struct nfs_pgio_header *hdr = data;
1477 
1478 	if (nfs4_setup_sequence(hdr->ds_clp,
1479 				&hdr->args.seq_args,
1480 				&hdr->res.seq_res,
1481 				task))
1482 		return;
1483 
1484 	ff_layout_read_prepare_common(task, hdr);
1485 }
1486 
1487 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1488 {
1489 	struct nfs_pgio_header *hdr = data;
1490 
1491 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1492 	    task->tk_status == 0) {
1493 		nfs4_sequence_done(task, &hdr->res.seq_res);
1494 		return;
1495 	}
1496 
1497 	/* Note this may cause RPC to be resent */
1498 	hdr->mds_ops->rpc_call_done(task, hdr);
1499 }
1500 
1501 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1502 {
1503 	struct nfs_pgio_header *hdr = data;
1504 
1505 	ff_layout_read_record_layoutstats_done(task, hdr);
1506 	rpc_count_iostats_metrics(task,
1507 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1508 }
1509 
1510 static void ff_layout_read_release(void *data)
1511 {
1512 	struct nfs_pgio_header *hdr = data;
1513 
1514 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1515 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1516 		ff_layout_resend_pnfs_read(hdr);
1517 	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1518 		ff_layout_reset_read(hdr);
1519 	pnfs_generic_rw_release(data);
1520 }
1521 
1522 
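/*
 * Completion callback for WRITEs to a data server: track any DS error,
 * decide whether to resend through pNFS or the MDS, and record the end of
 * the written range for a later LAYOUTCOMMIT when the write was committed
 * to stable storage.
 */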
1523 static int ff_layout_write_done_cb(struct rpc_task *task,
1524 				struct nfs_pgio_header *hdr)
1525 {
1526 	loff_t end_offs = 0;
1527 	int err;
1528 
1529 	if (task->tk_status < 0) {
1530 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1531 					    hdr->args.offset, hdr->args.count,
1532 					    &hdr->res.op_status, OP_WRITE,
1533 					    task->tk_status);
1534 		trace_ff_layout_write_error(hdr);
1535 	}
1536 
1537 	err = ff_layout_async_handle_error(task, hdr->res.op_status,
1538 					   hdr->args.context->state,
1539 					   hdr->ds_clp, hdr->lseg,
1540 					   hdr->pgio_mirror_idx);
1541 
1542 	trace_nfs4_pnfs_write(hdr, err);
1543 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1544 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1545 	switch (err) {
1546 	case -NFS4ERR_RESET_TO_PNFS:
1547 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1548 		return task->tk_status;
1549 	case -NFS4ERR_RESET_TO_MDS:
1550 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1551 		return task->tk_status;
1552 	case -EAGAIN:
1553 		return -EAGAIN;
1554 	}
1555 
1556 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1557 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1558 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1559 
1560 	/* Note: if the write is unstable, don't set end_offs until commit */
1561 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1562 
1563 	/* zero out fattr since we don't care DS attr at all */
1564 	hdr->fattr.valid = 0;
1565 	if (task->tk_status >= 0)
1566 		nfs_writeback_update_inode(hdr);
1567 
1568 	return 0;
1569 }
1570 
1571 static int ff_layout_commit_done_cb(struct rpc_task *task,
1572 				     struct nfs_commit_data *data)
1573 {
1574 	int err;
1575 
1576 	if (task->tk_status < 0) {
1577 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1578 					    data->args.offset, data->args.count,
1579 					    &data->res.op_status, OP_COMMIT,
1580 					    task->tk_status);
1581 		trace_ff_layout_commit_error(data);
1582 	}
1583 
1584 	err = ff_layout_async_handle_error(task, data->res.op_status,
1585 					   NULL, data->ds_clp, data->lseg,
1586 					   data->ds_commit_index);
1587 
1588 	trace_nfs4_pnfs_commit_ds(data, err);
1589 	switch (err) {
1590 	case -NFS4ERR_RESET_TO_PNFS:
1591 		pnfs_generic_prepare_to_resend_writes(data);
1592 		return -EAGAIN;
1593 	case -NFS4ERR_RESET_TO_MDS:
1594 		pnfs_generic_prepare_to_resend_writes(data);
1595 		return -EAGAIN;
1596 	case -EAGAIN:
1597 		rpc_restart_call_prepare(task);
1598 		return -EAGAIN;
1599 	}
1600 
1601 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1602 
1603 	return 0;
1604 }
1605 
1606 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1607 		struct nfs_pgio_header *hdr)
1608 {
1609 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1610 		return;
1611 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1612 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1613 			hdr->args.count,
1614 			task->tk_start);
1615 }
1616 
1617 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1618 		struct nfs_pgio_header *hdr)
1619 {
1620 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1621 		return;
1622 	nfs4_ff_layout_stat_io_end_write(task,
1623 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1624 			hdr->args.count, hdr->res.count,
1625 			hdr->res.verf->committed);
1626 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1627 }
1628 
1629 static int ff_layout_write_prepare_common(struct rpc_task *task,
1630 					  struct nfs_pgio_header *hdr)
1631 {
1632 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1633 		rpc_exit(task, -EIO);
1634 		return -EIO;
1635 	}
1636 
1637 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1638 		rpc_exit(task, -EAGAIN);
1639 		return -EAGAIN;
1640 	}
1641 
1642 	ff_layout_write_record_layoutstats_start(task, hdr);
1643 	return 0;
1644 }
1645 
1646 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1647 {
1648 	struct nfs_pgio_header *hdr = data;
1649 
1650 	if (ff_layout_write_prepare_common(task, hdr))
1651 		return;
1652 
1653 	rpc_call_start(task);
1654 }
1655 
1656 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1657 {
1658 	struct nfs_pgio_header *hdr = data;
1659 
1660 	if (nfs4_setup_sequence(hdr->ds_clp,
1661 				&hdr->args.seq_args,
1662 				&hdr->res.seq_res,
1663 				task))
1664 		return;
1665 
1666 	ff_layout_write_prepare_common(task, hdr);
1667 }
1668 
1669 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1670 {
1671 	struct nfs_pgio_header *hdr = data;
1672 
1673 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1674 	    task->tk_status == 0) {
1675 		nfs4_sequence_done(task, &hdr->res.seq_res);
1676 		return;
1677 	}
1678 
1679 	/* Note this may cause RPC to be resent */
1680 	hdr->mds_ops->rpc_call_done(task, hdr);
1681 }
1682 
1683 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1684 {
1685 	struct nfs_pgio_header *hdr = data;
1686 
1687 	ff_layout_write_record_layoutstats_done(task, hdr);
1688 	rpc_count_iostats_metrics(task,
1689 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1690 }
1691 
1692 static void ff_layout_write_release(void *data)
1693 {
1694 	struct nfs_pgio_header *hdr = data;
1695 
1696 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1697 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1698 		ff_layout_send_layouterror(hdr->lseg);
1699 		ff_layout_reset_write(hdr, true);
1700 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1701 		ff_layout_reset_write(hdr, false);
1702 	pnfs_generic_rw_release(data);
1703 }
1704 
1705 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1706 		struct nfs_commit_data *cdata)
1707 {
1708 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1709 		return;
1710 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1711 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1712 			0, task->tk_start);
1713 }
1714 
1715 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1716 		struct nfs_commit_data *cdata)
1717 {
1718 	struct nfs_page *req;
1719 	__u64 count = 0;
1720 
1721 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1722 		return;
1723 
1724 	if (task->tk_status == 0) {
1725 		list_for_each_entry(req, &cdata->pages, wb_list)
1726 			count += req->wb_bytes;
1727 	}
1728 	nfs4_ff_layout_stat_io_end_write(task,
1729 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1730 			count, count, NFS_FILE_SYNC);
1731 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1732 }
1733 
1734 static int ff_layout_commit_prepare_common(struct rpc_task *task,
1735 					   struct nfs_commit_data *cdata)
1736 {
1737 	if (!pnfs_is_valid_lseg(cdata->lseg)) {
1738 		rpc_exit(task, -EAGAIN);
1739 		return -EAGAIN;
1740 	}
1741 
1742 	ff_layout_commit_record_layoutstats_start(task, cdata);
1743 	return 0;
1744 }
1745 
1746 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1747 {
1748 	if (ff_layout_commit_prepare_common(task, data))
1749 		return;
1750 
1751 	rpc_call_start(task);
1752 }
1753 
1754 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1755 {
1756 	struct nfs_commit_data *wdata = data;
1757 
1758 	if (nfs4_setup_sequence(wdata->ds_clp,
1759 				&wdata->args.seq_args,
1760 				&wdata->res.seq_res,
1761 				task))
1762 		return;
1763 	ff_layout_commit_prepare_common(task, data);
1764 }
1765 
1766 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1767 {
1768 	pnfs_generic_write_commit_done(task, data);
1769 }
1770 
1771 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1772 {
1773 	struct nfs_commit_data *cdata = data;
1774 
1775 	ff_layout_commit_record_layoutstats_done(task, cdata);
1776 	rpc_count_iostats_metrics(task,
1777 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1778 }
1779 
1780 static void ff_layout_commit_release(void *data)
1781 {
1782 	struct nfs_commit_data *cdata = data;
1783 
1784 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1785 	pnfs_generic_commit_release(data);
1786 }
1787 
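/*
 * The read/write/commit call-ops tables below differ between v3 and v4
 * only in .rpc_call_prepare: an NFSv4.1 data server needs a session slot
 * reserved via nfs4_setup_sequence() before the call can proceed, while
 * an NFSv3 data server can go straight to rpc_call_start().
 */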
1788 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1789 	.rpc_call_prepare = ff_layout_read_prepare_v3,
1790 	.rpc_call_done = ff_layout_read_call_done,
1791 	.rpc_count_stats = ff_layout_read_count_stats,
1792 	.rpc_release = ff_layout_read_release,
1793 };
1794 
1795 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1796 	.rpc_call_prepare = ff_layout_read_prepare_v4,
1797 	.rpc_call_done = ff_layout_read_call_done,
1798 	.rpc_count_stats = ff_layout_read_count_stats,
1799 	.rpc_release = ff_layout_read_release,
1800 };
1801 
1802 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1803 	.rpc_call_prepare = ff_layout_write_prepare_v3,
1804 	.rpc_call_done = ff_layout_write_call_done,
1805 	.rpc_count_stats = ff_layout_write_count_stats,
1806 	.rpc_release = ff_layout_write_release,
1807 };
1808 
1809 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1810 	.rpc_call_prepare = ff_layout_write_prepare_v4,
1811 	.rpc_call_done = ff_layout_write_call_done,
1812 	.rpc_count_stats = ff_layout_write_count_stats,
1813 	.rpc_release = ff_layout_write_release,
1814 };
1815 
1816 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1817 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1818 	.rpc_call_done = ff_layout_commit_done,
1819 	.rpc_count_stats = ff_layout_commit_count_stats,
1820 	.rpc_release = ff_layout_commit_release,
1821 };
1822 
1823 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1824 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1825 	.rpc_call_done = ff_layout_commit_done,
1826 	.rpc_count_stats = ff_layout_commit_count_stats,
1827 	.rpc_release = ff_layout_commit_release,
1828 };
1829 
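/*
 * DS read path: pick the mirror for this pgio index, make sure its data
 * server is usable, find or create an RPC client and credential for it,
 * then redirect the request at the DS filehandle and stateid.  If setup
 * fails, return PNFS_TRY_AGAIN while other usable data servers remain,
 * otherwise PNFS_NOT_ATTEMPTED so the I/O falls back to the MDS.
 */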
1830 static enum pnfs_try_status
1831 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1832 {
1833 	struct pnfs_layout_segment *lseg = hdr->lseg;
1834 	struct nfs4_pnfs_ds *ds;
1835 	struct rpc_clnt *ds_clnt;
1836 	struct nfs4_ff_layout_mirror *mirror;
1837 	const struct cred *ds_cred;
1838 	loff_t offset = hdr->args.offset;
1839 	u32 idx = hdr->pgio_mirror_idx;
1840 	int vers;
1841 	struct nfs_fh *fh;
1842 
1843 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1844 		__func__, hdr->inode->i_ino,
1845 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1846 
1847 	mirror = FF_LAYOUT_COMP(lseg, idx);
1848 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1849 	if (!ds)
1850 		goto out_failed;
1851 
1852 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1853 						   hdr->inode);
1854 	if (IS_ERR(ds_clnt))
1855 		goto out_failed;
1856 
1857 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1858 	if (!ds_cred)
1859 		goto out_failed;
1860 
1861 	vers = nfs4_ff_layout_ds_version(mirror);
1862 
1863 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1864 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1865 
1866 	hdr->pgio_done_cb = ff_layout_read_done_cb;
1867 	refcount_inc(&ds->ds_clp->cl_count);
1868 	hdr->ds_clp = ds->ds_clp;
1869 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1870 	if (fh)
1871 		hdr->args.fh = fh;
1872 
1873 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1874 
1875 	/*
1876 	 * Note that if we ever decide to split across DSes,
1877 	 * then we may need to handle dense-like offsets.
1878 	 */
1879 	hdr->args.offset = offset;
1880 	hdr->mds_offset = offset;
1881 
1882 	/* Perform an asynchronous read to ds */
1883 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1884 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1885 				      &ff_layout_read_call_ops_v4,
1886 			  0, RPC_TASK_SOFTCONN);
1887 	put_cred(ds_cred);
1888 	return PNFS_ATTEMPTED;
1889 
1890 out_failed:
1891 	if (ff_layout_avoid_mds_available_ds(lseg))
1892 		return PNFS_TRY_AGAIN;
1893 	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1894 			hdr->args.offset, hdr->args.count,
1895 			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1896 	return PNFS_NOT_ATTEMPTED;
1897 }
1898 
1899 /* Perform async writes. */
1900 static enum pnfs_try_status
1901 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1902 {
1903 	struct pnfs_layout_segment *lseg = hdr->lseg;
1904 	struct nfs4_pnfs_ds *ds;
1905 	struct rpc_clnt *ds_clnt;
1906 	struct nfs4_ff_layout_mirror *mirror;
1907 	const struct cred *ds_cred;
1908 	loff_t offset = hdr->args.offset;
1909 	int vers;
1910 	struct nfs_fh *fh;
1911 	u32 idx = hdr->pgio_mirror_idx;
1912 
1913 	mirror = FF_LAYOUT_COMP(lseg, idx);
1914 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1915 	if (!ds)
1916 		goto out_failed;
1917 
1918 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1919 						   hdr->inode);
1920 	if (IS_ERR(ds_clnt))
1921 		goto out_failed;
1922 
1923 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1924 	if (!ds_cred)
1925 		goto out_failed;
1926 
1927 	vers = nfs4_ff_layout_ds_version(mirror);
1928 
1929 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1930 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1931 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1932 		vers);
1933 
1934 	hdr->pgio_done_cb = ff_layout_write_done_cb;
1935 	refcount_inc(&ds->ds_clp->cl_count);
1936 	hdr->ds_clp = ds->ds_clp;
1937 	hdr->ds_commit_idx = idx;
1938 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1939 	if (fh)
1940 		hdr->args.fh = fh;
1941 
1942 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1943 
1944 	/*
1945 	 * Note that if we ever decide to split across DSes,
1946 	 * then we may need to handle dense-like offsets.
1947 	 */
1948 	hdr->args.offset = offset;
1949 
1950 	/* Perform an asynchronous write */
1951 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1952 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1953 				      &ff_layout_write_call_ops_v4,
1954 			  sync, RPC_TASK_SOFTCONN);
1955 	put_cred(ds_cred);
1956 	return PNFS_ATTEMPTED;
1957 
1958 out_failed:
1959 	if (ff_layout_avoid_mds_available_ds(lseg))
1960 		return PNFS_TRY_AGAIN;
1961 	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1962 			hdr->args.offset, hdr->args.count,
1963 			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1964 	return PNFS_NOT_ATTEMPTED;
1965 }
1966 
1967 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1968 {
1969 	return i;
1970 }
1971 
1972 static struct nfs_fh *
1973 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1974 {
1975 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1976 
1977 	/* FIXME: Assume that there is only one NFS version available
1978 	 * for the DS.
1979 	 */
1980 	return &flseg->mirror_array[i]->fh_versions[0];
1981 }
1982 
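/*
 * Commit path: data->ds_commit_index selects the mirror whose data
 * server receives the COMMIT.  If the lseg is no longer usable or DS
 * setup fails, the requests are handed back for resending through the
 * MDS and -EAGAIN is returned.
 */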
1983 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1984 {
1985 	struct pnfs_layout_segment *lseg = data->lseg;
1986 	struct nfs4_pnfs_ds *ds;
1987 	struct rpc_clnt *ds_clnt;
1988 	struct nfs4_ff_layout_mirror *mirror;
1989 	const struct cred *ds_cred;
1990 	u32 idx;
1991 	int vers, ret;
1992 	struct nfs_fh *fh;
1993 
1994 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1995 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1996 		goto out_err;
1997 
1998 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1999 	mirror = FF_LAYOUT_COMP(lseg, idx);
2000 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
2001 	if (!ds)
2002 		goto out_err;
2003 
2004 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
2005 						   data->inode);
2006 	if (IS_ERR(ds_clnt))
2007 		goto out_err;
2008 
2009 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
2010 	if (!ds_cred)
2011 		goto out_err;
2012 
2013 	vers = nfs4_ff_layout_ds_version(mirror);
2014 
2015 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
2016 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
2017 		vers);
2018 	data->commit_done_cb = ff_layout_commit_done_cb;
2019 	data->cred = ds_cred;
2020 	refcount_inc(&ds->ds_clp->cl_count);
2021 	data->ds_clp = ds->ds_clp;
2022 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
2023 	if (fh)
2024 		data->args.fh = fh;
2025 
2026 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
2027 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
2028 					       &ff_layout_commit_call_ops_v4,
2029 				   how, RPC_TASK_SOFTCONN);
2030 	put_cred(ds_cred);
2031 	return ret;
2032 out_err:
2033 	pnfs_generic_prepare_to_resend_writes(data);
2034 	pnfs_generic_commit_release(data);
2035 	return -EAGAIN;
2036 }
2037 
2038 static int
2039 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
2040 			   int how, struct nfs_commit_info *cinfo)
2041 {
2042 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
2043 					    ff_layout_initiate_commit);
2044 }
2045 
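/*
 * The ff_layout_match_* helpers form the predicate used by
 * ff_layout_cancel_io() below: an RPC task belongs to a layout segment
 * if its tk_ops point at one of this driver's call-ops tables and its
 * calldata references the segment being cancelled.
 */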
2046 static bool ff_layout_match_rw(const struct rpc_task *task,
2047 			       const struct nfs_pgio_header *hdr,
2048 			       const struct pnfs_layout_segment *lseg)
2049 {
2050 	return hdr->lseg == lseg;
2051 }
2052 
2053 static bool ff_layout_match_commit(const struct rpc_task *task,
2054 				   const struct nfs_commit_data *cdata,
2055 				   const struct pnfs_layout_segment *lseg)
2056 {
2057 	return cdata->lseg == lseg;
2058 }
2059 
2060 static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
2061 {
2062 	const struct rpc_call_ops *ops = task->tk_ops;
2063 
2064 	if (ops == &ff_layout_read_call_ops_v3 ||
2065 	    ops == &ff_layout_read_call_ops_v4 ||
2066 	    ops == &ff_layout_write_call_ops_v3 ||
2067 	    ops == &ff_layout_write_call_ops_v4)
2068 		return ff_layout_match_rw(task, task->tk_calldata, data);
2069 	if (ops == &ff_layout_commit_call_ops_v3 ||
2070 	    ops == &ff_layout_commit_call_ops_v4)
2071 		return ff_layout_match_commit(task, task->tk_calldata, data);
2072 	return false;
2073 }
2074 
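/*
 * Cancel all in-flight I/O against every mirror of the segment: matching
 * RPC tasks are terminated with -EAGAIN via rpc_cancel_tasks(), and
 * rpc_clnt_disconnect() is then called on the affected DS client.
 */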
2075 static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
2076 {
2077 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2078 	struct nfs4_ff_layout_mirror *mirror;
2079 	struct nfs4_ff_layout_ds *mirror_ds;
2080 	struct nfs4_pnfs_ds *ds;
2081 	struct nfs_client *ds_clp;
2082 	struct rpc_clnt *clnt;
2083 	u32 idx;
2084 
2085 	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2086 		mirror = flseg->mirror_array[idx];
2087 		mirror_ds = mirror->mirror_ds;
2088 		if (IS_ERR_OR_NULL(mirror_ds))
2089 			continue;
2090 		ds = mirror->mirror_ds->ds;
2091 		if (!ds)
2092 			continue;
2093 		ds_clp = ds->ds_clp;
2094 		if (!ds_clp)
2095 			continue;
2096 		clnt = ds_clp->cl_rpcclient;
2097 		if (!clnt)
2098 			continue;
2099 		if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
2100 			continue;
2101 		rpc_clnt_disconnect(clnt);
2102 	}
2103 }
2104 
2105 static struct pnfs_ds_commit_info *
2106 ff_layout_get_ds_info(struct inode *inode)
2107 {
2108 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2109 
2110 	if (layout == NULL)
2111 		return NULL;
2112 
2113 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2114 }
2115 
2116 static void
2117 ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2118 		struct pnfs_layout_segment *lseg)
2119 {
2120 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2121 	struct inode *inode = lseg->pls_layout->plh_inode;
2122 	struct pnfs_commit_array *array, *new;
2123 
2124 	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
2125 				      nfs_io_gfp_mask());
2126 	if (new) {
2127 		spin_lock(&inode->i_lock);
2128 		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2129 		spin_unlock(&inode->i_lock);
2130 		if (array != new)
2131 			pnfs_free_commit_array(new);
2132 	}
2133 }
2134 
2135 static void
2136 ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2137 		struct inode *inode)
2138 {
2139 	spin_lock(&inode->i_lock);
2140 	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2141 	spin_unlock(&inode->i_lock);
2142 }
2143 
2144 static void
2145 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2146 {
2147 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2148 						  id_node));
2149 }
2150 
2151 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2152 				  const struct nfs4_layoutreturn_args *args,
2153 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2154 {
2155 	__be32 *start;
2156 
2157 	start = xdr_reserve_space(xdr, 4);
2158 	if (unlikely(!start))
2159 		return -E2BIG;
2160 
2161 	*start = cpu_to_be32(ff_args->num_errors);
2162 	/* This assumes we always return _ALL_ layouts */
2163 	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2164 }
2165 
2166 static void
2167 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2168 {
2169 	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2170 }
2171 
2172 static void
2173 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2174 			    const nfs4_stateid *stateid,
2175 			    const struct nfs42_layoutstat_devinfo *devinfo)
2176 {
2177 	__be32 *p;
2178 
2179 	p = xdr_reserve_space(xdr, 8 + 8);
2180 	p = xdr_encode_hyper(p, devinfo->offset);
2181 	p = xdr_encode_hyper(p, devinfo->length);
2182 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2183 	p = xdr_reserve_space(xdr, 4*8);
2184 	p = xdr_encode_hyper(p, devinfo->read_count);
2185 	p = xdr_encode_hyper(p, devinfo->read_bytes);
2186 	p = xdr_encode_hyper(p, devinfo->write_count);
2187 	p = xdr_encode_hyper(p, devinfo->write_bytes);
2188 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2189 }
2190 
2191 static void
2192 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2193 			    const nfs4_stateid *stateid,
2194 			    const struct nfs42_layoutstat_devinfo *devinfo)
2195 {
2196 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2197 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2198 			devinfo->ld_private.data);
2199 }
2200 
2201 /* Encode the per-mirror iostats gathered for this layoutreturn */
2202 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2203 		const struct nfs4_layoutreturn_args *args,
2204 		struct nfs4_flexfile_layoutreturn_args *ff_args)
2205 {
2206 	__be32 *p;
2207 	int i;
2208 
2209 	p = xdr_reserve_space(xdr, 4);
2210 	*p = cpu_to_be32(ff_args->num_dev);
2211 	for (i = 0; i < ff_args->num_dev; i++)
2212 		ff_layout_encode_ff_iostat(xdr,
2213 				&args->layout->plh_stateid,
2214 				&ff_args->devinfo[i]);
2215 }
2216 
2217 static void
2218 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2219 		unsigned int num_entries)
2220 {
2221 	unsigned int i;
2222 
2223 	for (i = 0; i < num_entries; i++) {
2224 		if (!devinfo[i].ld_private.ops)
2225 			continue;
2226 		if (!devinfo[i].ld_private.ops->free)
2227 			continue;
2228 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2229 	}
2230 }
2231 
2232 static struct nfs4_deviceid_node *
2233 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2234 			      struct pnfs_device *pdev, gfp_t gfp_flags)
2235 {
2236 	struct nfs4_ff_layout_ds *dsaddr;
2237 
2238 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2239 	if (!dsaddr)
2240 		return NULL;
2241 	return &dsaddr->id_node;
2242 }
2243 
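/*
 * The layoutreturn body is built in two passes: the ioerr and iostats
 * arrays are first encoded into a private one-page xdr_buf (set up in
 * ff_layout_prepare_layoutreturn()), and that page is then spliced into
 * the real stream as an opaque blob prefixed with its length.
 */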
2244 static void
2245 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2246 		const void *voidargs,
2247 		const struct nfs4_xdr_opaque_data *ff_opaque)
2248 {
2249 	const struct nfs4_layoutreturn_args *args = voidargs;
2250 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2251 	struct xdr_buf tmp_buf = {
2252 		.head = {
2253 			[0] = {
2254 				.iov_base = page_address(ff_args->pages[0]),
2255 			},
2256 		},
2257 		.buflen = PAGE_SIZE,
2258 	};
2259 	struct xdr_stream tmp_xdr;
2260 	__be32 *start;
2261 
2262 	dprintk("%s: Begin\n", __func__);
2263 
2264 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2265 
2266 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2267 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2268 
2269 	start = xdr_reserve_space(xdr, 4);
2270 	*start = cpu_to_be32(tmp_buf.len);
2271 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2272 
2273 	dprintk("%s: Return\n", __func__);
2274 }
2275 
2276 static void
2277 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2278 {
2279 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2280 
2281 	if (!args->data)
2282 		return;
2283 	ff_args = args->data;
2284 	args->data = NULL;
2285 
2286 	ff_layout_free_ds_ioerr(&ff_args->errors);
2287 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2288 
2289 	put_page(ff_args->pages[0]);
2290 	kfree(ff_args);
2291 }
2292 
2293 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2294 	.encode = ff_layout_encode_layoutreturn,
2295 	.free = ff_layout_free_layoutreturn,
2296 };
2297 
2298 static int
2299 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2300 {
2301 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2302 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2303 
2304 	ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2305 	if (!ff_args)
2306 		goto out_nomem;
2307 	ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2308 	if (!ff_args->pages[0])
2309 		goto out_nomem_free;
2310 
2311 	INIT_LIST_HEAD(&ff_args->errors);
2312 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2313 			&args->range, &ff_args->errors,
2314 			FF_LAYOUTRETURN_MAXERR);
2315 
2316 	spin_lock(&args->inode->i_lock);
2317 	ff_args->num_dev = ff_layout_mirror_prepare_stats(
2318 		&ff_layout->generic_hdr, &ff_args->devinfo[0],
2319 		ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2320 	spin_unlock(&args->inode->i_lock);
2321 
2322 	args->ld_private->ops = &layoutreturn_ops;
2323 	args->ld_private->data = ff_args;
2324 	return 0;
2325 out_nomem_free:
2326 	kfree(ff_args);
2327 out_nomem:
2328 	return -ENOMEM;
2329 }
2330 
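/*
 * LAYOUTERROR reporting (NFSv4.2 only): collect the queued DS errors for
 * this segment and send them to the MDS in batches of at most
 * NFS42_LAYOUTERROR_MAX entries per nfs42_proc_layouterror() call.  When
 * CONFIG_NFS_V4_2 is not set, the stub below does nothing.
 */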
2331 #ifdef CONFIG_NFS_V4_2
2332 void
2333 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2334 {
2335 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2336 	struct nfs42_layout_error *errors;
2337 	LIST_HEAD(head);
2338 
2339 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2340 		return;
2341 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2342 	if (list_empty(&head))
2343 		return;
2344 
2345 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2346 			       nfs_io_gfp_mask());
2347 	if (errors != NULL) {
2348 		const struct nfs4_ff_layout_ds_err *pos;
2349 		size_t n = 0;
2350 
2351 		list_for_each_entry(pos, &head, list) {
2352 			errors[n].offset = pos->offset;
2353 			errors[n].length = pos->length;
2354 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2355 			errors[n].errors[0].dev_id = pos->deviceid;
2356 			errors[n].errors[0].status = pos->status;
2357 			errors[n].errors[0].opnum = pos->opnum;
2358 			n++;
2359 			if (!list_is_last(&pos->list, &head) &&
2360 			    n < NFS42_LAYOUTERROR_MAX)
2361 				continue;
2362 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2363 				break;
2364 			n = 0;
2365 		}
2366 		kfree(errors);
2367 	}
2368 	ff_layout_free_ds_ioerr(&head);
2369 }
2370 #else
2371 void
2372 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2373 {
2374 }
2375 #endif
2376 
2377 static int
2378 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2379 {
2380 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2381 
2382 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2383 }
2384 
2385 static size_t
2386 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2387 			  const int buflen)
2388 {
2389 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2390 	const struct in6_addr *addr = &sin6->sin6_addr;
2391 
2392 	/*
2393 	 * RFC 4291, Section 2.2.2
2394 	 *
2395 	 * Shorthanded ANY address
2396 	 */
2397 	if (ipv6_addr_any(addr))
2398 		return snprintf(buf, buflen, "::");
2399 
2400 	/*
2401 	 * RFC 4291, Section 2.2.2
2402 	 *
2403 	 * Shorthanded loopback address
2404 	 */
2405 	if (ipv6_addr_loopback(addr))
2406 		return snprintf(buf, buflen, "::1");
2407 
2408 	/*
2409 	 * RFC 4291, Section 2.2.3
2410 	 *
2411 	 * Special presentation address format for mapped v4
2412 	 * addresses.
2413 	 */
2414 	if (ipv6_addr_v4mapped(addr))
2415 		return snprintf(buf, buflen, "::ffff:%pI4",
2416 					&addr->s6_addr32[3]);
2417 
2418 	/*
2419 	 * RFC 4291, Section 2.2.1
2420 	 */
2421 	return snprintf(buf, buflen, "%pI6c", addr);
2422 }
2423 
2424 /* Derived from rpc_sockaddr2uaddr */
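/*
 * The result is a netid/uaddr pair in the universal address format.  For
 * example (addresses here are purely illustrative), an IPv4 data server
 * on port 2049 would be encoded as netid "tcp" and uaddr "192.0.2.1.8.1",
 * where the last two dotted decimals are the port's high and low bytes.
 */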
2425 static void
2426 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2427 {
2428 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2429 	char portbuf[RPCBIND_MAXUADDRPLEN];
2430 	char addrbuf[RPCBIND_MAXUADDRLEN];
2431 	unsigned short port;
2432 	int len, netid_len;
2433 	__be32 *p;
2434 
2435 	switch (sap->sa_family) {
2436 	case AF_INET:
2437 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2438 			return;
2439 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2440 		break;
2441 	case AF_INET6:
2442 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2443 			return;
2444 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2445 		break;
2446 	default:
2447 		WARN_ON_ONCE(1);
2448 		return;
2449 	}
2450 
2451 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2452 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2453 
2454 	netid_len = strlen(da->da_netid);
2455 	p = xdr_reserve_space(xdr, 4 + netid_len);
2456 	xdr_encode_opaque(p, da->da_netid, netid_len);
2457 
2458 	p = xdr_reserve_space(xdr, 4 + len);
2459 	xdr_encode_opaque(p, addrbuf, len);
2460 }
2461 
2462 static void
2463 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2464 			 ktime_t t)
2465 {
2466 	struct timespec64 ts;
2467 	__be32 *p;
2468 
2469 	p = xdr_reserve_space(xdr, 12);
2470 	ts = ktime_to_timespec64(t);
2471 	p = xdr_encode_hyper(p, ts.tv_sec);
2472 	*p++ = cpu_to_be32(ts.tv_nsec);
2473 }
2474 
2475 static void
2476 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2477 			    struct nfs4_ff_io_stat *stat)
2478 {
2479 	__be32 *p;
2480 
2481 	p = xdr_reserve_space(xdr, 5 * 8);
2482 	p = xdr_encode_hyper(p, stat->ops_requested);
2483 	p = xdr_encode_hyper(p, stat->bytes_requested);
2484 	p = xdr_encode_hyper(p, stat->ops_completed);
2485 	p = xdr_encode_hyper(p, stat->bytes_completed);
2486 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2487 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2488 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2489 }
2490 
2491 static void
2492 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2493 			      const struct nfs42_layoutstat_devinfo *devinfo,
2494 			      struct nfs4_ff_layout_mirror *mirror)
2495 {
2496 	struct nfs4_pnfs_ds_addr *da;
2497 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2498 	struct nfs_fh *fh = &mirror->fh_versions[0];
2499 	__be32 *p;
2500 
2501 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2502 	dprintk("%s: DS %s: encoding address %s\n",
2503 		__func__, ds->ds_remotestr, da->da_remotestr);
2504 	/* netaddr4 */
2505 	ff_layout_encode_netaddr(xdr, da);
2506 	/* nfs_fh4 */
2507 	p = xdr_reserve_space(xdr, 4 + fh->size);
2508 	xdr_encode_opaque(p, fh->data, fh->size);
2509 	/* ff_io_latency4 read */
2510 	spin_lock(&mirror->lock);
2511 	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2512 	/* ff_io_latency4 write */
2513 	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2514 	spin_unlock(&mirror->lock);
2515 	/* nfstime4 */
2516 	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2517 	/* bool */
2518 	p = xdr_reserve_space(xdr, 4);
2519 	*p = cpu_to_be32(false);
2520 }
2521 
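/*
 * The layoutupdate body is length-prefixed: four bytes are reserved up
 * front, the body is encoded, and the length is then back-patched as
 * (xdr->p - start - 1) * 4, i.e. the number of 32-bit XDR words written
 * after the length field, converted to bytes.
 */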
2522 static void
2523 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2524 			     const struct nfs4_xdr_opaque_data *opaque)
2525 {
2526 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2527 			struct nfs42_layoutstat_devinfo, ld_private);
2528 	__be32 *start;
2529 
2530 	/* layoutupdate length */
2531 	start = xdr_reserve_space(xdr, 4);
2532 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2533 
2534 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2535 }
2536 
2537 static void
2538 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2539 {
2540 	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2541 
2542 	ff_layout_put_mirror(mirror);
2543 }
2544 
2545 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2546 	.encode = ff_layout_encode_layoutstats,
2547 	.free	= ff_layout_free_layoutstats,
2548 };
2549 
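/*
 * Walk the layout's mirror list (callers hold the inode spinlock) and
 * fill in up to dev_limit devinfo entries.  For LAYOUTSTATS, mirrors
 * whose NFS4_FF_MIRROR_STAT_AVAIL bit is clear are skipped; a
 * LAYOUTRETURN reports every mirror that still has a device.  Each entry
 * takes a mirror reference that is dropped by layoutstat_ops->free.
 */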
2550 static int
2551 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2552 			       struct nfs42_layoutstat_devinfo *devinfo,
2553 			       int dev_limit, enum nfs4_ff_op_type type)
2554 {
2555 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2556 	struct nfs4_ff_layout_mirror *mirror;
2557 	struct nfs4_deviceid_node *dev;
2558 	int i = 0;
2559 
2560 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2561 		if (i >= dev_limit)
2562 			break;
2563 		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2564 			continue;
2565 		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2566 					&mirror->flags) &&
2567 		    type != NFS4_FF_OP_LAYOUTRETURN)
2568 			continue;
2569 		/* mirror refcount put in ff_layout_free_layoutstats */
2570 		if (!refcount_inc_not_zero(&mirror->ref))
2571 			continue;
2572 		dev = &mirror->mirror_ds->id_node;
2573 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2574 		devinfo->offset = 0;
2575 		devinfo->length = NFS4_MAX_UINT64;
2576 		spin_lock(&mirror->lock);
2577 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2578 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2579 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2580 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2581 		spin_unlock(&mirror->lock);
2582 		devinfo->layout_type = LAYOUT_FLEX_FILES;
2583 		devinfo->ld_private.ops = &layoutstat_ops;
2584 		devinfo->ld_private.data = mirror;
2585 
2586 		devinfo++;
2587 		i++;
2588 	}
2589 	return i;
2590 }
2591 
2592 static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2593 {
2594 	struct pnfs_layout_hdr *lo;
2595 	struct nfs4_flexfile_layout *ff_layout;
2596 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2597 
2598 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2599 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2600 				      nfs_io_gfp_mask());
2601 	if (!args->devinfo)
2602 		return -ENOMEM;
2603 
2604 	spin_lock(&args->inode->i_lock);
2605 	lo = NFS_I(args->inode)->layout;
2606 	if (lo && pnfs_layout_is_valid(lo)) {
2607 		ff_layout = FF_LAYOUT_FROM_HDR(lo);
2608 		args->num_dev = ff_layout_mirror_prepare_stats(
2609 			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
2610 			NFS4_FF_OP_LAYOUTSTATS);
2611 	} else
2612 		args->num_dev = 0;
2613 	spin_unlock(&args->inode->i_lock);
2614 	if (!args->num_dev) {
2615 		kfree(args->devinfo);
2616 		args->devinfo = NULL;
2617 		return -ENOENT;
2618 	}
2619 
2620 	return 0;
2621 }
2622 
2623 static int
2624 ff_layout_set_layoutdriver(struct nfs_server *server,
2625 		const struct nfs_fh *dummy)
2626 {
2627 #if IS_ENABLED(CONFIG_NFS_V4_2)
2628 	server->caps |= NFS_CAP_LAYOUTSTATS;
2629 #endif
2630 	return 0;
2631 }
2632 
2633 static const struct pnfs_commit_ops ff_layout_commit_ops = {
2634 	.setup_ds_info		= ff_layout_setup_ds_info,
2635 	.release_ds_info	= ff_layout_release_ds_info,
2636 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2637 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2638 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2639 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2640 	.commit_pagelist	= ff_layout_commit_pagelist,
2641 };
2642 
2643 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2644 	.id			= LAYOUT_FLEX_FILES,
2645 	.name			= "LAYOUT_FLEX_FILES",
2646 	.owner			= THIS_MODULE,
2647 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2648 	.max_layoutget_response	= 4096, /* 1 page or so... */
2649 	.set_layoutdriver	= ff_layout_set_layoutdriver,
2650 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2651 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2652 	.alloc_lseg		= ff_layout_alloc_lseg,
2653 	.free_lseg		= ff_layout_free_lseg,
2654 	.add_lseg		= ff_layout_add_lseg,
2655 	.pg_read_ops		= &ff_layout_pg_read_ops,
2656 	.pg_write_ops		= &ff_layout_pg_write_ops,
2657 	.get_ds_info		= ff_layout_get_ds_info,
2658 	.free_deviceid_node	= ff_layout_free_deviceid_node,
2659 	.read_pagelist		= ff_layout_read_pagelist,
2660 	.write_pagelist		= ff_layout_write_pagelist,
2661 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2662 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2663 	.sync			= pnfs_nfs_generic_sync,
2664 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2665 	.cancel_io		= ff_layout_cancel_io,
2666 };
2667 
2668 static int __init nfs4flexfilelayout_init(void)
2669 {
2670 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2671 	       __func__);
2672 	return pnfs_register_layoutdriver(&flexfilelayout_type);
2673 }
2674 
2675 static void __exit nfs4flexfilelayout_exit(void)
2676 {
2677 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2678 	       __func__);
2679 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2680 }
2681 
2682 MODULE_ALIAS("nfs-layouttype4-4");
2683 
2684 MODULE_LICENSE("GPL");
2685 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2686 
2687 module_init(nfs4flexfilelayout_init);
2688 module_exit(nfs4flexfilelayout_exit);
2689 
2690 module_param(io_maxretrans, ushort, 0644);
2691 MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2692 			"retries an I/O request before returning an error.");
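/*
 * Example of tuning io_maxretrans.  The module name used below,
 * nfs_layout_flexfiles, is the conventional name for this driver and is
 * an assumption here, not something stated in this file:
 *
 *   modprobe nfs_layout_flexfiles io_maxretrans=5
 *
 * or, since the parameter is writable (mode 0644), at runtime:
 *
 *   echo 5 > /sys/module/nfs_layout_flexfiles/parameters/io_maxretrans
 */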
2693