/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
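/*
 * Failed LAYOUTGETs for a given iomode are not retried until this
 * interval (two minutes, in jiffies) has elapsed; see
 * pnfs_layout_io_test_failed() below.
 */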
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
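	/*
	 * Look up the driver; if none is registered yet, ask for a module
	 * load and retry. The alias is built from the layout type id; e.g.
	 * for the files layout (id 1) this requests "nfs-layouttype4-1",
	 * assuming the stock LAYOUT_NFSV4_1_MODULE_PREFIX of
	 * "nfs-layouttype4".
	 */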
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
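
/*
 * A minimal registration sketch for a hypothetical layout driver module
 * (the names and the field subset shown are illustrative, not a complete
 * pnfs_layoutdriver_type):
 *
 *	static struct pnfs_layoutdriver_type mylayout_type = {
 *		.id		= LAYOUT4_NFSV4_1_FILES,
 *		.name		= "mylayout",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= mylayout_alloc_lseg,
 *		.free_lseg	= mylayout_free_lseg,
 *	};
 *
 * A module would call pnfs_register_layoutdriver(&mylayout_type) from its
 * init routine and pnfs_unregister_layoutdriver() on exit; as enforced
 * above, registration fails with -EINVAL if id is 0 or either lseg hook
 * is missing.
 */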

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

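/*
 * Drop a reference: atomic_dec_and_lock() takes i_lock only when the
 * refcount would reach zero, so the final put detaches and frees the
 * header while every other put stays lock-free.
 */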
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}

static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}

void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		pnfs_free_lseg_async(lseg);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);

static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}
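
/*
 * For example, end_offset(4096, 8192) == 12288, while end_offset(x,
 * NFS4_MAX_UINT64) overflows and is clamped back to NFS4_MAX_UINT64,
 * which the range helpers below treat as "no upper bound".
 */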

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
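/*
 * Note that an end of NFS4_MAX_UINT64 (a wrapped end_offset() result)
 * means the range extends to the end of the file, so only the other
 * range's end can rule out an overlap on that side.
 */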
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/* Returns the number of matching invalid lsegs remaining in the list
 * after the call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
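/* Returns -EAGAIN after dropping and reacquiring both locks around iput(),
 * in which case the caller must restart its list walk from the top.
 */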
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
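/*
 * A worked example of the modular comparison below: with s1 = 1 and
 * s2 = 0xFFFFFFFF, s1 - s2 wraps to 2, and (s32)2 > 0, so s1 is treated
 * as newer even though it is numerically smaller.
 */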
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}

/* Update lo->plh_stateid with new if it is more recent. */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, 1)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs) ||
		   test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	}

	return lseg;
}

static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		lo->plh_block_lgets--;
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	u32 current_seqid;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			found = true;
			goto out;
		}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
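/*
 * For example, two segments with equal offset and length but different
 * iomodes are ordered by the last term below: a READ segment scores
 * 1 - 0 = 1 against an RW segment, so it sorts after it and the RW
 * segment is found first by pnfs_find_lseg().
 */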
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return false;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
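/*
 * For example, a server that returned THRESHOLD_RD with rd_sz = 1MB at
 * OPEN is asking that READs of any file smaller than 1MB go to the MDS
 * rather than trigger a LAYOUTGET.
 */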
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	first = list_empty(&lo->plh_layouts);
	spin_unlock(&ino->i_lock);

	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			lseg == NULL ? "not found" : "found",
			iomode == IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget_reply;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, false);
	} else {
		/*
		 * We got an entirely new state ID.  Mark all segments for the
		 * inode invalid, and don't bother validating the stateid
		 * sequence number.
		 */
		pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);

		nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
		lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
	}

	clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	if (pgio->pg_dreq == NULL)
		rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
	else
		rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   rd_size,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   wb_size,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;
	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment and if
	 * there are fewer bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
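	/*
	 * Example: with a segment covering [0, 8192) and req_start == 4096,
	 * seg_left is 4096, so a 12KB request would be trimmed to 4KB here.
	 */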
	if (pgio->pg_lseg) {
		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start > seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end)
			return 0;

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_write_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	desc->pg_lseg = NULL;
	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_write_through_mds(desc, hdr);
	pnfs_put_lseg(lseg);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_write(desc, hdr, desc->pg_ioflags);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_read_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	desc->pg_lseg = NULL;
	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_read_through_mds(desc, hdr);
	pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		ret = -ENOMEM;
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = hdr->mds_offset + hdr->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data)
{
	struct inode *inode = data->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &data->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(data->lseg);
	}
	if (data->lwb > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = data->lwb;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, data->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_commit_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			spin_lock(&inode->i_lock);
			if (end_pos < nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			spin_unlock(&inode->i_lock);
			put_rpccred(data->cred);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			goto clear_layoutcommitting;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}
1981