xref: /openbmc/linux/fs/nfs/blocklayout/blocklayout.c (revision 4f3db074)
/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out of or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

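/*
 * An extent reads as a hole when it has no backing data: NONE_DATA
 * extents never do, and INVALID_DATA extents read as zeroes until
 * be_tag is set (which happens once the range has actually been
 * written; see ext_tree_mark_written()).
 */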
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

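/*
 * Submit the bio built so far, if any, and return NULL so the caller can
 * reset its bio pointer.  Each submitted bio holds a reference on the
 * parallel_io tracker; the matching put happens in the bio's end_io
 * handler.
 */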
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

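/*
 * Allocate and initialize a bio for up to @npg pages.  If allocation
 * fails while we are in memory reclaim (PF_MEMALLOC), keep halving the
 * page count so that writeback can still make progress with a smaller
 * bio.
 */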
static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		void (*end_io)(struct bio *, int err), struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

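/*
 * Add @page to @bio, translating the file-relative sector @isect first
 * into a volume offset via the extent and then into a physical disk
 * address via the cached device mapping.  If the target falls outside
 * the cached mapping, the mapping is refreshed and the bio built so far
 * is submitted; *len may be clamped so the I/O does not cross the end
 * of the mapping.  Returns the (possibly newly allocated) bio, or an
 * ERR_PTR() on failure.
 */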
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be,
		void (*end_io)(struct bio *, int err),
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (disk_addr < map->start || disk_addr >= map->start + map->len) {
		if (!dev->map(dev, disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(rw, bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

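/*
 * bi_end_io handler for read bios: on error, record -EIO in the header
 * (the first error wins) and mark the layout segment failed so that the
 * read is retried through the MDS.
 */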
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;

	if (err) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}

	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

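/*
 * Read the pages described by header->args from the block device(s)
 * backing the layout.  Extents are looked up as the loop crosses extent
 * boundaries; holes are filled with zeroes without touching the device,
 * everything else is batched into bios.  Completion is signalled through
 * the parallel_io tracker once the last bio finishes.
 */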
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(READ, bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_offset = f_offset & ~PAGE_CACHE_MASK;
		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_CACHE_SIZE;
		}

		isect += (pg_offset >> SECTOR_SHIFT);
		extent_length -= (pg_offset >> SECTOR_SHIFT);

		if (is_hole(&be)) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(READ, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_pgio_header *header = par->data;

	if (!uptodate) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write; it marks
 * sectors as written and extends the commit list.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
			container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when the last of the bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

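/*
 * Write the pages described by header->args to the block device(s)
 * backing the layout.  The block layout always writes out whole pages,
 * so the starting offset is rounded down to a page boundary below.
 */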
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_CACHE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_CACHE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(WRITE, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}

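/*
 * Decode a 64-bit byte value from the XDR stream and convert it to a
 * 512-byte sector count.  Values with any of the low nine bits set are
 * not sector aligned and are rejected.
 */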
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}

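/*
 * Decode one extent from the layout body.  As decoded below, the
 * on-the-wire format is 28 bytes plus the device ID:
 *
 *	deviceid	NFS4_DEVICEID4_SIZE bytes
 *	be_f_offset	8 bytes (file offset, in bytes)
 *	be_length	8 bytes
 *	be_v_offset	8 bytes (volume offset, in bytes)
 *	be_state	4 bytes
 *
 * The byte quantities are converted to 512-byte sectors, the extent is
 * checked against the layout range by verify_extent(), and on success it
 * is queued on @extents for later insertion into the extent tree.
 */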
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	error = -EIO;
	be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (!be->be_device)
		goto out_free_be;

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}

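/*
 * Decode a LAYOUTGET reply into a layout segment.  The body is an extent
 * count followed by that many extents.  Extents are decoded onto a
 * staging list and only inserted into the extent trees once the whole
 * body has been verified, which keeps error recovery simple.
 */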
static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in a temporary staging area
	 * until the whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	if (status) {
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

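/*
 * Remove the extents covered by a LAYOUTRETURN range from the extent
 * tree selected by the range's iomode.  Ranges that are not block size
 * aligned are silently ignored.
 */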
static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

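/*
 * Check whether a request is acceptable for the block layout at the
 * given alignment.  Buffered I/O (no pg_dreq) is always accepted, while
 * direct I/O must be aligned to @alignment, with an exception for a
 * short tail that ends exactly at the inode size.
 */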
static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize the common case of a write from 0 to the end of the file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;
	ret = bl_init_pipefs();
	if (ret)
		goto out_unregister;
	return 0;

out_unregister:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	bl_cleanup_pipefs();
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);