/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

struct dentry *bl_device_pipe;
wait_queue_head_t bl_wq;

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}
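
/*
 * For reference, a summary of the RFC 5663 extent states as this
 * driver uses them (an editorial note, not text from the original
 * file): PNFS_BLOCK_READWRITE_DATA and PNFS_BLOCK_INVALID_DATA are
 * writable; PNFS_BLOCK_READ_DATA is valid on disk but read-only;
 * PNFS_BLOCK_NONE_DATA has no backing storage, so it always reads as
 * a hole.  is_hole() and is_writable() above encode this mapping, with
 * the twist that an INVALID_DATA extent reads as a hole only until the
 * sector has been initialized by a write (bl_is_sector_init).
 */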

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	struct rpc_call_ops call_ops;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}
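
/*
 * A sketch of the intended reference-counting pattern (illustrative
 * only, not lifted from a real caller):
 *
 *	par = alloc_parallel(data);		refcnt is now 1
 *	bl_submit_bio(rw, bio);			takes a ref per bio
 *	...
 *	put_parallel(par);			drop the submitter's ref
 *
 * Every bio's end_io handler also calls put_parallel(), so
 * p->pnfs_callback fires exactly once, from whichever context drops
 * the last reference: the submitting thread if all bios have already
 * completed, or the final bio completion otherwise.
 */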

static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write",
			bio->bi_size, (unsigned long long)bio->bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio)
		return NULL;

	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
	bio->bi_bdev = be->be_mdev;
	bio->bi_end_io = end_io;
	bio->bi_private = par;
	return bio;
}
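
/*
 * The bi_sector assignment above translates a file-relative sector
 * into a volume-relative one.  A worked example with made-up numbers:
 * if an extent maps file sectors starting at be_f_offset = 2048 onto
 * the volume at be_v_offset = 10240, then file sector isect = 2056
 * lands at volume sector 2056 - 2048 + 10240 = 10248.
 */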

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}
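
/*
 * A note on the retry loop above: bio_add_page() returns the number of
 * bytes it added, i.e. the whole page, or zero if the bio is already
 * full or the device's limits would be exceeded.  On a short return
 * the partially built bio is submitted (bl_submit_bio() returns NULL)
 * and the loop retries with a fresh bio, so each page always lands in
 * exactly one bio.
 */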

static void bl_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_RW) {
		dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
		set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
	} else {
		dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
		set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
	}
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

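	/*
	 * Walk the bio_vec array backwards, from the last segment to
	 * the first (the mpage_end_io_read idiom): bvec starts at the
	 * final entry, and each pass prefetches the previous page's
	 * flags before the current page is marked up to date.
	 */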
	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate)
			SetPageUptodate(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!rdata->pnfs_error)
			rdata->pnfs_error = -EIO;
		bl_set_lo_fail(rdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);
	pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_read_data *rdata = data;

	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}

/* We don't want the normal .rpc_call_done callback used, so we replace
 * it with this stub.
 */
static void bl_rpc_do_nothing(struct rpc_task *task, void *calldata)
{
	return;
}

static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t count = rdata->args.count;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

	dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__,
	       rdata->npages, f_offset, count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->call_ops = *rdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
					     isect, &cow_read);
			if (!be) {
				rdata->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}
		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par);
			if (IS_ERR(bio)) {
				rdata->pnfs_error = PTR_ERR(bio);
				goto out;
			}
		}
		isect += PAGE_CACHE_SECTORS;
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = rdata->inode->i_size - f_offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

 use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}
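
/*
 * The sector arithmetic above, worked through with assumed constants
 * (512-byte sectors, so SECTOR_SHIFT is 9, and 4K pages, so
 * PAGE_CACHE_SECTORS is 8): a read at f_offset = 65536 starts at
 * isect = 65536 >> 9 = 128 and advances by 8 sectors per page.  When
 * the final isect, converted back via isect << SECTOR_SHIFT, reaches
 * or passes i_size, res.eof is set and res.count is clamped to the
 * bytes that actually exist.
 */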

static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;
		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA)
			bl_mark_for_commit(be, isect, len); /* What if fails? */
		isect += len;
		bl_put_extent(be);
	}
}
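
/*
 * The masking above rounds the written byte range out to page
 * boundaries before walking the extents.  A worked example with 4K
 * pages: offset = 5000, count = 100 gives
 *	isect = (5000 & PAGE_CACHE_MASK) >> 9 = 4096 >> 9 = 8
 *	end   = ((5100 + 4095) & PAGE_CACHE_MASK) >> 9 = 8192 >> 9 = 16
 * so sectors 8..15, the single page containing the write, are marked.
 */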

static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		/* This is the zeroing page we added */
		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		bl_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		bl_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write;
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_write_data *wdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_write_data, task);
	if (!wdata->pnfs_error) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when the last of the bios associated with a bl_write_pagelist
 * call finishes.
 */
static void bl_end_par_io_write(void *data)
{
	struct nfs_write_data *wdata = data;

	wdata->task.tk_status = 0;
	wdata->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so it is
 * not used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
	    (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}
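
/*
 * The b_blocknr computation above converts a volume sector into a
 * device-block number by shifting out the difference between the
 * device block size and the 512-byte sector.  A worked example with
 * assumed values: for a device with 4K blocks, i_blkbits = 12, the
 * shift is 12 - 9 = 3, and volume sector 10248 maps to block
 * 10248 >> 3 = 1281.
 */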

/* Given an unmapped page, zero it (or read it in for COW); the page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	bl_put_extent(cow_read);
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
	 * We want to write each one; if there is an error, set pnfs_error
	 * so the write is redone using normal NFS.
	 */
	par = alloc_parallel(wdata);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->call_ops = *wdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, we have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		wdata->pnfs_error = -EINVAL;
		goto out;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (; npg_zero > 0; npg_zero--) {
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page =
			    find_or_create_page(wdata->inode->i_mapping, index,
						GFP_NOFS);
			if (!page) {
				dprintk("%s oom\n", __func__);
				wdata->pnfs_error = -ENOMEM;
				goto out;
			}

			/* PageDirty: Other will write this out
			 * PageWriteback: Other is writing this out
			 * PageUptodate: It was read before
			 * sector_initialized: already written out
			 */
			if (PageDirty(page) || PageWriteback(page) ||
			    bl_is_sector_init(be->be_inval, isect)) {
				print_page(page);
				unlock_page(page);
				page_cache_release(page);
				goto next_page;
			}
			if (!PageUptodate(page)) {
				/* New page: read it in or zero it */
				init_page_for_write(page, cow_read);
			}
			set_page_writeback(page);
			unlock_page(page);

			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS,
						       NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				wdata->pnfs_error = ret;
				goto out;
			}
			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				wdata->pnfs_error = PTR_ERR(bio);
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
					     isect, NULL);
			if (!be || !is_writable(be, isect)) {
				wdata->pnfs_error = -EINVAL;
				goto out;
			}
			extent_length = be->be_length -
			    (isect - be->be_f_offset);
		}
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS,
						       NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				wdata->pnfs_error = ret;
				goto out;
			}
		}
		bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par);
		if (IS_ERR(bio)) {
			wdata->pnfs_error = PTR_ERR(bio);
			goto out;
		}
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
	if (count < wdata->res.count) {
		wdata->res.count = count;
	}
out:
	bl_put_extent(be);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}
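
/*
 * How the INVALID-extent zeroing above picks its page counts, worked
 * through with assumed numbers (4K pages and a 64K server block, so
 * npg_per_block = 16).  do_div(temp, npg_per_block) leaves the
 * quotient in temp and returns the remainder.  First page: a write
 * starting at page index 35 gives npg_zero = 35 % 16 = 3, the three
 * pages from the start of the block that must be zeroed first.  Last
 * page: last_isect points just past the write, so npg_zero is 16 minus
 * that page's index within its block; npg_zero == npg_per_block means
 * the write ended exactly on a block boundary and nothing is zeroed.
 */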

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout-wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev;
		spin_lock(&mid->bm_lock);
		while (!list_empty(&mid->bm_devlist)) {
			dev = list_first_entry(&mid->bm_devlist,
					       struct pnfs_block_dev,
					       bm_node);
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		spin_unlock(&mid->bm_lock);
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv = NULL;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return NULL;
	}

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return NULL;
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i])
			goto out_free;
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free;

	rv = nfs4_blk_decode_device(server, dev);
 out_free:
	for (i = 0; i < max_pages; i++) {
		/* pages[] may be only partially populated if an
		 * alloc_page() above failed; don't free NULL entries.
		 */
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	kfree(dev);
	return rv;
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status = 0, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (!bdev) {
				status = -ENODEV;
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

 out_return:
	kfree(dlist);
	return status;

 out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = pnfs_generic_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = pnfs_generic_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= bl_pipe_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};

static int __init nfs4blocklayout_init(void)
{
	struct vfsmount *mnt;
	struct path path;
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	init_waitqueue_head(&bl_wq);

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		ret = PTR_ERR(mnt);
		goto out_remove;
	}

	ret = vfs_path_lookup(mnt->mnt_root,
			      mnt,
			      NFS_PIPE_DIRNAME, 0, &path);
	if (ret)
		goto out_remove;

	bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
				    &bl_upcall_ops, 0);
	if (IS_ERR(bl_device_pipe)) {
		ret = PTR_ERR(bl_device_pipe);
		goto out_remove;
	}
out:
	return ret;

out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	pnfs_unregister_layoutdriver(&blocklayout_type);
	rpc_unlink(bl_device_pipe);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);