/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "../pnfs.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the extent (be) covering isect, determine whether the page's
 * data needs to be initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the extent (be) covering isect, determine whether the page's
 * data can be written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data, int num_se);
	void *data;
	int bse_count;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
		rv->bse_count = 0;
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data, p->bse_count);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}
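
/*
 * A sketch of the parallel_io lifecycle as the helpers above combine
 * (not a literal trace, just the reference-counting pattern):
 *
 *	par = alloc_parallel(data);		kref == 1
 *	par->pnfs_callback = ...;
 *	... each bl_submit_bio() takes a reference for its bio,
 *	    and each bio completion does put_parallel(par) ...
 *	put_parallel(par);			drop the initial reference
 *
 * Whichever put_parallel() drops the last reference runs
 * destroy_parallel(), which fires pnfs_callback exactly once, after
 * every bio has completed.
 */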

static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write",
			bio->bi_size, (unsigned long long)bio->bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}
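
/*
 * Note: bl_submit_bio() deliberately returns NULL so callers can
 * submit and reset their bio pointer in a single step:
 *
 *	bio = bl_submit_bio(rw, bio);
 */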

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
		bio->bi_bdev = be->be_mdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}
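
/*
 * Illustrative example of the bi_sector mapping above (made-up
 * numbers, not from any real layout): an extent with
 * be_f_offset = 1024 and be_v_offset = 8192 maps file sector
 * isect = 1040 to volume sector 1040 - 1024 + 8192 = 8208 on be_mdev.
 */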

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}
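
/*
 * In the helper above, bio_add_page() can refuse a full page when the
 * bio is already full or the block queue's limits would be exceeded;
 * in that case the partially built bio is submitted and the loop
 * retries with a freshly allocated one.
 */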

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate)
			SetPageUptodate(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!rdata->pnfs_error)
			rdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(rdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}
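
/*
 * Note on the completion above: the bio_vec array is walked back to
 * front, prefetching the next entry's page flags before the current
 * page is touched, and pages are marked up to date only if the whole
 * bio completed successfully.  On failure the layout segment is marked
 * failed, so subsequent I/O falls back to going through the MDS.
 */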

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);
	pnfs_ld_read_done(rdata);
}
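
/*
 * bl_read_cleanup() exists because bi_end_io callbacks run in bio
 * completion context (typically softirq); deferring to a workqueue,
 * by reusing the rpc_task's u.tk_work, lets pnfs_ld_read_done() run
 * in process context.
 */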

static void
bl_end_par_io_read(void *data, int unused)
{
	struct nfs_read_data *rdata = data;

	rdata->task.tk_status = rdata->pnfs_error;
	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}

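/*
 * Read path overview: walk the request's pages in file-sector order,
 * looking up the extent covering each page.  Holes (and uninitialized
 * INVALID extents with no copy-on-write source) are satisfied by
 * zeroing the page in memory without touching the device; everything
 * else is batched into bios against the underlying volume.  All bios
 * share one parallel_io, so bl_end_par_io_read() runs only once the
 * last bio has completed.
 */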
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
	       rdata->npages, f_offset, (unsigned int)rdata->args.count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
					     isect, &cow_read);
			if (!be) {
				rdata->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}
		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par);
			if (IS_ERR(bio)) {
				rdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += PAGE_CACHE_SECTORS;
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = rdata->inode->i_size - f_offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

 use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;
	struct pnfs_block_short_extent *se;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;
		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			se = bl_pop_one_short_extent(be->be_inval);
			BUG_ON(!se);
			bl_mark_for_commit(be, isect, len, se);
		}
		isect += len;
		bl_put_extent(be);
	}
}
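
/*
 * Worked example of the rounding above (illustrative numbers, assuming
 * 4096-byte pages and 512-byte sectors): offset = 5000, count = 3000
 * gives isect = (5000 & PAGE_CACHE_MASK) >> 9 = 8 and end = 16, so
 * sectors 8..15 are marked for commit: the whole page that bytes
 * 5000..7999 touched.
 */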

static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		/* This is the zeroing page we added */
		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);

	if (unlikely(!uptodate)) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}
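
/*
 * The two completions above differ only in page ownership:
 * bl_end_io_write_zero() handles the zeroing pages the driver itself
 * added (so it must end writeback and drop the page reference), while
 * bl_end_io_write() handles the caller's own pages and leaves their
 * state to the generic NFS write path.
 */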

/* Function scheduled on the workqueue by bl_end_par_io_write();
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_write_data *wdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_write_data, task);
	if (likely(!wdata->pnfs_error)) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when the last of the bios associated with a bl_write_pagelist
 * call finishes.
 */
static void bl_end_par_io_write(void *data, int num_se)
{
	struct nfs_write_data *wdata = data;

	if (unlikely(wdata->pnfs_error)) {
		bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
					num_se);
	}

	wdata->task.tk_status = wdata->pnfs_error;
	wdata->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}
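
/*
 * The result is reported as NFS_FILE_SYNC because the bios were issued
 * directly to the storage device, so the data is already stable and no
 * separate COMMIT is needed; the server learns about the newly written
 * extents later via LAYOUTCOMMIT (see mark_extents_written above).
 */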

/* FIXME STUB - mark intersection of layout and page as bad, so it is
 * not used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the
 * LVM block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
	    (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}
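
/*
 * Illustrative example for map_block() (made-up numbers): with
 * 4096-byte device blocks (i_blkbits = 12) and 512-byte sectors, the
 * shift is 12 - 9 = 3, so volume sector 8208 maps to
 * b_blocknr 8208 >> 3 = 1026.
 */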

/* Given an unmapped page, zero it (or read it in for CoW); the page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	bl_put_extent(cow_read);
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

/* Find or create a zeroing page and mark it as under writeback.
 * Return ERR_PTR on error, NULL to indicate the page should be skipped,
 * or the page itself to indicate it should be written out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
			struct pnfs_block_extent *cow_read)
{
	struct page *page;
	int locked = 0;
	page = find_get_page(inode->i_mapping, index);
	if (page)
		goto check_page;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (unlikely(!page)) {
		dprintk("%s oom\n", __func__);
		return ERR_PTR(-ENOMEM);
	}
	locked = 1;

check_page:
	/* PageDirty: Someone else will write this out
	 * PageWriteback: Someone else is writing this out
	 * PageUptodate: It was read before
	 */
	if (PageDirty(page) || PageWriteback(page)) {
		print_page(page);
		if (locked)
			unlock_page(page);
		page_cache_release(page);
		return NULL;
	}

	if (!locked) {
		lock_page(page);
		locked = 1;
		goto check_page;
	}
	if (!PageUptodate(page)) {
		/* New page: read it in or zero it */
		init_page_for_write(page, cow_read);
	}
	set_page_writeback(page);
	unlock_page(page);

	return page;
}

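/*
 * Write path overview: writes into PNFS_BLOCK_INVALID_DATA extents must
 * initialize whole filesystem blocks, so the pages of the first (and,
 * via the "last" flag, the final) block that the request itself does
 * not cover are found or created and zeroed out under fill_invalid_ext,
 * with each initialization reserving a short extent for the eventual
 * LAYOUTCOMMIT.  The request's own pages are written by the middle
 * loop.  Any error sets pnfs_error so the write is redone through the
 * MDS.
 */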
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
	/* At this point, wdata->args.pages is a (sequential) list of
	 * nfs_pages.  We want to write each, and if there is an error
	 * set pnfs_error so the request is redone through normal NFS.
	 */
	par = alloc_parallel(wdata);
	if (!par)
		goto out_mds;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		goto out_mds;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (likely(!bl_push_one_short_extent(be->be_inval)))
			par->bse_count++;
		else
			goto out_mds;
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (; npg_zero > 0; npg_zero--) {
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page = bl_find_get_zeroing_page(wdata->inode, index,
							cow_read);
			if (unlikely(IS_ERR(page))) {
				wdata->pnfs_error = PTR_ERR(page);
				goto out;
			} else if (page == NULL)
				goto next_page;

			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				wdata->pnfs_error = ret;
				goto out;
			}
			if (likely(!bl_push_one_short_extent(be->be_inval)))
				par->bse_count++;
			else {
				end_page_writeback(page);
				page_cache_release(page);
				wdata->pnfs_error = -ENOMEM;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);

			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				wdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
					     isect, NULL);
			if (!be || !is_writable(be, isect)) {
				wdata->pnfs_error = -EINVAL;
				goto out;
			}
			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
				if (likely(!bl_push_one_short_extent(
								be->be_inval)))
					par->bse_count++;
				else {
					wdata->pnfs_error = -ENOMEM;
					goto out;
				}
			}
			extent_length = be->be_length -
			    (isect - be->be_f_offset);
		}
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				wdata->pnfs_error = ret;
				goto out;
			}
		}
		bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par);
		if (IS_ERR(bio)) {
			wdata->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
	if (count < wdata->res.count) {
		wdata->res.count = count;
	}
out:
	bl_put_extent(be);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
out_mds:
	bl_put_extent(be);
	kfree(par);
	return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;
	struct pnfs_block_short_extent *se, *stemp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}

	list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
		list_del(&se->bse_node);
		kfree(se);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev, *tmp;

		/* No need to take bm_lock as we are last user freeing bm_devlist */
		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
 out_free:
	/* i is the number of pages successfully allocated above, so
	 * this also handles a partial failure of the allocation loop
	 * without calling __free_page() on a NULL entry.
	 */
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
	kfree(dev);
	return rv;
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

 out_return:
	kfree(dlist);
	return status;

 out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}
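
/*
 * Mount-time device discovery, as implemented above: GETDEVICELIST is
 * issued repeatedly until the server sets eof, and every returned
 * device id is resolved through GETDEVICEINFO and decoded into a local
 * block device that is added to the per-mount bm_devlist.
 */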

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = pnfs_generic_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = pnfs_generic_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
					    struct rpc_pipe *pipe)
{
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
	if (dir == NULL)
		return ERR_PTR(-ENOENT);
	dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
	dput(dir);
	return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
					  struct rpc_pipe *pipe)
{
	if (pipe->dentry)
		rpc_unlink(pipe->dentry);
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct super_block *sb = ptr;
	struct net *net = sb->s_fs_info;
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return 0;

	if (nn->bl_device_pipe == NULL) {
		module_put(THIS_MODULE);
		return 0;
	}

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			break;
		}
		nn->bl_device_pipe->dentry = dentry;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (nn->bl_device_pipe->dentry)
			nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}
	module_put(THIS_MODULE);
	return ret;
}
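
/*
 * Note on the notifier above: try_module_get() keeps the module from
 * being unloaded while the callback runs, and a NULL bl_device_pipe
 * simply means this network namespace has no pipe to (un)register.
 */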

static struct notifier_block nfs4blocklayout_block = {
	.notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
						   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;
	struct dentry *dentry;

	pipefs_sb = rpc_get_sb_net(net);
	if (!pipefs_sb)
		return NULL;
	dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
	rpc_put_sb_net(net);
	return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
					   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
		rpc_put_sb_net(net);
	}
}

static int nfs4blocklayout_net_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;

	init_waitqueue_head(&nn->bl_wq);
	nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
	if (IS_ERR(nn->bl_device_pipe))
		return PTR_ERR(nn->bl_device_pipe);
	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
	if (IS_ERR(dentry)) {
		rpc_destroy_pipe_data(nn->bl_device_pipe);
		return PTR_ERR(dentry);
	}
	nn->bl_device_pipe->dentry = dentry;
	return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);

	nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
	rpc_destroy_pipe_data(nn->bl_device_pipe);
	nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
	.init = nfs4blocklayout_net_init,
	.exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
	if (ret)
		goto out_remove;
	ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
	if (ret)
		goto out_notifier;
out:
	return ret;

out_notifier:
	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
	unregister_pernet_subsys(&nfs4blocklayout_net_ops);
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);