xref: /openbmc/linux/fs/nfs/blocklayout/blocklayout.c (revision 275876e2)
1 /*
2  *  linux/fs/nfs/blocklayout/blocklayout.c
3  *
4  *  Module for the NFSv4.1 pNFS block layout driver.
5  *
6  *  Copyright (c) 2006 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Andy Adamson <andros@citi.umich.edu>
10  *  Fred Isaman <iisaman@umich.edu>
11  *
12  * permission is granted to use, copy, create derivative works and
13  * redistribute this software and such derivative works for any purpose,
14  * so long as the name of the university of michigan is not used in
15  * any advertising or publicity pertaining to the use or distribution
16  * of this software without specific, written prior authorization.  if
17  * the above copyright notice or any other identification of the
18  * university of michigan is included in any copy of any portion of
19  * this software, then the disclaimer below must also be included.
20  *
21  * this software is provided as is, without representation from the
22  * university of michigan as to its fitness for any purpose, and without
23  * warranty by the university of michigan of any kind, either express
24  * or implied, including without limitation the implied warranties of
25  * merchantability and fitness for a particular purpose.  the regents
26  * of the university of michigan shall not be liable for any damages,
27  * including special, indirect, incidental, or consequential damages,
28  * with respect to any claim arising out or in connection with the use
29  * of the software, even if it has been or is hereafter advised of the
30  * possibility of such damages.
31  */
32 
33 #include <linux/module.h>
34 #include <linux/init.h>
35 #include <linux/mount.h>
36 #include <linux/namei.h>
37 #include <linux/bio.h>		/* struct bio */
38 #include <linux/buffer_head.h>	/* various write calls */
39 #include <linux/prefetch.h>
40 #include <linux/pagevec.h>
41 
42 #include "../pnfs.h"
43 #include "../nfs4session.h"
44 #include "../internal.h"
45 #include "blocklayout.h"
46 
47 #define NFSDBG_FACILITY	NFSDBG_PNFS_LD
48 
49 MODULE_LICENSE("GPL");
50 MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
51 MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
52 
53 static void print_page(struct page *page)
54 {
55 	dprintk("PRINTPAGE page %p\n", page);
56 	dprintk("	PagePrivate %d\n", PagePrivate(page));
57 	dprintk("	PageUptodate %d\n", PageUptodate(page));
58 	dprintk("	PageError %d\n", PageError(page));
59 	dprintk("	PageDirty %d\n", PageDirty(page));
60 	dprintk("	PageReferenced %d\n", PageReferenced(page));
61 	dprintk("	PageLocked %d\n", PageLocked(page));
62 	dprintk("	PageWriteback %d\n", PageWriteback(page));
63 	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
64 	dprintk("\n");
65 }
66 
67 /* Given the be associated with isect, determine if page data needs to be
68  * initialized.
69  */
70 static int is_hole(struct pnfs_block_extent *be, sector_t isect)
71 {
72 	if (be->be_state == PNFS_BLOCK_NONE_DATA)
73 		return 1;
74 	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
75 		return 0;
76 	else
77 		return !bl_is_sector_init(be->be_inval, isect);
78 }
79 
80 /* Given the be associated with isect, determine if page data can be
81  * written to disk.
82  */
83 static int is_writable(struct pnfs_block_extent *be, sector_t isect)
84 {
85 	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
86 		be->be_state == PNFS_BLOCK_INVALID_DATA);
87 }
88 
89 /* The data we are handed might be spread across several bios.  We need
90  * to track when the last one is finished.
91  */
92 struct parallel_io {
93 	struct kref refcnt;
94 	void (*pnfs_callback) (void *data, int num_se);
95 	void *data;
96 	int bse_count;
97 };
98 
99 static inline struct parallel_io *alloc_parallel(void *data)
100 {
101 	struct parallel_io *rv;
102 
103 	rv  = kmalloc(sizeof(*rv), GFP_NOFS);
104 	if (rv) {
105 		rv->data = data;
106 		kref_init(&rv->refcnt);
107 		rv->bse_count = 0;
108 	}
109 	return rv;
110 }
111 
112 static inline void get_parallel(struct parallel_io *p)
113 {
114 	kref_get(&p->refcnt);
115 }
116 
117 static void destroy_parallel(struct kref *kref)
118 {
119 	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);
120 
121 	dprintk("%s enter\n", __func__);
122 	p->pnfs_callback(p->data, p->bse_count);
123 	kfree(p);
124 }
125 
126 static inline void put_parallel(struct parallel_io *p)
127 {
128 	kref_put(&p->refcnt, destroy_parallel);
129 }
130 
131 static struct bio *
132 bl_submit_bio(int rw, struct bio *bio)
133 {
134 	if (bio) {
135 		get_parallel(bio->bi_private);
136 		dprintk("%s submitting %s bio %u@%llu\n", __func__,
137 			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
138 			(unsigned long long)bio->bi_iter.bi_sector);
139 		submit_bio(rw, bio);
140 	}
141 	return NULL;
142 }
143 
144 static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
145 				     struct pnfs_block_extent *be,
146 				     void (*end_io)(struct bio *, int err),
147 				     struct parallel_io *par)
148 {
149 	struct bio *bio;
150 
151 	npg = min(npg, BIO_MAX_PAGES);
152 	bio = bio_alloc(GFP_NOIO, npg);
153 	if (!bio && (current->flags & PF_MEMALLOC)) {
154 		while (!bio && (npg /= 2))
155 			bio = bio_alloc(GFP_NOIO, npg);
156 	}
157 
158 	if (bio) {
159 		bio->bi_iter.bi_sector = isect - be->be_f_offset +
160 			be->be_v_offset;
161 		bio->bi_bdev = be->be_mdev;
162 		bio->bi_end_io = end_io;
163 		bio->bi_private = par;
164 	}
165 	return bio;
166 }
167 
168 static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
169 				      sector_t isect, struct page *page,
170 				      struct pnfs_block_extent *be,
171 				      void (*end_io)(struct bio *, int err),
172 				      struct parallel_io *par,
173 				      unsigned int offset, int len)
174 {
175 	isect = isect + (offset >> SECTOR_SHIFT);
176 	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
177 		npg, rw, (unsigned long long)isect, offset, len);
178 retry:
179 	if (!bio) {
180 		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
181 		if (!bio)
182 			return ERR_PTR(-ENOMEM);
183 	}
184 	if (bio_add_page(bio, page, len, offset) < len) {
185 		bio = bl_submit_bio(rw, bio);
186 		goto retry;
187 	}
188 	return bio;
189 }
190 
191 static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
192 				      sector_t isect, struct page *page,
193 				      struct pnfs_block_extent *be,
194 				      void (*end_io)(struct bio *, int err),
195 				      struct parallel_io *par)
196 {
197 	return do_add_page_to_bio(bio, npg, rw, isect, page, be,
198 				  end_io, par, 0, PAGE_CACHE_SIZE);
199 }
200 
201 /* This is basically copied from mpage_end_io_read */
202 static void bl_end_io_read(struct bio *bio, int err)
203 {
204 	struct parallel_io *par = bio->bi_private;
205 	struct bio_vec *bvec;
206 	int i;
207 
208 	if (!err)
209 		bio_for_each_segment_all(bvec, bio, i)
210 			SetPageUptodate(bvec->bv_page);
211 
212 	if (err) {
213 		struct nfs_pgio_header *header = par->data;
214 
215 		if (!header->pnfs_error)
216 			header->pnfs_error = -EIO;
217 		pnfs_set_lo_fail(header->lseg);
218 	}
219 	bio_put(bio);
220 	put_parallel(par);
221 }
222 
223 static void bl_read_cleanup(struct work_struct *work)
224 {
225 	struct rpc_task *task;
226 	struct nfs_pgio_header *hdr;
227 	dprintk("%s enter\n", __func__);
228 	task = container_of(work, struct rpc_task, u.tk_work);
229 	hdr = container_of(task, struct nfs_pgio_header, task);
230 	pnfs_ld_read_done(hdr);
231 }
232 
233 static void
234 bl_end_par_io_read(void *data, int unused)
235 {
236 	struct nfs_pgio_header *hdr = data;
237 
238 	hdr->task.tk_status = hdr->pnfs_error;
239 	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
240 	schedule_work(&hdr->task.u.tk_work);
241 }
242 
243 static enum pnfs_try_status
244 bl_read_pagelist(struct nfs_pgio_header *hdr)
245 {
246 	struct nfs_pgio_header *header = hdr;
247 	int i, hole;
248 	struct bio *bio = NULL;
249 	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
250 	sector_t isect, extent_length = 0;
251 	struct parallel_io *par;
252 	loff_t f_offset = hdr->args.offset;
253 	size_t bytes_left = hdr->args.count;
254 	unsigned int pg_offset, pg_len;
255 	struct page **pages = hdr->args.pages;
256 	int pg_index = hdr->args.pgbase >> PAGE_CACHE_SHIFT;
257 	const bool is_dio = (header->dreq != NULL);
258 
259 	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
260 		hdr->page_array.npages, f_offset,
261 		(unsigned int)hdr->args.count);
262 
263 	par = alloc_parallel(hdr);
264 	if (!par)
265 		goto use_mds;
266 	par->pnfs_callback = bl_end_par_io_read;
267 	/* At this point, we can no longer jump to use_mds */
268 
269 	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
270 	/* Code assumes extents are page-aligned */
271 	for (i = pg_index; i < hdr->page_array.npages; i++) {
272 		if (!extent_length) {
273 			/* We've used up the previous extent */
274 			bl_put_extent(be);
275 			bl_put_extent(cow_read);
276 			bio = bl_submit_bio(READ, bio);
277 			/* Get the next one */
278 			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
279 					     isect, &cow_read);
280 			if (!be) {
281 				header->pnfs_error = -EIO;
282 				goto out;
283 			}
284 			extent_length = be->be_length -
285 				(isect - be->be_f_offset);
286 			if (cow_read) {
287 				sector_t cow_length = cow_read->be_length -
288 					(isect - cow_read->be_f_offset);
289 				extent_length = min(extent_length, cow_length);
290 			}
291 		}
292 
293 		if (is_dio) {
294 			pg_offset = f_offset & ~PAGE_CACHE_MASK;
295 			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
296 				pg_len = PAGE_CACHE_SIZE - pg_offset;
297 			else
298 				pg_len = bytes_left;
299 
300 			f_offset += pg_len;
301 			bytes_left -= pg_len;
302 			isect += (pg_offset >> SECTOR_SHIFT);
303 		} else {
304 			pg_offset = 0;
305 			pg_len = PAGE_CACHE_SIZE;
306 		}
307 
308 		hole = is_hole(be, isect);
309 		if (hole && !cow_read) {
310 			bio = bl_submit_bio(READ, bio);
311 			/* Fill hole w/ zeroes w/o accessing device */
312 			dprintk("%s Zeroing page for hole\n", __func__);
313 			zero_user_segment(pages[i], pg_offset, pg_len);
314 			print_page(pages[i]);
315 			SetPageUptodate(pages[i]);
316 		} else {
317 			struct pnfs_block_extent *be_read;
318 
319 			be_read = (hole && cow_read) ? cow_read : be;
320 			bio = do_add_page_to_bio(bio,
321 						 hdr->page_array.npages - i,
322 						 READ,
323 						 isect, pages[i], be_read,
324 						 bl_end_io_read, par,
325 						 pg_offset, pg_len);
326 			if (IS_ERR(bio)) {
327 				header->pnfs_error = PTR_ERR(bio);
328 				bio = NULL;
329 				goto out;
330 			}
331 		}
332 		isect += (pg_len >> SECTOR_SHIFT);
333 		extent_length -= PAGE_CACHE_SECTORS;
334 	}
335 	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
336 		hdr->res.eof = 1;
337 		hdr->res.count = header->inode->i_size - hdr->args.offset;
338 	} else {
339 		hdr->res.count = (isect << SECTOR_SHIFT) - hdr->args.offset;
340 	}
341 out:
342 	bl_put_extent(be);
343 	bl_put_extent(cow_read);
344 	bl_submit_bio(READ, bio);
345 	put_parallel(par);
346 	return PNFS_ATTEMPTED;
347 
348  use_mds:
349 	dprintk("Giving up and using normal NFS\n");
350 	return PNFS_NOT_ATTEMPTED;
351 }
352 
353 static void mark_extents_written(struct pnfs_block_layout *bl,
354 				 __u64 offset, __u32 count)
355 {
356 	sector_t isect, end;
357 	struct pnfs_block_extent *be;
358 	struct pnfs_block_short_extent *se;
359 
360 	dprintk("%s(%llu, %u)\n", __func__, offset, count);
361 	if (count == 0)
362 		return;
363 	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
364 	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
365 	end >>= SECTOR_SHIFT;
366 	while (isect < end) {
367 		sector_t len;
368 		be = bl_find_get_extent(bl, isect, NULL);
369 		BUG_ON(!be); /* FIXME */
370 		len = min(end, be->be_f_offset + be->be_length) - isect;
371 		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
372 			se = bl_pop_one_short_extent(be->be_inval);
373 			BUG_ON(!se);
374 			bl_mark_for_commit(be, isect, len, se);
375 		}
376 		isect += len;
377 		bl_put_extent(be);
378 	}
379 }
380 
381 static void bl_end_io_write_zero(struct bio *bio, int err)
382 {
383 	struct parallel_io *par = bio->bi_private;
384 	struct bio_vec *bvec;
385 	int i;
386 
387 	bio_for_each_segment_all(bvec, bio, i) {
388 		/* This is the zeroing page we added */
389 		end_page_writeback(bvec->bv_page);
390 		page_cache_release(bvec->bv_page);
391 	}
392 
393 	if (unlikely(err)) {
394 		struct nfs_pgio_header *header = par->data;
395 
396 		if (!header->pnfs_error)
397 			header->pnfs_error = -EIO;
398 		pnfs_set_lo_fail(header->lseg);
399 	}
400 	bio_put(bio);
401 	put_parallel(par);
402 }
403 
404 static void bl_end_io_write(struct bio *bio, int err)
405 {
406 	struct parallel_io *par = bio->bi_private;
407 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
408 	struct nfs_pgio_header *header = par->data;
409 
410 	if (!uptodate) {
411 		if (!header->pnfs_error)
412 			header->pnfs_error = -EIO;
413 		pnfs_set_lo_fail(header->lseg);
414 	}
415 	bio_put(bio);
416 	put_parallel(par);
417 }
418 
419 /* Function scheduled for call during bl_end_par_io_write,
420  * it marks sectors as written and extends the commitlist.
421  */
422 static void bl_write_cleanup(struct work_struct *work)
423 {
424 	struct rpc_task *task;
425 	struct nfs_pgio_header *hdr;
426 	dprintk("%s enter\n", __func__);
427 	task = container_of(work, struct rpc_task, u.tk_work);
428 	hdr = container_of(task, struct nfs_pgio_header, task);
429 	if (likely(!hdr->pnfs_error)) {
430 		/* Marks for LAYOUTCOMMIT */
431 		mark_extents_written(BLK_LSEG2EXT(hdr->lseg),
432 				     hdr->args.offset, hdr->args.count);
433 	}
434 	pnfs_ld_write_done(hdr);
435 }
436 
437 /* Called when last of bios associated with a bl_write_pagelist call finishes */
438 static void bl_end_par_io_write(void *data, int num_se)
439 {
440 	struct nfs_pgio_header *hdr = data;
441 
442 	if (unlikely(hdr->pnfs_error)) {
443 		bl_free_short_extents(&BLK_LSEG2EXT(hdr->lseg)->bl_inval,
444 					num_se);
445 	}
446 
447 	hdr->task.tk_status = hdr->pnfs_error;
448 	hdr->verf.committed = NFS_FILE_SYNC;
449 	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
450 	schedule_work(&hdr->task.u.tk_work);
451 }
452 
453 /* FIXME STUB - mark intersection of layout and page as bad, so is not
454  * used again.
455  */
456 static void mark_bad_read(void)
457 {
458 	return;
459 }
460 
461 /*
462  * map_block:  map a requested I/0 block (isect) into an offset in the LVM
463  * block_device
464  */
465 static void
466 map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
467 {
468 	dprintk("%s enter be=%p\n", __func__, be);
469 
470 	set_buffer_mapped(bh);
471 	bh->b_bdev = be->be_mdev;
472 	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
473 	    (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);
474 
475 	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
476 		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
477 		bh->b_size);
478 	return;
479 }
480 
481 static void
482 bl_read_single_end_io(struct bio *bio, int error)
483 {
484 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
485 	struct page *page = bvec->bv_page;
486 
487 	/* Only one page in bvec */
488 	unlock_page(page);
489 }
490 
491 static int
492 bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
493 		    unsigned int offset, unsigned int len)
494 {
495 	struct bio *bio;
496 	struct page *shadow_page;
497 	sector_t isect;
498 	char *kaddr, *kshadow_addr;
499 	int ret = 0;
500 
501 	dprintk("%s: offset %u len %u\n", __func__, offset, len);
502 
503 	shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
504 	if (shadow_page == NULL)
505 		return -ENOMEM;
506 
507 	bio = bio_alloc(GFP_NOIO, 1);
508 	if (bio == NULL)
509 		return -ENOMEM;
510 
511 	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
512 		(offset / SECTOR_SIZE);
513 
514 	bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
515 	bio->bi_bdev = be->be_mdev;
516 	bio->bi_end_io = bl_read_single_end_io;
517 
518 	lock_page(shadow_page);
519 	if (bio_add_page(bio, shadow_page,
520 			 SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
521 		unlock_page(shadow_page);
522 		bio_put(bio);
523 		return -EIO;
524 	}
525 
526 	submit_bio(READ, bio);
527 	wait_on_page_locked(shadow_page);
528 	if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
529 		ret = -EIO;
530 	} else {
531 		kaddr = kmap_atomic(page);
532 		kshadow_addr = kmap_atomic(shadow_page);
533 		memcpy(kaddr + offset, kshadow_addr + offset, len);
534 		kunmap_atomic(kshadow_addr);
535 		kunmap_atomic(kaddr);
536 	}
537 	__free_page(shadow_page);
538 	bio_put(bio);
539 
540 	return ret;
541 }
542 
543 static int
544 bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
545 			  unsigned int dirty_offset, unsigned int dirty_len,
546 			  bool full_page)
547 {
548 	int ret = 0;
549 	unsigned int start, end;
550 
551 	if (full_page) {
552 		start = 0;
553 		end = PAGE_CACHE_SIZE;
554 	} else {
555 		start = round_down(dirty_offset, SECTOR_SIZE);
556 		end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
557 	}
558 
559 	dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
560 	if (!be) {
561 		zero_user_segments(page, start, dirty_offset,
562 				   dirty_offset + dirty_len, end);
563 		if (start == 0 && end == PAGE_CACHE_SIZE &&
564 		    trylock_page(page)) {
565 			SetPageUptodate(page);
566 			unlock_page(page);
567 		}
568 		return ret;
569 	}
570 
571 	if (start != dirty_offset)
572 		ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
573 
574 	if (!ret && (dirty_offset + dirty_len < end))
575 		ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
576 					  end - dirty_offset - dirty_len);
577 
578 	return ret;
579 }
580 
581 /* Given an unmapped page, zero it or read in page for COW, page is locked
582  * by caller.
583  */
584 static int
585 init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
586 {
587 	struct buffer_head *bh = NULL;
588 	int ret = 0;
589 	sector_t isect;
590 
591 	dprintk("%s enter, %p\n", __func__, page);
592 	BUG_ON(PageUptodate(page));
593 	if (!cow_read) {
594 		zero_user_segment(page, 0, PAGE_SIZE);
595 		SetPageUptodate(page);
596 		goto cleanup;
597 	}
598 
599 	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
600 	if (!bh) {
601 		ret = -ENOMEM;
602 		goto cleanup;
603 	}
604 
605 	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
606 	map_block(bh, isect, cow_read);
607 	if (!bh_uptodate_or_lock(bh))
608 		ret = bh_submit_read(bh);
609 	if (ret)
610 		goto cleanup;
611 	SetPageUptodate(page);
612 
613 cleanup:
614 	if (bh)
615 		free_buffer_head(bh);
616 	if (ret) {
617 		/* Need to mark layout with bad read...should now
618 		 * just use nfs4 for reads and writes.
619 		 */
620 		mark_bad_read();
621 	}
622 	return ret;
623 }
624 
625 /* Find or create a zeroing page marked being writeback.
626  * Return ERR_PTR on error, NULL to indicate skip this page and page itself
627  * to indicate write out.
628  */
629 static struct page *
630 bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
631 			struct pnfs_block_extent *cow_read)
632 {
633 	struct page *page;
634 	int locked = 0;
635 	page = find_get_page(inode->i_mapping, index);
636 	if (page)
637 		goto check_page;
638 
639 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
640 	if (unlikely(!page)) {
641 		dprintk("%s oom\n", __func__);
642 		return ERR_PTR(-ENOMEM);
643 	}
644 	locked = 1;
645 
646 check_page:
647 	/* PageDirty: Other will write this out
648 	 * PageWriteback: Other is writing this out
649 	 * PageUptodate: It was read before
650 	 */
651 	if (PageDirty(page) || PageWriteback(page)) {
652 		print_page(page);
653 		if (locked)
654 			unlock_page(page);
655 		page_cache_release(page);
656 		return NULL;
657 	}
658 
659 	if (!locked) {
660 		lock_page(page);
661 		locked = 1;
662 		goto check_page;
663 	}
664 	if (!PageUptodate(page)) {
665 		/* New page, readin or zero it */
666 		init_page_for_write(page, cow_read);
667 	}
668 	set_page_writeback(page);
669 	unlock_page(page);
670 
671 	return page;
672 }
673 
674 static enum pnfs_try_status
675 bl_write_pagelist(struct nfs_pgio_header *header, int sync)
676 {
677 	int i, ret, npg_zero, pg_index, last = 0;
678 	struct bio *bio = NULL;
679 	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
680 	sector_t isect, last_isect = 0, extent_length = 0;
681 	struct parallel_io *par = NULL;
682 	loff_t offset = header->args.offset;
683 	size_t count = header->args.count;
684 	unsigned int pg_offset, pg_len, saved_len;
685 	struct page **pages = header->args.pages;
686 	struct page *page;
687 	pgoff_t index;
688 	u64 temp;
689 	int npg_per_block =
690 	    NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
691 
692 	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
693 
694 	if (header->dreq != NULL &&
695 	    (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) ||
696 	     !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) {
697 		dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
698 		goto out_mds;
699 	}
700 	/* At this point, header->page_aray is a (sequential) list of nfs_pages.
701 	 * We want to write each, and if there is an error set pnfs_error
702 	 * to have it redone using nfs.
703 	 */
704 	par = alloc_parallel(header);
705 	if (!par)
706 		goto out_mds;
707 	par->pnfs_callback = bl_end_par_io_write;
708 	/* At this point, have to be more careful with error handling */
709 
710 	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
711 	be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
712 	if (!be || !is_writable(be, isect)) {
713 		dprintk("%s no matching extents!\n", __func__);
714 		goto out_mds;
715 	}
716 
717 	/* First page inside INVALID extent */
718 	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
719 		if (likely(!bl_push_one_short_extent(be->be_inval)))
720 			par->bse_count++;
721 		else
722 			goto out_mds;
723 		temp = offset >> PAGE_CACHE_SHIFT;
724 		npg_zero = do_div(temp, npg_per_block);
725 		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
726 				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
727 		extent_length = be->be_length - (isect - be->be_f_offset);
728 
729 fill_invalid_ext:
730 		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
731 		for (;npg_zero > 0; npg_zero--) {
732 			if (bl_is_sector_init(be->be_inval, isect)) {
733 				dprintk("isect %llu already init\n",
734 					(unsigned long long)isect);
735 				goto next_page;
736 			}
737 			/* page ref released in bl_end_io_write_zero */
738 			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
739 			dprintk("%s zero %dth page: index %lu isect %llu\n",
740 				__func__, npg_zero, index,
741 				(unsigned long long)isect);
742 			page = bl_find_get_zeroing_page(header->inode, index,
743 							cow_read);
744 			if (unlikely(IS_ERR(page))) {
745 				header->pnfs_error = PTR_ERR(page);
746 				goto out;
747 			} else if (page == NULL)
748 				goto next_page;
749 
750 			ret = bl_mark_sectors_init(be->be_inval, isect,
751 						       PAGE_CACHE_SECTORS);
752 			if (unlikely(ret)) {
753 				dprintk("%s bl_mark_sectors_init fail %d\n",
754 					__func__, ret);
755 				end_page_writeback(page);
756 				page_cache_release(page);
757 				header->pnfs_error = ret;
758 				goto out;
759 			}
760 			if (likely(!bl_push_one_short_extent(be->be_inval)))
761 				par->bse_count++;
762 			else {
763 				end_page_writeback(page);
764 				page_cache_release(page);
765 				header->pnfs_error = -ENOMEM;
766 				goto out;
767 			}
768 			/* FIXME: This should be done in bi_end_io */
769 			mark_extents_written(BLK_LSEG2EXT(header->lseg),
770 					     page->index << PAGE_CACHE_SHIFT,
771 					     PAGE_CACHE_SIZE);
772 
773 			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
774 						 isect, page, be,
775 						 bl_end_io_write_zero, par);
776 			if (IS_ERR(bio)) {
777 				header->pnfs_error = PTR_ERR(bio);
778 				bio = NULL;
779 				goto out;
780 			}
781 next_page:
782 			isect += PAGE_CACHE_SECTORS;
783 			extent_length -= PAGE_CACHE_SECTORS;
784 		}
785 		if (last)
786 			goto write_done;
787 	}
788 	bio = bl_submit_bio(WRITE, bio);
789 
790 	/* Middle pages */
791 	pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
792 	for (i = pg_index; i < header->page_array.npages; i++) {
793 		if (!extent_length) {
794 			/* We've used up the previous extent */
795 			bl_put_extent(be);
796 			bl_put_extent(cow_read);
797 			bio = bl_submit_bio(WRITE, bio);
798 			/* Get the next one */
799 			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
800 					     isect, &cow_read);
801 			if (!be || !is_writable(be, isect)) {
802 				header->pnfs_error = -EINVAL;
803 				goto out;
804 			}
805 			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
806 				if (likely(!bl_push_one_short_extent(
807 								be->be_inval)))
808 					par->bse_count++;
809 				else {
810 					header->pnfs_error = -ENOMEM;
811 					goto out;
812 				}
813 			}
814 			extent_length = be->be_length -
815 			    (isect - be->be_f_offset);
816 		}
817 
818 		dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
819 		pg_offset = offset & ~PAGE_CACHE_MASK;
820 		if (pg_offset + count > PAGE_CACHE_SIZE)
821 			pg_len = PAGE_CACHE_SIZE - pg_offset;
822 		else
823 			pg_len = count;
824 
825 		saved_len = pg_len;
826 		if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
827 		    !bl_is_sector_init(be->be_inval, isect)) {
828 			ret = bl_read_partial_page_sync(pages[i], cow_read,
829 							pg_offset, pg_len, true);
830 			if (ret) {
831 				dprintk("%s bl_read_partial_page_sync fail %d\n",
832 					__func__, ret);
833 				header->pnfs_error = ret;
834 				goto out;
835 			}
836 
837 			ret = bl_mark_sectors_init(be->be_inval, isect,
838 						       PAGE_CACHE_SECTORS);
839 			if (unlikely(ret)) {
840 				dprintk("%s bl_mark_sectors_init fail %d\n",
841 					__func__, ret);
842 				header->pnfs_error = ret;
843 				goto out;
844 			}
845 
846 			/* Expand to full page write */
847 			pg_offset = 0;
848 			pg_len = PAGE_CACHE_SIZE;
849 		} else if  ((pg_offset & (SECTOR_SIZE - 1)) ||
850 			    (pg_len & (SECTOR_SIZE - 1))){
851 			/* ahh, nasty case. We have to do sync full sector
852 			 * read-modify-write cycles.
853 			 */
854 			unsigned int saved_offset = pg_offset;
855 			ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
856 							pg_len, false);
857 			pg_offset = round_down(pg_offset, SECTOR_SIZE);
858 			pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
859 				 - pg_offset;
860 		}
861 
862 
863 		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
864 					 WRITE,
865 					 isect, pages[i], be,
866 					 bl_end_io_write, par,
867 					 pg_offset, pg_len);
868 		if (IS_ERR(bio)) {
869 			header->pnfs_error = PTR_ERR(bio);
870 			bio = NULL;
871 			goto out;
872 		}
873 		offset += saved_len;
874 		count -= saved_len;
875 		isect += PAGE_CACHE_SECTORS;
876 		last_isect = isect;
877 		extent_length -= PAGE_CACHE_SECTORS;
878 	}
879 
880 	/* Last page inside INVALID extent */
881 	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
882 		bio = bl_submit_bio(WRITE, bio);
883 		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
884 		npg_zero = npg_per_block - do_div(temp, npg_per_block);
885 		if (npg_zero < npg_per_block) {
886 			last = 1;
887 			goto fill_invalid_ext;
888 		}
889 	}
890 
891 write_done:
892 	header->res.count = header->args.count;
893 out:
894 	bl_put_extent(be);
895 	bl_put_extent(cow_read);
896 	bl_submit_bio(WRITE, bio);
897 	put_parallel(par);
898 	return PNFS_ATTEMPTED;
899 out_mds:
900 	bl_put_extent(be);
901 	bl_put_extent(cow_read);
902 	kfree(par);
903 	return PNFS_NOT_ATTEMPTED;
904 }
905 
906 /* FIXME - range ignored */
907 static void
908 release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
909 {
910 	int i;
911 	struct pnfs_block_extent *be;
912 
913 	spin_lock(&bl->bl_ext_lock);
914 	for (i = 0; i < EXTENT_LISTS; i++) {
915 		while (!list_empty(&bl->bl_extents[i])) {
916 			be = list_first_entry(&bl->bl_extents[i],
917 					      struct pnfs_block_extent,
918 					      be_node);
919 			list_del(&be->be_node);
920 			bl_put_extent(be);
921 		}
922 	}
923 	spin_unlock(&bl->bl_ext_lock);
924 }
925 
926 static void
927 release_inval_marks(struct pnfs_inval_markings *marks)
928 {
929 	struct pnfs_inval_tracking *pos, *temp;
930 	struct pnfs_block_short_extent *se, *stemp;
931 
932 	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
933 		list_del(&pos->it_link);
934 		kfree(pos);
935 	}
936 
937 	list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
938 		list_del(&se->bse_node);
939 		kfree(se);
940 	}
941 	return;
942 }
943 
944 static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
945 {
946 	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
947 
948 	dprintk("%s enter\n", __func__);
949 	release_extents(bl, NULL);
950 	release_inval_marks(&bl->bl_inval);
951 	kfree(bl);
952 }
953 
954 static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
955 						   gfp_t gfp_flags)
956 {
957 	struct pnfs_block_layout *bl;
958 
959 	dprintk("%s enter\n", __func__);
960 	bl = kzalloc(sizeof(*bl), gfp_flags);
961 	if (!bl)
962 		return NULL;
963 	spin_lock_init(&bl->bl_ext_lock);
964 	INIT_LIST_HEAD(&bl->bl_extents[0]);
965 	INIT_LIST_HEAD(&bl->bl_extents[1]);
966 	INIT_LIST_HEAD(&bl->bl_commit);
967 	INIT_LIST_HEAD(&bl->bl_committing);
968 	bl->bl_count = 0;
969 	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
970 	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
971 	return &bl->bl_layout;
972 }
973 
974 static void bl_free_lseg(struct pnfs_layout_segment *lseg)
975 {
976 	dprintk("%s enter\n", __func__);
977 	kfree(lseg);
978 }
979 
980 /* We pretty much ignore lseg, and store all data layout wide, so we
981  * can correctly merge.
982  */
983 static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
984 						 struct nfs4_layoutget_res *lgr,
985 						 gfp_t gfp_flags)
986 {
987 	struct pnfs_layout_segment *lseg;
988 	int status;
989 
990 	dprintk("%s enter\n", __func__);
991 	lseg = kzalloc(sizeof(*lseg), gfp_flags);
992 	if (!lseg)
993 		return ERR_PTR(-ENOMEM);
994 	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
995 	if (status) {
996 		/* We don't want to call the full-blown bl_free_lseg,
997 		 * since on error extents were not touched.
998 		 */
999 		kfree(lseg);
1000 		return ERR_PTR(status);
1001 	}
1002 	return lseg;
1003 }
1004 
1005 static void
1006 bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
1007 		       const struct nfs4_layoutcommit_args *arg)
1008 {
1009 	dprintk("%s enter\n", __func__);
1010 	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
1011 }
1012 
1013 static void
1014 bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
1015 {
1016 	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;
1017 
1018 	dprintk("%s enter\n", __func__);
1019 	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
1020 }
1021 
1022 static void free_blk_mountid(struct block_mount_id *mid)
1023 {
1024 	if (mid) {
1025 		struct pnfs_block_dev *dev, *tmp;
1026 
1027 		/* No need to take bm_lock as we are last user freeing bm_devlist */
1028 		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
1029 			list_del(&dev->bm_node);
1030 			bl_free_block_dev(dev);
1031 		}
1032 		kfree(mid);
1033 	}
1034 }
1035 
1036 /* This is mostly copied from the filelayout_get_device_info function.
1037  * It seems much of this should be at the generic pnfs level.
1038  */
1039 static struct pnfs_block_dev *
1040 nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
1041 			struct nfs4_deviceid *d_id)
1042 {
1043 	struct pnfs_device *dev;
1044 	struct pnfs_block_dev *rv;
1045 	u32 max_resp_sz;
1046 	int max_pages;
1047 	struct page **pages = NULL;
1048 	int i, rc;
1049 
1050 	/*
1051 	 * Use the session max response size as the basis for setting
1052 	 * GETDEVICEINFO's maxcount
1053 	 */
1054 	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
1055 	max_pages = nfs_page_array_len(0, max_resp_sz);
1056 	dprintk("%s max_resp_sz %u max_pages %d\n",
1057 		__func__, max_resp_sz, max_pages);
1058 
1059 	dev = kmalloc(sizeof(*dev), GFP_NOFS);
1060 	if (!dev) {
1061 		dprintk("%s kmalloc failed\n", __func__);
1062 		return ERR_PTR(-ENOMEM);
1063 	}
1064 
1065 	pages = kcalloc(max_pages, sizeof(struct page *), GFP_NOFS);
1066 	if (pages == NULL) {
1067 		kfree(dev);
1068 		return ERR_PTR(-ENOMEM);
1069 	}
1070 	for (i = 0; i < max_pages; i++) {
1071 		pages[i] = alloc_page(GFP_NOFS);
1072 		if (!pages[i]) {
1073 			rv = ERR_PTR(-ENOMEM);
1074 			goto out_free;
1075 		}
1076 	}
1077 
1078 	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
1079 	dev->layout_type = LAYOUT_BLOCK_VOLUME;
1080 	dev->pages = pages;
1081 	dev->pgbase = 0;
1082 	dev->pglen = PAGE_SIZE * max_pages;
1083 	dev->mincount = 0;
1084 	dev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;
1085 
1086 	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
1087 	rc = nfs4_proc_getdeviceinfo(server, dev, NULL);
1088 	dprintk("%s getdevice info returns %d\n", __func__, rc);
1089 	if (rc) {
1090 		rv = ERR_PTR(rc);
1091 		goto out_free;
1092 	}
1093 
1094 	rv = nfs4_blk_decode_device(server, dev);
1095  out_free:
1096 	for (i = 0; i < max_pages; i++)
1097 		__free_page(pages[i]);
1098 	kfree(pages);
1099 	kfree(dev);
1100 	return rv;
1101 }
1102 
1103 static int
1104 bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
1105 {
1106 	struct block_mount_id *b_mt_id = NULL;
1107 	struct pnfs_devicelist *dlist = NULL;
1108 	struct pnfs_block_dev *bdev;
1109 	LIST_HEAD(block_disklist);
1110 	int status, i;
1111 
1112 	dprintk("%s enter\n", __func__);
1113 
1114 	if (server->pnfs_blksize == 0) {
1115 		dprintk("%s Server did not return blksize\n", __func__);
1116 		return -EINVAL;
1117 	}
1118 	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
1119 	if (!b_mt_id) {
1120 		status = -ENOMEM;
1121 		goto out_error;
1122 	}
1123 	/* Initialize nfs4 block layout mount id */
1124 	spin_lock_init(&b_mt_id->bm_lock);
1125 	INIT_LIST_HEAD(&b_mt_id->bm_devlist);
1126 
1127 	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
1128 	if (!dlist) {
1129 		status = -ENOMEM;
1130 		goto out_error;
1131 	}
1132 	dlist->eof = 0;
1133 	while (!dlist->eof) {
1134 		status = nfs4_proc_getdevicelist(server, fh, dlist);
1135 		if (status)
1136 			goto out_error;
1137 		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
1138 			__func__, dlist->num_devs, dlist->eof);
1139 		for (i = 0; i < dlist->num_devs; i++) {
1140 			bdev = nfs4_blk_get_deviceinfo(server, fh,
1141 						       &dlist->dev_id[i]);
1142 			if (IS_ERR(bdev)) {
1143 				status = PTR_ERR(bdev);
1144 				goto out_error;
1145 			}
1146 			spin_lock(&b_mt_id->bm_lock);
1147 			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
1148 			spin_unlock(&b_mt_id->bm_lock);
1149 		}
1150 	}
1151 	dprintk("%s SUCCESS\n", __func__);
1152 	server->pnfs_ld_data = b_mt_id;
1153 
1154  out_return:
1155 	kfree(dlist);
1156 	return status;
1157 
1158  out_error:
1159 	free_blk_mountid(b_mt_id);
1160 	goto out_return;
1161 }
1162 
1163 static int
1164 bl_clear_layoutdriver(struct nfs_server *server)
1165 {
1166 	struct block_mount_id *b_mt_id = server->pnfs_ld_data;
1167 
1168 	dprintk("%s enter\n", __func__);
1169 	free_blk_mountid(b_mt_id);
1170 	dprintk("%s RETURNS\n", __func__);
1171 	return 0;
1172 }
1173 
1174 static bool
1175 is_aligned_req(struct nfs_page *req, unsigned int alignment)
1176 {
1177 	return IS_ALIGNED(req->wb_offset, alignment) &&
1178 	       IS_ALIGNED(req->wb_bytes, alignment);
1179 }
1180 
1181 static void
1182 bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1183 {
1184 	if (pgio->pg_dreq != NULL &&
1185 	    !is_aligned_req(req, SECTOR_SIZE))
1186 		nfs_pageio_reset_read_mds(pgio);
1187 	else
1188 		pnfs_generic_pg_init_read(pgio, req);
1189 }
1190 
1191 /*
1192  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
1193  * of bytes (maximum @req->wb_bytes) that can be coalesced.
1194  */
1195 static size_t
1196 bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1197 		struct nfs_page *req)
1198 {
1199 	if (pgio->pg_dreq != NULL &&
1200 	    !is_aligned_req(req, SECTOR_SIZE))
1201 		return 0;
1202 
1203 	return pnfs_generic_pg_test(pgio, prev, req);
1204 }
1205 
1206 /*
1207  * Return the number of contiguous bytes for a given inode
1208  * starting at page frame idx.
1209  */
1210 static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
1211 {
1212 	struct address_space *mapping = inode->i_mapping;
1213 	pgoff_t end;
1214 
1215 	/* Optimize common case that writes from 0 to end of file */
1216 	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
1217 	if (end != NFS_I(inode)->npages) {
1218 		rcu_read_lock();
1219 		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
1220 		rcu_read_unlock();
1221 	}
1222 
1223 	if (!end)
1224 		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
1225 	else
1226 		return (end - idx) << PAGE_CACHE_SHIFT;
1227 }
1228 
1229 static void
1230 bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1231 {
1232 	if (pgio->pg_dreq != NULL &&
1233 	    !is_aligned_req(req, PAGE_CACHE_SIZE)) {
1234 		nfs_pageio_reset_write_mds(pgio);
1235 	} else {
1236 		u64 wb_size;
1237 		if (pgio->pg_dreq == NULL)
1238 			wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
1239 						      req->wb_index);
1240 		else
1241 			wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
1242 
1243 		pnfs_generic_pg_init_write(pgio, req, wb_size);
1244 	}
1245 }
1246 
1247 /*
1248  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
1249  * of bytes (maximum @req->wb_bytes) that can be coalesced.
1250  */
1251 static size_t
1252 bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1253 		 struct nfs_page *req)
1254 {
1255 	if (pgio->pg_dreq != NULL &&
1256 	    !is_aligned_req(req, PAGE_CACHE_SIZE))
1257 		return 0;
1258 
1259 	return pnfs_generic_pg_test(pgio, prev, req);
1260 }
1261 
1262 static const struct nfs_pageio_ops bl_pg_read_ops = {
1263 	.pg_init = bl_pg_init_read,
1264 	.pg_test = bl_pg_test_read,
1265 	.pg_doio = pnfs_generic_pg_readpages,
1266 };
1267 
1268 static const struct nfs_pageio_ops bl_pg_write_ops = {
1269 	.pg_init = bl_pg_init_write,
1270 	.pg_test = bl_pg_test_write,
1271 	.pg_doio = pnfs_generic_pg_writepages,
1272 };
1273 
1274 static struct pnfs_layoutdriver_type blocklayout_type = {
1275 	.id				= LAYOUT_BLOCK_VOLUME,
1276 	.name				= "LAYOUT_BLOCK_VOLUME",
1277 	.owner				= THIS_MODULE,
1278 	.read_pagelist			= bl_read_pagelist,
1279 	.write_pagelist			= bl_write_pagelist,
1280 	.alloc_layout_hdr		= bl_alloc_layout_hdr,
1281 	.free_layout_hdr		= bl_free_layout_hdr,
1282 	.alloc_lseg			= bl_alloc_lseg,
1283 	.free_lseg			= bl_free_lseg,
1284 	.encode_layoutcommit		= bl_encode_layoutcommit,
1285 	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
1286 	.set_layoutdriver		= bl_set_layoutdriver,
1287 	.clear_layoutdriver		= bl_clear_layoutdriver,
1288 	.pg_read_ops			= &bl_pg_read_ops,
1289 	.pg_write_ops			= &bl_pg_write_ops,
1290 };
1291 
1292 static const struct rpc_pipe_ops bl_upcall_ops = {
1293 	.upcall		= rpc_pipe_generic_upcall,
1294 	.downcall	= bl_pipe_downcall,
1295 	.destroy_msg	= bl_pipe_destroy_msg,
1296 };
1297 
1298 static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
1299 					    struct rpc_pipe *pipe)
1300 {
1301 	struct dentry *dir, *dentry;
1302 
1303 	dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
1304 	if (dir == NULL)
1305 		return ERR_PTR(-ENOENT);
1306 	dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
1307 	dput(dir);
1308 	return dentry;
1309 }
1310 
1311 static void nfs4blocklayout_unregister_sb(struct super_block *sb,
1312 					  struct rpc_pipe *pipe)
1313 {
1314 	if (pipe->dentry)
1315 		rpc_unlink(pipe->dentry);
1316 }
1317 
1318 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
1319 			   void *ptr)
1320 {
1321 	struct super_block *sb = ptr;
1322 	struct net *net = sb->s_fs_info;
1323 	struct nfs_net *nn = net_generic(net, nfs_net_id);
1324 	struct dentry *dentry;
1325 	int ret = 0;
1326 
1327 	if (!try_module_get(THIS_MODULE))
1328 		return 0;
1329 
1330 	if (nn->bl_device_pipe == NULL) {
1331 		module_put(THIS_MODULE);
1332 		return 0;
1333 	}
1334 
1335 	switch (event) {
1336 	case RPC_PIPEFS_MOUNT:
1337 		dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
1338 		if (IS_ERR(dentry)) {
1339 			ret = PTR_ERR(dentry);
1340 			break;
1341 		}
1342 		nn->bl_device_pipe->dentry = dentry;
1343 		break;
1344 	case RPC_PIPEFS_UMOUNT:
1345 		if (nn->bl_device_pipe->dentry)
1346 			nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
1347 		break;
1348 	default:
1349 		ret = -ENOTSUPP;
1350 		break;
1351 	}
1352 	module_put(THIS_MODULE);
1353 	return ret;
1354 }
1355 
1356 static struct notifier_block nfs4blocklayout_block = {
1357 	.notifier_call = rpc_pipefs_event,
1358 };
1359 
1360 static struct dentry *nfs4blocklayout_register_net(struct net *net,
1361 						   struct rpc_pipe *pipe)
1362 {
1363 	struct super_block *pipefs_sb;
1364 	struct dentry *dentry;
1365 
1366 	pipefs_sb = rpc_get_sb_net(net);
1367 	if (!pipefs_sb)
1368 		return NULL;
1369 	dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
1370 	rpc_put_sb_net(net);
1371 	return dentry;
1372 }
1373 
1374 static void nfs4blocklayout_unregister_net(struct net *net,
1375 					   struct rpc_pipe *pipe)
1376 {
1377 	struct super_block *pipefs_sb;
1378 
1379 	pipefs_sb = rpc_get_sb_net(net);
1380 	if (pipefs_sb) {
1381 		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
1382 		rpc_put_sb_net(net);
1383 	}
1384 }
1385 
1386 static int nfs4blocklayout_net_init(struct net *net)
1387 {
1388 	struct nfs_net *nn = net_generic(net, nfs_net_id);
1389 	struct dentry *dentry;
1390 
1391 	init_waitqueue_head(&nn->bl_wq);
1392 	nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
1393 	if (IS_ERR(nn->bl_device_pipe))
1394 		return PTR_ERR(nn->bl_device_pipe);
1395 	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
1396 	if (IS_ERR(dentry)) {
1397 		rpc_destroy_pipe_data(nn->bl_device_pipe);
1398 		return PTR_ERR(dentry);
1399 	}
1400 	nn->bl_device_pipe->dentry = dentry;
1401 	return 0;
1402 }
1403 
1404 static void nfs4blocklayout_net_exit(struct net *net)
1405 {
1406 	struct nfs_net *nn = net_generic(net, nfs_net_id);
1407 
1408 	nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
1409 	rpc_destroy_pipe_data(nn->bl_device_pipe);
1410 	nn->bl_device_pipe = NULL;
1411 }
1412 
1413 static struct pernet_operations nfs4blocklayout_net_ops = {
1414 	.init = nfs4blocklayout_net_init,
1415 	.exit = nfs4blocklayout_net_exit,
1416 };
1417 
1418 static int __init nfs4blocklayout_init(void)
1419 {
1420 	int ret;
1421 
1422 	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);
1423 
1424 	ret = pnfs_register_layoutdriver(&blocklayout_type);
1425 	if (ret)
1426 		goto out;
1427 
1428 	ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
1429 	if (ret)
1430 		goto out_remove;
1431 	ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
1432 	if (ret)
1433 		goto out_notifier;
1434 out:
1435 	return ret;
1436 
1437 out_notifier:
1438 	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
1439 out_remove:
1440 	pnfs_unregister_layoutdriver(&blocklayout_type);
1441 	return ret;
1442 }
1443 
1444 static void __exit nfs4blocklayout_exit(void)
1445 {
1446 	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
1447 	       __func__);
1448 
1449 	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
1450 	unregister_pernet_subsys(&nfs4blocklayout_net_ops);
1451 	pnfs_unregister_layoutdriver(&blocklayout_type);
1452 }
1453 
1454 MODULE_ALIAS("nfs-layouttype4-3");
1455 
1456 module_init(nfs4blocklayout_init);
1457 module_exit(nfs4blocklayout_exit);
1458