/*
 * segbuf.c - NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include "page.h"
#include "segbuf.h"


static struct kmem_cache *nilfs_segbuf_cachep;

static void nilfs_segbuf_init_once(void *obj)
{
	memset(obj, 0, sizeof(struct nilfs_segment_buffer));
}

int __init nilfs_init_segbuf_cache(void)
{
	nilfs_segbuf_cachep =
		kmem_cache_create("nilfs2_segbuf_cache",
				  sizeof(struct nilfs_segment_buffer),
				  0, SLAB_RECLAIM_ACCOUNT,
				  nilfs_segbuf_init_once);

	return (nilfs_segbuf_cachep == NULL) ? -ENOMEM : 0;
}

void nilfs_destroy_segbuf_cache(void)
{
	kmem_cache_destroy(nilfs_segbuf_cachep);
}

struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
	struct nilfs_segment_buffer *segbuf;

	segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
	if (unlikely(!segbuf))
		return NULL;

	segbuf->sb_super = sb;
	INIT_LIST_HEAD(&segbuf->sb_list);
	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
	return segbuf;
}

void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
	kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}

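/*
 * Map the segment buffer onto physical segment @segnum.  The partial
 * segment described by this buffer begins @offset blocks into the full
 * segment; sb_rest_blocks is the room left from there up to the
 * segment end.
 */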
void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
		     unsigned long offset, struct the_nilfs *nilfs)
{
	segbuf->sb_segnum = segnum;
	nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
				&segbuf->sb_fseg_end);

	segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
				  __u64 nextnum, struct the_nilfs *nilfs)
{
	segbuf->sb_nextnum = nextnum;
	segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}

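/*
 * Extend the segment summary area by one block, appended right after
 * the current summary blocks at the head of the partial segment.
 */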
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_segsum_buffer(segbuf, bh);
	return 0;
}

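/*
 * Append one payload block at the current end of the partial segment
 * and hand its buffer head back through @bhp.
 */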
int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
				struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_payload_buffer(segbuf, bh);
	*bhp = bh;
	return 0;
}

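/*
 * Reset the accumulated block counts and summary information of the
 * segment buffer, and reserve the first block for the segment summary.
 */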
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
		       time_t ctime)
{
	int err;

	segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
	err = nilfs_segbuf_extend_segsum(segbuf);
	if (unlikely(err))
		return err;

	segbuf->sb_sum.flags = flags;
	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
	segbuf->sb_sum.ctime = ctime;

	segbuf->sb_io_error = 0;
	return 0;
}

/*
 * Set up the segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct nilfs_segment_summary *raw_sum;
	struct buffer_head *bh_sum;

	bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
			    struct buffer_head, b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
	raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
	raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
	raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
	raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
	raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
	raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
	raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
	raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
	raw_sum->ss_pad      = 0;
}

/*
 * CRC calculation routines
 */
void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
				     u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	unsigned long size, bytes = segbuf->sb_sum.sumbytes;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);

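	/*
	 * The checksum fields (ss_datasum and ss_sumsum) sit at the head
	 * of the summary, so they are skipped and excluded from the
	 * checksummed area.
	 */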
	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	size = min_t(unsigned long, bytes, bh->b_size);
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum +
		       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
		       size - (sizeof(raw_sum->ss_datasum) +
			       sizeof(raw_sum->ss_sumsum)));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		bytes -= size;
		size = min_t(unsigned long, bytes, bh->b_size);
		crc = crc32_le(crc, bh->b_data, size);
	}
	raw_sum->ss_sumsum = cpu_to_le32(crc);
}

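/*
 * The data checksum covers the whole partial segment except the
 * ss_datasum field itself: the remainder of the summary blocks and
 * every payload block.  Payload pages may reside in highmem, hence
 * the kmap_atomic() mapping below.
 */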
void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
				   u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	void *kaddr;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
		       bh->b_size - sizeof(raw_sum->ss_datasum));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		crc = crc32_le(crc, bh->b_data, bh->b_size);
	}
	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	raw_sum->ss_datasum = cpu_to_le32(crc);
}

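/*
 * Detach and release every buffer on @list.  Buffers sitting on a
 * nilfs-allocated clone page additionally drop the per-buffer page
 * reference; once no users other than the allocator remain, the
 * private page itself is freed.
 */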
void nilfs_release_buffers(struct list_head *list)
{
	struct buffer_head *bh, *n;

	list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
		list_del_init(&bh->b_assoc_buffers);
		if (buffer_nilfs_allocated(bh)) {
			struct page *clone_page = bh->b_page;

			/* remove clone page */
			brelse(bh);
			page_cache_release(clone_page); /* for each bh */
			if (page_count(clone_page) <= 2) {
				lock_page(clone_page);
				nilfs_free_private_page(clone_page);
			}
			continue;
		}
		brelse(bh);
	}
}

/*
 * BIO operations
 */
static void nilfs_end_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nilfs_write_info *wi = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		/*
		 * To be detected by nilfs_submit_seg_bio(), which still
		 * holds its own reference to this bio.  Dropping a second
		 * reference here would free the bio before the submitter
		 * can test the flag.
		 */
	}

	if (!uptodate)
		atomic_inc(&wi->err);

	bio_put(bio);
	complete(&wi->bio_event);
}

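/*
 * Submit the BIO accumulated in @wi.  If the backing device is
 * congested, throttle by waiting for one in-flight BIO to complete
 * before sending the next one.
 */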
static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
{
	struct bio *bio = wi->bio;
	int err;

	if (wi->nbio > 0 && bdi_write_congested(wi->bdi)) {
		wait_for_completion(&wi->bio_event);
		wi->nbio--;
		if (unlikely(atomic_read(&wi->err))) {
			bio_put(bio);
			err = -EIO;
			goto failed;
		}
	}

	bio->bi_end_io = nilfs_end_bio_write;
	bio->bi_private = wi;
	bio_get(bio);
	submit_bio(mode, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
		bio_put(bio);
		err = -EOPNOTSUPP;
		goto failed;
	}
	wi->nbio++;
	bio_put(bio);

	wi->bio = NULL;
	wi->rest_blocks -= wi->end - wi->start;
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end;
	return 0;

 failed:
	wi->bio = NULL;
	return err;
}

/**
 * nilfs_alloc_seg_bio - allocate a bio for writing a segment
 * @sb: super block
 * @start: beginning disk block number of this BIO.
 * @nr_vecs: requested size of the page vector.
 *
 * nilfs_alloc_seg_bio() allocates a new BIO structure and initializes it.
 *
 * Return Value: On success, pointer to the struct bio is returned.
 * On error, NULL is returned.
 */
static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
				       int nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, nr_vecs);
	if (bio == NULL) {
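		/*
		 * Allocation failed; retry with half as many vectors
		 * until a small enough BIO can be obtained.
		 */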
		while (!bio && (nr_vecs >>= 1))
			bio = bio_alloc(GFP_NOIO, nr_vecs);
	}
	if (likely(bio)) {
		bio->bi_bdev = sb->s_bdev;
		bio->bi_sector = (sector_t)start << (sb->s_blocksize_bits - 9);
	}
	return bio;
}

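/*
 * Initialize the write context: no BIO in flight yet, the whole partial
 * segment still to be written, and at most bio_get_nr_vecs() pages per
 * BIO.
 */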
void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
				struct nilfs_write_info *wi)
{
	wi->bio = NULL;
	wi->rest_blocks = segbuf->sb_sum.nblocks;
	wi->max_pages = bio_get_nr_vecs(wi->sb->s_bdev);
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end = 0;
	wi->nbio = 0;
	wi->blocknr = segbuf->sb_pseg_start;

	atomic_set(&wi->err, 0);
	init_completion(&wi->bio_event);
}

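/*
 * Add the buffer's page to the current BIO, allocating a BIO on demand.
 * When bio_add_page() cannot take the whole buffer the BIO is full, so
 * it is submitted and the buffer is retried with a fresh BIO.
 */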
static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
			   int mode)
{
	int len, err;

	BUG_ON(wi->nr_vecs <= 0);
 repeat:
	if (!wi->bio) {
		wi->bio = nilfs_alloc_seg_bio(wi->sb, wi->blocknr + wi->end,
					      wi->nr_vecs);
		if (unlikely(!wi->bio))
			return -ENOMEM;
	}

	len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (len == bh->b_size) {
		wi->end++;
		return 0;
	}
	/* bio is FULL */
	err = nilfs_submit_seg_bio(wi, mode);
	/* never submit current bh */
	if (likely(!err))
		goto repeat;
	return err;
}

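/*
 * Write out the partial segment: the segment summary blocks go first,
 * followed by the payload blocks, all in block order.
 */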
int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
		       struct nilfs_write_info *wi)
{
	struct buffer_head *bh;
	int res, rw = WRITE;

	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
		res = nilfs_submit_bh(wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		res = nilfs_submit_bh(wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	if (wi->bio) {
		/*
		 * Last BIO is always sent through the following
		 * submission.
		 */
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
		res = nilfs_submit_seg_bio(wi, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	res = 0;
 out:
	return res;

 failed_bio:
	atomic_inc(&wi->err);
	goto out;
}

/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 * @wi: nilfs_write_info
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf,
		      struct nilfs_write_info *wi)
{
	int err = 0;

	if (!wi->nbio)
		return 0;

	do {
		wait_for_completion(&wi->bio_event);
	} while (--wi->nbio > 0);

	if (unlikely(atomic_read(&wi->err) > 0)) {
		printk(KERN_ERR "NILFS: IO error writing segment\n");
		err = -EIO;
		segbuf->sb_io_error = 1;
	}
	return err;
}
439