xref: /openbmc/linux/fs/nilfs2/segbuf.c (revision ff56535d)
/*
 * segbuf.c - NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include "page.h"
#include "segbuf.h"

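/**
 * struct nilfs_write_info - context for submitting the blocks of one log
 * @nilfs: nilfs object
 * @bio: bio under construction
 * @start: index of the first block covered by the current bio
 * @end: index one past the last block added to the current bio
 * @rest_blocks: number of blocks of the log not yet submitted
 * @max_pages: maximum number of pages a single bio can carry
 * @nr_vecs: number of page vectors to request for the next bio
 * @blocknr: disk block number at which the log starts
 */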
struct nilfs_write_info {
	struct the_nilfs       *nilfs;
	struct bio	       *bio;
	int			start, end; /* The region to be submitted */
	int			rest_blocks;
	int			max_pages;
	int			nr_vecs;
	sector_t		blocknr;
};

static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
			      struct the_nilfs *nilfs);
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);

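/**
 * nilfs_segbuf_new - allocate a new segment buffer
 * @sb: super block instance
 *
 * Return Value: On success, a pointer to the newly initialized segment
 * buffer is returned.  On allocation failure, NULL is returned.
 */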
struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
	struct nilfs_segment_buffer *segbuf;

	segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
	if (unlikely(!segbuf))
		return NULL;

	segbuf->sb_super = sb;
	INIT_LIST_HEAD(&segbuf->sb_list);
	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
	segbuf->sb_super_root = NULL;

	init_completion(&segbuf->sb_bio_event);
	atomic_set(&segbuf->sb_err, 0);
	segbuf->sb_nbio = 0;

	return segbuf;
}

void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
	kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}

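/**
 * nilfs_segbuf_map - map a segment buffer to a physical segment
 * @segbuf: segment buffer
 * @segnum: number of the full segment to use
 * @offset: block offset of the log within the segment
 * @nilfs: nilfs object
 *
 * The range of the underlying full segment is looked up, and the log is
 * placed at @offset blocks from the start of that segment.
 */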
void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
		     unsigned long offset, struct the_nilfs *nilfs)
{
	segbuf->sb_segnum = segnum;
	nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
				&segbuf->sb_fseg_end);

	segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

/**
 * nilfs_segbuf_map_cont - map a new log behind a given log
 * @segbuf: new segment buffer
 * @prev: segment buffer containing a log to be continued
 */
void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
			   struct nilfs_segment_buffer *prev)
{
	segbuf->sb_segnum = prev->sb_segnum;
	segbuf->sb_fseg_start = prev->sb_fseg_start;
	segbuf->sb_fseg_end = prev->sb_fseg_end;
	segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

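/**
 * nilfs_segbuf_set_next_segnum - set the successor segment of a log
 * @segbuf: segment buffer
 * @nextnum: number of the segment that follows this one
 * @nilfs: nilfs object
 *
 * The start block number of segment @nextnum is also recorded in the
 * segment summary of the log.
 */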
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
				  __u64 nextnum, struct the_nilfs *nilfs)
{
	segbuf->sb_nextnum = nextnum;
	segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}

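/**
 * nilfs_segbuf_extend_segsum - append one block to the segment summary area
 * @segbuf: segment buffer
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */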
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_segsum_buffer(segbuf, bh);
	return 0;
}

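/**
 * nilfs_segbuf_extend_payload - append one block to the payload area
 * @segbuf: segment buffer
 * @bhp: place to store a pointer to the newly added buffer head
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */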
int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
				struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_payload_buffer(segbuf, bh);
	*bhp = bh;
	return 0;
}

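/**
 * nilfs_segbuf_reset - initialize a segment buffer for a new log
 * @segbuf: segment buffer
 * @flags: flags stored in the segment summary
 * @ctime: creation time of the log
 * @cno: checkpoint number
 *
 * The block counters of the summary are cleared and the first summary
 * block is allocated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */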
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
		       time_t ctime, __u64 cno)
{
	int err;

	segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
	err = nilfs_segbuf_extend_segsum(segbuf);
	if (unlikely(err))
		return err;

	segbuf->sb_sum.flags = flags;
	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
	segbuf->sb_sum.ctime = ctime;
	segbuf->sb_sum.cno = cno;
	return 0;
}

/*
 * Setup segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct nilfs_segment_summary *raw_sum;
	struct buffer_head *bh_sum;

	bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
			    struct buffer_head, b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
	raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
	raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
	raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
	raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
	raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
	raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
	raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
	raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
	raw_sum->ss_pad      = 0;
	raw_sum->ss_cno      = cpu_to_le64(segbuf->sb_sum.cno);
}

/*
 * CRC calculation routines
 */
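/*
 * nilfs_segbuf_fill_in_segsum_crc - compute the checksum of the summary area
 *
 * The CRC covers sb_sum.sumbytes bytes of segment summary data spread over
 * the summary buffers, skipping the ss_datasum and ss_sumsum fields of the
 * leading block.  The result is stored in ss_sumsum.
 */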
static void
nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	unsigned long size, bytes = segbuf->sb_sum.sumbytes;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);

	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	size = min_t(unsigned long, bytes, bh->b_size);
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum +
		       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
		       size - (sizeof(raw_sum->ss_datasum) +
			       sizeof(raw_sum->ss_sumsum)));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		bytes -= size;
		size = min_t(unsigned long, bytes, bh->b_size);
		crc = crc32_le(crc, bh->b_data, size);
	}
	raw_sum->ss_sumsum = cpu_to_le32(crc);
}

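/*
 * nilfs_segbuf_fill_in_data_crc - compute the checksum of the whole log
 *
 * The CRC covers the segment summary blocks (excluding the ss_datasum
 * field itself) followed by all payload blocks, and is stored in
 * ss_datasum.
 */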
static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
					  u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	void *kaddr;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
		       bh->b_size - sizeof(raw_sum->ss_datasum));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		crc = crc32_le(crc, bh->b_data, bh->b_size);
	}
	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	raw_sum->ss_datasum = cpu_to_le32(crc);
}

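/*
 * nilfs_segbuf_fill_in_super_root_crc - compute the checksum of the super root
 *
 * The CRC covers NILFS_SR_BYTES of the super root block, excluding the
 * sr_sum field itself, and is stored in sr_sum.
 */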
static void
nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf,
				    u32 seed)
{
	struct nilfs_super_root *raw_sr;
	u32 crc;

	raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
	crc = crc32_le(seed,
		       (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
		       NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
	raw_sr->sr_sum = cpu_to_le32(crc);
}

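/*
 * nilfs_release_buffers - detach and release a list of buffer heads
 *
 * Each buffer is removed from the list and released.  Buffers attached to
 * nilfs-allocated clone pages also drop a page reference, and the clone
 * page itself is freed once it is no longer in use.
 */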
static void nilfs_release_buffers(struct list_head *list)
{
	struct buffer_head *bh, *n;

	list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
		list_del_init(&bh->b_assoc_buffers);
		if (buffer_nilfs_allocated(bh)) {
			struct page *clone_page = bh->b_page;

			/* remove clone page */
			brelse(bh);
			page_cache_release(clone_page); /* for each bh */
			if (page_count(clone_page) <= 2) {
				lock_page(clone_page);
				nilfs_free_private_page(clone_page);
			}
			continue;
		}
		brelse(bh);
	}
}

static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
	nilfs_release_buffers(&segbuf->sb_segsum_buffers);
	nilfs_release_buffers(&segbuf->sb_payload_buffers);
	segbuf->sb_super_root = NULL;
}

/*
 * Iterators for segment buffers
 */
void nilfs_clear_logs(struct list_head *logs)
{
	struct nilfs_segment_buffer *segbuf;

	list_for_each_entry(segbuf, logs, sb_list)
		nilfs_segbuf_clear(segbuf);
}

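/**
 * nilfs_truncate_logs - drop the logs following a given log
 * @logs: list of segment buffers
 * @last: segment buffer to become the last one on the list
 *
 * All segment buffers after @last are removed from the list, cleared and
 * freed.  If @last is NULL, the whole list is emptied.
 */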
void nilfs_truncate_logs(struct list_head *logs,
			 struct nilfs_segment_buffer *last)
{
	struct nilfs_segment_buffer *n, *segbuf;

	segbuf = list_prepare_entry(last, logs, sb_list);
	list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_clear(segbuf);
		nilfs_segbuf_free(segbuf);
	}
}

int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf;
	int ret = 0;

	list_for_each_entry(segbuf, logs, sb_list) {
		ret = nilfs_segbuf_write(segbuf, nilfs);
		if (ret)
			break;
	}
	return ret;
}

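/**
 * nilfs_wait_on_logs - wait for the I/O of every log on a list to complete
 * @logs: list of segment buffers
 *
 * Return Value: On success, 0 is returned.  On error, the first error
 * encountered (a negative error code) is returned after waiting for the
 * remaining logs.
 */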
int nilfs_wait_on_logs(struct list_head *logs)
{
	struct nilfs_segment_buffer *segbuf;
	int err, ret = 0;

	list_for_each_entry(segbuf, logs, sb_list) {
		err = nilfs_segbuf_wait(segbuf);
		if (err && !ret)
			ret = err;
	}
	return ret;
}

/**
 * nilfs_add_checksums_on_logs - add checksums on the logs
 * @logs: list of segment buffers storing target logs
 * @seed: checksum seed value
 */
void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed)
{
	struct nilfs_segment_buffer *segbuf;

	list_for_each_entry(segbuf, logs, sb_list) {
		if (segbuf->sb_super_root)
			nilfs_segbuf_fill_in_super_root_crc(segbuf, seed);
		nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
		nilfs_segbuf_fill_in_data_crc(segbuf, seed);
	}
}

/*
 * BIO operations
 */
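/*
 * nilfs_end_bio_write - completion handler for log write bios
 *
 * Errors are accumulated in segbuf->sb_err, and each completion is
 * signalled through segbuf->sb_bio_event so that nilfs_segbuf_wait() and
 * nilfs_segbuf_submit_bio() can throttle submission and detect failures.
 */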
static void nilfs_end_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nilfs_segment_buffer *segbuf = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		/*
		 * To be detected by nilfs_segbuf_submit_bio().  The bio
		 * reference is dropped by the single bio_put() below;
		 * an extra put here would release the bio twice.
		 */
	}

	if (!uptodate)
		atomic_inc(&segbuf->sb_err);

	bio_put(bio);
	complete(&segbuf->sb_bio_event);
}

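/*
 * nilfs_segbuf_submit_bio - submit the bio under construction
 *
 * If the backing device is congested and earlier bios are still in flight,
 * one completion is consumed first to throttle submission.  On success the
 * write context is advanced to the next region of the log.
 */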
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
				   struct nilfs_write_info *wi, int mode)
{
	struct bio *bio = wi->bio;
	int err;

	if (segbuf->sb_nbio > 0 &&
	    bdi_write_congested(segbuf->sb_super->s_bdi)) {
		wait_for_completion(&segbuf->sb_bio_event);
		segbuf->sb_nbio--;
		if (unlikely(atomic_read(&segbuf->sb_err))) {
			bio_put(bio);
			err = -EIO;
			goto failed;
		}
	}

	bio->bi_end_io = nilfs_end_bio_write;
	bio->bi_private = segbuf;
	bio_get(bio);
	submit_bio(mode, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
		bio_put(bio);
		err = -EOPNOTSUPP;
		goto failed;
	}
	segbuf->sb_nbio++;
	bio_put(bio);

	wi->bio = NULL;
	wi->rest_blocks -= wi->end - wi->start;
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end;
	return 0;

 failed:
	wi->bio = NULL;
	return err;
}

/**
 * nilfs_alloc_seg_bio - allocate a new bio for writing a log
 * @nilfs: nilfs object
 * @start: start block number of the bio
 * @nr_vecs: requested size of the page vector
 *
 * Return Value: On success, a pointer to the allocated bio is returned.
 * On error, NULL is returned.
 */
static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
				       int nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, nr_vecs);
	if (bio == NULL) {
		while (!bio && (nr_vecs >>= 1))
			bio = bio_alloc(GFP_NOIO, nr_vecs);
	}
	if (likely(bio)) {
		bio->bi_bdev = nilfs->ns_bdev;
		bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
	}
	return bio;
}

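/*
 * nilfs_segbuf_prepare_write - initialize the write context for one log
 *
 * The number of blocks to write, the per-bio page limit of the device and
 * the starting disk block number are taken from the segment buffer.
 */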
static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
				       struct nilfs_write_info *wi)
{
	wi->bio = NULL;
	wi->rest_blocks = segbuf->sb_sum.nblocks;
	wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end = 0;
	wi->blocknr = segbuf->sb_pseg_start;
}

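/*
 * nilfs_segbuf_submit_bh - add one buffer head to the bio under construction
 *
 * A new bio is allocated when none is pending.  If the buffer does not fit
 * into the current bio, that bio is submitted first and the buffer is
 * retried with a fresh one.
 */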
static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
				  struct nilfs_write_info *wi,
				  struct buffer_head *bh, int mode)
{
	int len, err;

	BUG_ON(wi->nr_vecs <= 0);
 repeat:
	if (!wi->bio) {
		wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
					      wi->nr_vecs);
		if (unlikely(!wi->bio))
			return -ENOMEM;
	}

	len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (len == bh->b_size) {
		wi->end++;
		return 0;
	}
	/* bio is FULL */
	err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
	/* never submit current bh */
	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_segbuf_write - submit write requests of a log
 * @segbuf: buffer storing a log to be written
 * @nilfs: nilfs object
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
			      struct the_nilfs *nilfs)
{
	struct nilfs_write_info wi;
	struct buffer_head *bh;
	int res = 0, rw = WRITE;

	wi.nilfs = nilfs;
	nilfs_segbuf_prepare_write(segbuf, &wi);

	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	if (wi.bio) {
		/*
		 * Last BIO is always sent through the following
		 * submission.
		 */
		rw |= REQ_SYNC;
		res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
	}

 failed_bio:
	return res;
}

/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
	int err = 0;

	if (!segbuf->sb_nbio)
		return 0;

	do {
		wait_for_completion(&segbuf->sb_bio_event);
	} while (--segbuf->sb_nbio > 0);

	if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
		printk(KERN_ERR "NILFS: IO error writing segment\n");
		err = -EIO;
	}
	return err;
}