/*
 * segbuf.c - NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include <linux/backing-dev.h>
#include "page.h"
#include "segbuf.h"


struct nilfs_write_info {
        struct the_nilfs        *nilfs;
        struct bio              *bio;
        int                     start, end; /* The region to be submitted */
        int                     rest_blocks;
        int                     max_pages;
        int                     nr_vecs;
        sector_t                blocknr;
};


static struct kmem_cache *nilfs_segbuf_cachep;

static void nilfs_segbuf_init_once(void *obj)
{
        memset(obj, 0, sizeof(struct nilfs_segment_buffer));
}

int __init nilfs_init_segbuf_cache(void)
{
        nilfs_segbuf_cachep =
                kmem_cache_create("nilfs2_segbuf_cache",
                                  sizeof(struct nilfs_segment_buffer),
                                  0, SLAB_RECLAIM_ACCOUNT,
                                  nilfs_segbuf_init_once);

        return (nilfs_segbuf_cachep == NULL) ? -ENOMEM : 0;
}

void nilfs_destroy_segbuf_cache(void)
{
        kmem_cache_destroy(nilfs_segbuf_cachep);
}

struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
        struct nilfs_segment_buffer *segbuf;

        segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
        if (unlikely(!segbuf))
                return NULL;

        segbuf->sb_super = sb;
        INIT_LIST_HEAD(&segbuf->sb_list);
        INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
        INIT_LIST_HEAD(&segbuf->sb_payload_buffers);

        init_completion(&segbuf->sb_bio_event);
        atomic_set(&segbuf->sb_err, 0);
        segbuf->sb_nbio = 0;

        return segbuf;
}

void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
        kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}

void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
                      unsigned long offset, struct the_nilfs *nilfs)
{
        segbuf->sb_segnum = segnum;
        nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
                                &segbuf->sb_fseg_end);

        segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
        segbuf->sb_rest_blocks =
                segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

/**
 * nilfs_segbuf_map_cont - map a new log behind a given log
 * @segbuf: new segment buffer
 * @prev: segment buffer containing a log to be continued
 */
void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
                           struct nilfs_segment_buffer *prev)
{
        segbuf->sb_segnum = prev->sb_segnum;
        segbuf->sb_fseg_start = prev->sb_fseg_start;
        segbuf->sb_fseg_end = prev->sb_fseg_end;
        segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
        segbuf->sb_rest_blocks =
                segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
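
/*
 * Illustrative usage sketch (not code from this file; "segnum",
 * "flags" and "ctime" are placeholders supplied by the caller): a
 * segment buffer is created, mapped onto an on-disk segment, and
 * reset before summary or payload blocks are added to it.
 *
 *      struct nilfs_segment_buffer *segbuf;
 *      int err;
 *
 *      segbuf = nilfs_segbuf_new(sb);
 *      if (unlikely(!segbuf))
 *              return -ENOMEM;
 *      nilfs_segbuf_map(segbuf, segnum, 0, nilfs);
 *      err = nilfs_segbuf_reset(segbuf, flags, ctime);
 */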
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
                                  __u64 nextnum, struct the_nilfs *nilfs)
{
        segbuf->sb_nextnum = nextnum;
        segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}

int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
        struct buffer_head *bh;

        bh = sb_getblk(segbuf->sb_super,
                       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
        if (unlikely(!bh))
                return -ENOMEM;

        nilfs_segbuf_add_segsum_buffer(segbuf, bh);
        return 0;
}

int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
                                struct buffer_head **bhp)
{
        struct buffer_head *bh;

        bh = sb_getblk(segbuf->sb_super,
                       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
        if (unlikely(!bh))
                return -ENOMEM;

        nilfs_segbuf_add_payload_buffer(segbuf, bh);
        *bhp = bh;
        return 0;
}

int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
                       time_t ctime)
{
        int err;

        segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
        err = nilfs_segbuf_extend_segsum(segbuf);
        if (unlikely(err))
                return err;

        segbuf->sb_sum.flags = flags;
        segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
        segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
        segbuf->sb_sum.ctime = ctime;
        return 0;
}

/*
 * Set up the segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
        struct nilfs_segment_summary *raw_sum;
        struct buffer_head *bh_sum;

        bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
                            struct buffer_head, b_assoc_buffers);
        raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

        raw_sum->ss_magic = cpu_to_le32(NILFS_SEGSUM_MAGIC);
        raw_sum->ss_bytes = cpu_to_le16(sizeof(*raw_sum));
        raw_sum->ss_flags = cpu_to_le16(segbuf->sb_sum.flags);
        raw_sum->ss_seq = cpu_to_le64(segbuf->sb_sum.seg_seq);
        raw_sum->ss_create = cpu_to_le64(segbuf->sb_sum.ctime);
        raw_sum->ss_next = cpu_to_le64(segbuf->sb_sum.next);
        raw_sum->ss_nblocks = cpu_to_le32(segbuf->sb_sum.nblocks);
        raw_sum->ss_nfinfo = cpu_to_le32(segbuf->sb_sum.nfinfo);
        raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
        raw_sum->ss_pad = 0;
}

/*
 * CRC calculation routines
 */
void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
                                     u32 seed)
{
        struct buffer_head *bh;
        struct nilfs_segment_summary *raw_sum;
        unsigned long size, bytes = segbuf->sb_sum.sumbytes;
        u32 crc;

        bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
                        b_assoc_buffers);

        raw_sum = (struct nilfs_segment_summary *)bh->b_data;
        size = min_t(unsigned long, bytes, bh->b_size);
        crc = crc32_le(seed,
                       (unsigned char *)raw_sum +
                       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
                       size - (sizeof(raw_sum->ss_datasum) +
                               sizeof(raw_sum->ss_sumsum)));

        list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
                                     b_assoc_buffers) {
                bytes -= size;
                size = min_t(unsigned long, bytes, bh->b_size);
                crc = crc32_le(crc, bh->b_data, size);
        }
        raw_sum->ss_sumsum = cpu_to_le32(crc);
}
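
/*
 * Note on checksum coverage: ss_datasum and ss_sumsum are the first
 * two fields of struct nilfs_segment_summary, so each checksum skips
 * the checksum field(s) at the head of the first summary block.
 * ss_sumsum (computed above) covers the sb_sum.sumbytes bytes of
 * summary data starting right after the two checksum fields;
 * ss_datasum (computed below) covers the whole log starting right
 * after ss_datasum itself.
 */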
void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
                                   u32 seed)
{
        struct buffer_head *bh;
        struct nilfs_segment_summary *raw_sum;
        void *kaddr;
        u32 crc;

        bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
                        b_assoc_buffers);
        raw_sum = (struct nilfs_segment_summary *)bh->b_data;
        crc = crc32_le(seed,
                       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
                       bh->b_size - sizeof(raw_sum->ss_datasum));

        list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
                                     b_assoc_buffers) {
                crc = crc32_le(crc, bh->b_data, bh->b_size);
        }
        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                kaddr = kmap_atomic(bh->b_page, KM_USER0);
                crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
                kunmap_atomic(kaddr, KM_USER0);
        }
        raw_sum->ss_datasum = cpu_to_le32(crc);
}

static void nilfs_release_buffers(struct list_head *list)
{
        struct buffer_head *bh, *n;

        list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
                list_del_init(&bh->b_assoc_buffers);
                if (buffer_nilfs_allocated(bh)) {
                        struct page *clone_page = bh->b_page;

                        /* remove clone page */
                        brelse(bh);
                        page_cache_release(clone_page); /* for each bh */
                        if (page_count(clone_page) <= 2) {
                                lock_page(clone_page);
                                nilfs_free_private_page(clone_page);
                        }
                        continue;
                }
                brelse(bh);
        }
}

static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
        nilfs_release_buffers(&segbuf->sb_segsum_buffers);
        nilfs_release_buffers(&segbuf->sb_payload_buffers);
}

/*
 * Iterators for segment buffers
 */
void nilfs_clear_logs(struct list_head *logs)
{
        struct nilfs_segment_buffer *segbuf;

        list_for_each_entry(segbuf, logs, sb_list)
                nilfs_segbuf_clear(segbuf);
}

void nilfs_truncate_logs(struct list_head *logs,
                         struct nilfs_segment_buffer *last)
{
        struct nilfs_segment_buffer *n, *segbuf;

        segbuf = list_prepare_entry(last, logs, sb_list);
        list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
                list_del_init(&segbuf->sb_list);
                nilfs_segbuf_clear(segbuf);
                nilfs_segbuf_free(segbuf);
        }
}

int nilfs_wait_on_logs(struct list_head *logs)
{
        struct nilfs_segment_buffer *segbuf;
        int err;

        list_for_each_entry(segbuf, logs, sb_list) {
                err = nilfs_segbuf_wait(segbuf);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * BIO operations
 */
static void nilfs_end_bio_write(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct nilfs_segment_buffer *segbuf = bio->bi_private;

        if (err == -EOPNOTSUPP) {
                set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                bio_put(bio);
                /* to be detected by nilfs_segbuf_submit_bio() */
        }

        if (!uptodate)
                atomic_inc(&segbuf->sb_err);

        bio_put(bio);
        complete(&segbuf->sb_bio_event);
}
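
/*
 * Reference and completion accounting: nilfs_segbuf_submit_bio()
 * below wraps submit_bio() in a bio_get()/bio_put() pair so that the
 * BIO_EOPNOTSUPP flag set in nilfs_end_bio_write() can be examined
 * once submission returns.  Each submitted BIO signals sb_bio_event
 * exactly once, and sb_nbio counts how many completions
 * nilfs_segbuf_wait() still has to reap.
 */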
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
                                   struct nilfs_write_info *wi, int mode)
{
        struct bio *bio = wi->bio;
        int err;

        if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
                /* Throttle: reap one in-flight BIO before adding another */
                wait_for_completion(&segbuf->sb_bio_event);
                segbuf->sb_nbio--;
                if (unlikely(atomic_read(&segbuf->sb_err))) {
                        bio_put(bio);
                        err = -EIO;
                        goto failed;
                }
        }

        bio->bi_end_io = nilfs_end_bio_write;
        bio->bi_private = segbuf;
        bio_get(bio);
        submit_bio(mode, bio);
        if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
                bio_put(bio);
                err = -EOPNOTSUPP;
                goto failed;
        }
        segbuf->sb_nbio++;
        bio_put(bio);

        wi->bio = NULL;
        wi->rest_blocks -= wi->end - wi->start;
        wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
        wi->start = wi->end;
        return 0;

 failed:
        wi->bio = NULL;
        return err;
}

/**
 * nilfs_alloc_seg_bio - allocate a new bio for writing a log
 * @nilfs: nilfs object
 * @start: start block number of the bio
 * @nr_vecs: requested size of the page vector
 *
 * Return Value: On success, pointer to the struct bio is returned.
 * On error, NULL is returned.
 */
static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
                                       int nr_vecs)
{
        struct bio *bio;

        bio = bio_alloc(GFP_NOIO, nr_vecs);
        if (bio == NULL) {
                /* Retry with progressively smaller page vectors */
                while (!bio && (nr_vecs >>= 1))
                        bio = bio_alloc(GFP_NOIO, nr_vecs);
        }
        if (likely(bio)) {
                bio->bi_bdev = nilfs->ns_bdev;
                /* convert the block number to 512-byte sectors */
                bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
        }
        return bio;
}

static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
                                       struct nilfs_write_info *wi)
{
        wi->bio = NULL;
        wi->rest_blocks = segbuf->sb_sum.nblocks;
        wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
        wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
        wi->start = wi->end = 0;
        wi->blocknr = segbuf->sb_pseg_start;
}

static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
                                  struct nilfs_write_info *wi,
                                  struct buffer_head *bh, int mode)
{
        int len, err;

        BUG_ON(wi->nr_vecs <= 0);
 repeat:
        if (!wi->bio) {
                wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
                                              wi->nr_vecs);
                if (unlikely(!wi->bio))
                        return -ENOMEM;
        }

        len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (len == bh->b_size) {
                wi->end++;
                return 0;
        }
        /* bio is FULL */
        err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
        /* never submit current bh */
        if (likely(!err))
                goto repeat;
        return err;
}

/**
 * nilfs_segbuf_write - submit write requests of a log
 * @segbuf: buffer storing a log to be written
 * @nilfs: nilfs object
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
                       struct the_nilfs *nilfs)
{
        struct nilfs_write_info wi;
        struct buffer_head *bh;
        int res = 0, rw = WRITE;

        wi.nilfs = nilfs;
        nilfs_segbuf_prepare_write(segbuf, &wi);

        list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
                res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
                if (unlikely(res))
                        goto failed_bio;
        }

        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
                if (unlikely(res))
                        goto failed_bio;
        }

        if (wi.bio) {
                /*
                 * Last BIO is always sent through the following
                 * submission.
                 */
                rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
                res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
        }

 failed_bio:
        return res;
}
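
/*
 * Illustrative pairing of submission and completion (the actual
 * caller is the segment constructor, outside this file):
 *
 *      err = nilfs_segbuf_write(segbuf, nilfs);
 *      if (likely(!err))
 *              err = nilfs_segbuf_wait(segbuf);
 *
 * Even when nilfs_segbuf_write() fails partway, BIOs issued before
 * the failure are still in flight, so the in-flight count kept in
 * sb_nbio must eventually be drained by nilfs_segbuf_wait().
 */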
/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
        int err = 0;

        if (!segbuf->sb_nbio)
                return 0;

        do {
                wait_for_completion(&segbuf->sb_bio_event);
        } while (--segbuf->sb_nbio > 0);

        if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
                printk(KERN_ERR "NILFS: IO error writing segment\n");
                err = -EIO;
        }
        return err;
}
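
/*
 * Summary of a segment buffer's lifecycle, condensed from the
 * functions above:
 *
 *      nilfs_segbuf_new()                allocate
 *      nilfs_segbuf_map()                bind to an on-disk segment
 *      nilfs_segbuf_reset()              start a new log
 *        (add blocks via nilfs_segbuf_extend_segsum()/extend_payload())
 *      nilfs_segbuf_fill_in_segsum()     fill the summary header
 *      nilfs_segbuf_fill_in_*_crc()      compute the two checksums
 *      nilfs_segbuf_write()              submit the BIOs
 *      nilfs_segbuf_wait()               collect the results
 *      nilfs_segbuf_free()               release
 */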