// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_aspace_writepage - Write back the dirty buffers of a metadata page
 * @page: The page of metadata buffers
 * @wbc: The writeback control
 *
 * Returns: 0
 */
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page. Note that this can
		 * potentially cause a busy-wait loop from flusher thread and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.set_page_dirty = __set_page_dirty_buffers,
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.set_page_dirty = __set_page_dirty_buffers,
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
		if (!page_has_buffers(page))
			create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
	} else {
		page = find_get_page_flags(mapping, index,
					   FGP_LOCK|FGP_ACCESSED);
		if (!page)
			return NULL;
		if (!page_has_buffers(page)) {
			bh = NULL;
			goto out_unlock;
		}
	}

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

out_unlock:
	unlock_page(page);
	put_page(page);

	return bh;
}

static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_status);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request. (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
			    int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(GFP_NOIO, num);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		bio_set_dev(bio, bh->b_bdev);
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		bio_set_op_attrs(bio, op, op_flags);
		submit_bio(bio);
	}
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @rahead: Do read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(gfs2_withdrawn(sdp)) &&
	    (!sdp->sd_jdesc || gl != sdp->sd_jinode_gl)) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_meta_wait - Reread a block from disk
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		return -EIO;
	}
	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return 0;
}

void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		set_bit(TR_TOUCHED, &tr->tr_flags);
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		} else if (!list_empty(&bd->bd_ail_st_list) &&
			   !list_empty(&bd->bd_ail_gl_list)) {
			gfs2_remove_from_ail(bd);
		}
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
 * @sdp: superblock
 * @bstart: starting block address of buffers to remove
 * @blen: length of buffers to be removed
 *
 * This function is called from gfs2_journal_wipe, whose job is to remove
 * buffers, corresponding to deleted blocks, from the journal. If we find any
 * bufdata elements on the system ail1 list, they haven't been written to
 * the journal yet. So we remove them.
 */
static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
{
	struct gfs2_trans *tr, *s;
	struct gfs2_bufdata *bd, *bs;
	struct buffer_head *bh;
	u64 end = bstart + blen;

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
					 bd_ail_st_list) {
			bh = bd->bd_bh;
			if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
				continue;

			gfs2_remove_from_journal(bh, REMOVE_JDATA);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	unsigned long index = blkno >> shift; /* convert block to page */
	unsigned int bufnum = blkno - (index << shift);

	page = find_get_page_flags(mapping, index, FGP_LOCK|FGP_ACCESSED);
	if (!page)
		return NULL;
	if (!page_has_buffers(page)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);
	unlock_page(page);
	put_page(page);
	return bh;
}

/**
 * gfs2_journal_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;
	int ty;

	gfs2_ail1_wipe(sdp, bstart, blen);
	while (blen) {
		ty = REMOVE_META;
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (!bh && gfs2_is_jdata(ip)) {
			bh = gfs2_getjdatabuf(ip, bstart);
			ty = REMOVE_JDATA;
		}
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, ty);
			spin_unlock(&sdp->sd_ail_lock);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @mtype: The block type (GFS2_METATYPE_*)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
		     struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	} else {
		*bhp = bh;
	}
	return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(REQ_OP_READ,
				    REQ_RAHEAD | REQ_META | REQ_PRIO,
				    1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}
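
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that already holds @ip's inode glock can read the inode's own dinode block
 * through gfs2_meta_buffer() and have its on-disk metatype verified.  The
 * helper name read_dinode_example() is hypothetical.
 */
static int __maybe_unused read_dinode_example(struct gfs2_inode *ip,
					      struct buffer_head **bhp)
{
	/*
	 * gfs2_meta_buffer() reads with DIO_WAIT, so on success the buffer in
	 * *bhp is uptodate and referenced; drop it with brelse() when done.
	 */
	return gfs2_meta_buffer(ip, GFS2_METATYPE_DI, ip->i_no_addr, bhp);
}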