// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page. Note that this can
		 * potentially cause a busy-wait loop from flusher thread and
		 * kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE | write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.writepage = gfs2_aspace_writepage,
	.release_folio = gfs2_release_folio,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.writepage = gfs2_aspace_writepage,
	.release_folio = gfs2_release_folio,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
		if (!page_has_buffers(page))
			create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
	} else {
		page = find_get_page_flags(mapping, index,
					   FGP_LOCK|FGP_ACCESSED);
		if (!page)
			return NULL;
		if (!page_has_buffers(page)) {
			bh = NULL;
			goto out_unlock;
		}
	}

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

out_unlock:
	unlock_page(page);
	put_page(page);

	return bh;
}
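
/*
 * Editorial sketch (not part of the original file): a worked example of the
 * blkno -> (page index, buffer index) mapping that gfs2_getbuf() above uses.
 * The numbers assume 4 KiB pages (PAGE_SHIFT == 12) and 1 KiB filesystem
 * blocks (sb_bsize_shift == 10), i.e. four buffers cached per page; the
 * helper name is made up for illustration.
 */
#if 0	/* illustrative only; not compiled */
static void example_getbuf_indexing(void)
{
	unsigned int shift = 12 - 10;			/* PAGE_SHIFT - sb_bsize_shift = 2 */
	u64 blkno = 13;					/* arbitrary example block */
	unsigned long index = blkno >> shift;		/* page index 3 */
	unsigned int bufnum = blkno - (index << shift);	/* buffer 1 within that page */
}
#endif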

static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_status);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request. (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(bh->b_bdev, num, opf, GFP_NOIO);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		submit_bio(bio);
	}
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @rahead: Do read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp)) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ | REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_meta_wait - Wait for a block read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		return -EIO;
	}
	if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
		return -EIO;

	return 0;
}
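
/*
 * Editorial sketch (not part of the original file): the split-phase pattern
 * the two functions above allow.  Calling gfs2_meta_read() without DIO_WAIT
 * returns as soon as the read bio has been submitted; gfs2_meta_wait() then
 * waits for the buffer once its contents are actually needed.  The glock
 * covering the block is assumed to be held by the caller, and the helper
 * name below is made up for illustration.
 */
#if 0	/* illustrative only; not compiled */
static int example_read_then_wait(struct gfs2_glock *gl, u64 blkno)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh;
	int error;

	error = gfs2_meta_read(gl, blkno, 0, 0, &bh);	/* no DIO_WAIT: just submit */
	if (error)
		return error;

	/* ... overlap other work with the I/O here ... */

	error = gfs2_meta_wait(sdp, bh);		/* now wait for the data */
	brelse(bh);
	return error;
}
#endif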

void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		set_bit(TR_TOUCHED, &tr->tr_flags);
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		} else if (!list_empty(&bd->bd_ail_st_list) &&
			   !list_empty(&bd->bd_ail_gl_list)) {
			gfs2_remove_from_ail(bd);
		}
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
 * @sdp: superblock
 * @bstart: starting block address of buffers to remove
 * @blen: length of buffers to be removed
 *
 * This function is called from gfs2_journal_wipe, whose job is to remove
 * buffers, corresponding to deleted blocks, from the journal. If we find any
 * bufdata elements on the system ail1 list, they haven't been written to
 * the journal yet. So we remove them.
 */
static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
{
	struct gfs2_trans *tr, *s;
	struct gfs2_bufdata *bd, *bs;
	struct buffer_head *bh;
	u64 end = bstart + blen;

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
					 bd_ail_st_list) {
			bh = bd->bd_bh;
			if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
				continue;

			gfs2_remove_from_journal(bh, REMOVE_JDATA);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	unsigned long index = blkno >> shift; /* convert block to page */
	unsigned int bufnum = blkno - (index << shift);

	page = find_get_page_flags(mapping, index, FGP_LOCK|FGP_ACCESSED);
	if (!page)
		return NULL;
	if (!page_has_buffers(page)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);
	unlock_page(page);
	put_page(page);
	return bh;
}

/**
 * gfs2_journal_wipe - make inode's buffers so they aren't dirty/pinned anymore
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;
	int ty;

	gfs2_ail1_wipe(sdp, bstart, blen);
	while (blen) {
		ty = REMOVE_META;
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (!bh && gfs2_is_jdata(ip)) {
			bh = gfs2_getjdatabuf(ip, bstart);
			ty = REMOVE_JDATA;
		}
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, ty);
			spin_unlock(&sdp->sd_ail_lock);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}
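
/*
 * Editorial sketch (not part of the original file): gfs2_journal_wipe() is
 * meant to be called for a run of blocks that has just been deallocated, so
 * that any cached copies stop being dirty or pinned in the journal.  It
 * covers both metadata buffers and, for jdata inodes, data buffers.  The
 * helper name below is hypothetical; it only shows the calling convention
 * for a freed extent [bstart, bstart + blen).
 */
#if 0	/* illustrative only; not compiled */
static void example_wipe_freed_extent(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	if (blen)
		gfs2_journal_wipe(ip, bstart, blen);
}
#endif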

/**
 * gfs2_meta_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @mtype: The block type (GFS2_METATYPE_*)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
		     struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	} else {
		*bhp = bh;
	}
	return ret;
}
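
/*
 * Editorial sketch (not part of the original file): a typical caller of
 * gfs2_meta_buffer() reading an inode's on-disk dinode block and relying on
 * the built-in metadata type check.  The caller is assumed to already hold
 * an appropriate glock on the inode; the helper name is made up for
 * illustration.
 */
#if 0	/* illustrative only; not compiled */
static int example_read_dinode(struct gfs2_inode *ip)
{
	struct buffer_head *bh;
	int error;

	error = gfs2_meta_buffer(ip, GFS2_METATYPE_DI, ip->i_no_addr, &bh);
	if (error)
		return error;
	/* ... interpret bh->b_data as a struct gfs2_dinode ... */
	brelse(bh);
	return 0;
}
#endif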

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
			  sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	bh_read_nowait(first_bh, REQ_META | REQ_PRIO);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}
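
/*
 * Editorial sketch (not part of the original file): how a caller might use
 * gfs2_meta_ra() when it is about to walk a contiguous on-disk run of
 * metadata.  Readahead for the rest of the extent is opportunistic; only the
 * first block is returned, with a buffer reference the caller must drop via
 * brelse().  The helper name is made up for illustration.
 */
#if 0	/* illustrative only; not compiled */
static int example_walk_extent(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct buffer_head *bh;

	bh = gfs2_meta_ra(gl, dblock, extlen);
	if (!buffer_uptodate(bh)) {	/* the first block failed to read */
		brelse(bh);
		return -EIO;
	}
	/* ... use bh->b_data for the first block; later blocks are likely cached ... */
	brelse(bh);
	return 0;
}
#endif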