// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from flusher thread and
		 * kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_get_page_flags(mapping, index,
					   FGP_LOCK|FGP_ACCESSED);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	put_page(page);

	return bh;
}
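
/*
 * Worked example of the block-to-page mapping in gfs2_getbuf() above
 * (illustrative numbers only, not taken from any particular filesystem):
 * with 4KiB pages (PAGE_SHIFT == 12) and 1KiB filesystem blocks
 * (sb_bsize_shift == 10), shift is 2, so each page caches four blocks.
 * Block number 11 then lands in page index 2 (11 >> 2) as buffer 3
 * within that page (11 - (2 << 2)).
 */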

static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_status);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
			    int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(GFP_NOIO, num);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		bio_set_dev(bio, bh->b_bdev);
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		bio_set_op_attrs(bio, op, op_flags);
		submit_bio(bio);
	}
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: DIO_WAIT waits for the read to complete
 * @rahead: if set, also start a read of the following block
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}
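
/*
 * Illustrative call pattern for gfs2_meta_read() (a sketch of how a caller
 * might use it; gl and blkno stand in for the caller's glock and block
 * number): with DIO_WAIT the read is synchronous and the caller owns the
 * returned reference, e.g.
 *
 *	struct buffer_head *bh;
 *	int error;
 *
 *	error = gfs2_meta_read(gl, blkno, DIO_WAIT, 0, &bh);
 *	if (error)
 *		return error;
 *	...use bh->b_data...
 *	brelse(bh);
 *
 * Without DIO_WAIT the read is only submitted, and the caller must wait
 * (see gfs2_meta_wait() below) before touching the data.
 */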

/**
 * gfs2_meta_wait - Reread a block from disk
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		return -EIO;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return 0;
}

void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		set_bit(TR_TOUCHED, &tr->tr_flags);
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		spin_lock(&sdp->sd_ail_lock);
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}
		spin_unlock(&sdp->sd_ail_lock);
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_meta_wipe - make an inode's buffers no longer dirty or pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;

	while (blen) {
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			gfs2_remove_from_journal(bh, REMOVE_META);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	} else {
		*bhp = bh;
	}
	return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(REQ_OP_READ,
				    REQ_RAHEAD | REQ_META | REQ_PRIO,
				    1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}
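
/*
 * Readahead usage sketch for gfs2_meta_ra() (an illustrative caller assumed
 * for the example; ip, dblock and extlen stand in for the caller's inode,
 * starting disk block and extent length):
 *
 *	bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
 *
 * Only the first block of the extent is waited on; the remaining blocks are
 * submitted asynchronously with REQ_RAHEAD and will normally already be
 * cached by the time the caller reaches them.
 */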