// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

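/* Check whether a pinned buffer belongs to a resource group glock. */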
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

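/*
 * When an rgrp bitmap buffer is unpinned, sync the bitmap changes into the
 * clone bitmap (issuing discards first if enabled) and update the clone
 * counters so that freed blocks become allocatable again.
 */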
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	rgrp_lock_local(rgd);
	if (bi->bi_clone == NULL)
		goto out;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
	rgd->rd_extfail_pt = rgd->rd_free;

out:
	rgrp_unlock_local(rgd);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

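/*
 * Advance the log flush head to the next journal block, wrapping at the end
 * of the journal. The BUG_ON catches the case where the flush head would
 * overrun the log tail.
 */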
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

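/*
 * Map a logical journal block to its on-disk (device) block number by
 * walking the journal's extent list. Returns -1 (as a u64) if the logical
 * block lies outside every extent.
 */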
u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: The bio to get or allocate
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, enum req_op op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @jd: The journal descriptor
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
		    struct page *page, unsigned size, unsigned offset,
		    u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
		       bh_offset(bh), dblock);
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */

static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: true if found, false otherwise.
 */

static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr;
	unsigned int offset;
	bool ret = false;

	kaddr = kmap_local_page(page);
	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_local(kaddr);
	return ret;
}

/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the folio with 'index' in the journal's mapping. Search the folio for
 * the journal head if requested (*done == false). Release refs on the
 * folio so the page cache can reclaim it. We grabbed a
 * reference on this folio twice, first when we did a grab_cache_page()
 * to obtain the folio to add it to the bio and second when we do a
 * filemap_get_folio() here to get the folio to wait on while I/O on it is
 * being completed.
 * This function is also used to free up a folio we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the folio.
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct folio *folio;

	folio = filemap_get_folio(jd->jd_inode->i_mapping, index);

	folio_wait_locked(folio);
	if (folio_test_error(folio))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, &folio->page);

	/* filemap_get_folio() and the earlier grab_cache_page() */
	folio_put_refs(folio, 2);
}

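/*
 * Allocate a new bio that picks up at the disk sector where @prev left off,
 * chain it to @prev, and submit @prev. Used when a journal read spans more
 * blocks than a single bio can hold.
 */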
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
	bio_clone_blkg_association(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set inode pages will not be truncated
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number. (i.e. the log head)
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = grab_cache_page(mapping, block >> shift);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;	/* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}

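/*
 * Allocate a page from the gfs2 page mempool and initialise it as a log
 * descriptor block of the given type.
 */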
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

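/*
 * Journaled data blocks that happen to begin with the GFS2 magic number must
 * be "escaped" so that log replay cannot mistake them for metadata. Mark such
 * buffers here; gfs2_before_commit() zeroes the magic in the log copy and
 * records the escape flag in the descriptor.
 */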
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_local_page(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_local(kaddr);
}

static int blocknr_cmp(void *priv, const struct list_head *a,
		       const struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

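/*
 * Write the pinned (meta)data buffers on @blist to the log in block number
 * order, at most @limit buffers per log descriptor. Each chunk is written as
 * a descriptor block listing the in-place block numbers, followed by the
 * buffer contents themselves. For journaled data, escaped buffers are copied
 * to a scratch page with their magic number cleared before being written.
 */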
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *p;

				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				p = page_address(page);
				memcpy_from_page(p, bd2->bd_bh->b_page,
						 bh_offset(bd2->bd_bh),
						 bd2->bd_bh->b_size);
				*(__be32 *)p = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

#define obsolete_rgrp_replay \
"Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n"
#define obsolete_rgrp_replay2 \
"busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n"

static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log,
			  u64 blkno)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data;

	rgd = gfs2_blk2rgrpd(sdp, blkno, false);
	if (rgd && rgd->rd_addr == blkno &&
	    rgd->rd_bits && rgd->rd_bits->bi_bh) {
		fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno,
			jd->jd_jid, bh_log->b_blocknr);
		fs_info(sdp, obsolete_rgrp_replay2,
			buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
			buffer_pinned(rgd->rd_bits->bi_bh),
			rgd->rd_igeneration,
			be64_to_cpu(jrgd->rg_igeneration));
		gfs2_dump_glock(NULL, rgd->rd_gl, true);
	}
}

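/*
 * Replay pass 1 for metadata descriptors: for each block number listed in
 * the descriptor, read the corresponding log block and, unless the block has
 * been revoked, copy its contents over the in-place metadata block.
 */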
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG))
				obsolete_rgrp(jd, bh_log, blkno);

			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

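/*
 * Write the accumulated revokes to the log. The first block is a revoke
 * descriptor; subsequent blocks of revoke records are plain metadata blocks
 * of type GFS2_METATYPE_LB, each packed with as many 64-bit block numbers
 * as will fit.
 */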
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_flush_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

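/*
 * Free all revokes on the sd_log_revokes list and drop the matching
 * per-glock revoke counts.
 */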
void gfs2_drain_revokes(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	gfs2_drain_revokes(sdp);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

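/*
 * Replay pass 0 for revoke descriptors: walk the revoke blocks and record
 * each revoked block number so that later passes skip stale metadata and
 * data blocks found in the log.
 */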
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

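/*
 * Replay pass 1 for journaled data descriptors: like the metadata case, but
 * each entry carries an extra "escaped" flag; if it is set, the GFS2 magic
 * number is restored at the start of the block before it is written back
 * in place.
 */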
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}


static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};