xref: /openbmc/linux/fs/gfs2/lops.c (revision b9ccfda2)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/mempool.h>
16 #include <linux/gfs2_ondisk.h>
17 #include <linux/bio.h>
18 #include <linux/fs.h>
19 
20 #include "gfs2.h"
21 #include "incore.h"
22 #include "inode.h"
23 #include "glock.h"
24 #include "log.h"
25 #include "lops.h"
26 #include "meta_io.h"
27 #include "recovery.h"
28 #include "rgrp.h"
29 #include "trans.h"
30 #include "util.h"
31 #include "trace_gfs2.h"
32 
33 /**
34  * gfs2_pin - Pin a buffer in memory
35  * @sdp: The superblock
36  * @bh: The buffer to be pinned
37  *
38  * The log lock must be held when calling this function
39  */
40 static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
41 {
42 	struct gfs2_bufdata *bd;
43 
44 	BUG_ON(!current->journal_info);
45 
46 	clear_buffer_dirty(bh);
47 	if (test_set_buffer_pinned(bh))
48 		gfs2_assert_withdraw(sdp, 0);
49 	if (!buffer_uptodate(bh))
50 		gfs2_io_error_bh(sdp, bh);
51 	bd = bh->b_private;
52 	/* If this buffer is in the AIL and it has already been written
53 	 * back to the in-place disk block, move it to the AIL2 list.
54 	 */
55 	spin_lock(&sdp->sd_ail_lock);
56 	if (bd->bd_ail)
57 		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
58 	spin_unlock(&sdp->sd_ail_lock);
59 	get_bh(bh);
60 	atomic_inc(&sdp->sd_log_pinned);
61 	trace_gfs2_pin(bd, 1);
62 }
63 
64 static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
65 {
66 	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
67 }
68 
69 static void maybe_release_space(struct gfs2_bufdata *bd)
70 {
71 	struct gfs2_glock *gl = bd->bd_gl;
72 	struct gfs2_sbd *sdp = gl->gl_sbd;
73 	struct gfs2_rgrpd *rgd = gl->gl_object;
74 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
75 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
76 
77 	if (bi->bi_clone == 0)
78 		return;
79 	if (sdp->sd_args.ar_discard)
80 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
81 	memcpy(bi->bi_clone + bi->bi_offset,
82 	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
83 	clear_bit(GBF_FULL, &bi->bi_flags);
84 	rgd->rd_free_clone = rgd->rd_free;
85 }
86 
87 /**
88  * gfs2_unpin - Unpin a buffer
89  * @sdp: the filesystem the buffer belongs to
90  * @bh: The buffer to unpin
91  * @ai: The AIL structure for the current log flush; the unpinned
92  *      buffer is added to its AIL1 list
93  *
94  */
95 
96 static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
97 		       struct gfs2_ail *ai)
98 {
99 	struct gfs2_bufdata *bd = bh->b_private;
100 
101 	BUG_ON(!buffer_uptodate(bh));
102 	BUG_ON(!buffer_pinned(bh));
103 
104 	lock_buffer(bh);
105 	mark_buffer_dirty(bh);
106 	clear_buffer_pinned(bh);
107 
108 	if (buffer_is_rgrp(bd))
109 		maybe_release_space(bd);
110 
111 	spin_lock(&sdp->sd_ail_lock);
112 	if (bd->bd_ail) {
113 		list_del(&bd->bd_ail_st_list);
114 		brelse(bh);
115 	} else {
116 		struct gfs2_glock *gl = bd->bd_gl;
117 		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
118 		atomic_inc(&gl->gl_ail_count);
119 	}
120 	bd->bd_ail = ai;
121 	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
122 	spin_unlock(&sdp->sd_ail_lock);
123 
124 	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
125 	trace_gfs2_pin(bd, 0);
126 	unlock_buffer(bh);
127 	atomic_dec(&sdp->sd_log_pinned);
128 }
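/*
 * Life cycle of a journaled buffer, roughly: gfs2_pin() takes a
 * reference and clears the dirty bit so the buffer cannot be written
 * in place while its log copy is in flight; once the log write has
 * completed the buffer is unpinned here, redirtied, and placed on the
 * AIL1 list of the gfs2_ail for this flush. When writeback to the
 * in-place block completes, the AIL code (log.c) moves it to the AIL2
 * list, from which it is dropped once that log space can be reused.
 */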
129 
130 static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
131 {
132 	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
133 	       (sdp->sd_log_flush_head != sdp->sd_log_head));
134 
135 	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
136 		sdp->sd_log_flush_head = 0;
137 		sdp->sd_log_flush_wrapped = 1;
138 	}
139 }
140 
141 static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
142 {
143 	unsigned int lbn = sdp->sd_log_flush_head;
144 	struct gfs2_journal_extent *je;
145 	u64 block;
146 
147 	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
148 		if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
149 			block = je->dblock + lbn - je->lblock;
150 			gfs2_log_incr_head(sdp);
151 			return block;
152 		}
153 	}
154 
155 	return -1;
156 }
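/*
 * Example (illustrative numbers only): with a journal extent of
 * lblock = 0, blocks = 8192 and dblock = 65536, logical journal block
 * 100 maps to device block 65536 + 100 - 0 = 65636. A journal may be
 * made up of several such extents, hence the list walk above.
 */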
157 
158 /**
159  * gfs2_end_log_write_bh - end log write of pagecache data with buffers
160  * @sdp: The superblock
161  * @bvec: The bio_vec
162  * @error: The i/o status
163  *
164  * This finds the relevant buffers and unlocks them and sets the
165  * error flag according to the status of the i/o request. This is
166  * used when the log is writing data which has an in-place version
167  * that is pinned in the pagecache.
168  */
169 
170 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
171 				  int error)
172 {
173 	struct buffer_head *bh, *next;
174 	struct page *page = bvec->bv_page;
175 	unsigned size;
176 
177 	bh = page_buffers(page);
178 	size = bvec->bv_len;
179 	while (bh_offset(bh) < bvec->bv_offset)
180 		bh = bh->b_this_page;
181 	do {
182 		if (error)
183 			set_buffer_write_io_error(bh);
184 		unlock_buffer(bh);
185 		next = bh->b_this_page;
186 		size -= bh->b_size;
187 		brelse(bh);
188 		bh = next;
189 	} while(bh && size);
190 }
191 
192 /**
193  * gfs2_end_log_write - end of i/o to the log
194  * @bio: The bio
195  * @error: Status of i/o request
196  *
197  * Each bio_vec contains either data from the pagecache or data
198  * relating to the log itself. Here we iterate over the bio_vec
199  * array, processing both kinds of data.
200  *
201  */
202 
203 static void gfs2_end_log_write(struct bio *bio, int error)
204 {
205 	struct gfs2_sbd *sdp = bio->bi_private;
206 	struct bio_vec *bvec;
207 	struct page *page;
208 	int i;
209 
210 	if (error) {
211 		sdp->sd_log_error = error;
212 		fs_err(sdp, "Error %d writing to log\n", error);
213 	}
214 
215 	bio_for_each_segment(bvec, bio, i) {
216 		page = bvec->bv_page;
217 		if (page_has_buffers(page))
218 			gfs2_end_log_write_bh(sdp, bvec, error);
219 		else
220 			mempool_free(page, gfs2_page_pool);
221 	}
222 
223 	bio_put(bio);
224 	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
225 		wake_up(&sdp->sd_log_flush_wait);
226 }
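/*
 * Two kinds of page can appear in a log bio: pagecache pages with
 * buffer heads attached (written via gfs2_log_write_bh, finished by
 * gfs2_end_log_write_bh above) and standalone pages allocated from
 * gfs2_page_pool for log descriptors and escaped data copies (written
 * via gfs2_log_write_page). The former are unlocked and released, the
 * latter are simply returned to the mempool.
 */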
227 
228 /**
229  * gfs2_log_flush_bio - Submit any pending log bio
230  * @sdp: The superblock
231  * @rw: The rw flags
232  *
233  * Submit any pending part-built or full bio to the block device. If
234  * there is no pending bio, then this is a no-op.
235  */
236 
237 void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
238 {
239 	if (sdp->sd_log_bio) {
240 		atomic_inc(&sdp->sd_log_in_flight);
241 		submit_bio(rw, sdp->sd_log_bio);
242 		sdp->sd_log_bio = NULL;
243 	}
244 }
245 
246 /**
247  * gfs2_log_alloc_bio - Allocate a new bio for log writing
248  * @sdp: The superblock
249  * @blkno: The next device block number we want to write to
250  *
251  * This should never be called when there is a cached bio in the
252  * super block. When it returns, there will be a cached bio in the
253  * super block which will have as many bio_vecs as the device is
254  * happy to handle.
255  *
256  * Returns: Newly allocated bio
257  */
258 
259 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
260 {
261 	struct super_block *sb = sdp->sd_vfs;
262 	unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
263 	struct bio *bio;
264 
265 	BUG_ON(sdp->sd_log_bio);
266 
267 	while (1) {
268 		bio = bio_alloc(GFP_NOIO, nrvecs);
269 		if (likely(bio))
270 			break;
271 		nrvecs = max(nrvecs/2, 1U);
272 	}
273 
274 	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
275 	bio->bi_bdev = sb->s_bdev;
276 	bio->bi_end_io = gfs2_end_log_write;
277 	bio->bi_private = sdp;
278 
279 	sdp->sd_log_bio = bio;
280 
281 	return bio;
282 }
283 
284 /**
285  * gfs2_log_get_bio - Get cached log bio, or allocate a new one
286  * @sdp: The superblock
287  * @blkno: The device block number we want to write to
288  *
289  * If there is a cached bio, then if the next block number is sequential
290  * with the previous one, return it, otherwise flush the bio to the
291  * device. If there is not a cached bio, or we just flushed it, then
292  * allocate a new one.
293  *
294  * Returns: The bio to use for log writes
295  */
296 
297 static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
298 {
299 	struct bio *bio = sdp->sd_log_bio;
300 	u64 nblk;
301 
302 	if (bio) {
303 		nblk = bio->bi_sector + bio_sectors(bio);
304 		nblk >>= sdp->sd_fsb2bb_shift;
305 		if (blkno == nblk)
306 			return bio;
307 		gfs2_log_flush_bio(sdp, WRITE);
308 	}
309 
310 	return gfs2_log_alloc_bio(sdp, blkno);
311 }
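/*
 * Note on units: bi_sector counts 512-byte sectors while blkno is in
 * filesystem blocks, so the end of the cached bio is converted back to
 * a block number via sd_fsb2bb_shift. For example, assuming a 4KiB
 * block size the shift is 3 (8 sectors per block), so a bio ending at
 * sector 800 corresponds to block 100; only a write to block 100 can
 * then be merged into the cached bio.
 */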
312 
313 
314 /**
315  * gfs2_log_write - write to log
316  * @sdp: the filesystem
317  * @page: the page to write
318  * @size: the size of the data to write
319  * @offset: the offset within the page
320  *
321  * Try and add the page segment to the current bio. If that fails,
322  * submit the current bio to the device and create a new one, and
323  * then add the page segment to that.
324  */
325 
326 static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
327 			   unsigned size, unsigned offset)
328 {
329 	u64 blkno = gfs2_log_bmap(sdp);
330 	struct bio *bio;
331 	int ret;
332 
333 	bio = gfs2_log_get_bio(sdp, blkno);
334 	ret = bio_add_page(bio, page, size, offset);
335 	if (ret == 0) {
336 		gfs2_log_flush_bio(sdp, WRITE);
337 		bio = gfs2_log_alloc_bio(sdp, blkno);
338 		ret = bio_add_page(bio, page, size, offset);
339 		WARN_ON(ret == 0);
340 	}
341 }
342 
343 /**
344  * gfs2_log_write_bh - write a buffer's content to the log
345  * @sdp: The super block
346  * @bh: The buffer pointing to the in-place location
347  *
348  * This writes the content of the buffer to the next available location
349  * in the log. The buffer will be unlocked once the i/o to the log has
350  * completed.
351  */
352 
353 static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
354 {
355 	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
356 }
357 
358 /**
359  * gfs2_log_write_page - write one block stored in a page, into the log
360  * @sdp: The superblock
361  * @page: The struct page
362  *
363  * This writes the first block-sized part of the page into the log. Note
364  * that the page must have been allocated from the gfs2_page_pool mempool
365  * and that after this has been called, ownership has been transferred and
366  * the page may be freed at any time.
367  */
368 
369 void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
370 {
371 	struct super_block *sb = sdp->sd_vfs;
372 	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
373 }
374 
375 static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
376 				      u32 ld_length, u32 ld_data1)
377 {
378 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
379 	struct gfs2_log_descriptor *ld = page_address(page);
380 	clear_page(ld);
381 	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
382 	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
383 	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
384 	ld->ld_type = cpu_to_be32(ld_type);
385 	ld->ld_length = cpu_to_be32(ld_length);
386 	ld->ld_data1 = cpu_to_be32(ld_data1);
387 	ld->ld_data2 = 0;
388 	return page;
389 }
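/*
 * Layout of a metadata log descriptor block: the gfs2_log_descriptor
 * header filled in above is followed by an array of __be64 block
 * numbers, one per journaled block in the chunk (see
 * gfs2_before_commit below). With a 4KiB block size that leaves
 * (4096 - sizeof(struct gfs2_log_descriptor)) / sizeof(__be64) = 503
 * entries, which is the buf_limit() figure quoted in
 * buf_lo_before_commit.
 */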
390 
391 static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
392 {
393 	struct gfs2_meta_header *mh;
394 	struct gfs2_trans *tr;
395 
396 	lock_buffer(bd->bd_bh);
397 	gfs2_log_lock(sdp);
398 	tr = current->journal_info;
399 	tr->tr_touched = 1;
400 	if (!list_empty(&bd->bd_list))
401 		goto out;
402 	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
403 	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
404 	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
405 	if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
406 		printk(KERN_ERR
407 		       "Attempting to add uninitialised block to journal (inplace block=%lld)\n",
408 		       (unsigned long long)bd->bd_bh->b_blocknr);
409 		BUG();
410 	}
411 	gfs2_pin(sdp, bd->bd_bh);
412 	mh->__pad0 = cpu_to_be64(0);
413 	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
414 	sdp->sd_log_num_buf++;
415 	list_add(&bd->bd_list, &sdp->sd_log_le_buf);
416 	tr->tr_num_buf_new++;
417 out:
418 	gfs2_log_unlock(sdp);
419 	unlock_buffer(bd->bd_bh);
420 }
421 
422 static void gfs2_check_magic(struct buffer_head *bh)
423 {
424 	void *kaddr;
425 	__be32 *ptr;
426 
427 	clear_buffer_escaped(bh);
428 	kaddr = kmap_atomic(bh->b_page);
429 	ptr = kaddr + bh_offset(bh);
430 	if (*ptr == cpu_to_be32(GFS2_MAGIC))
431 		set_buffer_escaped(bh);
432 	kunmap_atomic(kaddr);
433 }
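/*
 * "Escaping": a journaled data block whose first four bytes happen to
 * equal GFS2_MAGIC could be mistaken for metadata when the journal is
 * replayed. Such blocks are flagged here, copied into a spare page
 * with the first word zeroed before being written to the log (see
 * gfs2_before_commit), and have the magic number restored from the
 * descriptor tag during replay (see databuf_lo_scan_elements).
 */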
434 
435 static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
436 				unsigned int total, struct list_head *blist,
437 				bool is_databuf)
438 {
439 	struct gfs2_log_descriptor *ld;
440 	struct gfs2_bufdata *bd1 = NULL, *bd2;
441 	struct page *page;
442 	unsigned int num;
443 	unsigned n;
444 	__be64 *ptr;
445 
446 	gfs2_log_lock(sdp);
447 	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
448 	while(total) {
449 		num = total;
450 		if (total > limit)
451 			num = limit;
452 		gfs2_log_unlock(sdp);
453 	page = gfs2_get_log_desc(sdp, is_databuf ? GFS2_LOG_DESC_JDATA : GFS2_LOG_DESC_METADATA, num + 1, num);
454 		ld = page_address(page);
455 		gfs2_log_lock(sdp);
456 		ptr = (__be64 *)(ld + 1);
457 
458 		n = 0;
459 		list_for_each_entry_continue(bd1, blist, bd_list) {
460 			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
461 			if (is_databuf) {
462 				gfs2_check_magic(bd1->bd_bh);
463 				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
464 			}
465 			if (++n >= num)
466 				break;
467 		}
468 
469 		gfs2_log_unlock(sdp);
470 		gfs2_log_write_page(sdp, page);
471 		gfs2_log_lock(sdp);
472 
473 		n = 0;
474 		list_for_each_entry_continue(bd2, blist, bd_list) {
475 			get_bh(bd2->bd_bh);
476 			gfs2_log_unlock(sdp);
477 			lock_buffer(bd2->bd_bh);
478 
479 			if (buffer_escaped(bd2->bd_bh)) {
480 				void *kaddr;
481 				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
482 				ptr = page_address(page);
483 				kaddr = kmap_atomic(bd2->bd_bh->b_page);
484 				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
485 				       bd2->bd_bh->b_size);
486 				kunmap_atomic(kaddr);
487 				*(__be32 *)ptr = 0;
488 				clear_buffer_escaped(bd2->bd_bh);
489 				unlock_buffer(bd2->bd_bh);
490 				brelse(bd2->bd_bh);
491 				gfs2_log_write_page(sdp, page);
492 			} else {
493 				gfs2_log_write_bh(sdp, bd2->bd_bh);
494 			}
495 			gfs2_log_lock(sdp);
496 			if (++n >= num)
497 				break;
498 		}
499 
500 		BUG_ON(total < num);
501 		total -= num;
502 	}
503 	gfs2_log_unlock(sdp);
504 }
505 
506 static void buf_lo_before_commit(struct gfs2_sbd *sdp)
507 {
508 	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
509 
510 	gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
511 			   &sdp->sd_log_le_buf, 0);
512 }
513 
514 static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
515 {
516 	struct list_head *head = &sdp->sd_log_le_buf;
517 	struct gfs2_bufdata *bd;
518 
519 	while (!list_empty(head)) {
520 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
521 		list_del_init(&bd->bd_list);
522 		sdp->sd_log_num_buf--;
523 
524 		gfs2_unpin(sdp, bd->bd_bh, ai);
525 	}
526 	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
527 }
528 
529 static void buf_lo_before_scan(struct gfs2_jdesc *jd,
530 			       struct gfs2_log_header_host *head, int pass)
531 {
532 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
533 
534 	if (pass != 0)
535 		return;
536 
537 	sdp->sd_found_blocks = 0;
538 	sdp->sd_replayed_blocks = 0;
539 }
540 
541 static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
542 				struct gfs2_log_descriptor *ld, __be64 *ptr,
543 				int pass)
544 {
545 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
546 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
547 	struct gfs2_glock *gl = ip->i_gl;
548 	unsigned int blks = be32_to_cpu(ld->ld_data1);
549 	struct buffer_head *bh_log, *bh_ip;
550 	u64 blkno;
551 	int error = 0;
552 
553 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
554 		return 0;
555 
556 	gfs2_replay_incr_blk(sdp, &start);
557 
558 	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
559 		blkno = be64_to_cpu(*ptr++);
560 
561 		sdp->sd_found_blocks++;
562 
563 		if (gfs2_revoke_check(sdp, blkno, start))
564 			continue;
565 
566 		error = gfs2_replay_read_block(jd, start, &bh_log);
567 		if (error)
568 			return error;
569 
570 		bh_ip = gfs2_meta_new(gl, blkno);
571 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
572 
573 		if (gfs2_meta_check(sdp, bh_ip))
574 			error = -EIO;
575 		else
576 			mark_buffer_dirty(bh_ip);
577 
578 		brelse(bh_log);
579 		brelse(bh_ip);
580 
581 		if (error)
582 			break;
583 
584 		sdp->sd_replayed_blocks++;
585 	}
586 
587 	return error;
588 }
589 
590 static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
591 {
592 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
593 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
594 
595 	if (error) {
596 		gfs2_meta_sync(ip->i_gl);
597 		return;
598 	}
599 	if (pass != 1)
600 		return;
601 
602 	gfs2_meta_sync(ip->i_gl);
603 
604 	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
605 	        jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
606 }
607 
608 static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
609 {
610 	struct gfs2_glock *gl = bd->bd_gl;
611 	struct gfs2_trans *tr;
612 
613 	tr = current->journal_info;
614 	tr->tr_touched = 1;
615 	tr->tr_num_revoke++;
616 	sdp->sd_log_num_revoke++;
617 	atomic_inc(&gl->gl_revokes);
618 	set_bit(GLF_LFLUSH, &gl->gl_flags);
619 	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
620 }
621 
622 static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
623 {
624 	struct gfs2_log_descriptor *ld;
625 	struct gfs2_meta_header *mh;
626 	unsigned int offset;
627 	struct list_head *head = &sdp->sd_log_le_revoke;
628 	struct gfs2_bufdata *bd;
629 	struct page *page;
630 	unsigned int length;
631 
632 	if (!sdp->sd_log_num_revoke)
633 		return;
634 
635 	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
636 	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
637 	ld = page_address(page);
638 	offset = sizeof(struct gfs2_log_descriptor);
639 
640 	list_for_each_entry(bd, head, bd_list) {
641 		sdp->sd_log_num_revoke--;
642 
643 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
644 
645 			gfs2_log_write_page(sdp, page);
646 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
647 			mh = page_address(page);
648 			clear_page(mh);
649 			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
650 			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
651 			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
652 			offset = sizeof(struct gfs2_meta_header);
653 		}
654 
655 		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
656 		offset += sizeof(u64);
657 	}
658 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
659 
660 	gfs2_log_write_page(sdp, page);
661 }
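/*
 * Revoke records are packed as plain __be64 block numbers: the first
 * log block starts with a GFS2_LOG_DESC_REVOKE descriptor, and any
 * continuation blocks carry only a gfs2_meta_header of type
 * GFS2_METATYPE_LB. gfs2_struct2blk() above works out how many log
 * blocks the sd_log_num_revoke entries will occupy.
 */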
662 
663 static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
664 {
665 	struct list_head *head = &sdp->sd_log_le_revoke;
666 	struct gfs2_bufdata *bd;
667 	struct gfs2_glock *gl;
668 
669 	while (!list_empty(head)) {
670 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
671 		list_del_init(&bd->bd_list);
672 		gl = bd->bd_gl;
673 		atomic_dec(&gl->gl_revokes);
674 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
675 		kmem_cache_free(gfs2_bufdata_cachep, bd);
676 	}
677 }
678 
679 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
680 				  struct gfs2_log_header_host *head, int pass)
681 {
682 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
683 
684 	if (pass != 0)
685 		return;
686 
687 	sdp->sd_found_revokes = 0;
688 	sdp->sd_replay_tail = head->lh_tail;
689 }
690 
691 static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
692 				   struct gfs2_log_descriptor *ld, __be64 *ptr,
693 				   int pass)
694 {
695 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
696 	unsigned int blks = be32_to_cpu(ld->ld_length);
697 	unsigned int revokes = be32_to_cpu(ld->ld_data1);
698 	struct buffer_head *bh;
699 	unsigned int offset;
700 	u64 blkno;
701 	int first = 1;
702 	int error;
703 
704 	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
705 		return 0;
706 
707 	offset = sizeof(struct gfs2_log_descriptor);
708 
709 	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
710 		error = gfs2_replay_read_block(jd, start, &bh);
711 		if (error)
712 			return error;
713 
714 		if (!first)
715 			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
716 
717 		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
718 			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
719 
720 			error = gfs2_revoke_add(sdp, blkno, start);
721 			if (error < 0) {
722 				brelse(bh);
723 				return error;
724 			}
725 			else if (error)
726 				sdp->sd_found_revokes++;
727 
728 			if (!--revokes)
729 				break;
730 			offset += sizeof(u64);
731 		}
732 
733 		brelse(bh);
734 		offset = sizeof(struct gfs2_meta_header);
735 		first = 0;
736 	}
737 
738 	return 0;
739 }
740 
741 static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
742 {
743 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
744 
745 	if (error) {
746 		gfs2_revoke_clean(sdp);
747 		return;
748 	}
749 	if (pass != 1)
750 		return;
751 
752 	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
753 	        jd->jd_jid, sdp->sd_found_revokes);
754 
755 	gfs2_revoke_clean(sdp);
756 }
757 
758 /**
759  * databuf_lo_add - Add a databuf to the transaction.
760  *
761  * This is used in two distinct cases:
762  * i) In ordered write mode
763  *    We put the data buffer on a list so that we can ensure that its
764  *    synced to disk at the right time
765  * ii) In journaled data mode
766  *    We need to journal the data block in the same way as metadata in
767  *    the functions above. The difference is that here we have a tag
768  *    which is two __be64's being the block number (as per meta data)
769  *    and a flag which says whether the data block needs escaping or
770  *    not. This means we need a new log entry for each 251 or so data
771  *    blocks, which isn't an enormous overhead but twice as much as
772  *    for normal metadata blocks.
773  */
774 static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
775 {
776 	struct gfs2_trans *tr = current->journal_info;
777 	struct address_space *mapping = bd->bd_bh->b_page->mapping;
778 	struct gfs2_inode *ip = GFS2_I(mapping->host);
779 
780 	lock_buffer(bd->bd_bh);
781 	gfs2_log_lock(sdp);
782 	if (tr)
783 		tr->tr_touched = 1;
784 	if (!list_empty(&bd->bd_list))
785 		goto out;
786 	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
787 	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
788 	if (gfs2_is_jdata(ip)) {
789 		gfs2_pin(sdp, bd->bd_bh);
790 		tr->tr_num_databuf_new++;
791 		sdp->sd_log_num_databuf++;
792 		list_add_tail(&bd->bd_list, &sdp->sd_log_le_databuf);
793 	} else {
794 		list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered);
795 	}
796 out:
797 	gfs2_log_unlock(sdp);
798 	unlock_buffer(bd->bd_bh);
799 }
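/*
 * For journaled data the descriptor tag written by gfs2_before_commit
 * is two __be64s per block (block number plus escape flag) instead of
 * one, which is why databuf_lo_before_commit below uses half the
 * buf_limit() of the metadata path.
 */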
800 
801 /**
802  * databuf_lo_before_commit - Scan the data buffers, writing as we go
803  *
804  */
805 
806 static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
807 {
808 	unsigned int limit = buf_limit(sdp) / 2;
809 
810 	gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
811 			   &sdp->sd_log_le_databuf, 1);
812 }
813 
814 static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
815 				    struct gfs2_log_descriptor *ld,
816 				    __be64 *ptr, int pass)
817 {
818 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
819 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
820 	struct gfs2_glock *gl = ip->i_gl;
821 	unsigned int blks = be32_to_cpu(ld->ld_data1);
822 	struct buffer_head *bh_log, *bh_ip;
823 	u64 blkno;
824 	u64 esc;
825 	int error = 0;
826 
827 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
828 		return 0;
829 
830 	gfs2_replay_incr_blk(sdp, &start);
831 	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
832 		blkno = be64_to_cpu(*ptr++);
833 		esc = be64_to_cpu(*ptr++);
834 
835 		sdp->sd_found_blocks++;
836 
837 		if (gfs2_revoke_check(sdp, blkno, start))
838 			continue;
839 
840 		error = gfs2_replay_read_block(jd, start, &bh_log);
841 		if (error)
842 			return error;
843 
844 		bh_ip = gfs2_meta_new(gl, blkno);
845 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
846 
847 		/* Unescape */
848 		if (esc) {
849 			__be32 *eptr = (__be32 *)bh_ip->b_data;
850 			*eptr = cpu_to_be32(GFS2_MAGIC);
851 		}
852 		mark_buffer_dirty(bh_ip);
853 
854 		brelse(bh_log);
855 		brelse(bh_ip);
856 
857 		sdp->sd_replayed_blocks++;
858 	}
859 
860 	return error;
861 }
862 
863 /* FIXME: sort out accounting for log blocks etc. */
864 
865 static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
866 {
867 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
868 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
869 
870 	if (error) {
871 		gfs2_meta_sync(ip->i_gl);
872 		return;
873 	}
874 	if (pass != 1)
875 		return;
876 
877 	/* data sync? */
878 	gfs2_meta_sync(ip->i_gl);
879 
880 	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
881 		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
882 }
883 
884 static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
885 {
886 	struct list_head *head = &sdp->sd_log_le_databuf;
887 	struct gfs2_bufdata *bd;
888 
889 	while (!list_empty(head)) {
890 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
891 		list_del_init(&bd->bd_list);
892 		sdp->sd_log_num_databuf--;
893 		gfs2_unpin(sdp, bd->bd_bh, ai);
894 	}
895 	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
896 }
897 
898 
899 const struct gfs2_log_operations gfs2_buf_lops = {
900 	.lo_add = buf_lo_add,
901 	.lo_before_commit = buf_lo_before_commit,
902 	.lo_after_commit = buf_lo_after_commit,
903 	.lo_before_scan = buf_lo_before_scan,
904 	.lo_scan_elements = buf_lo_scan_elements,
905 	.lo_after_scan = buf_lo_after_scan,
906 	.lo_name = "buf",
907 };
908 
909 const struct gfs2_log_operations gfs2_revoke_lops = {
910 	.lo_add = revoke_lo_add,
911 	.lo_before_commit = revoke_lo_before_commit,
912 	.lo_after_commit = revoke_lo_after_commit,
913 	.lo_before_scan = revoke_lo_before_scan,
914 	.lo_scan_elements = revoke_lo_scan_elements,
915 	.lo_after_scan = revoke_lo_after_scan,
916 	.lo_name = "revoke",
917 };
918 
919 const struct gfs2_log_operations gfs2_rg_lops = {
920 	.lo_name = "rg",
921 };
922 
923 const struct gfs2_log_operations gfs2_databuf_lops = {
924 	.lo_add = databuf_lo_add,
925 	.lo_before_commit = databuf_lo_before_commit,
926 	.lo_after_commit = databuf_lo_after_commit,
927 	.lo_scan_elements = databuf_lo_scan_elements,
928 	.lo_after_scan = databuf_lo_after_scan,
929 	.lo_name = "databuf",
930 };
931 
932 const struct gfs2_log_operations *gfs2_log_ops[] = {
933 	&gfs2_databuf_lops,
934 	&gfs2_buf_lops,
935 	&gfs2_rg_lops,
936 	&gfs2_revoke_lops,
937 	NULL,
938 };
939 
940