xref: /openbmc/linux/fs/gfs2/lops.c (revision e8c92ed7)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/mempool.h>
16 #include <linux/gfs2_ondisk.h>
17 #include <linux/bio.h>
18 #include <linux/fs.h>
19 
20 #include "gfs2.h"
21 #include "incore.h"
22 #include "inode.h"
23 #include "glock.h"
24 #include "log.h"
25 #include "lops.h"
26 #include "meta_io.h"
27 #include "recovery.h"
28 #include "rgrp.h"
29 #include "trans.h"
30 #include "util.h"
31 #include "trace_gfs2.h"
32 
33 /**
34  * gfs2_pin - Pin a buffer in memory
35  * @sdp: The superblock
36  * @bh: The buffer to be pinned
37  *
38  * The log lock must be held when calling this function
39  */
40 static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
41 {
42 	struct gfs2_bufdata *bd;
43 
44 	BUG_ON(!current->journal_info);
45 
46 	clear_buffer_dirty(bh);
47 	if (test_set_buffer_pinned(bh))
48 		gfs2_assert_withdraw(sdp, 0);
49 	if (!buffer_uptodate(bh))
50 		gfs2_io_error_bh(sdp, bh);
51 	bd = bh->b_private;
52 	/* If this buffer is in the AIL and it has already been written
53 	 * back to its in-place disk block, remove it from the AIL by
54 	 * moving it onto the ail2 list. */
55 	spin_lock(&sdp->sd_ail_lock);
56 	if (bd->bd_ail)
57 		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
58 	spin_unlock(&sdp->sd_ail_lock);
59 	get_bh(bh);
60 	atomic_inc(&sdp->sd_log_pinned);
61 	trace_gfs2_pin(bd, 1);
62 }
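
/*
 * Pinning in a nutshell: gfs2_pin() runs while a buffer belongs to an
 * uncommitted transaction. It clears the dirty bit and takes an extra
 * reference so that normal writeback cannot touch the in-place block before
 * the log copy is safely on disk; sd_log_pinned counts how many buffers are
 * held this way. gfs2_unpin() below reverses the process once the log i/o
 * has completed, redirtying the buffer and placing it on the AIL for
 * eventual in-place writeback.
 */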
63 
64 static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
65 {
66 	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
67 }
68 
69 static void maybe_release_space(struct gfs2_bufdata *bd)
70 {
71 	struct gfs2_glock *gl = bd->bd_gl;
72 	struct gfs2_sbd *sdp = gl->gl_sbd;
73 	struct gfs2_rgrpd *rgd = gl->gl_object;
74 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
75 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
76 
77 	if (!bi->bi_clone)
78 		return;
79 	if (sdp->sd_args.ar_discard)
80 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
81 	memcpy(bi->bi_clone + bi->bi_offset,
82 	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
83 	clear_bit(GBF_FULL, &bi->bi_flags);
84 	rgd->rd_free_clone = rgd->rd_free;
85 }
86 
87 /**
88  * gfs2_unpin - Unpin a buffer
89  * @sdp: the filesystem the buffer belongs to
90  * @bh: The buffer to unpin
91  * @ai: The AIL to which the unpinned buffer will be attached
93  *
94  */
95 
96 static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
97 		       struct gfs2_ail *ai)
98 {
99 	struct gfs2_bufdata *bd = bh->b_private;
100 
101 	BUG_ON(!buffer_uptodate(bh));
102 	BUG_ON(!buffer_pinned(bh));
103 
104 	lock_buffer(bh);
105 	mark_buffer_dirty(bh);
106 	clear_buffer_pinned(bh);
107 
108 	if (buffer_is_rgrp(bd))
109 		maybe_release_space(bd);
110 
111 	spin_lock(&sdp->sd_ail_lock);
112 	if (bd->bd_ail) {
113 		list_del(&bd->bd_ail_st_list);
114 		brelse(bh);
115 	} else {
116 		struct gfs2_glock *gl = bd->bd_gl;
117 		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
118 		atomic_inc(&gl->gl_ail_count);
119 	}
120 	bd->bd_ail = ai;
121 	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
122 	spin_unlock(&sdp->sd_ail_lock);
123 
124 	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
125 	trace_gfs2_pin(bd, 0);
126 	unlock_buffer(bh);
127 	atomic_dec(&sdp->sd_log_pinned);
128 }
129 
130 static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
131 {
132 	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
133 	       (sdp->sd_log_flush_head != sdp->sd_log_head));
134 
135 	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
136 		sdp->sd_log_flush_head = 0;
137 		sdp->sd_log_flush_wrapped = 1;
138 	}
139 }
140 
141 static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
142 {
143 	unsigned int lbn = sdp->sd_log_flush_head;
144 	struct gfs2_journal_extent *je;
145 	u64 block;
146 
147 	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
148 		if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
149 			block = je->dblock + lbn - je->lblock;
150 			gfs2_log_incr_head(sdp);
151 			return block;
152 		}
153 	}
154 
155 	return -1;
156 }
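
/*
 * Worked example of the extent mapping above: with a journal extent of
 * je->lblock = 0, je->blocks = 8 and je->dblock = 1000, a flush head of
 * lbn = 5 falls inside the extent and maps to physical block
 * 1000 + 5 - 0 = 1005. A return value of -1 means no extent covered the
 * logical block, which should not happen for a well-formed extent list.
 */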
157 
158 /**
159  * gfs2_end_log_write_bh - end log write of pagecache data with buffers
160  * @sdp: The superblock
161  * @bvec: The bio_vec
162  * @error: The i/o status
163  *
164  * This finds the relevant buffers, unlocks them and sets the
165  * error flag according to the status of the i/o request. This is
166  * used when the log is writing data which has an in-place version
167  * that is pinned in the pagecache.
168  */
169 
170 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
171 				  int error)
172 {
173 	struct buffer_head *bh, *next;
174 	struct page *page = bvec->bv_page;
175 	unsigned size;
176 
177 	bh = page_buffers(page);
178 	size = bvec->bv_len;
179 	while (bh_offset(bh) < bvec->bv_offset)
180 		bh = bh->b_this_page;
181 	do {
182 		if (error)
183 			set_buffer_write_io_error(bh);
184 		unlock_buffer(bh);
185 		next = bh->b_this_page;
186 		size -= bh->b_size;
187 		brelse(bh);
188 		bh = next;
189 	} while (bh && size);
190 }
191 
192 /**
193  * gfs2_end_log_write - end of i/o to the log
194  * @bio: The bio
195  * @error: Status of i/o request
196  *
197  * Each bio_vec contains either data from the pagecache or data
198  * relating to the log itself. Here we iterate over the bio_vec
199  * array, processing both kinds of data.
200  *
201  */
202 
203 static void gfs2_end_log_write(struct bio *bio, int error)
204 {
205 	struct gfs2_sbd *sdp = bio->bi_private;
206 	struct bio_vec *bvec;
207 	struct page *page;
208 	int i;
209 
210 	if (error) {
211 		sdp->sd_log_error = error;
212 		fs_err(sdp, "Error %d writing to log\n", error);
213 	}
214 
215 	bio_for_each_segment(bvec, bio, i) {
216 		page = bvec->bv_page;
217 		if (page_has_buffers(page))
218 			gfs2_end_log_write_bh(sdp, bvec, error);
219 		else
220 			mempool_free(page, gfs2_page_pool);
221 	}
222 
223 	bio_put(bio);
224 	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
225 		wake_up(&sdp->sd_log_flush_wait);
226 }
227 
228 /**
229  * gfs2_log_flush_bio - Submit any pending log bio
230  * @sdp: The superblock
231  * @rw: The rw flags
232  *
233  * Submit any pending part-built or full bio to the block device. If
234  * there is no pending bio, then this is a no-op.
235  */
236 
237 void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
238 {
239 	if (sdp->sd_log_bio) {
240 		atomic_inc(&sdp->sd_log_in_flight);
241 		submit_bio(rw, sdp->sd_log_bio);
242 		sdp->sd_log_bio = NULL;
243 	}
244 }
245 
246 /**
247  * gfs2_log_alloc_bio - Allocate a new bio for log writing
248  * @sdp: The superblock
249  * @blkno: The next device block number we want to write to
250  *
251  * This should never be called when there is a cached bio in the
252  * super block. When it returns, there will be a cached bio in the
253  * super block which will have as many bio_vecs as the device is
254  * happy to handle.
255  *
256  * Returns: Newly allocated bio
257  */
258 
259 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
260 {
261 	struct super_block *sb = sdp->sd_vfs;
262 	unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
263 	struct bio *bio;
264 
265 	BUG_ON(sdp->sd_log_bio);
266 
267 	while (1) {
268 		bio = bio_alloc(GFP_NOIO, nrvecs);
269 		if (likely(bio))
270 			break;
271 		nrvecs = max(nrvecs/2, 1U);
272 	}
273 
274 	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
275 	bio->bi_bdev = sb->s_bdev;
276 	bio->bi_end_io = gfs2_end_log_write;
277 	bio->bi_private = sdp;
278 
279 	sdp->sd_log_bio = bio;
280 
281 	return bio;
282 }
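
/*
 * The bi_sector arithmetic above converts a filesystem block number into
 * 512-byte sectors: with a 4096-byte block size, s_blocksize >> 9 is 8, so
 * filesystem block 1005 starts at sector 1005 * 8 = 8040.
 */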
283 
284 /**
285  * gfs2_log_get_bio - Get cached log bio, or allocate a new one
286  * @sdp: The superblock
287  * @blkno: The device block number we want to write to
288  *
289  * If there is a cached bio, then if the next block number is sequential
290  * with the previous one, return it, otherwise flush the bio to the
291  * device. If there is not a cached bio, or we just flushed it, then
292  * allocate a new one.
293  *
294  * Returns: The bio to use for log writes
295  */
296 
297 static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
298 {
299 	struct bio *bio = sdp->sd_log_bio;
300 	u64 nblk;
301 
302 	if (bio) {
303 		nblk = bio->bi_sector + bio_sectors(bio);
304 		nblk >>= sdp->sd_fsb2bb_shift;
305 		if (blkno == nblk)
306 			return bio;
307 		gfs2_log_flush_bio(sdp, WRITE);
308 	}
309 
310 	return gfs2_log_alloc_bio(sdp, blkno);
311 }
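
/*
 * The sequential check above is the inverse of the arithmetic in
 * gfs2_log_alloc_bio(): the sector just past the end of the cached bio is
 * shifted down by sd_fsb2bb_shift (3 for 4k blocks, i.e. 8 sectors per
 * block) to get the next filesystem block the bio would cover. Only if
 * blkno equals that block can the write be merged into the cached bio;
 * otherwise the bio is submitted and a new one allocated.
 */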
312 
313 
314 /**
315  * gfs2_log_write - write to log
316  * @sdp: the filesystem
317  * @page: the page to write
318  * @size: the size of the data to write
319  * @offset: the offset within the page
320  *
321  * Try and add the page segment to the current bio. If that fails,
322  * submit the current bio to the device and create a new one, and
323  * then add the page segment to that.
324  */
325 
326 static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
327 			   unsigned size, unsigned offset)
328 {
329 	u64 blkno = gfs2_log_bmap(sdp);
330 	struct bio *bio;
331 	int ret;
332 
333 	bio = gfs2_log_get_bio(sdp, blkno);
334 	ret = bio_add_page(bio, page, size, offset);
335 	if (ret == 0) {
336 		gfs2_log_flush_bio(sdp, WRITE);
337 		bio = gfs2_log_alloc_bio(sdp, blkno);
338 		ret = bio_add_page(bio, page, size, offset);
339 		WARN_ON(ret == 0);
340 	}
341 }
342 
343 /**
344  * gfs2_log_write_bh - write a buffer's content to the log
345  * @sdp: The super block
346  * @bh: The buffer pointing to the in-place location
347  *
348  * This writes the content of the buffer to the next available location
349  * in the log. The buffer will be unlocked once the i/o to the log has
350  * completed.
351  */
352 
353 static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
354 {
355 	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
356 }
357 
358 /**
359  * gfs2_log_write_page - write one block stored in a page, into the log
360  * @sdp: The superblock
361  * @page: The struct page
362  *
363  * This writes the first block-sized part of the page into the log. Note
364  * that the page must have been allocated from the gfs2_page_pool mempool
365  * and that after this has been called, ownership has been transferred and
366  * the page may be freed at any time.
367  */
368 
369 void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
370 {
371 	struct super_block *sb = sdp->sd_vfs;
372 	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
373 }
374 
375 static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
376 {
377 	void *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
378 	struct gfs2_log_descriptor *ld = page_address(page);
379 	clear_page(ld);
380 	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
381 	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
382 	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
383 	ld->ld_type = cpu_to_be32(ld_type);
384 	ld->ld_length = 0;
385 	ld->ld_data1 = 0;
386 	ld->ld_data2 = 0;
387 	return page;
388 }
389 
390 static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
391 {
392 	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
393 	struct gfs2_meta_header *mh;
394 	struct gfs2_trans *tr;
395 
396 	lock_buffer(bd->bd_bh);
397 	gfs2_log_lock(sdp);
398 	if (!list_empty(&bd->bd_list_tr))
399 		goto out;
400 	tr = current->journal_info;
401 	tr->tr_touched = 1;
402 	tr->tr_num_buf++;
403 	list_add(&bd->bd_list_tr, &tr->tr_list_buf);
404 	if (!list_empty(&le->le_list))
405 		goto out;
406 	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
407 	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
408 	gfs2_meta_check(sdp, bd->bd_bh);
409 	gfs2_pin(sdp, bd->bd_bh);
410 	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
411 	mh->__pad0 = cpu_to_be64(0);
412 	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
413 	sdp->sd_log_num_buf++;
414 	list_add(&le->le_list, &sdp->sd_log_le_buf);
415 	tr->tr_num_buf_new++;
416 out:
417 	gfs2_log_unlock(sdp);
418 	unlock_buffer(bd->bd_bh);
419 }
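
/*
 * buf_lo_add() is the ->lo_add hook for plain metadata: it accounts the
 * buffer to the current transaction, pins it, and stamps the owning
 * journal's jid into the on-disk header. Callers do not invoke it directly;
 * a minimal sketch of the usual path, assuming the lops_add() dispatcher in
 * lops.h (which simply calls le->le_ops->lo_add()):
 *
 *	struct gfs2_bufdata *bd = bh->b_private;
 *	lops_add(sdp, &bd->bd_le);	- dispatches to buf_lo_add()
 */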
420 
421 static void buf_lo_before_commit(struct gfs2_sbd *sdp)
422 {
423 	struct gfs2_log_descriptor *ld;
424 	struct gfs2_bufdata *bd1 = NULL, *bd2;
425 	struct page *page;
426 	unsigned int total;
427 	unsigned int limit;
428 	unsigned int num;
429 	unsigned n;
430 	__be64 *ptr;
431 
432 	limit = buf_limit(sdp);
433 	/* for 4k blocks, limit = 503 */
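	/*
	 * That figure follows from the space left after the log descriptor
	 * header: assuming the on-disk struct gfs2_log_descriptor occupies
	 * 72 bytes, a 4096-byte block has room for
	 * (4096 - 72) / sizeof(__be64) = 503 block pointers.
	 */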
434 
435 	gfs2_log_lock(sdp);
436 	total = sdp->sd_log_num_buf;
437 	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
438 	while (total) {
439 		num = total;
440 		if (total > limit)
441 			num = limit;
442 		gfs2_log_unlock(sdp);
443 		page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
444 		ld = page_address(page);
445 		gfs2_log_lock(sdp);
446 		ptr = (__be64 *)(ld + 1);
447 		ld->ld_length = cpu_to_be32(num + 1);
448 		ld->ld_data1 = cpu_to_be32(num);
449 
450 		n = 0;
451 		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
452 					     bd_le.le_list) {
453 			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
454 			if (++n >= num)
455 				break;
456 		}
457 
458 		gfs2_log_unlock(sdp);
459 		gfs2_log_write_page(sdp, page);
460 		gfs2_log_lock(sdp);
461 
462 		n = 0;
463 		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
464 					     bd_le.le_list) {
465 			get_bh(bd2->bd_bh);
466 			gfs2_log_unlock(sdp);
467 			lock_buffer(bd2->bd_bh);
468 			gfs2_log_write_bh(sdp, bd2->bd_bh);
469 			gfs2_log_lock(sdp);
470 			if (++n >= num)
471 				break;
472 		}
473 
474 		BUG_ON(total < num);
475 		total -= num;
476 	}
477 	gfs2_log_unlock(sdp);
478 }
479 
480 static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
481 {
482 	struct list_head *head = &sdp->sd_log_le_buf;
483 	struct gfs2_bufdata *bd;
484 
485 	while (!list_empty(head)) {
486 		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
487 		list_del_init(&bd->bd_le.le_list);
488 		sdp->sd_log_num_buf--;
489 
490 		gfs2_unpin(sdp, bd->bd_bh, ai);
491 	}
492 	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
493 }
494 
495 static void buf_lo_before_scan(struct gfs2_jdesc *jd,
496 			       struct gfs2_log_header_host *head, int pass)
497 {
498 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
499 
500 	if (pass != 0)
501 		return;
502 
503 	sdp->sd_found_blocks = 0;
504 	sdp->sd_replayed_blocks = 0;
505 }
506 
507 static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
508 				struct gfs2_log_descriptor *ld, __be64 *ptr,
509 				int pass)
510 {
511 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
512 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
513 	struct gfs2_glock *gl = ip->i_gl;
514 	unsigned int blks = be32_to_cpu(ld->ld_data1);
515 	struct buffer_head *bh_log, *bh_ip;
516 	u64 blkno;
517 	int error = 0;
518 
519 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
520 		return 0;
521 
522 	gfs2_replay_incr_blk(sdp, &start);
523 
524 	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
525 		blkno = be64_to_cpu(*ptr++);
526 
527 		sdp->sd_found_blocks++;
528 
529 		if (gfs2_revoke_check(sdp, blkno, start))
530 			continue;
531 
532 		error = gfs2_replay_read_block(jd, start, &bh_log);
533 		if (error)
534 			return error;
535 
536 		bh_ip = gfs2_meta_new(gl, blkno);
537 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
538 
539 		if (gfs2_meta_check(sdp, bh_ip))
540 			error = -EIO;
541 		else
542 			mark_buffer_dirty(bh_ip);
543 
544 		brelse(bh_log);
545 		brelse(bh_ip);
546 
547 		if (error)
548 			break;
549 
550 		sdp->sd_replayed_blocks++;
551 	}
552 
553 	return error;
554 }
555 
556 static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
557 {
558 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
559 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
560 
561 	if (error) {
562 		gfs2_meta_sync(ip->i_gl);
563 		return;
564 	}
565 	if (pass != 1)
566 		return;
567 
568 	gfs2_meta_sync(ip->i_gl);
569 
570 	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
571 	        jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
572 }
573 
574 static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
575 {
576 	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
577 	struct gfs2_glock *gl = bd->bd_gl;
578 	struct gfs2_trans *tr;
579 
580 	tr = current->journal_info;
581 	tr->tr_touched = 1;
582 	tr->tr_num_revoke++;
583 	sdp->sd_log_num_revoke++;
584 	atomic_inc(&gl->gl_revokes);
585 	set_bit(GLF_LFLUSH, &gl->gl_flags);
586 	list_add(&le->le_list, &sdp->sd_log_le_revoke);
587 }
588 
589 static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
590 {
591 	struct gfs2_log_descriptor *ld;
592 	struct gfs2_meta_header *mh;
593 	unsigned int offset;
594 	struct list_head *head = &sdp->sd_log_le_revoke;
595 	struct gfs2_bufdata *bd;
596 	struct page *page;
597 
598 	if (!sdp->sd_log_num_revoke)
599 		return;
600 
601 	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
602 	ld = page_address(page);
603 	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
604 						    sizeof(u64)));
605 	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
606 	offset = sizeof(struct gfs2_log_descriptor);
607 
608 	list_for_each_entry(bd, head, bd_le.le_list) {
609 		sdp->sd_log_num_revoke--;
610 
611 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
612 
613 			gfs2_log_write_page(sdp, page);
614 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
615 			mh = page_address(page);
616 			clear_page(mh);
617 			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
618 			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
619 			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
620 			offset = sizeof(struct gfs2_meta_header);
621 		}
622 
623 		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
624 		offset += sizeof(u64);
625 	}
626 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
627 
628 	gfs2_log_write_page(sdp, page);
629 }
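
/*
 * Worked example of the revoke packing above, assuming 4096-byte blocks and
 * a 72-byte log descriptor: the first log block holds (4096 - 72) / 8 = 503
 * revoke entries, while each continuation block carries only a 24-byte
 * gfs2_meta_header and so holds (4096 - 24) / 8 = 509. gfs2_struct2blk()
 * does the matching calculation when ld_length is filled in above.
 */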
630 
631 static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
632 {
633 	struct list_head *head = &sdp->sd_log_le_revoke;
634 	struct gfs2_bufdata *bd;
635 	struct gfs2_glock *gl;
636 
637 	while (!list_empty(head)) {
638 		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
639 		list_del_init(&bd->bd_le.le_list);
640 		gl = bd->bd_gl;
641 		atomic_dec(&gl->gl_revokes);
642 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
643 		kmem_cache_free(gfs2_bufdata_cachep, bd);
644 	}
645 }
646 
647 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
648 				  struct gfs2_log_header_host *head, int pass)
649 {
650 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
651 
652 	if (pass != 0)
653 		return;
654 
655 	sdp->sd_found_revokes = 0;
656 	sdp->sd_replay_tail = head->lh_tail;
657 }
658 
659 static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
660 				   struct gfs2_log_descriptor *ld, __be64 *ptr,
661 				   int pass)
662 {
663 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
664 	unsigned int blks = be32_to_cpu(ld->ld_length);
665 	unsigned int revokes = be32_to_cpu(ld->ld_data1);
666 	struct buffer_head *bh;
667 	unsigned int offset;
668 	u64 blkno;
669 	int first = 1;
670 	int error;
671 
672 	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
673 		return 0;
674 
675 	offset = sizeof(struct gfs2_log_descriptor);
676 
677 	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
678 		error = gfs2_replay_read_block(jd, start, &bh);
679 		if (error)
680 			return error;
681 
682 		if (!first)
683 			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
684 
685 		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
686 			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
687 
688 			error = gfs2_revoke_add(sdp, blkno, start);
689 			if (error < 0) {
690 				brelse(bh);
691 				return error;
692 			}
693 			else if (error)
694 				sdp->sd_found_revokes++;
695 
696 			if (!--revokes)
697 				break;
698 			offset += sizeof(u64);
699 		}
700 
701 		brelse(bh);
702 		offset = sizeof(struct gfs2_meta_header);
703 		first = 0;
704 	}
705 
706 	return 0;
707 }
708 
709 static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
710 {
711 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
712 
713 	if (error) {
714 		gfs2_revoke_clean(sdp);
715 		return;
716 	}
717 	if (pass != 1)
718 		return;
719 
720 	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
721 	        jd->jd_jid, sdp->sd_found_revokes);
722 
723 	gfs2_revoke_clean(sdp);
724 }
725 
726 /**
727  * databuf_lo_add - Add a databuf to the transaction.
728  *
729  * This is used in two distinct cases:
730  * i) In ordered write mode
731  *    We put the data buffer on a list so that we can ensure that its
732  *    synced to disk at the right time
733  * ii) In journaled data mode
734  *    We need to journal the data block in the same way as metadata in
735  *    the functions above. The difference is that here we have a tag
736  *    which is two __be64's being the block number (as per meta data)
737  *    and a flag which says whether the data block needs escaping or
738  *    not. This means we need a new log entry for each 251 or so data
739  *    blocks, which isn't an enormous overhead but twice as much as
740  *    for normal metadata blocks.
741  */
742 static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
743 {
744 	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
745 	struct gfs2_trans *tr = current->journal_info;
746 	struct address_space *mapping = bd->bd_bh->b_page->mapping;
747 	struct gfs2_inode *ip = GFS2_I(mapping->host);
748 
749 	lock_buffer(bd->bd_bh);
750 	gfs2_log_lock(sdp);
751 	if (tr) {
752 		if (!list_empty(&bd->bd_list_tr))
753 			goto out;
754 		tr->tr_touched = 1;
755 		if (gfs2_is_jdata(ip)) {
756 			tr->tr_num_buf++;
757 			list_add(&bd->bd_list_tr, &tr->tr_list_buf);
758 		}
759 	}
760 	if (!list_empty(&le->le_list))
761 		goto out;
762 
763 	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
764 	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
765 	if (gfs2_is_jdata(ip)) {
766 		gfs2_pin(sdp, bd->bd_bh);
767 		tr->tr_num_databuf_new++;
768 		sdp->sd_log_num_databuf++;
769 		list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
770 	} else {
771 		list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
772 	}
773 out:
774 	gfs2_log_unlock(sdp);
775 	unlock_buffer(bd->bd_bh);
776 }
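
/*
 * The "251 or so" figure in the comment above comes from the tag size: each
 * journaled data block needs two __be64s (block number plus escape flag),
 * so with 4096-byte blocks and an assumed 72-byte log descriptor one
 * descriptor block covers (4096 - 72) / 16 = 251 data blocks, half of the
 * 503 available to plain metadata.
 */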
777 
778 static void gfs2_check_magic(struct buffer_head *bh)
779 {
780 	void *kaddr;
781 	__be32 *ptr;
782 
783 	clear_buffer_escaped(bh);
784 	kaddr = kmap_atomic(bh->b_page);
785 	ptr = kaddr + bh_offset(bh);
786 	if (*ptr == cpu_to_be32(GFS2_MAGIC))
787 		set_buffer_escaped(bh);
788 	kunmap_atomic(kaddr);
789 }
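
/*
 * Escaping exists because journal recovery identifies log headers and
 * descriptors by the GFS2_MAGIC value in the first four bytes of a block.
 * A journaled data block that happens to begin with that value could be
 * mistaken for one, so its log copy is written with the first word zeroed
 * ("escaped") and the tag emitted by databuf_lo_before_commit() records the
 * fact; databuf_lo_scan_elements() restores the magic number on replay.
 */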
790 
791 static void gfs2_write_blocks(struct gfs2_sbd *sdp,
792 			      struct gfs2_log_descriptor *ld,
793 			      struct page *page,
794 			      struct list_head *list, struct list_head *done,
795 			      unsigned int n)
796 {
797 	struct gfs2_bufdata *bd;
798 	__be64 *ptr;
799 
800 	if (!ld)
801 		return;
802 
803 	ld->ld_length = cpu_to_be32(n + 1);
804 	ld->ld_data1 = cpu_to_be32(n);
805 	ptr = (__force __be64 *)(ld + 1);
806 
807 	gfs2_log_write_page(sdp, page);
808 	gfs2_log_lock(sdp);
809 	while (!list_empty(list)) {
810 		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
811 		list_move_tail(&bd->bd_le.le_list, done);
812 		get_bh(bd->bd_bh);
813 		gfs2_log_unlock(sdp);
814 		lock_buffer(bd->bd_bh);
815 		if (buffer_escaped(bd->bd_bh)) {
816 			void *kaddr;
817 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
818 			ptr = page_address(page);
819 			kaddr = kmap_atomic(bd->bd_bh->b_page);
820 			memcpy(ptr, kaddr + bh_offset(bd->bd_bh),
821 			       bd->bd_bh->b_size);
822 			kunmap_atomic(kaddr);
823 			*(__be32 *)ptr = 0;
824 			clear_buffer_escaped(bd->bd_bh);
825 			unlock_buffer(bd->bd_bh);
826 			brelse(bd->bd_bh);
827 			gfs2_log_write_page(sdp, page);
828 		} else {
829 			gfs2_log_write_bh(sdp, bd->bd_bh);
830 		}
831 		n--;
832 		gfs2_log_lock(sdp);
833 	}
834 	gfs2_log_unlock(sdp);
835 	BUG_ON(n != 0);
836 }
837 
838 /**
839  * databuf_lo_before_commit - Scan the data buffers, writing as we go
840  *
841  */
842 
843 static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
844 {
845 	struct gfs2_bufdata *bd = NULL;
846 	struct gfs2_log_descriptor *ld = NULL;
847 	struct page *page = NULL;
848 	unsigned int n = 0;
849 	__be64 *ptr = NULL, *end = NULL;
850 	LIST_HEAD(processed);
851 	LIST_HEAD(in_progress);
852 
853 	gfs2_log_lock(sdp);
854 	while (!list_empty(&sdp->sd_log_le_databuf)) {
855 		if (ptr == end) {
856 			gfs2_log_unlock(sdp);
857 			gfs2_write_blocks(sdp, ld, page, &in_progress, &processed, n);
858 			n = 0;
859 			page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
860 			ld = page_address(page);
861 			ptr = (__force __be64 *)(ld + 1);
862 			end = (__force __be64 *)(page_address(page) + sdp->sd_vfs->s_blocksize);
863 			end--;
864 			gfs2_log_lock(sdp);
865 			continue;
866 		}
867 		bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
868 		list_move_tail(&bd->bd_le.le_list, &in_progress);
869 		gfs2_check_magic(bd->bd_bh);
870 		*ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
871 		*ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
872 		n++;
873 	}
874 	gfs2_log_unlock(sdp);
875 	gfs2_write_blocks(sdp, ld, page, &in_progress, &processed, n);
876 	gfs2_log_lock(sdp);
877 	list_splice(&processed, &sdp->sd_log_le_databuf);
878 	gfs2_log_unlock(sdp);
879 }
880 
881 static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
882 				    struct gfs2_log_descriptor *ld,
883 				    __be64 *ptr, int pass)
884 {
885 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
886 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
887 	struct gfs2_glock *gl = ip->i_gl;
888 	unsigned int blks = be32_to_cpu(ld->ld_data1);
889 	struct buffer_head *bh_log, *bh_ip;
890 	u64 blkno;
891 	u64 esc;
892 	int error = 0;
893 
894 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
895 		return 0;
896 
897 	gfs2_replay_incr_blk(sdp, &start);
898 	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
899 		blkno = be64_to_cpu(*ptr++);
900 		esc = be64_to_cpu(*ptr++);
901 
902 		sdp->sd_found_blocks++;
903 
904 		if (gfs2_revoke_check(sdp, blkno, start))
905 			continue;
906 
907 		error = gfs2_replay_read_block(jd, start, &bh_log);
908 		if (error)
909 			return error;
910 
911 		bh_ip = gfs2_meta_new(gl, blkno);
912 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
913 
914 		/* Unescape */
915 		if (esc) {
916 			__be32 *eptr = (__be32 *)bh_ip->b_data;
917 			*eptr = cpu_to_be32(GFS2_MAGIC);
918 		}
919 		mark_buffer_dirty(bh_ip);
920 
921 		brelse(bh_log);
922 		brelse(bh_ip);
923 
924 		sdp->sd_replayed_blocks++;
925 	}
926 
927 	return error;
928 }
929 
930 /* FIXME: sort out accounting for log blocks etc. */
931 
932 static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
933 {
934 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
935 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
936 
937 	if (error) {
938 		gfs2_meta_sync(ip->i_gl);
939 		return;
940 	}
941 	if (pass != 1)
942 		return;
943 
944 	/* data sync? */
945 	gfs2_meta_sync(ip->i_gl);
946 
947 	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
948 		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
949 }
950 
951 static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
952 {
953 	struct list_head *head = &sdp->sd_log_le_databuf;
954 	struct gfs2_bufdata *bd;
955 
956 	while (!list_empty(head)) {
957 		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
958 		list_del_init(&bd->bd_le.le_list);
959 		sdp->sd_log_num_databuf--;
960 		gfs2_unpin(sdp, bd->bd_bh, ai);
961 	}
962 	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
963 }
964 
965 
966 const struct gfs2_log_operations gfs2_buf_lops = {
967 	.lo_add = buf_lo_add,
968 	.lo_before_commit = buf_lo_before_commit,
969 	.lo_after_commit = buf_lo_after_commit,
970 	.lo_before_scan = buf_lo_before_scan,
971 	.lo_scan_elements = buf_lo_scan_elements,
972 	.lo_after_scan = buf_lo_after_scan,
973 	.lo_name = "buf",
974 };
975 
976 const struct gfs2_log_operations gfs2_revoke_lops = {
977 	.lo_add = revoke_lo_add,
978 	.lo_before_commit = revoke_lo_before_commit,
979 	.lo_after_commit = revoke_lo_after_commit,
980 	.lo_before_scan = revoke_lo_before_scan,
981 	.lo_scan_elements = revoke_lo_scan_elements,
982 	.lo_after_scan = revoke_lo_after_scan,
983 	.lo_name = "revoke",
984 };
985 
986 const struct gfs2_log_operations gfs2_rg_lops = {
987 	.lo_name = "rg",
988 };
989 
990 const struct gfs2_log_operations gfs2_databuf_lops = {
991 	.lo_add = databuf_lo_add,
992 	.lo_before_commit = databuf_lo_before_commit,
993 	.lo_after_commit = databuf_lo_after_commit,
994 	.lo_scan_elements = databuf_lo_scan_elements,
995 	.lo_after_scan = databuf_lo_after_scan,
996 	.lo_name = "databuf",
997 };
998 
999 const struct gfs2_log_operations *gfs2_log_ops[] = {
1000 	&gfs2_databuf_lops,
1001 	&gfs2_buf_lops,
1002 	&gfs2_rg_lops,
1003 	&gfs2_revoke_lops,
1004 	NULL,
1005 };
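
/*
 * The generic log code iterates over gfs2_log_ops[], calling whichever hooks
 * each element provides: lo_add as log elements join a transaction, the
 * before/after commit hooks around a log flush, and the scan hooks during
 * journal recovery. An entry such as gfs2_rg_lops, which supplies only a
 * name, is effectively a placeholder.
 */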
1006 
1007