xref: /openbmc/linux/fs/gfs2/lops.c (revision 3a8a9a10)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

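/**
 * glock_lo_add - Add a glock to the current transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the glock
 *
 * Takes a reference on the glock, marks it dirty and puts it on the
 * log's glock list, unless it is already there.
 */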
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}

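/**
 * glock_lo_after_commit - Release the glocks held by the committed transaction
 * @sdp: the filesystem
 * @ai: the AIL structure for this log flush
 *
 * Empties the log's glock list, dropping the reference taken in
 * glock_lo_add() for each entry.
 */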
static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}

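/**
 * buf_lo_add - Add a metadata buffer to the current transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the bufdata
 *
 * Puts the buffer on the transaction's buffer list, pins it in memory
 * and adds it to the log's list of metadata buffers.
 */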
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	if (!list_empty(&bd->bd_list_tr))
		return;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);

	tr->tr_num_buf_new++;
}

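/**
 * buf_lo_incore_commit - Empty the transaction's in-core buffer list
 * @sdp: the filesystem
 * @tr: the transaction being committed
 */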
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

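/**
 * buf_lo_before_commit - Write the pinned metadata buffers into the log
 * @sdp: the filesystem
 *
 * Writes a log descriptor listing up to "limit" block numbers, followed
 * by copies of the corresponding pinned buffers, repeating until all
 * metadata buffers have been written.
 */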
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	offset += (sizeof(__be64) - 1);
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		bh = gfs2_log_get_buf(sdp);
		sdp->sd_log_num_hdrs++;
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}

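/**
 * buf_lo_after_commit - Unpin the metadata buffers once the log is on disk
 * @sdp: the filesystem
 * @ai: the AIL structure the unpinned buffers are attached to
 */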
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

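/**
 * buf_lo_before_scan - Reset the metadata replay counters before recovery
 * @jd: the journal being recovered
 * @head: the journal's log header
 * @pass: the recovery pass number
 */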
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header *head, int pass)
{
	struct gfs2_inode *ip = jd->jd_inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

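/**
 * buf_lo_scan_elements - Replay the metadata blocks listed in a log descriptor
 * @jd: the journal being recovered
 * @start: the log block holding the descriptor
 * @ld: the log descriptor
 * @ptr: the array of block numbers that follows the descriptor
 * @pass: the recovery pass number
 *
 * Copies each logged block to its home location, skipping blocks that
 * have been revoked.
 *
 * Returns: errno
 */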
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = jd->jd_inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

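/**
 * buf_lo_after_scan - Sync the replayed metadata and report the result
 * @jd: the journal that was recovered
 * @error: the error (if any) from the scan passes
 * @pass: the recovery pass number
 */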
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = jd->jd_inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;

	if (error) {
		gfs2_meta_sync(ip->i_gl, DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
	        jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

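/**
 * revoke_lo_add - Add a revoke to the current transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the revoke structure
 */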
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;

	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}

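/**
 * revoke_lo_before_commit - Write the accumulated revokes into the log
 * @sdp: the filesystem
 *
 * Packs the revoked block numbers into a log descriptor block, spilling
 * into further log blocks (tagged GFS2_METATYPE_LB) when one block is
 * not enough.
 */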
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(uint64_t)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del_init(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(uint64_t) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(uint64_t);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}

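/**
 * revoke_lo_before_scan - Reset the revoke counter and note the log tail
 * @jd: the journal being recovered
 * @head: the journal's log header
 * @pass: the recovery pass number
 */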
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header *head, int pass)
{
	struct gfs2_inode *ip = jd->jd_inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

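/**
 * revoke_lo_scan_elements - Collect the revokes found in a log descriptor
 * @jd: the journal being recovered
 * @start: the log block holding the descriptor
 * @ld: the log descriptor
 * @ptr: the data following the descriptor (unused here)
 * @pass: the recovery pass number
 *
 * Returns: errno
 */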
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_inode *ip = jd->jd_inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	uint64_t blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(uint64_t) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(uint64_t);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = jd->jd_inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
	        jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

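/**
 * rg_lo_add - Add a resource group to the current transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the resource group
 */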
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);
	gfs2_rgrp_bh_hold(rgd);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 * @sdp: the filesystem
 * @le: the log element embedded in the bufdata
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time.
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here the tag is two
 *    __be64s: the block number (as for metadata) and a flag saying
 *    whether the data block needs escaping or not. This means we need
 *    a new log descriptor for every 251 or so data blocks, which isn't
 *    an enormous overhead but twice as much as for normal metadata
 *    blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = mapping->host->u.generic_ip;

	tr->tr_touched = 1;
	if (list_empty(&bd->bd_list_tr) &&
	    (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
		tr->tr_num_buf++;
		gfs2_trans_add_gl(bd->bd_gl);
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_buf_new++;
	}
	gfs2_log_lock(sdp);
	if (list_empty(&le->le_list)) {
		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
			sdp->sd_log_num_jdata++;
		sdp->sd_log_num_databuf++;
		list_add(&le->le_list, &sdp->sd_log_le_databuf);
	}
	gfs2_log_unlock(sdp);
}

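/**
 * gfs2_check_magic - Test whether a data block begins with GFS2_MAGIC
 * @bh: the data buffer to check
 *
 * Data blocks that happen to start with the magic number must be
 * escaped before they are written to the log, so that journal replay
 * cannot mistake them for metadata.
 *
 * Returns: 1 if the block needs escaping, 0 otherwise
 */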
static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	kunmap_atomic(page, KM_USER0);

	return rv;
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: the filesystem
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	offset += (2*sizeof(__be64) - 1);
	offset &= ~(2*sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);

	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while (total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;
		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* An ordered write buffer */
			if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				total_dbuf--;
				if (bd1->bd_bh) {
					get_bh(bd1->bd_bh);
					if (buffer_dirty(bd1->bd_bh)) {
						gfs2_log_unlock(sdp);
						wait_on_buffer(bd1->bd_bh);
						ll_rw_block(WRITE, 1,
							    &bd1->bd_bh);
						gfs2_log_lock(sdp);
					}
					brelse(bd1->bd_bh);
					continue;
				}
				continue;
			} else if (bd1->bd_bh) { /* A journaled buffer */
				int magic;
				gfs2_log_unlock(sdp);
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					sdp->sd_log_num_hdrs++;
					ld = (struct gfs2_log_descriptor *)
					     bh->b_data;
					ptr = (__be64 *)(bh->b_data + offset);
					ld->ld_header.mh_magic =
						cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type =
						cpu_to_be32(GFS2_METATYPE_LD);
					ld->ld_header.mh_format =
						cpu_to_be32(GFS2_FORMAT_LD);
					ld->ld_type =
						cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bd1->bd_bh);
				if (unlikely(magic != 0))
					set_buffer_escaped(bd1->bd_bh);
				gfs2_log_lock(sdp);
				if (n++ > num)
					break;
			}
		}
		gfs2_log_unlock(sdp);
		if (bh) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}
		n = 0;
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			if (!bd2->bd_bh)
				continue;
			/* copy buffer if it needs escaping */
			gfs2_log_unlock(sdp);
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;
				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data,
				       kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(page, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else {
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			}
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;

		bh = bd1->bd_bh;
		if (bh) {
			bh->b_private = NULL;
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
		} else
			gfs2_log_unlock(sdp);

		kfree(bd1);
	}

	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}

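/**
 * databuf_lo_scan_elements - Replay the journaled data blocks in a log descriptor
 * @jd: the journal being recovered
 * @start: the log block holding the descriptor
 * @ld: the log descriptor
 * @ptr: the (block number, escape flag) pairs that follow the descriptor
 * @pass: the recovery pass number
 *
 * As for metadata, but any escaped block has its GFS2_MAGIC value
 * restored before being written back.
 *
 * Returns: errno
 */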
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = jd->jd_inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	uint64_t esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = jd->jd_inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;

	if (error) {
		gfs2_meta_sync(ip->i_gl, DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

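/**
 * databuf_lo_after_commit - Unpin the journaled data buffers after the log flush
 * @sdp: the filesystem
 * @ai: the AIL structure the unpinned buffers are attached to
 */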
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}


const struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
	.lo_name = "glock"
};

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf"
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke"
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg"
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf"
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_glock_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	&gfs2_rg_lops,
	&gfs2_databuf_lops,
	NULL
};