xref: /openbmc/linux/fs/reiserfs/journal.c (revision 08cd84c81f27d5bd22ba958b7cae6d566c509280)
1 /*
2 ** Write ahead logging implementation copyright Chris Mason 2000
3 **
4 ** The background commits make this code very interrelated, and
5 ** overly complex.  I need to rethink things a bit.... The major players:
6 **
7 ** journal_begin -- call with the number of blocks you expect to log.
8 **                  If the current transaction is too
9 ** 		    old, it will block until the current transaction is
10 ** 		    finished, and then start a new one.
11 **		    Usually, your transaction will get joined in with
12 **                  previous ones for speed.
13 **
14 ** journal_join  -- same as journal_begin, but won't block on the current
15 **                  transaction regardless of age.  Don't ever call
16 **                  this.  Ever.  There are only two places it should be
17 **                  called from, and they are both inside this file.
18 **
19 ** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
20 **                       that might make them get sent to disk
21 **                       and then marks them BH_JDirty.  Puts the buffer head
22 **                       into the current transaction hash.
23 **
24 ** journal_end -- if the current transaction is batchable, it does nothing
25 **                   otherwise, it could do an async/synchronous commit, or
26 **                   a full flush of all log and real blocks in the
27 **                   transaction.
28 **
29 ** flush_old_commits -- if the current transaction is too old, it is ended and
30 **                      commit blocks are sent to disk.  Forces commit blocks
31 **                      to disk for all backgrounded commits that have been
32 **                      around too long.
33 **		     -- Note, if you call this as an immediate flush from
34 **		        within kupdate, it will ignore the immediate flag
35 */
36 
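/*
** A typical caller-side sequence, pieced together from the routines described
** above (an orientation sketch, not code from this file; jbegin_count is the
** caller's estimate of how many blocks it will log):
**
**	struct reiserfs_transaction_handle th;
**
**	journal_begin(&th, sb, jbegin_count);
**	reiserfs_prepare_for_journal(sb, bh, 1);
**	journal_mark_dirty(&th, sb, bh);
**	journal_end(&th, sb, jbegin_count);
*/
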
37 #include <linux/config.h>
38 #include <asm/uaccess.h>
39 #include <asm/system.h>
40 
41 #include <linux/time.h>
42 #include <asm/semaphore.h>
43 
44 #include <linux/vmalloc.h>
45 #include <linux/reiserfs_fs.h>
46 
47 #include <linux/kernel.h>
48 #include <linux/errno.h>
49 #include <linux/fcntl.h>
50 #include <linux/stat.h>
51 #include <linux/string.h>
52 #include <linux/smp_lock.h>
53 #include <linux/buffer_head.h>
54 #include <linux/workqueue.h>
55 #include <linux/writeback.h>
56 #include <linux/blkdev.h>
57 
58 /* gets a struct reiserfs_journal_list * from a list head */
59 #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
60                                j_list))
61 #define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
62                                j_working_list))
63 
64 /* the number of mounted filesystems.  This is used to decide when to
65 ** start and kill the commit workqueue
66 */
67 static int reiserfs_mounted_fs_count;
68 
69 static struct workqueue_struct *commit_wq;
70 
71 #define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
72 				   structs at 4k */
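/* a sketch of that 4k layout (our annotation, not from the original): three
** 4-byte ids + 1018 4-byte block numbers + 12 bytes reserved at the end of
** the block for the magic = 12 + 4072 + 12 == 4096 */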
73 #define BUFNR 64		/* read ahead */
74 
75 /* cnode state bits.  Move these into reiserfs_fs.h */
76 
77 #define BLOCK_FREED 2		/* this block was freed, and can't be written.  */
78 #define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */
79 
80 #define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
81 #define BLOCK_DIRTIED 5
82 
83 /* journal list state bits */
84 #define LIST_TOUCHED 1
85 #define LIST_DIRTY   2
86 #define LIST_COMMIT_PENDING  4	/* someone will commit this list */
87 
88 /* flags for do_journal_end */
89 #define FLUSH_ALL   1		/* flush commit and real blocks */
90 #define COMMIT_NOW  2		/* end and commit this transaction */
91 #define WAIT        4		/* wait for the log blocks to hit the disk */
92 
93 static int do_journal_end(struct reiserfs_transaction_handle *,
94 			  struct super_block *, unsigned long nblocks,
95 			  int flags);
96 static int flush_journal_list(struct super_block *s,
97 			      struct reiserfs_journal_list *jl, int flushall);
98 static int flush_commit_list(struct super_block *s,
99 			     struct reiserfs_journal_list *jl, int flushall);
100 static int can_dirty(struct reiserfs_journal_cnode *cn);
101 static int journal_join(struct reiserfs_transaction_handle *th,
102 			struct super_block *p_s_sb, unsigned long nblocks);
103 static int release_journal_dev(struct super_block *super,
104 			       struct reiserfs_journal *journal);
105 static int dirty_one_transaction(struct super_block *s,
106 				 struct reiserfs_journal_list *jl);
107 static void flush_async_commits(void *p);
108 static void queue_log_writer(struct super_block *s);
109 
110 /* values for join in do_journal_begin_r */
111 enum {
112 	JBEGIN_REG = 0,		/* regular journal begin */
113 	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
114 	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
115 };
116 
117 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
118 			      struct super_block *p_s_sb,
119 			      unsigned long nblocks, int join);
120 
121 static void init_journal_hash(struct super_block *p_s_sb)
122 {
123 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
124 	memset(journal->j_hash_table, 0,
125 	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
126 }
127 
128 /*
129 ** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
130 ** trigger a schedule after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
131 ** more details.
132 */
133 static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
134 {
135 	if (bh) {
136 		clear_buffer_dirty(bh);
137 		clear_buffer_journal_test(bh);
138 	}
139 	return 0;
140 }
141 
142 static void disable_barrier(struct super_block *s)
143 {
144 	REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
145 	printk("reiserfs: disabling flush barriers on %s\n",
146 	       reiserfs_bdevname(s));
147 }
148 
149 static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
150 							 *p_s_sb)
151 {
152 	struct reiserfs_bitmap_node *bn;
153 	static int id;
154 
155 	bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS,
156 			      p_s_sb);
157 	if (!bn) {
158 		return NULL;
159 	}
160 	bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb);
161 	if (!bn->data) {
162 		reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
163 		return NULL;
164 	}
165 	bn->id = id++;
166 	memset(bn->data, 0, p_s_sb->s_blocksize);
167 	INIT_LIST_HEAD(&bn->list);
168 	return bn;
169 }
170 
171 static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
172 {
173 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
174 	struct reiserfs_bitmap_node *bn = NULL;
175 	struct list_head *entry = journal->j_bitmap_nodes.next;
176 
177 	journal->j_used_bitmap_nodes++;
178       repeat:
179 
180 	if (entry != &journal->j_bitmap_nodes) {
181 		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
182 		list_del(entry);
183 		memset(bn->data, 0, p_s_sb->s_blocksize);
184 		journal->j_free_bitmap_nodes--;
185 		return bn;
186 	}
187 	bn = allocate_bitmap_node(p_s_sb);
188 	if (!bn) {
189 		yield();
190 		goto repeat;
191 	}
192 	return bn;
193 }
194 static inline void free_bitmap_node(struct super_block *p_s_sb,
195 				    struct reiserfs_bitmap_node *bn)
196 {
197 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
198 	journal->j_used_bitmap_nodes--;
199 	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
200 		reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb);
201 		reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
202 	} else {
203 		list_add(&bn->list, &journal->j_bitmap_nodes);
204 		journal->j_free_bitmap_nodes++;
205 	}
206 }
207 
208 static void allocate_bitmap_nodes(struct super_block *p_s_sb)
209 {
210 	int i;
211 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
212 	struct reiserfs_bitmap_node *bn = NULL;
213 	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
214 		bn = allocate_bitmap_node(p_s_sb);
215 		if (bn) {
216 			list_add(&bn->list, &journal->j_bitmap_nodes);
217 			journal->j_free_bitmap_nodes++;
218 		} else {
219 			break;	// this is ok, we'll try again when more are needed
220 		}
221 	}
222 }
223 
224 static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
225 				  struct reiserfs_list_bitmap *jb)
226 {
227 	int bmap_nr = block / (p_s_sb->s_blocksize << 3);
228 	int bit_nr = block % (p_s_sb->s_blocksize << 3);
229 
230 	if (!jb->bitmaps[bmap_nr]) {
231 		jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
232 	}
233 	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
234 	return 0;
235 }
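/* worked example (our annotation): with a 4k blocksize there are
** 4096 << 3 == 32768 bits per bitmap node, so block 40000 maps to
** bmap_nr 1 and bit_nr 7232 */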
236 
237 static void cleanup_bitmap_list(struct super_block *p_s_sb,
238 				struct reiserfs_list_bitmap *jb)
239 {
240 	int i;
241 	if (jb->bitmaps == NULL)
242 		return;
243 
244 	for (i = 0; i < SB_BMAP_NR(p_s_sb); i++) {
245 		if (jb->bitmaps[i]) {
246 			free_bitmap_node(p_s_sb, jb->bitmaps[i]);
247 			jb->bitmaps[i] = NULL;
248 		}
249 	}
250 }
251 
252 /*
253 ** only call this on FS unmount.
254 */
255 static int free_list_bitmaps(struct super_block *p_s_sb,
256 			     struct reiserfs_list_bitmap *jb_array)
257 {
258 	int i;
259 	struct reiserfs_list_bitmap *jb;
260 	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
261 		jb = jb_array + i;
262 		jb->journal_list = NULL;
263 		cleanup_bitmap_list(p_s_sb, jb);
264 		vfree(jb->bitmaps);
265 		jb->bitmaps = NULL;
266 	}
267 	return 0;
268 }
269 
270 static int free_bitmap_nodes(struct super_block *p_s_sb)
271 {
272 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
273 	struct list_head *next = journal->j_bitmap_nodes.next;
274 	struct reiserfs_bitmap_node *bn;
275 
276 	while (next != &journal->j_bitmap_nodes) {
277 		bn = list_entry(next, struct reiserfs_bitmap_node, list);
278 		list_del(next);
279 		reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb);
280 		reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
281 		next = journal->j_bitmap_nodes.next;
282 		journal->j_free_bitmap_nodes--;
283 	}
284 
285 	return 0;
286 }
287 
288 /*
289 ** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
290 ** jb_array is the array to be filled in.
291 */
292 int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
293 				   struct reiserfs_list_bitmap *jb_array,
294 				   int bmap_nr)
295 {
296 	int i;
297 	int failed = 0;
298 	struct reiserfs_list_bitmap *jb;
299 	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);
300 
301 	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
302 		jb = jb_array + i;
303 		jb->journal_list = NULL;
304 		jb->bitmaps = vmalloc(mem);
305 		if (!jb->bitmaps) {
306 			reiserfs_warning(p_s_sb,
307 					 "clm-2000, unable to allocate bitmaps for journal lists");
308 			failed = 1;
309 			break;
310 		}
311 		memset(jb->bitmaps, 0, mem);
312 	}
313 	if (failed) {
314 		free_list_bitmaps(p_s_sb, jb_array);
315 		return -1;
316 	}
317 	return 0;
318 }
319 
320 /*
321 ** find an available list bitmap.  If you can't find one, flush a commit list
322 ** and try again
323 */
324 static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
325 						    struct reiserfs_journal_list
326 						    *jl)
327 {
328 	int i, j;
329 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
330 	struct reiserfs_list_bitmap *jb = NULL;
331 
332 	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
333 		i = journal->j_list_bitmap_index;
334 		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
335 		jb = journal->j_list_bitmap + i;
336 		if (journal->j_list_bitmap[i].journal_list) {
337 			flush_commit_list(p_s_sb,
338 					  journal->j_list_bitmap[i].
339 					  journal_list, 1);
340 			if (!journal->j_list_bitmap[i].journal_list) {
341 				break;
342 			}
343 		} else {
344 			break;
345 		}
346 	}
347 	if (jb->journal_list) {	/* double check to make sure it flushed correctly */
348 		return NULL;
349 	}
350 	jb->journal_list = jl;
351 	return jb;
352 }
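/* note on get_list_bitmap above: the JOURNAL_NUM_BITMAPS list bitmaps are
** handed out round-robin through j_list_bitmap_index, and we make up to
** three full passes, flushing busy entries, before giving up and
** returning NULL */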
353 
354 /*
355 ** allocates a new chunk of X nodes, and links them all together as a list.
356 ** Uses the cnode->next and cnode->prev pointers
357 ** returns NULL on failure
358 */
359 static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
360 {
361 	struct reiserfs_journal_cnode *head;
362 	int i;
363 	if (num_cnodes <= 0) {
364 		return NULL;
365 	}
366 	head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
367 	if (!head) {
368 		return NULL;
369 	}
370 	memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
371 	head[0].prev = NULL;
372 	head[0].next = head + 1;
373 	for (i = 1; i < num_cnodes; i++) {
374 		head[i].prev = head + (i - 1);
375 		head[i].next = head + (i + 1);	/* the last one is fixed up after the loop */
376 	}
377 	head[num_cnodes - 1].next = NULL;
378 	return head;
379 }
380 
381 /*
382 ** pulls a cnode off the free list, or returns NULL on failure
383 */
384 static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
385 {
386 	struct reiserfs_journal_cnode *cn;
387 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
388 
389 	reiserfs_check_lock_depth(p_s_sb, "get_cnode");
390 
391 	if (journal->j_cnode_free <= 0) {
392 		return NULL;
393 	}
394 	journal->j_cnode_used++;
395 	journal->j_cnode_free--;
396 	cn = journal->j_cnode_free_list;
397 	if (!cn) {
398 		return cn;
399 	}
400 	if (cn->next) {
401 		cn->next->prev = NULL;
402 	}
403 	journal->j_cnode_free_list = cn->next;
404 	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
405 	return cn;
406 }
407 
408 /*
409 ** returns a cnode to the free list
410 */
411 static void free_cnode(struct super_block *p_s_sb,
412 		       struct reiserfs_journal_cnode *cn)
413 {
414 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
415 
416 	reiserfs_check_lock_depth(p_s_sb, "free_cnode");
417 
418 	journal->j_cnode_used--;
419 	journal->j_cnode_free++;
420 	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
421 	cn->next = journal->j_cnode_free_list;
422 	if (journal->j_cnode_free_list) {
423 		journal->j_cnode_free_list->prev = cn;
424 	}
425 	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
426 	journal->j_cnode_free_list = cn;
427 }
428 
429 static void clear_prepared_bits(struct buffer_head *bh)
430 {
431 	clear_buffer_journal_prepared(bh);
432 	clear_buffer_journal_restore_dirty(bh);
433 }
434 
435 /* utility function to force a panic if it is called without the big
436 ** kernel lock held.  caller is the string printed just before the panic message
437 */
438 void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
439 {
440 #ifdef CONFIG_SMP
441 	if (current->lock_depth < 0) {
442 		reiserfs_panic(sb, "%s called without kernel lock held",
443 			       caller);
444 	}
445 #else
446 	;
447 #endif
448 }
449 
450 /* return a cnode with the same super block and block number from the table, or NULL if not found */
451 static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
452 								  super_block
453 								  *sb,
454 								  struct
455 								  reiserfs_journal_cnode
456 								  **table,
457 								  long bl)
458 {
459 	struct reiserfs_journal_cnode *cn;
460 	cn = journal_hash(table, sb, bl);
461 	while (cn) {
462 		if (cn->blocknr == bl && cn->sb == sb)
463 			return cn;
464 		cn = cn->hnext;
465 	}
466 	return NULL;
467 }
468 
469 /*
470 ** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
471 ** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
472 ** being overwritten by a replay after crashing.
473 **
474 ** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
475 ** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
476 ** sure you never write the block without logging it.
477 **
478 ** next_zero_bit is a suggestion about the next block to try for find_forward.
479 ** when bl is rejected because it is set in a journal list bitmap, we search
480 ** for the next zero bit in the bitmap that rejected bl.  Then, we return that
481 ** through next_zero_bit for find_forward to try.
482 **
483 ** Just because we return something in next_zero_bit does not mean we won't
484 ** reject it on the next call to reiserfs_in_journal
485 **
486 */
487 int reiserfs_in_journal(struct super_block *p_s_sb,
488 			int bmap_nr, int bit_nr, int search_all,
489 			b_blocknr_t * next_zero_bit)
490 {
491 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
492 	struct reiserfs_journal_cnode *cn;
493 	struct reiserfs_list_bitmap *jb;
494 	int i;
495 	unsigned long bl;
496 
497 	*next_zero_bit = 0;	/* always start this at zero. */
498 
499 	PROC_INFO_INC(p_s_sb, journal.in_journal);
500 	/* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
501 	 ** if we crash before the transaction that freed it commits,  this transaction won't
502 	 ** have committed either, and the block will never be written
503 	 */
504 	if (search_all) {
505 		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
506 			PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
507 			jb = journal->j_list_bitmap + i;
508 			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
509 			    test_bit(bit_nr,
510 				     (unsigned long *)jb->bitmaps[bmap_nr]->
511 				     data)) {
512 				*next_zero_bit =
513 				    find_next_zero_bit((unsigned long *)
514 						       (jb->bitmaps[bmap_nr]->
515 							data),
516 						       p_s_sb->s_blocksize << 3,
517 						       bit_nr + 1);
518 				return 1;
519 			}
520 		}
521 	}
522 
523 	bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
524 	/* is it in any old transactions? */
525 	if (search_all
526 	    && (cn =
527 		get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
528 		return 1;
529 	}
530 
531 	/* is it in the current transaction?  This should never happen */
532 	if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
533 		BUG();
534 		return 1;
535 	}
536 
537 	PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
538 	/* safe for reuse */
539 	return 0;
540 }
541 
542 /* insert cn into table
543 */
544 static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
545 				       struct reiserfs_journal_cnode *cn)
546 {
547 	struct reiserfs_journal_cnode *cn_orig;
548 
549 	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
550 	cn->hnext = cn_orig;
551 	cn->hprev = NULL;
552 	if (cn_orig) {
553 		cn_orig->hprev = cn;
554 	}
555 	journal_hash(table, cn->sb, cn->blocknr) = cn;
556 }
557 
558 /* lock the current transaction */
559 static inline void lock_journal(struct super_block *p_s_sb)
560 {
561 	PROC_INFO_INC(p_s_sb, journal.lock_journal);
562 	down(&SB_JOURNAL(p_s_sb)->j_lock);
563 }
564 
565 /* unlock the current transaction */
566 static inline void unlock_journal(struct super_block *p_s_sb)
567 {
568 	up(&SB_JOURNAL(p_s_sb)->j_lock);
569 }
570 
571 static inline void get_journal_list(struct reiserfs_journal_list *jl)
572 {
573 	jl->j_refcount++;
574 }
575 
576 static inline void put_journal_list(struct super_block *s,
577 				    struct reiserfs_journal_list *jl)
578 {
579 	if (jl->j_refcount < 1) {
580 		reiserfs_panic(s, "trans id %lu, refcount at %d",
581 			       jl->j_trans_id, jl->j_refcount);
582 	}
583 	if (--jl->j_refcount == 0)
584 		reiserfs_kfree(jl, sizeof(struct reiserfs_journal_list), s);
585 }
586 
587 /*
588 ** this used to be much more involved, and I'm keeping it just in case things get ugly again.
589 ** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
590 ** transaction.
591 */
592 static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
593 					   struct reiserfs_journal_list *jl)
594 {
595 
596 	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
597 	if (jb) {
598 		cleanup_bitmap_list(p_s_sb, jb);
599 	}
600 	jl->j_list_bitmap->journal_list = NULL;
601 	jl->j_list_bitmap = NULL;
602 }
603 
604 static int journal_list_still_alive(struct super_block *s,
605 				    unsigned long trans_id)
606 {
607 	struct reiserfs_journal *journal = SB_JOURNAL(s);
608 	struct list_head *entry = &journal->j_journal_list;
609 	struct reiserfs_journal_list *jl;
610 
611 	if (!list_empty(entry)) {
612 		jl = JOURNAL_LIST_ENTRY(entry->next);
613 		if (jl->j_trans_id <= trans_id) {
614 			return 1;
615 		}
616 	}
617 	return 0;
618 }
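/* note: journal lists are kept oldest-first on j_journal_list, so the
** entry->next peeked at above is the oldest list still around; if even its
** trans_id is newer than the one asked about, that transaction is gone */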
619 
620 static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
621 {
622 	char b[BDEVNAME_SIZE];
623 
624 	if (buffer_journaled(bh)) {
625 		reiserfs_warning(NULL,
626 				 "clm-2084: pinned buffer %lu:%s sent to disk",
627 				 bh->b_blocknr, bdevname(bh->b_bdev, b));
628 	}
629 	if (uptodate)
630 		set_buffer_uptodate(bh);
631 	else
632 		clear_buffer_uptodate(bh);
633 	unlock_buffer(bh);
634 	put_bh(bh);
635 }
636 
637 static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
638 {
639 	if (uptodate)
640 		set_buffer_uptodate(bh);
641 	else
642 		clear_buffer_uptodate(bh);
643 	unlock_buffer(bh);
644 	put_bh(bh);
645 }
646 
647 static void submit_logged_buffer(struct buffer_head *bh)
648 {
649 	get_bh(bh);
650 	bh->b_end_io = reiserfs_end_buffer_io_sync;
651 	clear_buffer_journal_new(bh);
652 	clear_buffer_dirty(bh);
653 	if (!test_clear_buffer_journal_test(bh))
654 		BUG();
655 	if (!buffer_uptodate(bh))
656 		BUG();
657 	submit_bh(WRITE, bh);
658 }
659 
660 static void submit_ordered_buffer(struct buffer_head *bh)
661 {
662 	get_bh(bh);
663 	bh->b_end_io = reiserfs_end_ordered_io;
664 	clear_buffer_dirty(bh);
665 	if (!buffer_uptodate(bh))
666 		BUG();
667 	submit_bh(WRITE, bh);
668 }
669 
670 static int submit_barrier_buffer(struct buffer_head *bh)
671 {
672 	get_bh(bh);
673 	bh->b_end_io = reiserfs_end_ordered_io;
674 	clear_buffer_dirty(bh);
675 	if (!buffer_uptodate(bh))
676 		BUG();
677 	return submit_bh(WRITE_BARRIER, bh);
678 }
679 
680 static void check_barrier_completion(struct super_block *s,
681 				     struct buffer_head *bh)
682 {
683 	if (buffer_eopnotsupp(bh)) {
684 		clear_buffer_eopnotsupp(bh);
685 		disable_barrier(s);
686 		set_buffer_uptodate(bh);
687 		set_buffer_dirty(bh);
688 		sync_dirty_buffer(bh);
689 	}
690 }
691 
692 #define CHUNK_SIZE 32
693 struct buffer_chunk {
694 	struct buffer_head *bh[CHUNK_SIZE];
695 	int nr;
696 };
697 
698 static void write_chunk(struct buffer_chunk *chunk)
699 {
700 	int i;
701 	get_fs_excl();
702 	for (i = 0; i < chunk->nr; i++) {
703 		submit_logged_buffer(chunk->bh[i]);
704 	}
705 	chunk->nr = 0;
706 	put_fs_excl();
707 }
708 
709 static void write_ordered_chunk(struct buffer_chunk *chunk)
710 {
711 	int i;
712 	get_fs_excl();
713 	for (i = 0; i < chunk->nr; i++) {
714 		submit_ordered_buffer(chunk->bh[i]);
715 	}
716 	chunk->nr = 0;
717 	put_fs_excl();
718 }
719 
720 static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
721 			spinlock_t * lock, void (fn) (struct buffer_chunk *))
722 {
723 	int ret = 0;
724 	if (chunk->nr >= CHUNK_SIZE)
725 		BUG();
726 	chunk->bh[chunk->nr++] = bh;
727 	if (chunk->nr >= CHUNK_SIZE) {
728 		ret = 1;
729 		if (lock)
730 			spin_unlock(lock);
731 		fn(chunk);
732 		if (lock)
733 			spin_lock(lock);
734 	}
735 	return ret;
736 }
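/* how callers drive add_to_chunk (the same pattern write_ordered_buffers
** below uses): keep a chunk on the stack, let add_to_chunk flush full ones,
** and flush any partial chunk yourself:
**
**	struct buffer_chunk chunk;
**	chunk.nr = 0;
**	...
**	add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
**	...
**	if (chunk.nr)
**		write_ordered_chunk(&chunk);
*/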
737 
738 static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
739 static struct reiserfs_jh *alloc_jh(void)
740 {
741 	struct reiserfs_jh *jh;
742 	while (1) {
743 		jh = kmalloc(sizeof(*jh), GFP_NOFS);
744 		if (jh) {
745 			atomic_inc(&nr_reiserfs_jh);
746 			return jh;
747 		}
748 		yield();
749 	}
750 }
751 
752 /*
753  * we want to free the jh when the buffer has been written
754  * and waited on
755  */
756 void reiserfs_free_jh(struct buffer_head *bh)
757 {
758 	struct reiserfs_jh *jh;
759 
760 	jh = bh->b_private;
761 	if (jh) {
762 		bh->b_private = NULL;
763 		jh->bh = NULL;
764 		list_del_init(&jh->list);
765 		kfree(jh);
766 		if (atomic_read(&nr_reiserfs_jh) <= 0)
767 			BUG();
768 		atomic_dec(&nr_reiserfs_jh);
769 		put_bh(bh);
770 	}
771 }
772 
773 static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
774 			   int tail)
775 {
776 	struct reiserfs_jh *jh;
777 
778 	if (bh->b_private) {
779 		spin_lock(&j->j_dirty_buffers_lock);
780 		if (!bh->b_private) {
781 			spin_unlock(&j->j_dirty_buffers_lock);
782 			goto no_jh;
783 		}
784 		jh = bh->b_private;
785 		list_del_init(&jh->list);
786 	} else {
787 	      no_jh:
788 		get_bh(bh);
789 		jh = alloc_jh();
790 		spin_lock(&j->j_dirty_buffers_lock);
791 		/* buffer must be locked for __add_jh, so there should never be
792 		 * two adds at the same time
793 		 */
794 		if (bh->b_private)
795 			BUG();
796 		jh->bh = bh;
797 		bh->b_private = jh;
798 	}
799 	jh->jl = j->j_current_jl;
800 	if (tail)
801 		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
802 	else {
803 		list_add_tail(&jh->list, &jh->jl->j_bh_list);
804 	}
805 	spin_unlock(&j->j_dirty_buffers_lock);
806 	return 0;
807 }
808 
809 int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
810 {
811 	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
812 }
813 int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
814 {
815 	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
816 }
817 
818 #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
819 static int write_ordered_buffers(spinlock_t * lock,
820 				 struct reiserfs_journal *j,
821 				 struct reiserfs_journal_list *jl,
822 				 struct list_head *list)
823 {
824 	struct buffer_head *bh;
825 	struct reiserfs_jh *jh;
826 	int ret = j->j_errno;
827 	struct buffer_chunk chunk;
828 	struct list_head tmp;
829 	INIT_LIST_HEAD(&tmp);
830 
831 	chunk.nr = 0;
832 	spin_lock(lock);
833 	while (!list_empty(list)) {
834 		jh = JH_ENTRY(list->next);
835 		bh = jh->bh;
836 		get_bh(bh);
837 		if (test_set_buffer_locked(bh)) {
838 			if (!buffer_dirty(bh)) {
839 				list_del_init(&jh->list);
840 				list_add(&jh->list, &tmp);
841 				goto loop_next;
842 			}
843 			spin_unlock(lock);
844 			if (chunk.nr)
845 				write_ordered_chunk(&chunk);
846 			wait_on_buffer(bh);
847 			cond_resched();
848 			spin_lock(lock);
849 			goto loop_next;
850 		}
851 		if (buffer_dirty(bh)) {
852 			list_del_init(&jh->list);
853 			list_add(&jh->list, &tmp);
854 			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
855 		} else {
856 			reiserfs_free_jh(bh);
857 			unlock_buffer(bh);
858 		}
859 	      loop_next:
860 		put_bh(bh);
861 		cond_resched_lock(lock);
862 	}
863 	if (chunk.nr) {
864 		spin_unlock(lock);
865 		write_ordered_chunk(&chunk);
866 		spin_lock(lock);
867 	}
868 	while (!list_empty(&tmp)) {
869 		jh = JH_ENTRY(tmp.prev);
870 		bh = jh->bh;
871 		get_bh(bh);
872 		reiserfs_free_jh(bh);
873 
874 		if (buffer_locked(bh)) {
875 			spin_unlock(lock);
876 			wait_on_buffer(bh);
877 			spin_lock(lock);
878 		}
879 		if (!buffer_uptodate(bh)) {
880 			ret = -EIO;
881 		}
882 		put_bh(bh);
883 		cond_resched_lock(lock);
884 	}
885 	spin_unlock(lock);
886 	return ret;
887 }
888 
889 static int flush_older_commits(struct super_block *s,
890 			       struct reiserfs_journal_list *jl)
891 {
892 	struct reiserfs_journal *journal = SB_JOURNAL(s);
893 	struct reiserfs_journal_list *other_jl;
894 	struct reiserfs_journal_list *first_jl;
895 	struct list_head *entry;
896 	unsigned long trans_id = jl->j_trans_id;
897 	unsigned long other_trans_id;
898 	unsigned long first_trans_id;
899 
900       find_first:
901 	/*
902 	 * first we walk backwards to find the oldest uncommitted transaction
903 	 */
904 	first_jl = jl;
905 	entry = jl->j_list.prev;
906 	while (1) {
907 		other_jl = JOURNAL_LIST_ENTRY(entry);
908 		if (entry == &journal->j_journal_list ||
909 		    atomic_read(&other_jl->j_older_commits_done))
910 			break;
911 
912 		first_jl = other_jl;
913 		entry = other_jl->j_list.prev;
914 	}
915 
916 	/* if we didn't find any older uncommitted transactions, return now */
917 	if (first_jl == jl) {
918 		return 0;
919 	}
920 
921 	first_trans_id = first_jl->j_trans_id;
922 
923 	entry = &first_jl->j_list;
924 	while (1) {
925 		other_jl = JOURNAL_LIST_ENTRY(entry);
926 		other_trans_id = other_jl->j_trans_id;
927 
928 		if (other_trans_id < trans_id) {
929 			if (atomic_read(&other_jl->j_commit_left) != 0) {
930 				flush_commit_list(s, other_jl, 0);
931 
932 				/* list we were called with is gone, return */
933 				if (!journal_list_still_alive(s, trans_id))
934 					return 1;
935 
936 				/* the one we just flushed is gone, this means all
937 				 * older lists are also gone, so first_jl is no longer
938 				 * valid either.  Go back to the beginning.
939 				 */
940 				if (!journal_list_still_alive
941 				    (s, other_trans_id)) {
942 					goto find_first;
943 				}
944 			}
945 			entry = entry->next;
946 			if (entry == &journal->j_journal_list)
947 				return 0;
948 		} else {
949 			return 0;
950 		}
951 	}
952 	return 0;
953 }
954 int reiserfs_async_progress_wait(struct super_block *s)
955 {
956 	DEFINE_WAIT(wait);
957 	struct reiserfs_journal *j = SB_JOURNAL(s);
958 	if (atomic_read(&j->j_async_throttle))
959 		blk_congestion_wait(WRITE, HZ / 10);
960 	return 0;
961 }
962 
963 /*
964 ** if this journal list still has commit blocks unflushed, send them to disk.
965 **
966 ** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
967 ** Before the commit block can be written, every other log block must be safely on disk
968 **
969 */
970 static int flush_commit_list(struct super_block *s,
971 			     struct reiserfs_journal_list *jl, int flushall)
972 {
973 	int i;
974 	int bn;
975 	struct buffer_head *tbh = NULL;
976 	unsigned long trans_id = jl->j_trans_id;
977 	struct reiserfs_journal *journal = SB_JOURNAL(s);
978 	int barrier = 0;
979 	int retval = 0;
980 
981 	reiserfs_check_lock_depth(s, "flush_commit_list");
982 
983 	if (atomic_read(&jl->j_older_commits_done)) {
984 		return 0;
985 	}
986 
987 	get_fs_excl();
988 
989 	/* before we can put our commit blocks on disk, we have to make sure everyone older than
990 	 ** us is on disk too
991 	 */
992 	BUG_ON(jl->j_len <= 0);
993 	BUG_ON(trans_id == journal->j_trans_id);
994 
995 	get_journal_list(jl);
996 	if (flushall) {
997 		if (flush_older_commits(s, jl) == 1) {
998 			/* list disappeared during flush_older_commits.  return */
999 			goto put_jl;
1000 		}
1001 	}
1002 
1003 	/* make sure nobody is trying to flush this one at the same time */
1004 	down(&jl->j_commit_lock);
1005 	if (!journal_list_still_alive(s, trans_id)) {
1006 		up(&jl->j_commit_lock);
1007 		goto put_jl;
1008 	}
1009 	BUG_ON(jl->j_trans_id == 0);
1010 
1011 	/* this commit is done, exit */
1012 	if (atomic_read(&(jl->j_commit_left)) <= 0) {
1013 		if (flushall) {
1014 			atomic_set(&(jl->j_older_commits_done), 1);
1015 		}
1016 		up(&jl->j_commit_lock);
1017 		goto put_jl;
1018 	}
1019 
1020 	if (!list_empty(&jl->j_bh_list)) {
1021 		unlock_kernel();
1022 		write_ordered_buffers(&journal->j_dirty_buffers_lock,
1023 				      journal, jl, &jl->j_bh_list);
1024 		lock_kernel();
1025 	}
1026 	BUG_ON(!list_empty(&jl->j_bh_list));
1027 	/*
1028 	 * for the description block and all the log blocks, submit any buffers
1029 	 * that haven't already reached the disk
1030 	 */
1031 	atomic_inc(&journal->j_async_throttle);
1032 	for (i = 0; i < (jl->j_len + 1); i++) {
1033 		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
1034 		    SB_ONDISK_JOURNAL_SIZE(s);
1035 		tbh = journal_find_get_block(s, bn);
1036 		if (buffer_dirty(tbh))	/* redundant, ll_rw_block() checks */
1037 			ll_rw_block(WRITE, 1, &tbh);
1038 		put_bh(tbh);
1039 	}
1040 	atomic_dec(&journal->j_async_throttle);
1041 
1042 	/* wait on everything written so far before writing the commit
1043 	 * if we are in barrier mode, send the commit down now
1044 	 */
1045 	barrier = reiserfs_barrier_flush(s);
1046 	if (barrier) {
1047 		int ret;
1048 		lock_buffer(jl->j_commit_bh);
1049 		ret = submit_barrier_buffer(jl->j_commit_bh);
1050 		if (ret == -EOPNOTSUPP) {
1051 			set_buffer_uptodate(jl->j_commit_bh);
1052 			disable_barrier(s);
1053 			barrier = 0;
1054 		}
1055 	}
1056 	for (i = 0; i < (jl->j_len + 1); i++) {
1057 		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
1058 		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
1059 		tbh = journal_find_get_block(s, bn);
1060 		wait_on_buffer(tbh);
1061 		// since we're using ll_rw_block above, it might have skipped over
1062 		// a locked buffer.  Double check here
1063 		//
1064 		if (buffer_dirty(tbh))	/* redundant, sync_dirty_buffer() checks */
1065 			sync_dirty_buffer(tbh);
1066 		if (unlikely(!buffer_uptodate(tbh))) {
1067 #ifdef CONFIG_REISERFS_CHECK
1068 			reiserfs_warning(s, "journal-601, buffer write failed");
1069 #endif
1070 			retval = -EIO;
1071 		}
1072 		put_bh(tbh);	/* once for journal_find_get_block */
1073 		put_bh(tbh);	/* once due to original getblk in do_journal_end */
1074 		atomic_dec(&(jl->j_commit_left));
1075 	}
1076 
1077 	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);
1078 
1079 	if (!barrier) {
1080 		if (buffer_dirty(jl->j_commit_bh))
1081 			BUG();
1082 		mark_buffer_dirty(jl->j_commit_bh);
1083 		sync_dirty_buffer(jl->j_commit_bh);
1084 	} else
1085 		wait_on_buffer(jl->j_commit_bh);
1086 
1087 	check_barrier_completion(s, jl->j_commit_bh);
1088 
1089 	/* If there was a write error in the journal - we can't commit this
1090 	 * transaction - it will be invalid and, if successful, will just end
1091 	 * up propagating the write error out to the filesystem. */
1092 	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
1093 #ifdef CONFIG_REISERFS_CHECK
1094 		reiserfs_warning(s, "journal-615: buffer write failed");
1095 #endif
1096 		retval = -EIO;
1097 	}
1098 	bforget(jl->j_commit_bh);
1099 	if (journal->j_last_commit_id != 0 &&
1100 	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
1101 		reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
1102 				 journal->j_last_commit_id, jl->j_trans_id);
1103 	}
1104 	journal->j_last_commit_id = jl->j_trans_id;
1105 
1106 	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
1107 	cleanup_freed_for_journal_list(s, jl);
1108 
1109 	retval = retval ? retval : journal->j_errno;
1110 
1111 	/* mark the metadata dirty */
1112 	if (!retval)
1113 		dirty_one_transaction(s, jl);
1114 	atomic_dec(&(jl->j_commit_left));
1115 
1116 	if (flushall) {
1117 		atomic_set(&(jl->j_older_commits_done), 1);
1118 	}
1119 	up(&jl->j_commit_lock);
1120       put_jl:
1121 	put_journal_list(s, jl);
1122 
1123 	if (retval)
1124 		reiserfs_abort(s, retval, "Journal write error in %s",
1125 			       __FUNCTION__);
1126 	put_fs_excl();
1127 	return retval;
1128 }
1129 
1130 /*
1131 ** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
1132 ** returns NULL if it can't find anything
1133 */
1134 static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
1135 							  reiserfs_journal_cnode
1136 							  *cn)
1137 {
1138 	struct super_block *sb = cn->sb;
1139 	b_blocknr_t blocknr = cn->blocknr;
1140 
1141 	cn = cn->hprev;
1142 	while (cn) {
1143 		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
1144 			return cn->jlist;
1145 		}
1146 		cn = cn->hprev;
1147 	}
1148 	return NULL;
1149 }
1150 
1151 static void remove_journal_hash(struct super_block *,
1152 				struct reiserfs_journal_cnode **,
1153 				struct reiserfs_journal_list *, unsigned long,
1154 				int);
1155 
1156 /*
1157 ** once all the real blocks have been flushed, it is safe to remove them from the
1158 ** journal list for this transaction.  Aside from freeing the cnode, this also allows the
1159 ** block to be reallocated for data blocks if it had been deleted.
1160 */
1161 static void remove_all_from_journal_list(struct super_block *p_s_sb,
1162 					 struct reiserfs_journal_list *jl,
1163 					 int debug)
1164 {
1165 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1166 	struct reiserfs_journal_cnode *cn, *last;
1167 	cn = jl->j_realblock;
1168 
1169 	/* which is better, to lock once around the whole loop, or
1170 	 ** to lock for each call to remove_journal_hash?
1171 	 */
1172 	while (cn) {
1173 		if (cn->blocknr != 0) {
1174 			if (debug) {
1175 				reiserfs_warning(p_s_sb,
1176 						 "block %u, bh is %d, state %ld",
1177 						 cn->blocknr, cn->bh ? 1 : 0,
1178 						 cn->state);
1179 			}
1180 			cn->state = 0;
1181 			remove_journal_hash(p_s_sb, journal->j_list_hash_table,
1182 					    jl, cn->blocknr, 1);
1183 		}
1184 		last = cn;
1185 		cn = cn->next;
1186 		free_cnode(p_s_sb, last);
1187 	}
1188 	jl->j_realblock = NULL;
1189 }
1190 
1191 /*
1192 ** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
1193 ** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
1194 ** releasing blocks in this transaction for reuse as data blocks.
1195 ** called by flush_journal_list, before it calls remove_all_from_journal_list
1196 **
1197 */
1198 static int _update_journal_header_block(struct super_block *p_s_sb,
1199 					unsigned long offset,
1200 					unsigned long trans_id)
1201 {
1202 	struct reiserfs_journal_header *jh;
1203 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1204 
1205 	if (reiserfs_is_journal_aborted(journal))
1206 		return -EIO;
1207 
1208 	if (trans_id >= journal->j_last_flush_trans_id) {
1209 		if (buffer_locked((journal->j_header_bh))) {
1210 			wait_on_buffer((journal->j_header_bh));
1211 			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
1212 #ifdef CONFIG_REISERFS_CHECK
1213 				reiserfs_warning(p_s_sb,
1214 						 "journal-699: buffer write failed");
1215 #endif
1216 				return -EIO;
1217 			}
1218 		}
1219 		journal->j_last_flush_trans_id = trans_id;
1220 		journal->j_first_unflushed_offset = offset;
1221 		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
1222 							b_data);
1223 		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
1224 		jh->j_first_unflushed_offset = cpu_to_le32(offset);
1225 		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
1226 
1227 		if (reiserfs_barrier_flush(p_s_sb)) {
1228 			int ret;
1229 			lock_buffer(journal->j_header_bh);
1230 			ret = submit_barrier_buffer(journal->j_header_bh);
1231 			if (ret == -EOPNOTSUPP) {
1232 				set_buffer_uptodate(journal->j_header_bh);
1233 				disable_barrier(p_s_sb);
1234 				goto sync;
1235 			}
1236 			wait_on_buffer(journal->j_header_bh);
1237 			check_barrier_completion(p_s_sb, journal->j_header_bh);
1238 		} else {
1239 		      sync:
1240 			set_buffer_dirty(journal->j_header_bh);
1241 			sync_dirty_buffer(journal->j_header_bh);
1242 		}
1243 		if (!buffer_uptodate(journal->j_header_bh)) {
1244 			reiserfs_warning(p_s_sb,
1245 					 "journal-837: IO error during journal replay");
1246 			return -EIO;
1247 		}
1248 	}
1249 	return 0;
1250 }
1251 
1252 static int update_journal_header_block(struct super_block *p_s_sb,
1253 				       unsigned long offset,
1254 				       unsigned long trans_id)
1255 {
1256 	return _update_journal_header_block(p_s_sb, offset, trans_id);
1257 }
1258 
1259 /*
1260 ** flush any and all journal lists older than you are
1261 ** can only be called from flush_journal_list
1262 */
1263 static int flush_older_journal_lists(struct super_block *p_s_sb,
1264 				     struct reiserfs_journal_list *jl)
1265 {
1266 	struct list_head *entry;
1267 	struct reiserfs_journal_list *other_jl;
1268 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1269 	unsigned long trans_id = jl->j_trans_id;
1270 
1271 	/* we know we are the only ones flushing things, no extra race
1272 	 * protection is required.
1273 	 */
1274       restart:
1275 	entry = journal->j_journal_list.next;
1276 	/* Did we wrap? */
1277 	if (entry == &journal->j_journal_list)
1278 		return 0;
1279 	other_jl = JOURNAL_LIST_ENTRY(entry);
1280 	if (other_jl->j_trans_id < trans_id) {
1281 		BUG_ON(other_jl->j_refcount <= 0);
1282 		/* do not flush all */
1283 		flush_journal_list(p_s_sb, other_jl, 0);
1284 
1285 		/* other_jl is now deleted from the list */
1286 		goto restart;
1287 	}
1288 	return 0;
1289 }
1290 
1291 static void del_from_work_list(struct super_block *s,
1292 			       struct reiserfs_journal_list *jl)
1293 {
1294 	struct reiserfs_journal *journal = SB_JOURNAL(s);
1295 	if (!list_empty(&jl->j_working_list)) {
1296 		list_del_init(&jl->j_working_list);
1297 		journal->j_num_work_lists--;
1298 	}
1299 }
1300 
1301 /* flush a journal list, both commit and real blocks
1302 **
1303 ** always set flushall to 1, unless you are calling from inside
1304 ** flush_journal_list
1305 **
1306 ** IMPORTANT.  This can only be called while there are no journal writers,
1307 ** and the journal is locked.  That means it can only be called from
1308 ** do_journal_end, or by journal_release
1309 */
1310 static int flush_journal_list(struct super_block *s,
1311 			      struct reiserfs_journal_list *jl, int flushall)
1312 {
1313 	struct reiserfs_journal_list *pjl;
1314 	struct reiserfs_journal_cnode *cn, *last;
1315 	int count;
1316 	int was_jwait = 0;
1317 	int was_dirty = 0;
1318 	struct buffer_head *saved_bh;
1319 	unsigned long j_len_saved = jl->j_len;
1320 	struct reiserfs_journal *journal = SB_JOURNAL(s);
1321 	int err = 0;
1322 
1323 	BUG_ON(j_len_saved <= 0);
1324 
1325 	if (atomic_read(&journal->j_wcount) != 0) {
1326 		reiserfs_warning(s,
1327 				 "clm-2048: flush_journal_list called with wcount %d",
1328 				 atomic_read(&journal->j_wcount));
1329 	}
1330 	BUG_ON(jl->j_trans_id == 0);
1331 
1332 	/* if flushall == 0, the lock is already held */
1333 	if (flushall) {
1334 		down(&journal->j_flush_sem);
1335 	} else if (!down_trylock(&journal->j_flush_sem)) {
1336 		BUG();
1337 	}
1338 
1339 	count = 0;
1340 	if (j_len_saved > journal->j_trans_max) {
1341 		reiserfs_panic(s,
1342 			       "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
1343 			       j_len_saved, jl->j_trans_id);
1344 		return 0;
1345 	}
1346 
1347 	get_fs_excl();
1348 
1349 	/* if all the work is already done, get out of here */
1350 	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
1351 	    atomic_read(&(jl->j_commit_left)) <= 0) {
1352 		goto flush_older_and_return;
1353 	}
1354 
1355 	/* start by putting the commit list on disk.  This will also flush
1356 	 ** the commit lists of any older transactions
1357 	 */
1358 	flush_commit_list(s, jl, 1);
1359 
1360 	if (!(jl->j_state & LIST_DIRTY)
1361 	    && !reiserfs_is_journal_aborted(journal))
1362 		BUG();
1363 
1364 	/* are we done now? */
1365 	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
1366 	    atomic_read(&(jl->j_commit_left)) <= 0) {
1367 		goto flush_older_and_return;
1368 	}
1369 
1370 	/* loop through each cnode, see if we need to write it,
1371 	 ** or wait on a more recent transaction, or just ignore it
1372 	 */
1373 	if (atomic_read(&(journal->j_wcount)) != 0) {
1374 		reiserfs_panic(s,
1375 			       "journal-844: panic journal list is flushing, wcount is not 0\n");
1376 	}
1377 	cn = jl->j_realblock;
1378 	while (cn) {
1379 		was_jwait = 0;
1380 		was_dirty = 0;
1381 		saved_bh = NULL;
1382 		/* blocknr of 0 is no longer in the hash, ignore it */
1383 		if (cn->blocknr == 0) {
1384 			goto free_cnode;
1385 		}
1386 
1387 		/* This transaction failed commit. Don't write out to the disk */
1388 		if (!(jl->j_state & LIST_DIRTY))
1389 			goto free_cnode;
1390 
1391 		pjl = find_newer_jl_for_cn(cn);
1392 		/* the order is important here.  We check pjl to make sure we
1393 		 ** don't clear BH_JDirty_wait if we aren't the one writing this
1394 		 ** block to disk
1395 		 */
1396 		if (!pjl && cn->bh) {
1397 			saved_bh = cn->bh;
1398 
1399 			/* we do this to make sure nobody releases the buffer while
1400 			 ** we are working with it
1401 			 */
1402 			get_bh(saved_bh);
1403 
1404 			if (buffer_journal_dirty(saved_bh)) {
1405 				BUG_ON(!can_dirty(cn));
1406 				was_jwait = 1;
1407 				was_dirty = 1;
1408 			} else if (can_dirty(cn)) {
1409 				/* everything with !pjl && jwait should be writable */
1410 				BUG();
1411 			}
1412 		}
1413 
1414 		/* if someone has this block in a newer transaction, just make
1415 	 ** sure they are committed, and don't try writing it to disk
1416 		 */
1417 		if (pjl) {
1418 			if (atomic_read(&pjl->j_commit_left))
1419 				flush_commit_list(s, pjl, 1);
1420 			goto free_cnode;
1421 		}
1422 
1423 		/* bh == NULL when the block got to disk on its own, OR,
1424 		 ** the block got freed in a future transaction
1425 		 */
1426 		if (saved_bh == NULL) {
1427 			goto free_cnode;
1428 		}
1429 
1430 		/* this should never happen.  kupdate_one_transaction has this list
1431 		 ** locked while it works, so we should never see a buffer here that
1432 		 ** is not marked JDirty_wait
1433 		 */
1434 		if ((!was_jwait) && !buffer_locked(saved_bh)) {
1435 			reiserfs_warning(s,
1436 					 "journal-813: BAD! buffer %llu %cdirty %cjwait, "
1437 					 "not in a newer transaction",
1438 					 (unsigned long long)saved_bh->
1439 					 b_blocknr, was_dirty ? ' ' : '!',
1440 					 was_jwait ? ' ' : '!');
1441 		}
1442 		if (was_dirty) {
1443 			/* we inc again because saved_bh gets decremented at free_cnode */
1444 			get_bh(saved_bh);
1445 			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
1446 			lock_buffer(saved_bh);
1447 			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
1448 			if (buffer_dirty(saved_bh))
1449 				submit_logged_buffer(saved_bh);
1450 			else
1451 				unlock_buffer(saved_bh);
1452 			count++;
1453 		} else {
1454 			reiserfs_warning(s,
1455 					 "clm-2082: Unable to flush buffer %llu in %s",
1456 					 (unsigned long long)saved_bh->
1457 					 b_blocknr, __FUNCTION__);
1458 		}
1459 	      free_cnode:
1460 		last = cn;
1461 		cn = cn->next;
1462 		if (saved_bh) {
1463 			/* we incremented this to keep others from taking the buffer head away */
1464 			put_bh(saved_bh);
1465 			if (atomic_read(&(saved_bh->b_count)) < 0) {
1466 				reiserfs_warning(s,
1467 						 "journal-945: saved_bh->b_count < 0");
1468 			}
1469 		}
1470 	}
1471 	if (count > 0) {
1472 		cn = jl->j_realblock;
1473 		while (cn) {
1474 			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
1475 				if (!cn->bh) {
1476 					reiserfs_panic(s,
1477 						       "journal-1011: cn->bh is NULL\n");
1478 				}
1479 				wait_on_buffer(cn->bh);
1480 				if (!cn->bh) {
1481 					reiserfs_panic(s,
1482 						       "journal-1012: cn->bh is NULL\n");
1483 				}
1484 				if (unlikely(!buffer_uptodate(cn->bh))) {
1485 #ifdef CONFIG_REISERFS_CHECK
1486 					reiserfs_warning(s,
1487 							 "journal-949: buffer write failed\n");
1488 #endif
1489 					err = -EIO;
1490 				}
1491 				/* note, we must clear the JDirty_wait bit after the up to date
1492 				 ** check, otherwise we race against our flushpage routine
1493 				 */
1494 				BUG_ON(!test_clear_buffer_journal_dirty
1495 				       (cn->bh));
1496 
1497 				/* undo the inc from journal_mark_dirty */
1498 				put_bh(cn->bh);
1499 				brelse(cn->bh);
1500 			}
1501 			cn = cn->next;
1502 		}
1503 	}
1504 
1505 	if (err)
1506 		reiserfs_abort(s, -EIO,
1507 			       "Write error while pushing transaction to disk in %s",
1508 			       __FUNCTION__);
1509       flush_older_and_return:
1510 
1511 	/* before we can update the journal header block, we _must_ flush all
1512 	 ** real blocks from all older transactions to disk.  This is because
1513 	 ** once the header block is updated, this transaction will not be
1514 	 ** replayed after a crash
1515 	 */
1516 	if (flushall) {
1517 		flush_older_journal_lists(s, jl);
1518 	}
1519 
1520 	err = journal->j_errno;
1521 	/* before we can remove everything from the hash tables for this
1522 	 ** transaction, we must make sure it can never be replayed
1523 	 **
1524 	 ** since we are only called from do_journal_end, we know for sure there
1525 	 ** are no allocations going on while we are flushing journal lists.  So,
1526 	 ** we only need to update the journal header block for the last list
1527 	 ** being flushed
1528 	 */
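	/* note (our annotation): a transaction occupies one desc block, j_len
	 * log blocks, and one commit block, which is where the "+ 2" in the
	 * offset calculation below comes from */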
1529 	if (!err && flushall) {
1530 		err =
1531 		    update_journal_header_block(s,
1532 						(jl->j_start + jl->j_len +
1533 						 2) % SB_ONDISK_JOURNAL_SIZE(s),
1534 						jl->j_trans_id);
1535 		if (err)
1536 			reiserfs_abort(s, -EIO,
1537 				       "Write error while updating journal header in %s",
1538 				       __FUNCTION__);
1539 	}
1540 	remove_all_from_journal_list(s, jl, 0);
1541 	list_del_init(&jl->j_list);
1542 	journal->j_num_lists--;
1543 	del_from_work_list(s, jl);
1544 
1545 	if (journal->j_last_flush_id != 0 &&
1546 	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
1547 		reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
1548 				 journal->j_last_flush_id, jl->j_trans_id);
1549 	}
1550 	journal->j_last_flush_id = jl->j_trans_id;
1551 
1552 	/* not strictly required since we are freeing the list, but it should
1553 	 * help find code using dead lists later on
1554 	 */
1555 	jl->j_len = 0;
1556 	atomic_set(&(jl->j_nonzerolen), 0);
1557 	jl->j_start = 0;
1558 	jl->j_realblock = NULL;
1559 	jl->j_commit_bh = NULL;
1560 	jl->j_trans_id = 0;
1561 	jl->j_state = 0;
1562 	put_journal_list(s, jl);
1563 	if (flushall)
1564 		up(&journal->j_flush_sem);
1565 	put_fs_excl();
1566 	return err;
1567 }
1568 
1569 static int write_one_transaction(struct super_block *s,
1570 				 struct reiserfs_journal_list *jl,
1571 				 struct buffer_chunk *chunk)
1572 {
1573 	struct reiserfs_journal_cnode *cn;
1574 	int ret = 0;
1575 
1576 	jl->j_state |= LIST_TOUCHED;
1577 	del_from_work_list(s, jl);
1578 	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
1579 		return 0;
1580 	}
1581 
1582 	cn = jl->j_realblock;
1583 	while (cn) {
1584 		/* if the blocknr == 0, this has been cleared from the hash,
1585 		 ** skip it
1586 		 */
1587 		if (cn->blocknr == 0) {
1588 			goto next;
1589 		}
1590 		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
1591 			struct buffer_head *tmp_bh;
1592 			/* we can race against journal_mark_freed when we try
1593 			 * to lock_buffer(cn->bh), so we have to inc the buffer
1594 			 * count, and recheck things after locking
1595 			 */
1596 			tmp_bh = cn->bh;
1597 			get_bh(tmp_bh);
1598 			lock_buffer(tmp_bh);
1599 			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
1600 				if (!buffer_journal_dirty(tmp_bh) ||
1601 				    buffer_journal_prepared(tmp_bh))
1602 					BUG();
1603 				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
1604 				ret++;
1605 			} else {
1606 				/* note, cn->bh might be null now */
1607 				unlock_buffer(tmp_bh);
1608 			}
1609 			put_bh(tmp_bh);
1610 		}
1611 	      next:
1612 		cn = cn->next;
1613 		cond_resched();
1614 	}
1615 	return ret;
1616 }
1617 
1618 /* used by flush_commit_list */
1619 static int dirty_one_transaction(struct super_block *s,
1620 				 struct reiserfs_journal_list *jl)
1621 {
1622 	struct reiserfs_journal_cnode *cn;
1623 	struct reiserfs_journal_list *pjl;
1624 	int ret = 0;
1625 
1626 	jl->j_state |= LIST_DIRTY;
1627 	cn = jl->j_realblock;
1628 	while (cn) {
1629 		/* look for a more recent transaction that logged this
1630 		 ** buffer.  Only the most recent transaction with a buffer in
1631 		 ** it is allowed to send that buffer to disk
1632 		 */
1633 		pjl = find_newer_jl_for_cn(cn);
1634 		if (!pjl && cn->blocknr && cn->bh
1635 		    && buffer_journal_dirty(cn->bh)) {
1636 			BUG_ON(!can_dirty(cn));
1637 			/* if the buffer is prepared, it will either be logged
1638 			 * or restored.  If restored, we need to make sure
1639 			 * it actually gets marked dirty
1640 			 */
1641 			clear_buffer_journal_new(cn->bh);
1642 			if (buffer_journal_prepared(cn->bh)) {
1643 				set_buffer_journal_restore_dirty(cn->bh);
1644 			} else {
1645 				set_buffer_journal_test(cn->bh);
1646 				mark_buffer_dirty(cn->bh);
1647 			}
1648 		}
1649 		cn = cn->next;
1650 	}
1651 	return ret;
1652 }
1653 
1654 static int kupdate_transactions(struct super_block *s,
1655 				struct reiserfs_journal_list *jl,
1656 				struct reiserfs_journal_list **next_jl,
1657 				unsigned long *next_trans_id,
1658 				int num_blocks, int num_trans)
1659 {
1660 	int ret = 0;
1661 	int written = 0;
1662 	int transactions_flushed = 0;
1663 	unsigned long orig_trans_id = jl->j_trans_id;
1664 	struct buffer_chunk chunk;
1665 	struct list_head *entry;
1666 	struct reiserfs_journal *journal = SB_JOURNAL(s);
1667 	chunk.nr = 0;
1668 
1669 	down(&journal->j_flush_sem);
1670 	if (!journal_list_still_alive(s, orig_trans_id)) {
1671 		goto done;
1672 	}
1673 
1674 	/* we've got j_flush_sem held, nobody is going to delete any
1675 	 * of these lists out from underneath us
1676 	 */
1677 	while ((num_trans && transactions_flushed < num_trans) ||
1678 	       (!num_trans && written < num_blocks)) {
1679 
1680 		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
1681 		    atomic_read(&jl->j_commit_left)
1682 		    || !(jl->j_state & LIST_DIRTY)) {
1683 			del_from_work_list(s, jl);
1684 			break;
1685 		}
1686 		ret = write_one_transaction(s, jl, &chunk);
1687 
1688 		if (ret < 0)
1689 			goto done;
1690 		transactions_flushed++;
1691 		written += ret;
1692 		entry = jl->j_list.next;
1693 
1694 		/* did we wrap? */
1695 		if (entry == &journal->j_journal_list) {
1696 			break;
1697 		}
1698 		jl = JOURNAL_LIST_ENTRY(entry);
1699 
1700 		/* don't bother with older transactions */
1701 		if (jl->j_trans_id <= orig_trans_id)
1702 			break;
1703 	}
1704 	if (chunk.nr) {
1705 		write_chunk(&chunk);
1706 	}
1707 
1708       done:
1709 	up(&journal->j_flush_sem);
1710 	return ret;
1711 }
1712 
1713 /* O_SYNC and fsync heavy applications tend to use up
1714 ** all the journal list slots with tiny transactions.  These
1715 ** trigger lots and lots of calls to update the header block, which
1716 ** adds seeks and slows things down.
1717 **
1718 ** This function tries to clear out a large chunk of the journal lists
1719 ** at once, which makes everything faster since only the newest journal
1720 ** list updates the header block
1721 */
1722 static int flush_used_journal_lists(struct super_block *s,
1723 				    struct reiserfs_journal_list *jl)
1724 {
1725 	unsigned long len = 0;
1726 	unsigned long cur_len;
1727 	int ret;
1728 	int i;
1729 	int limit = 256;
1730 	struct reiserfs_journal_list *tjl;
1731 	struct reiserfs_journal_list *flush_jl;
1732 	unsigned long trans_id;
1733 	struct reiserfs_journal *journal = SB_JOURNAL(s);
1734 
1735 	flush_jl = tjl = jl;
1736 
1737 	/* in data logging mode, try harder to flush a lot of blocks */
1738 	if (reiserfs_data_log(s))
1739 		limit = 1024;
1740 	/* flush for 256 transactions or limit blocks, whichever comes first */
1741 	for (i = 0; i < 256 && len < limit; i++) {
1742 		if (atomic_read(&tjl->j_commit_left) ||
1743 		    tjl->j_trans_id < jl->j_trans_id) {
1744 			break;
1745 		}
1746 		cur_len = atomic_read(&tjl->j_nonzerolen);
1747 		if (cur_len > 0) {
1748 			tjl->j_state &= ~LIST_TOUCHED;
1749 		}
1750 		len += cur_len;
1751 		flush_jl = tjl;
1752 		if (tjl->j_list.next == &journal->j_journal_list)
1753 			break;
1754 		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1755 	}
1756 	/* try to find a group of blocks we can flush across all the
1757 	 ** transactions, but only bother if we've actually spanned
1758 	 ** across multiple lists
1759 	 */
1760 	if (flush_jl != jl) {
1761 		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1762 	}
1763 	flush_journal_list(s, flush_jl, 1);
1764 	return 0;
1765 }
1766 
1767 /*
1768 ** removes any nodes in table that match the given block number and super block.
1769 ** only touches the hnext and hprev pointers.
1770 */
1771 void remove_journal_hash(struct super_block *sb,
1772 			 struct reiserfs_journal_cnode **table,
1773 			 struct reiserfs_journal_list *jl,
1774 			 unsigned long block, int remove_freed)
1775 {
1776 	struct reiserfs_journal_cnode *cur;
1777 	struct reiserfs_journal_cnode **head;
1778 
1779 	head = &(journal_hash(table, sb, block));
1780 	if (!head) {
1781 		return;
1782 	}
1783 	cur = *head;
1784 	while (cur) {
1785 		if (cur->blocknr == block && cur->sb == sb
1786 		    && (jl == NULL || jl == cur->jlist)
1787 		    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
1788 			if (cur->hnext) {
1789 				cur->hnext->hprev = cur->hprev;
1790 			}
1791 			if (cur->hprev) {
1792 				cur->hprev->hnext = cur->hnext;
1793 			} else {
1794 				*head = cur->hnext;
1795 			}
1796 			cur->blocknr = 0;
1797 			cur->sb = NULL;
1798 			cur->state = 0;
1799 			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
1800 				atomic_dec(&(cur->jlist->j_nonzerolen));
1801 			cur->bh = NULL;
1802 			cur->jlist = NULL;
1803 		}
1804 		cur = cur->hnext;
1805 	}
1806 }
1807 
1808 static void free_journal_ram(struct super_block *p_s_sb)
1809 {
1810 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1811 	reiserfs_kfree(journal->j_current_jl,
1812 		       sizeof(struct reiserfs_journal_list), p_s_sb);
1813 	journal->j_num_lists--;
1814 
1815 	vfree(journal->j_cnode_free_orig);
1816 	free_list_bitmaps(p_s_sb, journal->j_list_bitmap);
1817 	free_bitmap_nodes(p_s_sb);	/* must be after free_list_bitmaps */
1818 	if (journal->j_header_bh) {
1819 		brelse(journal->j_header_bh);
1820 	}
1821 	/* j_header_bh is on the journal dev, make sure not to release the journal
1822 	 * dev until we brelse j_header_bh
1823 	 */
1824 	release_journal_dev(p_s_sb, journal);
1825 	vfree(journal);
1826 }
1827 
1828 /*
1829 ** call on unmount.  Only set error to 1 if you haven't made your way out
1830 ** of read_super() yet.  Any other caller must keep error at 0.
1831 */
1832 static int do_journal_release(struct reiserfs_transaction_handle *th,
1833 			      struct super_block *p_s_sb, int error)
1834 {
1835 	struct reiserfs_transaction_handle myth;
1836 	int flushed = 0;
1837 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1838 
1839 	/* we only want to flush out transactions if we were called with error == 0
1840 	 */
1841 	if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
1842 		/* end the current trans */
1843 		BUG_ON(!th->t_trans_id);
1844 		do_journal_end(th, p_s_sb, 10, FLUSH_ALL);
1845 
1846 		/* make sure something gets logged to force our way into the flush code */
1847 		if (!journal_join(&myth, p_s_sb, 1)) {
1848 			reiserfs_prepare_for_journal(p_s_sb,
1849 						     SB_BUFFER_WITH_SB(p_s_sb),
1850 						     1);
1851 			journal_mark_dirty(&myth, p_s_sb,
1852 					   SB_BUFFER_WITH_SB(p_s_sb));
1853 			do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1854 			flushed = 1;
1855 		}
1856 	}
1857 
1858 	/* this also catches errors during the do_journal_end above */
1859 	if (!error && reiserfs_is_journal_aborted(journal)) {
1860 		memset(&myth, 0, sizeof(myth));
1861 		if (!journal_join_abort(&myth, p_s_sb, 1)) {
1862 			reiserfs_prepare_for_journal(p_s_sb,
1863 						     SB_BUFFER_WITH_SB(p_s_sb),
1864 						     1);
1865 			journal_mark_dirty(&myth, p_s_sb,
1866 					   SB_BUFFER_WITH_SB(p_s_sb));
1867 			do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1868 		}
1869 	}
1870 
1871 	reiserfs_mounted_fs_count--;
1872 	/* wait for all commits to finish */
1873 	cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
1874 	flush_workqueue(commit_wq);
1875 	if (!reiserfs_mounted_fs_count) {
1876 		destroy_workqueue(commit_wq);
1877 		commit_wq = NULL;
1878 	}
1879 
1880 	free_journal_ram(p_s_sb);
1881 
1882 	return 0;
1883 }
1884 
1885 /*
1886 ** call on unmount.  flush all journal trans, release all alloc'd ram
1887 */
1888 int journal_release(struct reiserfs_transaction_handle *th,
1889 		    struct super_block *p_s_sb)
1890 {
1891 	return do_journal_release(th, p_s_sb, 0);
1892 }
1893 
1894 /*
1895 ** only call from an error condition inside reiserfs_read_super!
1896 */
1897 int journal_release_error(struct reiserfs_transaction_handle *th,
1898 			  struct super_block *p_s_sb)
1899 {
1900 	return do_journal_release(th, p_s_sb, 1);
1901 }
1902 
1903 /* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
1904 static int journal_compare_desc_commit(struct super_block *p_s_sb,
1905 				       struct reiserfs_journal_desc *desc,
1906 				       struct reiserfs_journal_commit *commit)
1907 {
1908 	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
1909 	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
1910 	    get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
1911 	    get_commit_trans_len(commit) <= 0) {
1912 		return 1;
1913 	}
1914 	return 0;
1915 }
1916 
1917 /* returns 0 if it did not find a description block
1918 ** returns -1 if it found a corrupt commit block
1919 ** returns 1 if both desc and commit were valid
1920 */
1921 static int journal_transaction_is_valid(struct super_block *p_s_sb,
1922 					struct buffer_head *d_bh,
1923 					unsigned long *oldest_invalid_trans_id,
1924 					unsigned long *newest_mount_id)
1925 {
1926 	struct reiserfs_journal_desc *desc;
1927 	struct reiserfs_journal_commit *commit;
1928 	struct buffer_head *c_bh;
1929 	unsigned long offset;
1930 
1931 	if (!d_bh)
1932 		return 0;
1933 
1934 	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
1935 	if (get_desc_trans_len(desc) > 0
1936 	    && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
1937 		if (oldest_invalid_trans_id && *oldest_invalid_trans_id
1938 		    && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
1939 			reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1940 				       "journal-986: transaction "
1941 				       "is valid returning because trans_id %d is greater than "
1942 				       "oldest_invalid %lu",
1943 				       get_desc_trans_id(desc),
1944 				       *oldest_invalid_trans_id);
1945 			return 0;
1946 		}
1947 		if (newest_mount_id
1948 		    && *newest_mount_id > get_desc_mount_id(desc)) {
1949 			reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1950 				       "journal-1087: transaction "
1951 				       "is valid returning because mount_id %d is less than "
1952 				       "newest_mount_id %lu",
1953 				       get_desc_mount_id(desc),
1954 				       *newest_mount_id);
1955 			return -1;
1956 		}
1957 		if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
1958 			reiserfs_warning(p_s_sb,
1959 					 "journal-2018: Bad transaction length %d encountered, ignoring transaction",
1960 					 get_desc_trans_len(desc));
1961 			return -1;
1962 		}
1963 		offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
1964 
1965 		/* ok, we have a journal description block, let's see if the transaction was valid */
1966 		c_bh =
1967 		    journal_bread(p_s_sb,
1968 				  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
1969 				  ((offset + get_desc_trans_len(desc) +
1970 				    1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
1971 		if (!c_bh)
1972 			return 0;
1973 		commit = (struct reiserfs_journal_commit *)c_bh->b_data;
1974 		if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
1975 			reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1976 				       "journal_transaction_is_valid, commit offset %ld had bad "
1977 				       "time %d or length %d",
1978 				       c_bh->b_blocknr -
1979 				       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1980 				       get_commit_trans_id(commit),
1981 				       get_commit_trans_len(commit));
1982 			brelse(c_bh);
1983 			if (oldest_invalid_trans_id) {
1984 				*oldest_invalid_trans_id =
1985 				    get_desc_trans_id(desc);
1986 				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1987 					       "journal-1004: "
1988 					       "transaction_is_valid setting oldest invalid trans_id "
1989 					       "to %d",
1990 					       get_desc_trans_id(desc));
1991 			}
1992 			return -1;
1993 		}
1994 		brelse(c_bh);
1995 		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1996 			       "journal-1006: found valid "
1997 			       "transaction start offset %llu, len %d id %d",
1998 			       d_bh->b_blocknr -
1999 			       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2000 			       get_desc_trans_len(desc),
2001 			       get_desc_trans_id(desc));
2002 		return 1;
2003 	} else {
2004 		return 0;
2005 	}
2006 }
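
/* A minimal sketch of the commit-block arithmetic used above (the helper
** name is illustrative only, not part of this file): the commit block for a
** transaction whose desc block sits at journal offset `offset` with length
** `len` lives at (offset + len + 1) modulo the on-disk journal size, so a
** transaction that starts near the end of the log wraps back to the front.
*/
static inline unsigned long commit_offset_sketch(struct super_block *p_s_sb,
						 unsigned long offset,
						 unsigned long len)
{
	/* e.g. journal size 8192, desc at offset 8190, len 5 -> commit at 4 */
	return (offset + len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
}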
2007 
2008 static void brelse_array(struct buffer_head **heads, int num)
2009 {
2010 	int i;
2011 	for (i = 0; i < num; i++) {
2012 		brelse(heads[i]);
2013 	}
2014 }
2015 
2016 /*
2017 ** given the start, and values for the oldest acceptable transactions,
2018 ** this either reads in and replays a transaction, or returns because the transaction
2019 ** is invalid or too old.
2020 */
2021 static int journal_read_transaction(struct super_block *p_s_sb,
2022 				    unsigned long cur_dblock,
2023 				    unsigned long oldest_start,
2024 				    unsigned long oldest_trans_id,
2025 				    unsigned long newest_mount_id)
2026 {
2027 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2028 	struct reiserfs_journal_desc *desc;
2029 	struct reiserfs_journal_commit *commit;
2030 	unsigned long trans_id = 0;
2031 	struct buffer_head *c_bh;
2032 	struct buffer_head *d_bh;
2033 	struct buffer_head **log_blocks = NULL;
2034 	struct buffer_head **real_blocks = NULL;
2035 	unsigned long trans_offset;
2036 	int i;
2037 	int trans_half;
2038 
2039 	d_bh = journal_bread(p_s_sb, cur_dblock);
2040 	if (!d_bh)
2041 		return 1;
2042 	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2043 	trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2044 	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
2045 		       "journal_read_transaction, offset %llu, len %d mount_id %d",
2046 		       d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2047 		       get_desc_trans_len(desc), get_desc_mount_id(desc));
2048 	if (get_desc_trans_id(desc) < oldest_trans_id) {
2049 		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
2050 			       "journal_read_trans skipping because %lu is too old",
2051 			       cur_dblock -
2052 			       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2053 		brelse(d_bh);
2054 		return 1;
2055 	}
2056 	if (get_desc_mount_id(desc) != newest_mount_id) {
2057 		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
2058 			       "journal_read_trans skipping because %d is != "
2059 			       "newest_mount_id %lu", get_desc_mount_id(desc),
2060 			       newest_mount_id);
2061 		brelse(d_bh);
2062 		return 1;
2063 	}
2064 	c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2065 			     ((trans_offset + get_desc_trans_len(desc) + 1) %
2066 			      SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2067 	if (!c_bh) {
2068 		brelse(d_bh);
2069 		return 1;
2070 	}
2071 	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2072 	if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2073 		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2074 			       "journal_read_transaction, "
2075 			       "commit offset %llu had bad time %d or length %d",
2076 			       c_bh->b_blocknr -
2077 			       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2078 			       get_commit_trans_id(commit),
2079 			       get_commit_trans_len(commit));
2080 		brelse(c_bh);
2081 		brelse(d_bh);
2082 		return 1;
2083 	}
2084 	trans_id = get_desc_trans_id(desc);
2085 	/* now we know we've got a good transaction, and it was inside the valid time ranges */
2086 	log_blocks =
2087 	    reiserfs_kmalloc(get_desc_trans_len(desc) *
2088 			     sizeof(struct buffer_head *), GFP_NOFS, p_s_sb);
2089 	real_blocks =
2090 	    reiserfs_kmalloc(get_desc_trans_len(desc) *
2091 			     sizeof(struct buffer_head *), GFP_NOFS, p_s_sb);
2092 	if (!log_blocks || !real_blocks) {
2093 		brelse(c_bh);
2094 		brelse(d_bh);
2095 		reiserfs_kfree(log_blocks,
2096 			       get_desc_trans_len(desc) *
2097 			       sizeof(struct buffer_head *), p_s_sb);
2098 		reiserfs_kfree(real_blocks,
2099 			       get_desc_trans_len(desc) *
2100 			       sizeof(struct buffer_head *), p_s_sb);
2101 		reiserfs_warning(p_s_sb,
2102 				 "journal-1169: kmalloc failed, unable to mount FS");
2103 		return -1;
2104 	}
2105 	/* get all the buffer heads */
2106 	trans_half = journal_trans_half(p_s_sb->s_blocksize);
2107 	for (i = 0; i < get_desc_trans_len(desc); i++) {
2108 		log_blocks[i] =
2109 		    journal_getblk(p_s_sb,
2110 				   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2111 				   (trans_offset + 1 +
2112 				    i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2113 		if (i < trans_half) {
2114 			real_blocks[i] =
2115 			    sb_getblk(p_s_sb,
2116 				      le32_to_cpu(desc->j_realblock[i]));
2117 		} else {
2118 			real_blocks[i] =
2119 			    sb_getblk(p_s_sb,
2120 				      le32_to_cpu(commit->
2121 						  j_realblock[i - trans_half]));
2122 		}
2123 		if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
2124 			reiserfs_warning(p_s_sb,
2125 					 "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
2126 			goto abort_replay;
2127 		}
2128 		/* make sure we don't try to replay onto log or reserved area */
2129 		if (is_block_in_log_or_reserved_area
2130 		    (p_s_sb, real_blocks[i]->b_blocknr)) {
2131 			reiserfs_warning(p_s_sb,
2132 					 "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block");
2133 		      abort_replay:
2134 			brelse_array(log_blocks, i);
2135 			brelse_array(real_blocks, i);
2136 			brelse(c_bh);
2137 			brelse(d_bh);
2138 			reiserfs_kfree(log_blocks,
2139 				       get_desc_trans_len(desc) *
2140 				       sizeof(struct buffer_head *), p_s_sb);
2141 			reiserfs_kfree(real_blocks,
2142 				       get_desc_trans_len(desc) *
2143 				       sizeof(struct buffer_head *), p_s_sb);
2144 			return -1;
2145 		}
2146 	}
2147 	/* read in the log blocks, memcpy to the corresponding real block */
2148 	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2149 	for (i = 0; i < get_desc_trans_len(desc); i++) {
2150 		wait_on_buffer(log_blocks[i]);
2151 		if (!buffer_uptodate(log_blocks[i])) {
2152 			reiserfs_warning(p_s_sb,
2153 					 "journal-1212: REPLAY FAILURE fsck required! buffer read failed");
2154 			brelse_array(log_blocks + i,
2155 				     get_desc_trans_len(desc) - i);
2156 			brelse_array(real_blocks, get_desc_trans_len(desc));
2157 			brelse(c_bh);
2158 			brelse(d_bh);
2159 			reiserfs_kfree(log_blocks,
2160 				       get_desc_trans_len(desc) *
2161 				       sizeof(struct buffer_head *), p_s_sb);
2162 			reiserfs_kfree(real_blocks,
2163 				       get_desc_trans_len(desc) *
2164 				       sizeof(struct buffer_head *), p_s_sb);
2165 			return -1;
2166 		}
2167 		memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2168 		       real_blocks[i]->b_size);
2169 		set_buffer_uptodate(real_blocks[i]);
2170 		brelse(log_blocks[i]);
2171 	}
2172 	/* flush out the real blocks */
2173 	for (i = 0; i < get_desc_trans_len(desc); i++) {
2174 		set_buffer_dirty(real_blocks[i]);
2175 		ll_rw_block(WRITE, 1, real_blocks + i);
2176 	}
2177 	for (i = 0; i < get_desc_trans_len(desc); i++) {
2178 		wait_on_buffer(real_blocks[i]);
2179 		if (!buffer_uptodate(real_blocks[i])) {
2180 			reiserfs_warning(p_s_sb,
2181 					 "journal-1226: REPLAY FAILURE, fsck required! buffer write failed");
2182 			brelse_array(real_blocks + i,
2183 				     get_desc_trans_len(desc) - i);
2184 			brelse(c_bh);
2185 			brelse(d_bh);
2186 			reiserfs_kfree(log_blocks,
2187 				       get_desc_trans_len(desc) *
2188 				       sizeof(struct buffer_head *), p_s_sb);
2189 			reiserfs_kfree(real_blocks,
2190 				       get_desc_trans_len(desc) *
2191 				       sizeof(struct buffer_head *), p_s_sb);
2192 			return -1;
2193 		}
2194 		brelse(real_blocks[i]);
2195 	}
2196 	cur_dblock =
2197 	    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2198 	    ((trans_offset + get_desc_trans_len(desc) +
2199 	      2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2200 	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2201 		       "journal-1095: setting journal " "start to offset %ld",
2202 		       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2203 
2204 	/* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2205 	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2206 	journal->j_last_flush_trans_id = trans_id;
2207 	journal->j_trans_id = trans_id + 1;
2208 	brelse(c_bh);
2209 	brelse(d_bh);
2210 	reiserfs_kfree(log_blocks,
2211 		       le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *),
2212 		       p_s_sb);
2213 	reiserfs_kfree(real_blocks,
2214 		       le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *),
2215 		       p_s_sb);
2216 	return 0;
2217 }
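
/* A sketch of the desc/commit split used during replay above (the helper
** name is illustrative only): a transaction longer than journal_trans_half()
** blocks keeps the first half of its real block numbers in the description
** block and the remainder in the commit block.
*/
static inline __u32 real_blocknr_sketch(struct reiserfs_journal_desc *desc,
					struct reiserfs_journal_commit *commit,
					int i, int trans_half)
{
	if (i < trans_half)
		return le32_to_cpu(desc->j_realblock[i]);
	return le32_to_cpu(commit->j_realblock[i - trans_half]);
}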
2218 
2219 /* This function reads blocks of bufsize size, starting from block and going
2220    up to max_block (but no more than BUFNR blocks at a time). This proved to improve
2221    mounting speed on self-rebuilding raid5 arrays at least.
2222    Right now it is only used from journal code. But later we might use it
2223    from other places.
2224    Note: Do not use journal_getblk/sb_getblk functions here! */
2225 static struct buffer_head *reiserfs_breada(struct block_device *dev, int block,
2226 					   int bufsize, unsigned int max_block)
2227 {
2228 	struct buffer_head *bhlist[BUFNR];
2229 	unsigned int blocks = BUFNR;
2230 	struct buffer_head *bh;
2231 	int i, j;
2232 
2233 	bh = __getblk(dev, block, bufsize);
2234 	if (buffer_uptodate(bh))
2235 		return (bh);
2236 
2237 	if (block + BUFNR > max_block) {
2238 		blocks = max_block - block;
2239 	}
2240 	bhlist[0] = bh;
2241 	j = 1;
2242 	for (i = 1; i < blocks; i++) {
2243 		bh = __getblk(dev, block + i, bufsize);
2244 		if (buffer_uptodate(bh)) {
2245 			brelse(bh);
2246 			break;
2247 		} else
2248 			bhlist[j++] = bh;
2249 	}
2250 	ll_rw_block(READ, j, bhlist);
2251 	for (i = 1; i < j; i++)
2252 		brelse(bhlist[i]);
2253 	bh = bhlist[0];
2254 	wait_on_buffer(bh);
2255 	if (buffer_uptodate(bh))
2256 		return bh;
2257 	brelse(bh);
2258 	return NULL;
2259 }
2260 
2261 /*
2262 ** read and replay the log
2263 ** on a clean unmount, the journal header's next unflushed pointer will point to an invalid
2264 ** transaction.  This tests that before finding all the transactions in the log, which makes normal mount times fast.
2265 **
2266 ** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
2267 **
2268 ** On exit, it sets things up so the first transaction will work correctly.
2269 */
2270 static int journal_read(struct super_block *p_s_sb)
2271 {
2272 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2273 	struct reiserfs_journal_desc *desc;
2274 	unsigned long oldest_trans_id = 0;
2275 	unsigned long oldest_invalid_trans_id = 0;
2276 	time_t start;
2277 	unsigned long oldest_start = 0;
2278 	unsigned long cur_dblock = 0;
2279 	unsigned long newest_mount_id = 9;
2280 	struct buffer_head *d_bh;
2281 	struct reiserfs_journal_header *jh;
2282 	int valid_journal_header = 0;
2283 	int replay_count = 0;
2284 	int continue_replay = 1;
2285 	int ret;
2286 	char b[BDEVNAME_SIZE];
2287 
2288 	cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2289 	reiserfs_info(p_s_sb, "checking transaction log (%s)\n",
2290 		      bdevname(journal->j_dev_bd, b));
2291 	start = get_seconds();
2292 
2293 	/* step 1, read in the journal header block.  Check the transaction it says
2294 	 ** is the first unflushed, and if that transaction is not valid,
2295 	 ** replay is done
2296 	 */
2297 	journal->j_header_bh = journal_bread(p_s_sb,
2298 					     SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)
2299 					     + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2300 	if (!journal->j_header_bh) {
2301 		return 1;
2302 	}
2303 	jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
2304 	if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
2305 	    le32_to_cpu(jh->j_first_unflushed_offset) <
2306 	    SB_ONDISK_JOURNAL_SIZE(p_s_sb)
2307 	    && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2308 		oldest_start =
2309 		    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2310 		    le32_to_cpu(jh->j_first_unflushed_offset);
2311 		oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2312 		newest_mount_id = le32_to_cpu(jh->j_mount_id);
2313 		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2314 			       "journal-1153: found in "
2315 			       "header: first_unflushed_offset %d, last_flushed_trans_id "
2316 			       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2317 			       le32_to_cpu(jh->j_last_flush_trans_id));
2318 		valid_journal_header = 1;
2319 
2320 		/* now, we try to read the first unflushed offset.  If it is not valid,
2321 		 ** there is nothing more we can do, and it makes no sense to read
2322 		 ** through the whole log.
2323 		 */
2324 		d_bh =
2325 		    journal_bread(p_s_sb,
2326 				  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2327 				  le32_to_cpu(jh->j_first_unflushed_offset));
2328 		ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL);
2329 		if (!ret) {
2330 			continue_replay = 0;
2331 		}
2332 		brelse(d_bh);
2333 		goto start_log_replay;
2334 	}
2335 
2336 	if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
2337 		reiserfs_warning(p_s_sb,
2338 				 "clm-2076: device is readonly, unable to replay log");
2339 		return -1;
2340 	}
2341 
2342 	/* ok, there are transactions that need to be replayed.  start with the first log block, find
2343 	 ** all the valid transactions, and pick out the oldest.
2344 	 */
2345 	while (continue_replay
2346 	       && cur_dblock <
2347 	       (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2348 		SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
2349 		/* Note that the blocksize of the primary fs device and the
2350 		   journal device are required to be the same */
2351 		d_bh =
2352 		    reiserfs_breada(journal->j_dev_bd, cur_dblock,
2353 				    p_s_sb->s_blocksize,
2354 				    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2355 				    SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2356 		ret =
2357 		    journal_transaction_is_valid(p_s_sb, d_bh,
2358 						 &oldest_invalid_trans_id,
2359 						 &newest_mount_id);
2360 		if (ret == 1) {
2361 			desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2362 			if (oldest_start == 0) {	/* init all oldest_ values */
2363 				oldest_trans_id = get_desc_trans_id(desc);
2364 				oldest_start = d_bh->b_blocknr;
2365 				newest_mount_id = get_desc_mount_id(desc);
2366 				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2367 					       "journal-1179: Setting "
2368 					       "oldest_start to offset %llu, trans_id %lu",
2369 					       oldest_start -
2370 					       SB_ONDISK_JOURNAL_1st_BLOCK
2371 					       (p_s_sb), oldest_trans_id);
2372 			} else if (oldest_trans_id > get_desc_trans_id(desc)) {
2373 				/* one we just read was older */
2374 				oldest_trans_id = get_desc_trans_id(desc);
2375 				oldest_start = d_bh->b_blocknr;
2376 				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2377 					       "journal-1180: Resetting "
2378 					       "oldest_start to offset %lu, trans_id %lu",
2379 					       oldest_start -
2380 					       SB_ONDISK_JOURNAL_1st_BLOCK
2381 					       (p_s_sb), oldest_trans_id);
2382 			}
2383 			if (newest_mount_id < get_desc_mount_id(desc)) {
2384 				newest_mount_id = get_desc_mount_id(desc);
2385 				reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2386 					       "journal-1299: Setting "
2387 					       "newest_mount_id to %d",
2388 					       get_desc_mount_id(desc));
2389 			}
2390 			cur_dblock += get_desc_trans_len(desc) + 2;
2391 		} else {
2392 			cur_dblock++;
2393 		}
2394 		brelse(d_bh);
2395 	}
2396 
2397       start_log_replay:
2398 	cur_dblock = oldest_start;
2399 	if (oldest_trans_id) {
2400 		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2401 			       "journal-1206: Starting replay "
2402 			       "from offset %llu, trans_id %lu",
2403 			       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2404 			       oldest_trans_id);
2405 
2406 	}
2407 	replay_count = 0;
2408 	while (continue_replay && oldest_trans_id > 0) {
2409 		ret =
2410 		    journal_read_transaction(p_s_sb, cur_dblock, oldest_start,
2411 					     oldest_trans_id, newest_mount_id);
2412 		if (ret < 0) {
2413 			return ret;
2414 		} else if (ret != 0) {
2415 			break;
2416 		}
2417 		cur_dblock =
2418 		    SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start;
2419 		replay_count++;
2420 		if (cur_dblock == oldest_start)
2421 			break;
2422 	}
2423 
2424 	if (oldest_trans_id == 0) {
2425 		reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2426 			       "journal-1225: No valid " "transactions found");
2427 	}
2428 	/* j_start does not get set correctly if we don't replay any transactions.
2429 	 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
2430 	 ** copy the trans_id from the header
2431 	 */
2432 	if (valid_journal_header && replay_count == 0) {
2433 		journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2434 		journal->j_trans_id =
2435 		    le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2436 		journal->j_last_flush_trans_id =
2437 		    le32_to_cpu(jh->j_last_flush_trans_id);
2438 		journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2439 	} else {
2440 		journal->j_mount_id = newest_mount_id + 1;
2441 	}
2442 	reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2443 		       "newest_mount_id to %lu", journal->j_mount_id);
2444 	journal->j_first_unflushed_offset = journal->j_start;
2445 	if (replay_count > 0) {
2446 		reiserfs_info(p_s_sb,
2447 			      "replayed %d transactions in %lu seconds\n",
2448 			      replay_count, get_seconds() - start);
2449 	}
2450 	if (!bdev_read_only(p_s_sb->s_bdev) &&
2451 	    _update_journal_header_block(p_s_sb, journal->j_start,
2452 					 journal->j_last_flush_trans_id)) {
2453 		/* replay failed, caller must call free_journal_ram and abort
2454 		 ** the mount
2455 		 */
2456 		return -1;
2457 	}
2458 	return 0;
2459 }
2460 
2461 static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2462 {
2463 	struct reiserfs_journal_list *jl;
2464       retry:
2465 	jl = reiserfs_kmalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS,
2466 			      s);
2467 	if (!jl) {
2468 		yield();
2469 		goto retry;
2470 	}
2471 	memset(jl, 0, sizeof(*jl));
2472 	INIT_LIST_HEAD(&jl->j_list);
2473 	INIT_LIST_HEAD(&jl->j_working_list);
2474 	INIT_LIST_HEAD(&jl->j_tail_bh_list);
2475 	INIT_LIST_HEAD(&jl->j_bh_list);
2476 	sema_init(&jl->j_commit_lock, 1);
2477 	SB_JOURNAL(s)->j_num_lists++;
2478 	get_journal_list(jl);
2479 	return jl;
2480 }
2481 
2482 static void journal_list_init(struct super_block *p_s_sb)
2483 {
2484 	SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
2485 }
2486 
2487 static int release_journal_dev(struct super_block *super,
2488 			       struct reiserfs_journal *journal)
2489 {
2490 	int result;
2491 
2492 	result = 0;
2493 
2494 	if (journal->j_dev_file != NULL) {
2495 		result = filp_close(journal->j_dev_file, NULL);
2496 		journal->j_dev_file = NULL;
2497 		journal->j_dev_bd = NULL;
2498 	} else if (journal->j_dev_bd != NULL) {
2499 		result = blkdev_put(journal->j_dev_bd);
2500 		journal->j_dev_bd = NULL;
2501 	}
2502 
2503 	if (result != 0) {
2504 		reiserfs_warning(super,
2505 				 "sh-457: release_journal_dev: Cannot release journal device: %i",
2506 				 result);
2507 	}
2508 	return result;
2509 }
2510 
2511 static int journal_init_dev(struct super_block *super,
2512 			    struct reiserfs_journal *journal,
2513 			    const char *jdev_name)
2514 {
2515 	int result;
2516 	dev_t jdev;
2517 	int blkdev_mode = FMODE_READ | FMODE_WRITE;
2518 	char b[BDEVNAME_SIZE];
2519 
2520 	result = 0;
2521 
2522 	journal->j_dev_bd = NULL;
2523 	journal->j_dev_file = NULL;
2524 	jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2525 	    new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2526 
2527 	if (bdev_read_only(super->s_bdev))
2528 		blkdev_mode = FMODE_READ;
2529 
2530 	/* there is no "jdev" option; the journal device comes from the super block */
2531 	if ((!jdev_name || !jdev_name[0])) {
2532 		journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
2533 		if (IS_ERR(journal->j_dev_bd)) {
2534 			result = PTR_ERR(journal->j_dev_bd);
2535 			journal->j_dev_bd = NULL;
2536 			reiserfs_warning(super, "sh-458: journal_init_dev: "
2537 					 "cannot init journal device '%s': %i",
2538 					 __bdevname(jdev, b), result);
2539 			return result;
2540 		} else if (jdev != super->s_dev)
2541 			set_blocksize(journal->j_dev_bd, super->s_blocksize);
2542 		return 0;
2543 	}
2544 
2545 	journal->j_dev_file = filp_open(jdev_name, 0, 0);
2546 	if (!IS_ERR(journal->j_dev_file)) {
2547 		struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
2548 		if (!S_ISBLK(jdev_inode->i_mode)) {
2549 			reiserfs_warning(super, "journal_init_dev: '%s' is "
2550 					 "not a block device", jdev_name);
2551 			result = -ENOTBLK;
2552 			release_journal_dev(super, journal);
2553 		} else {
2554 			/* ok */
2555 			journal->j_dev_bd = I_BDEV(jdev_inode);
2556 			set_blocksize(journal->j_dev_bd, super->s_blocksize);
2557 			reiserfs_info(super,
2558 				      "journal_init_dev: journal device: %s\n",
2559 				      bdevname(journal->j_dev_bd, b));
2560 		}
2561 	} else {
2562 		result = PTR_ERR(journal->j_dev_file);
2563 		journal->j_dev_file = NULL;
2564 		reiserfs_warning(super,
2565 				 "journal_init_dev: Cannot open '%s': %i",
2566 				 jdev_name, result);
2567 	}
2568 	return result;
2569 }
2570 
2571 /*
2572 ** must be called once on fs mount.  calls journal_read for you
2573 */
2574 int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2575 		 int old_format, unsigned int commit_max_age)
2576 {
2577 	int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2;
2578 	struct buffer_head *bhjh;
2579 	struct reiserfs_super_block *rs;
2580 	struct reiserfs_journal_header *jh;
2581 	struct reiserfs_journal *journal;
2582 	struct reiserfs_journal_list *jl;
2583 	char b[BDEVNAME_SIZE];
2584 
2585 	journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
2586 	if (!journal) {
2587 		reiserfs_warning(p_s_sb,
2588 				 "journal-1256: unable to get memory for journal structure");
2589 		return 1;
2590 	}
2591 	memset(journal, 0, sizeof(struct reiserfs_journal));
2592 	INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2593 	INIT_LIST_HEAD(&journal->j_prealloc_list);
2594 	INIT_LIST_HEAD(&journal->j_working_list);
2595 	INIT_LIST_HEAD(&journal->j_journal_list);
2596 	journal->j_persistent_trans = 0;
2597 	if (reiserfs_allocate_list_bitmaps(p_s_sb,
2598 					   journal->j_list_bitmap,
2599 					   SB_BMAP_NR(p_s_sb)))
2600 		goto free_and_return;
2601 	allocate_bitmap_nodes(p_s_sb);
2602 
2603 	/* reserved for journal area support */
2604 	SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
2605 						 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2606 						 / p_s_sb->s_blocksize +
2607 						 SB_BMAP_NR(p_s_sb) +
2608 						 1 :
2609 						 REISERFS_DISK_OFFSET_IN_BYTES /
2610 						 p_s_sb->s_blocksize + 2);
2611 
2612 	/* Sanity check to see if the standard journal fits within the area
2613 	   addressed by the first bitmap block (relevant for small blocksizes) */
2614 	if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
2615 	    (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
2616 	     SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
2617 		reiserfs_warning(p_s_sb,
2618 				 "journal-1393: journal does not fit for area "
2619 				 "addressed by first of bitmap blocks. It starts at "
2620 				 "%u and its size is %u. Block size %ld",
2621 				 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
2622 				 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2623 				 p_s_sb->s_blocksize);
2624 		goto free_and_return;
2625 	}
2626 
2627 	if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
2628 		reiserfs_warning(p_s_sb,
2629 				 "sh-462: unable to initialize journal device");
2630 		goto free_and_return;
2631 	}
2632 
2633 	rs = SB_DISK_SUPER_BLOCK(p_s_sb);
2634 
2635 	/* read journal header */
2636 	bhjh = journal_bread(p_s_sb,
2637 			     SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2638 			     SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2639 	if (!bhjh) {
2640 		reiserfs_warning(p_s_sb,
2641 				 "sh-459: unable to read journal header");
2642 		goto free_and_return;
2643 	}
2644 	jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2645 
2646 	/* make sure the journal matches the super block */
2647 	if (is_reiserfs_jr(rs)
2648 	    && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2649 		sb_jp_journal_magic(rs))) {
2650 		reiserfs_warning(p_s_sb,
2651 				 "sh-460: journal header magic %x "
2652 				 "(device %s) does not match to magic found in super "
2653 				 "block %x", jh->jh_journal.jp_journal_magic,
2654 				 bdevname(journal->j_dev_bd, b),
2655 				 sb_jp_journal_magic(rs));
2656 		brelse(bhjh);
2657 		goto free_and_return;
2658 	}
2659 
2660 	journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2661 	journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2662 	journal->j_max_commit_age =
2663 	    le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2664 	journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2665 
2666 	if (journal->j_trans_max) {
2667 		/* make sure these parameters are sane, and clamp them if they are not */
2668 		__u32 initial = journal->j_trans_max;
2669 		__u32 ratio = 1;
2670 
2671 		if (p_s_sb->s_blocksize < 4096)
2672 			ratio = 4096 / p_s_sb->s_blocksize;
2673 
2674 		if (SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
2675 		    JOURNAL_MIN_RATIO)
2676 			journal->j_trans_max =
2677 			    SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
2678 		if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
2679 			journal->j_trans_max =
2680 			    JOURNAL_TRANS_MAX_DEFAULT / ratio;
2681 		if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
2682 			journal->j_trans_max =
2683 			    JOURNAL_TRANS_MIN_DEFAULT / ratio;
2684 
2685 		if (journal->j_trans_max != initial)
2686 			reiserfs_warning(p_s_sb,
2687 					 "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
2688 					 initial, journal->j_trans_max);
2689 
2690 		journal->j_max_batch = journal->j_trans_max *
2691 		    JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
2692 	}
2693 
2694 	if (!journal->j_trans_max) {
2695 		/* the file system was created by an old version of mkreiserfs,
2696 		   so this field contains a zero value */
2697 		journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2698 		journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2699 		journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2700 
2701 		/* for blocksize >= 4096, the max transaction size is 1024.  For blocksize < 4096,
2702 		   the max transaction size is decreased proportionally */
2703 		if (p_s_sb->s_blocksize < 4096) {
2704 			journal->j_trans_max /= (4096 / p_s_sb->s_blocksize);
2705 			journal->j_max_batch = (journal->j_trans_max) * 9 / 10;
2706 		}
2707 	}
2708 
2709 	journal->j_default_max_commit_age = journal->j_max_commit_age;
2710 
2711 	if (commit_max_age != 0) {
2712 		journal->j_max_commit_age = commit_max_age;
2713 		journal->j_max_trans_age = commit_max_age;
2714 	}
2715 
2716 	reiserfs_info(p_s_sb, "journal params: device %s, size %u, "
2717 		      "journal first block %u, max trans len %u, max batch %u, "
2718 		      "max commit age %u, max trans age %u\n",
2719 		      bdevname(journal->j_dev_bd, b),
2720 		      SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2721 		      SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2722 		      journal->j_trans_max,
2723 		      journal->j_max_batch,
2724 		      journal->j_max_commit_age, journal->j_max_trans_age);
2725 
2726 	brelse(bhjh);
2727 
2728 	journal->j_list_bitmap_index = 0;
2729 	journal_list_init(p_s_sb);
2730 
2731 	memset(journal->j_list_hash_table, 0,
2732 	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2733 
2734 	INIT_LIST_HEAD(&journal->j_dirty_buffers);
2735 	spin_lock_init(&journal->j_dirty_buffers_lock);
2736 
2737 	journal->j_start = 0;
2738 	journal->j_len = 0;
2739 	journal->j_len_alloc = 0;
2740 	atomic_set(&(journal->j_wcount), 0);
2741 	atomic_set(&(journal->j_async_throttle), 0);
2742 	journal->j_bcount = 0;
2743 	journal->j_trans_start_time = 0;
2744 	journal->j_last = NULL;
2745 	journal->j_first = NULL;
2746 	init_waitqueue_head(&(journal->j_join_wait));
2747 	sema_init(&journal->j_lock, 1);
2748 	sema_init(&journal->j_flush_sem, 1);
2749 
2750 	journal->j_trans_id = 10;
2751 	journal->j_mount_id = 10;
2752 	journal->j_state = 0;
2753 	atomic_set(&(journal->j_jlock), 0);
2754 	journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2755 	journal->j_cnode_free_orig = journal->j_cnode_free_list;
2756 	journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2757 	journal->j_cnode_used = 0;
2758 	journal->j_must_wait = 0;
2759 
2760 	init_journal_hash(p_s_sb);
2761 	jl = journal->j_current_jl;
2762 	jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
2763 	if (!jl->j_list_bitmap) {
2764 		reiserfs_warning(p_s_sb,
2765 				 "journal-2005, get_list_bitmap failed for journal list 0");
2766 		goto free_and_return;
2767 	}
2768 	if (journal_read(p_s_sb) < 0) {
2769 		reiserfs_warning(p_s_sb, "Replay Failure, unable to mount");
2770 		goto free_and_return;
2771 	}
2772 
2773 	reiserfs_mounted_fs_count++;
2774 	if (reiserfs_mounted_fs_count <= 1)
2775 		commit_wq = create_workqueue("reiserfs");
2776 
2777 	INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
2778 	return 0;
2779       free_and_return:
2780 	free_journal_ram(p_s_sb);
2781 	return 1;
2782 }
2783 
2784 /*
2785 ** test for a polite end of the current transaction.  Used by file_write, and should
2786 ** be used by delete to make sure they don't write more than can fit inside a single
2787 ** transaction
2788 */
2789 int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2790 				   int new_alloc)
2791 {
2792 	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2793 	time_t now = get_seconds();
2794 	/* cannot restart while nested */
2795 	BUG_ON(!th->t_trans_id);
2796 	if (th->t_refcount > 1)
2797 		return 0;
2798 	if (journal->j_must_wait > 0 ||
2799 	    (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2800 	    atomic_read(&(journal->j_jlock)) ||
2801 	    (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2802 	    journal->j_cnode_free < (journal->j_trans_max * 3)) {
2803 		return 1;
2804 	}
2805 	return 0;
2806 }
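
/* A sketch of the polite-restart idiom described above, assuming a caller
** that logs blocks_per_step blocks per iteration (the function name is
** illustrative, not a caller that exists in this file): end the current
** transaction and begin a fresh one instead of overflowing it.
*/
static int restart_politely_sketch(struct reiserfs_transaction_handle *th,
				   struct super_block *sb, int blocks_per_step)
{
	int err;

	if (journal_transaction_should_end(th, blocks_per_step)) {
		/* end the current transaction and start a fresh one */
		err = journal_end(th, sb, blocks_per_step);
		if (err)
			return err;
		err = journal_begin(th, sb, blocks_per_step);
		if (err)
			return err;
	}
	return 0;
}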
2807 
2808 /* this must be called inside a transaction, and requires the
2809 ** kernel_lock to be held
2810 */
2811 void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2812 {
2813 	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2814 	BUG_ON(!th->t_trans_id);
2815 	journal->j_must_wait = 1;
2816 	set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2817 	return;
2818 }
2819 
2820 /* this must be called without a transaction started, and does not
2821 ** require BKL
2822 */
2823 void reiserfs_allow_writes(struct super_block *s)
2824 {
2825 	struct reiserfs_journal *journal = SB_JOURNAL(s);
2826 	clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
2827 	wake_up(&journal->j_join_wait);
2828 }
2829 
2830 /* this must be called without a transaction started, and does not
2831 ** require BKL
2832 */
2833 void reiserfs_wait_on_write_block(struct super_block *s)
2834 {
2835 	struct reiserfs_journal *journal = SB_JOURNAL(s);
2836 	wait_event(journal->j_join_wait,
2837 		   !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
2838 }
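
/* How the three helpers above pair up, as a hedged sketch (the function name
** is illustrative): a caller that must quiesce the log blocks new writers
** from inside a transaction, commits, and only later lets writers back in.
*/
static int quiesce_log_sketch(struct super_block *sb)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, sb, 1);
	if (err)
		return err;
	reiserfs_block_writes(&th);	/* must be inside a transaction */
	err = journal_end_sync(&th, sb, 1);
	/* ... the log is quiet here; new writers wait on j_join_wait ... */
	reiserfs_allow_writes(sb);	/* called with no transaction open */
	return err;
}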
2839 
2840 static void queue_log_writer(struct super_block *s)
2841 {
2842 	wait_queue_t wait;
2843 	struct reiserfs_journal *journal = SB_JOURNAL(s);
2844 	set_bit(J_WRITERS_QUEUED, &journal->j_state);
2845 
2846 	/*
2847 	 * we don't want to use wait_event here because
2848 	 * we only want to wait once.
2849 	 */
2850 	init_waitqueue_entry(&wait, current);
2851 	add_wait_queue(&journal->j_join_wait, &wait);
2852 	set_current_state(TASK_UNINTERRUPTIBLE);
2853 	if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
2854 		schedule();
2855 	current->state = TASK_RUNNING;
2856 	remove_wait_queue(&journal->j_join_wait, &wait);
2857 }
2858 
2859 static void wake_queued_writers(struct super_block *s)
2860 {
2861 	struct reiserfs_journal *journal = SB_JOURNAL(s);
2862 	if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
2863 		wake_up(&journal->j_join_wait);
2864 }
2865 
2866 static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
2867 {
2868 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
2869 	unsigned long bcount = journal->j_bcount;
2870 	while (1) {
2871 		set_current_state(TASK_UNINTERRUPTIBLE);
2872 		schedule_timeout(1);
2873 		journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
2874 		while ((atomic_read(&journal->j_wcount) > 0 ||
2875 			atomic_read(&journal->j_jlock)) &&
2876 		       journal->j_trans_id == trans_id) {
2877 			queue_log_writer(sb);
2878 		}
2879 		if (journal->j_trans_id != trans_id)
2880 			break;
2881 		if (bcount == journal->j_bcount)
2882 			break;
2883 		bcount = journal->j_bcount;
2884 	}
2885 }
2886 
2887 /* join == true if you must join an existing transaction.
2888 ** join == false if you can deal with waiting for others to finish
2889 **
2890 ** this will block until the transaction is joinable.  send the number of blocks you
2891 ** expect to use in nblocks.
2892 */
2893 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
2894 			      struct super_block *p_s_sb, unsigned long nblocks,
2895 			      int join)
2896 {
2897 	time_t now = get_seconds();
2898 	int old_trans_id;
2899 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2900 	struct reiserfs_transaction_handle myth;
2901 	int sched_count = 0;
2902 	int retval;
2903 
2904 	reiserfs_check_lock_depth(p_s_sb, "journal_begin");
2905 	if (nblocks > journal->j_trans_max)
2906 		BUG();
2907 
2908 	PROC_INFO_INC(p_s_sb, journal.journal_being);
2909 	/* set here for journal_join */
2910 	th->t_refcount = 1;
2911 	th->t_super = p_s_sb;
2912 
2913       relock:
2914 	lock_journal(p_s_sb);
2915 	if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
2916 		unlock_journal(p_s_sb);
2917 		retval = journal->j_errno;
2918 		goto out_fail;
2919 	}
2920 	journal->j_bcount++;
2921 
2922 	if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
2923 		unlock_journal(p_s_sb);
2924 		reiserfs_wait_on_write_block(p_s_sb);
2925 		PROC_INFO_INC(p_s_sb, journal.journal_relock_writers);
2926 		goto relock;
2927 	}
2928 	now = get_seconds();
2929 
2930 	/* if there is no room in the journal OR
2931 	 ** if this transaction is too old and we weren't called joinable, wait for it to finish before beginning.
2932 	 ** We don't sleep if there aren't other writers.
2933 	 */
2934 
2935 	if ((!join && journal->j_must_wait > 0) ||
2936 	    (!join
2937 	     && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
2938 	    || (!join && atomic_read(&journal->j_wcount) > 0
2939 		&& journal->j_trans_start_time > 0
2940 		&& (now - journal->j_trans_start_time) >
2941 		journal->j_max_trans_age) || (!join
2942 					      && atomic_read(&journal->j_jlock))
2943 	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
2944 
2945 		old_trans_id = journal->j_trans_id;
2946 		unlock_journal(p_s_sb);	/* allow others to finish this transaction */
2947 
2948 		if (!join && (journal->j_len_alloc + nblocks + 2) >=
2949 		    journal->j_max_batch &&
2950 		    ((journal->j_len + nblocks + 2) * 100) <
2951 		    (journal->j_len_alloc * 75)) {
2952 			if (atomic_read(&journal->j_wcount) > 10) {
2953 				sched_count++;
2954 				queue_log_writer(p_s_sb);
2955 				goto relock;
2956 			}
2957 		}
2958 		/* don't mess with joining the transaction if all we have to do is
2959 		 * wait for someone else to do a commit
2960 		 */
2961 		if (atomic_read(&journal->j_jlock)) {
2962 			while (journal->j_trans_id == old_trans_id &&
2963 			       atomic_read(&journal->j_jlock)) {
2964 				queue_log_writer(p_s_sb);
2965 			}
2966 			goto relock;
2967 		}
2968 		retval = journal_join(&myth, p_s_sb, 1);
2969 		if (retval)
2970 			goto out_fail;
2971 
2972 		/* someone might have ended the transaction while we joined */
2973 		if (old_trans_id != journal->j_trans_id) {
2974 			retval = do_journal_end(&myth, p_s_sb, 1, 0);
2975 		} else {
2976 			retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW);
2977 		}
2978 
2979 		if (retval)
2980 			goto out_fail;
2981 
2982 		PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount);
2983 		goto relock;
2984 	}
2985 	/* we are the first writer, set trans_id */
2986 	if (journal->j_trans_start_time == 0) {
2987 		journal->j_trans_start_time = get_seconds();
2988 	}
2989 	atomic_inc(&(journal->j_wcount));
2990 	journal->j_len_alloc += nblocks;
2991 	th->t_blocks_logged = 0;
2992 	th->t_blocks_allocated = nblocks;
2993 	th->t_trans_id = journal->j_trans_id;
2994 	unlock_journal(p_s_sb);
2995 	INIT_LIST_HEAD(&th->t_list);
2996 	get_fs_excl();
2997 	return 0;
2998 
2999       out_fail:
3000 	memset(th, 0, sizeof(*th));
3001 	/* Re-set th->t_super, so we can properly keep track of how many
3002 	 * persistent transactions there are. We need to do this so if this
3003 	 * call is part of a failed restart_transaction, we can free it later */
3004 	th->t_super = p_s_sb;
3005 	return retval;
3006 }
3007 
3008 struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3009 								    super_block
3010 								    *s,
3011 								    int nblocks)
3012 {
3013 	int ret;
3014 	struct reiserfs_transaction_handle *th;
3015 
3016 	/* if we're nesting into an existing transaction, it will be
3017 	 ** persistent on its own
3018 	 */
3019 	if (reiserfs_transaction_running(s)) {
3020 		th = current->journal_info;
3021 		th->t_refcount++;
3022 		if (th->t_refcount < 2) {
3023 			BUG();
3024 		}
3025 		return th;
3026 	}
3027 	th = reiserfs_kmalloc(sizeof(struct reiserfs_transaction_handle),
3028 			      GFP_NOFS, s);
3029 	if (!th)
3030 		return NULL;
3031 	ret = journal_begin(th, s, nblocks);
3032 	if (ret) {
3033 		reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle),
3034 			       s);
3035 		return NULL;
3036 	}
3037 
3038 	SB_JOURNAL(s)->j_persistent_trans++;
3039 	return th;
3040 }
3041 
3042 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3043 {
3044 	struct super_block *s = th->t_super;
3045 	int ret = 0;
3046 	if (th->t_trans_id)
3047 		ret = journal_end(th, th->t_super, th->t_blocks_allocated);
3048 	else
3049 		ret = -EIO;
3050 	if (th->t_refcount == 0) {
3051 		SB_JOURNAL(s)->j_persistent_trans--;
3052 		reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle),
3053 			       s);
3054 	}
3055 	return ret;
3056 }
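
/* Usage sketch for the persistent-handle pair above (the function name is
** illustrative): the handle survives across calls, so it can be allocated,
** passed around, and handed back later.
*/
static int persistent_trans_sketch(struct super_block *s)
{
	struct reiserfs_transaction_handle *th;

	th = reiserfs_persistent_transaction(s, 10);
	if (!th)
		return -ENOMEM;
	/* ... reiserfs_prepare_for_journal/journal_mark_dirty calls here ... */
	return reiserfs_end_persistent_transaction(th);
}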
3057 
3058 static int journal_join(struct reiserfs_transaction_handle *th,
3059 			struct super_block *p_s_sb, unsigned long nblocks)
3060 {
3061 	struct reiserfs_transaction_handle *cur_th = current->journal_info;
3062 
3063 	/* this keeps do_journal_end from NULLing out the current->journal_info
3064 	 ** pointer
3065 	 */
3066 	th->t_handle_save = cur_th;
3067 	if (cur_th && cur_th->t_refcount > 1) {
3068 		BUG();
3069 	}
3070 	return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
3071 }
3072 
3073 int journal_join_abort(struct reiserfs_transaction_handle *th,
3074 		       struct super_block *p_s_sb, unsigned long nblocks)
3075 {
3076 	struct reiserfs_transaction_handle *cur_th = current->journal_info;
3077 
3078 	/* this keeps do_journal_end from NULLing out the current->journal_info
3079 	 ** pointer
3080 	 */
3081 	th->t_handle_save = cur_th;
3082 	if (cur_th && cur_th->t_refcount > 1) {
3083 		BUG();
3084 	}
3085 	return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
3086 }
3087 
3088 int journal_begin(struct reiserfs_transaction_handle *th,
3089 		  struct super_block *p_s_sb, unsigned long nblocks)
3090 {
3091 	struct reiserfs_transaction_handle *cur_th = current->journal_info;
3092 	int ret;
3093 
3094 	th->t_handle_save = NULL;
3095 	if (cur_th) {
3096 		/* we are nesting into the current transaction */
3097 		if (cur_th->t_super == p_s_sb) {
3098 			BUG_ON(!cur_th->t_refcount);
3099 			cur_th->t_refcount++;
3100 			memcpy(th, cur_th, sizeof(*th));
3101 			if (th->t_refcount <= 1)
3102 				reiserfs_warning(p_s_sb,
3103 						 "BAD: refcount <= 1, but journal_info != 0");
3104 			return 0;
3105 		} else {
3106 			/* we've ended up with a handle from a different filesystem.
3107 			 ** save it and restore on journal_end.  This should never
3108 			 ** really happen...
3109 			 */
3110 			reiserfs_warning(p_s_sb,
3111 					 "clm-2100: nesting into a different FS");
3112 			th->t_handle_save = current->journal_info;
3113 			current->journal_info = th;
3114 		}
3115 	} else {
3116 		current->journal_info = th;
3117 	}
3118 	ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
3119 	if (current->journal_info != th)
3120 		BUG();
3121 
3122 	/* I guess this boils down to being the reciprocal of clm-2100 above.
3123 	 * If do_journal_begin_r fails, we need to put it back, since journal_end
3124 	 * won't be called to do it. */
3125 	if (ret)
3126 		current->journal_info = th->t_handle_save;
3127 	else
3128 		BUG_ON(!th->t_refcount);
3129 
3130 	return ret;
3131 }
3132 
3133 /*
3134 ** puts bh into the current transaction.  If it was already there, it removes the
3135 ** old pointers from the hash, and puts new ones in (to make sure replay happens in the right order).
3136 **
3137 ** if it was dirty, cleans and files onto the clean list.  I can't let it be dirty again until the
3138 ** transaction is committed.
3139 **
3140 ** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to j_len + JOURNAL_PER_BALANCE_CNT.
3141 */
3142 int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3143 		       struct super_block *p_s_sb, struct buffer_head *bh)
3144 {
3145 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3146 	struct reiserfs_journal_cnode *cn = NULL;
3147 	int count_already_incd = 0;
3148 	int prepared = 0;
3149 	BUG_ON(!th->t_trans_id);
3150 
3151 	PROC_INFO_INC(p_s_sb, journal.mark_dirty);
3152 	if (th->t_trans_id != journal->j_trans_id) {
3153 		reiserfs_panic(th->t_super,
3154 			       "journal-1577: handle trans id %ld != current trans id %ld\n",
3155 			       th->t_trans_id, journal->j_trans_id);
3156 	}
3157 
3158 	p_s_sb->s_dirt = 1;
3159 
3160 	prepared = test_clear_buffer_journal_prepared(bh);
3161 	clear_buffer_journal_restore_dirty(bh);
3162 	/* already in this transaction, we are done */
3163 	if (buffer_journaled(bh)) {
3164 		PROC_INFO_INC(p_s_sb, journal.mark_dirty_already);
3165 		return 0;
3166 	}
3167 
3168 	/* this must be turned into a panic instead of a warning.  We can't allow
3169 	 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
3170 	 ** could get to disk too early.  NOT GOOD.
3171 	 */
3172 	if (!prepared || buffer_dirty(bh)) {
3173 		reiserfs_warning(p_s_sb, "journal-1777: buffer %llu bad state "
3174 				 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3175 				 (unsigned long long)bh->b_blocknr,
3176 				 prepared ? ' ' : '!',
3177 				 buffer_locked(bh) ? ' ' : '!',
3178 				 buffer_dirty(bh) ? ' ' : '!',
3179 				 buffer_journal_dirty(bh) ? ' ' : '!');
3180 	}
3181 
3182 	if (atomic_read(&(journal->j_wcount)) <= 0) {
3183 		reiserfs_warning(p_s_sb,
3184 				 "journal-1409: journal_mark_dirty returning because j_wcount was %d",
3185 				 atomic_read(&(journal->j_wcount)));
3186 		return 1;
3187 	}
3188 	/* this error means I've screwed up, and we've overflowed the transaction.
3189 	 ** Nothing can be done here, except make the FS readonly or panic.
3190 	 */
3191 	if (journal->j_len >= journal->j_trans_max) {
3192 		reiserfs_panic(th->t_super,
3193 			       "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n",
3194 			       journal->j_len);
3195 	}
3196 
3197 	if (buffer_journal_dirty(bh)) {
3198 		count_already_incd = 1;
3199 		PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal);
3200 		clear_buffer_journal_dirty(bh);
3201 	}
3202 
3203 	if (journal->j_len > journal->j_len_alloc) {
3204 		journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3205 	}
3206 
3207 	set_buffer_journaled(bh);
3208 
3209 	/* now put this guy on the end */
3210 	if (!cn) {
3211 		cn = get_cnode(p_s_sb);
3212 		if (!cn) {
3213 			reiserfs_panic(p_s_sb, "get_cnode failed!\n");
3214 		}
3215 
3216 		if (th->t_blocks_logged == th->t_blocks_allocated) {
3217 			th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3218 			journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3219 		}
3220 		th->t_blocks_logged++;
3221 		journal->j_len++;
3222 
3223 		cn->bh = bh;
3224 		cn->blocknr = bh->b_blocknr;
3225 		cn->sb = p_s_sb;
3226 		cn->jlist = NULL;
3227 		insert_journal_hash(journal->j_hash_table, cn);
3228 		if (!count_already_incd) {
3229 			get_bh(bh);
3230 		}
3231 	}
3232 	cn->next = NULL;
3233 	cn->prev = journal->j_last;
3234 	cn->bh = bh;
3235 	if (journal->j_last) {
3236 		journal->j_last->next = cn;
3237 		journal->j_last = cn;
3238 	} else {
3239 		journal->j_first = cn;
3240 		journal->j_last = cn;
3241 	}
3242 	return 0;
3243 }
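
/* The logging idiom journal_mark_dirty() expects, as a sketch (the function
** name is illustrative): prepare the buffer first, modify it, then mark it
** dirty in the running transaction -- the same sequence do_journal_release()
** uses on the superblock buffer.
*/
static int log_one_buffer_sketch(struct reiserfs_transaction_handle *th,
				 struct super_block *sb,
				 struct buffer_head *bh)
{
	reiserfs_prepare_for_journal(sb, bh, 1);
	/* ... modify bh->b_data while it is prepared ... */
	return journal_mark_dirty(th, sb, bh);
}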
3244 
3245 int journal_end(struct reiserfs_transaction_handle *th,
3246 		struct super_block *p_s_sb, unsigned long nblocks)
3247 {
3248 	if (!current->journal_info && th->t_refcount > 1)
3249 		reiserfs_warning(p_s_sb, "REISER-NESTING: th NULL, refcount %d",
3250 				 th->t_refcount);
3251 
3252 	if (!th->t_trans_id) {
3253 		WARN_ON(1);
3254 		return -EIO;
3255 	}
3256 
3257 	th->t_refcount--;
3258 	if (th->t_refcount > 0) {
3259 		struct reiserfs_transaction_handle *cur_th =
3260 		    current->journal_info;
3261 
3262 		/* we aren't allowed to close a nested transaction on a different
3263 		 ** filesystem from the one in the task struct
3264 		 */
3265 		if (cur_th->t_super != th->t_super)
3266 			BUG();
3267 
3268 		if (th != cur_th) {
3269 			memcpy(current->journal_info, th, sizeof(*th));
3270 			th->t_trans_id = 0;
3271 		}
3272 		return 0;
3273 	} else {
3274 		return do_journal_end(th, p_s_sb, nblocks, 0);
3275 	}
3276 }
3277 
3278 /* removes the block from the current transaction, releasing (brelse) and decrementing any counters.
3279 ** also files the removed buffer directly onto the clean list
3280 **
3281 ** called by journal_mark_freed when a block has been deleted
3282 **
3283 ** returns 1 if it cleaned and released the buffer. 0 otherwise
3284 */
3285 static int remove_from_transaction(struct super_block *p_s_sb,
3286 				   b_blocknr_t blocknr, int already_cleaned)
3287 {
3288 	struct buffer_head *bh;
3289 	struct reiserfs_journal_cnode *cn;
3290 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3291 	int ret = 0;
3292 
3293 	cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3294 	if (!cn || !cn->bh) {
3295 		return ret;
3296 	}
3297 	bh = cn->bh;
3298 	if (cn->prev) {
3299 		cn->prev->next = cn->next;
3300 	}
3301 	if (cn->next) {
3302 		cn->next->prev = cn->prev;
3303 	}
3304 	if (cn == journal->j_first) {
3305 		journal->j_first = cn->next;
3306 	}
3307 	if (cn == journal->j_last) {
3308 		journal->j_last = cn->prev;
3309 	}
3310 	if (bh)
3311 		remove_journal_hash(p_s_sb, journal->j_hash_table, NULL,
3312 				    bh->b_blocknr, 0);
3313 	clear_buffer_journaled(bh);	/* don't log this one */
3314 
3315 	if (!already_cleaned) {
3316 		clear_buffer_journal_dirty(bh);
3317 		clear_buffer_dirty(bh);
3318 		clear_buffer_journal_test(bh);
3319 		put_bh(bh);
3320 		if (atomic_read(&(bh->b_count)) < 0) {
3321 			reiserfs_warning(p_s_sb,
3322 					 "journal-1752: remove from trans, b_count < 0");
3323 		}
3324 		ret = 1;
3325 	}
3326 	journal->j_len--;
3327 	journal->j_len_alloc--;
3328 	free_cnode(p_s_sb, cn);
3329 	return ret;
3330 }
3331 
3332 /*
3333 ** for any cnode in a journal list, it can only be dirtied if all the
3334 ** transactions that include it are committed to disk.
3335 ** this checks through each transaction, and returns 1 if you are allowed to dirty,
3336 ** and 0 if you aren't
3337 **
3338 ** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
3339 ** blocks for a given transaction on disk
3340 **
3341 */
3342 static int can_dirty(struct reiserfs_journal_cnode *cn)
3343 {
3344 	struct super_block *sb = cn->sb;
3345 	b_blocknr_t blocknr = cn->blocknr;
3346 	struct reiserfs_journal_cnode *cur = cn->hprev;
3347 	int can_dirty = 1;
3348 
3349 	/* first test hprev.  These are all newer than cn, so any node here
3350 	 ** with the same block number and dev means this node can't be sent
3351 	 ** to disk right now.
3352 	 */
3353 	while (cur && can_dirty) {
3354 		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3355 		    cur->blocknr == blocknr) {
3356 			can_dirty = 0;
3357 		}
3358 		cur = cur->hprev;
3359 	}
3360 	/* then test hnext.  These are all older than cn.  As long as they
3361 	 ** are committed to the log, it is safe to write cn to disk
3362 	 */
3363 	cur = cn->hnext;
3364 	while (cur && can_dirty) {
3365 		if (cur->jlist && cur->jlist->j_len > 0 &&
3366 		    atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
3367 		    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3368 			can_dirty = 0;
3369 		}
3370 		cur = cur->hnext;
3371 	}
3372 	return can_dirty;
3373 }
3374 
3375 /* syncs the commit blocks, but does not force the real buffers to disk
3376 ** will wait until the current transaction is done/committed before returning
3377 */
3378 int journal_end_sync(struct reiserfs_transaction_handle *th,
3379 		     struct super_block *p_s_sb, unsigned long nblocks)
3380 {
3381 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3382 
3383 	BUG_ON(!th->t_trans_id);
3384 	/* you cannot sync while nested; that would be very, very bad */
3385 	if (th->t_refcount > 1) {
3386 		BUG();
3387 	}
3388 	if (journal->j_len == 0) {
3389 		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3390 					     1);
3391 		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3392 	}
3393 	return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
3394 }
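
/* Caller sketch (illustrative; __commit_trans_jl later in this file uses the
** same pairing): journal_end_sync pads an empty transaction with the
** superblock buffer itself, so a minimal synchronous commit looks like:
**
**	struct reiserfs_transaction_handle th;
**	int err = journal_begin(&th, sb, 1);
**	if (!err)
**		err = journal_end_sync(&th, sb, 1);
*/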
3395 
3396 /*
3397 ** writeback the pending async commits to disk
3398 */
3399 static void flush_async_commits(void *p)
3400 {
3401 	struct super_block *p_s_sb = p;
3402 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3403 	struct reiserfs_journal_list *jl;
3404 	struct list_head *entry;
3405 
3406 	lock_kernel();
3407 	if (!list_empty(&journal->j_journal_list)) {
3408 		/* last entry is the youngest, commit it and you get everything */
3409 		entry = journal->j_journal_list.prev;
3410 		jl = JOURNAL_LIST_ENTRY(entry);
3411 		flush_commit_list(p_s_sb, jl, 1);
3412 	}
3413 	unlock_kernel();
3414 	/*
3415 	 * this is a little racy, but there's no harm in missing
3416 	 * the filemap_fdatawrite call
3417 	 */
3418 	if (!atomic_read(&journal->j_async_throttle)
3419 	    && !reiserfs_is_journal_aborted(journal)) {
3420 		atomic_inc(&journal->j_async_throttle);
3421 		filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
3422 		atomic_dec(&journal->j_async_throttle);
3423 	}
3424 }
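
/* Scheduling note (hedged: the INIT_WORK call binding this function to
** journal->j_work lives in the journal init code, outside this excerpt).
** do_journal_end below defers non-urgent commits to the commit workqueue:
**
**	if (!(jl->j_state & LIST_COMMIT_PENDING))
**		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
*/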
3425 
3426 /*
3427 ** flushes any old transactions to disk
3428 ** ends the current transaction if it is too old
3429 */
3430 int reiserfs_flush_old_commits(struct super_block *p_s_sb)
3431 {
3432 	time_t now;
3433 	struct reiserfs_transaction_handle th;
3434 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3435 
3436 	now = get_seconds();
3437 	/* safety check so we don't flush while we are replaying the log during
3438 	 * mount
3439 	 */
3440 	if (list_empty(&journal->j_journal_list)) {
3441 		return 0;
3442 	}
3443 
3444 	/* check the current transaction.  If there are no writers, and it is
3445 	 * too old, finish it, and force the commit blocks to disk
3446 	 */
3447 	if (atomic_read(&journal->j_wcount) <= 0 &&
3448 	    journal->j_trans_start_time > 0 &&
3449 	    journal->j_len > 0 &&
3450 	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3451 		if (!journal_join(&th, p_s_sb, 1)) {
3452 			reiserfs_prepare_for_journal(p_s_sb,
3453 						     SB_BUFFER_WITH_SB(p_s_sb),
3454 						     1);
3455 			journal_mark_dirty(&th, p_s_sb,
3456 					   SB_BUFFER_WITH_SB(p_s_sb));
3457 
3458 			/* we're only called from kreiserfsd; it makes no sense to do
3459 			 ** an async commit just so kreiserfsd can finish it later
3460 			 */
3461 			do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT);
3462 		}
3463 	}
3464 	return p_s_sb->s_dirt;
3465 }
3466 
3467 /*
3468 ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
3469 **
3470 ** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
3471 ** the writers are done.  By the time it wakes up, the transaction it was called with has already ended, so it just
3472 ** flushes the commit list and returns 0.
3473 **
3474 ** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
3475 **
3476 ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
3477 */
3478 static int check_journal_end(struct reiserfs_transaction_handle *th,
3479 			     struct super_block *p_s_sb, unsigned long nblocks,
3480 			     int flags)
3481 {
3482 
3483 	time_t now;
3484 	int flush = flags & FLUSH_ALL;
3485 	int commit_now = flags & COMMIT_NOW;
3486 	int wait_on_commit = flags & WAIT;
3487 	struct reiserfs_journal_list *jl;
3488 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3489 
3490 	BUG_ON(!th->t_trans_id);
3491 
3492 	if (th->t_trans_id != journal->j_trans_id) {
3493 		reiserfs_panic(th->t_super,
3494 			       "journal-1577: handle trans id %ld != current trans id %ld\n",
3495 			       th->t_trans_id, journal->j_trans_id);
3496 	}
3497 
3498 	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
3499 	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed.  unmounting might not call begin */
3500 		atomic_dec(&(journal->j_wcount));
3501 	}
3502 
3503 	/* BUG: deal with the case where j_len is 0 but blocks freed earlier still
3504 	 ** need releasing; today that is left to the next transaction that actually
3505 	 ** writes something, but it should be taken care of in this trans
3506 	 */
3507 	if (journal->j_len == 0) {
3508 		BUG();
3509 	}
3510 	/* if wcount > 0, and we are called with flush or commit_now,
3511 	 ** we wait on j_join_wait.  We will wake up when the last writer has
3512 	 ** finished the transaction, and started it on its way to the disk.
3513 	 ** Then, we flush the commit or journal list, and just return 0
3514 	 ** because the rest of journal end was already done for this transaction.
3515 	 */
3516 	if (atomic_read(&(journal->j_wcount)) > 0) {
3517 		if (flush || commit_now) {
3518 			unsigned trans_id;
3519 
3520 			jl = journal->j_current_jl;
3521 			trans_id = jl->j_trans_id;
3522 			if (wait_on_commit)
3523 				jl->j_state |= LIST_COMMIT_PENDING;
3524 			atomic_set(&(journal->j_jlock), 1);
3525 			if (flush) {
3526 				journal->j_next_full_flush = 1;
3527 			}
3528 			unlock_journal(p_s_sb);
3529 
3530 			/* sleep while the current transaction is still j_jlocked */
3531 			while (journal->j_trans_id == trans_id) {
3532 				if (atomic_read(&journal->j_jlock)) {
3533 					queue_log_writer(p_s_sb);
3534 				} else {
3535 					lock_journal(p_s_sb);
3536 					if (journal->j_trans_id == trans_id) {
3537 						atomic_set(&(journal->j_jlock),
3538 							   1);
3539 					}
3540 					unlock_journal(p_s_sb);
3541 				}
3542 			}
3543 			if (journal->j_trans_id == trans_id) {
3544 				BUG();
3545 			}
3546 			if (commit_now
3547 			    && journal_list_still_alive(p_s_sb, trans_id)
3548 			    && wait_on_commit) {
3549 				flush_commit_list(p_s_sb, jl, 1);
3550 			}
3551 			return 0;
3552 		}
3553 		unlock_journal(p_s_sb);
3554 		return 0;
3555 	}
3556 
3557 	/* deal with old transactions where we are the last writers */
3558 	now = get_seconds();
3559 	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3560 		commit_now = 1;
3561 		journal->j_next_async_flush = 1;
3562 	}
3563 	/* don't batch when someone is waiting on j_join_wait */
3564 	/* don't batch when syncing the commit or flushing the whole trans */
3565 	if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
3566 	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
3567 	    && journal->j_len_alloc < journal->j_max_batch
3568 	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
3569 		journal->j_bcount++;
3570 		unlock_journal(p_s_sb);
3571 		return 0;
3572 	}
3573 
3574 	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
3575 		reiserfs_panic(p_s_sb,
3576 			       "journal-003: journal_end: j_start (%ld) is too high\n",
3577 			       journal->j_start);
3578 	}
3579 	return 1;
3580 }
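
/* Flag sketch (the first call appears verbatim in journal_end_sync above;
** the second is a hypothetical caller wanting a full flush of both the log
** blocks and the real blocks):
**
**	do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
**	do_journal_end(th, p_s_sb, nblocks, FLUSH_ALL | WAIT);
*/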
3581 
3582 /*
3583 ** Does all the work that makes deleting blocks safe.
3584 ** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
3585 **
3586 ** otherwise:
3587 ** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
3588 ** before this transaction has finished.
3589 **
3590 ** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
3591 ** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
3592 ** the block can't be reallocated yet.
3593 **
3594 ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
3595 */
3596 int journal_mark_freed(struct reiserfs_transaction_handle *th,
3597 		       struct super_block *p_s_sb, b_blocknr_t blocknr)
3598 {
3599 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3600 	struct reiserfs_journal_cnode *cn = NULL;
3601 	struct buffer_head *bh = NULL;
3602 	struct reiserfs_list_bitmap *jb = NULL;
3603 	int cleaned = 0;
3604 	BUG_ON(!th->t_trans_id);
3605 
3606 	cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3607 	if (cn && cn->bh) {
3608 		bh = cn->bh;
3609 		get_bh(bh);
3610 	}
3611 	/* if it is journal new, we just remove it from this transaction */
3612 	if (bh && buffer_journal_new(bh)) {
3613 		clear_buffer_journal_new(bh);
3614 		clear_prepared_bits(bh);
3615 		reiserfs_clean_and_file_buffer(bh);
3616 		cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3617 	} else {
3618 		/* set the bit for this block in the journal bitmap for this transaction */
3619 		jb = journal->j_current_jl->j_list_bitmap;
3620 		if (!jb) {
3621 			reiserfs_panic(p_s_sb,
3622 				       "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n");
3623 		}
3624 		set_bit_in_list_bitmap(p_s_sb, blocknr, jb);
3625 
3626 		/* Note, the entire while loop below is not allowed to schedule.  */
3627 
3628 		if (bh) {
3629 			clear_prepared_bits(bh);
3630 			reiserfs_clean_and_file_buffer(bh);
3631 		}
3632 		cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3633 
3634 		/* find all older transactions with this block, make sure they don't try to write it out */
3635 		cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table,
3636 					  blocknr);
3637 		while (cn) {
3638 			if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
3639 				set_bit(BLOCK_FREED, &cn->state);
3640 				if (cn->bh) {
3641 					if (!cleaned) {
3642 						/* remove_from_transaction will brelse the buffer if it was
3643 						 ** in the current trans
3644 						 */
3645 						clear_buffer_journal_dirty(cn->
3646 									   bh);
3647 						clear_buffer_dirty(cn->bh);
3648 						clear_buffer_journal_test(cn->
3649 									  bh);
3650 						cleaned = 1;
3651 						put_bh(cn->bh);
3652 						if (atomic_read
3653 						    (&(cn->bh->b_count)) < 0) {
3654 							reiserfs_warning(p_s_sb,
3655 									 "journal-2138: cn->bh->b_count < 0");
3656 						}
3657 					}
3658 					if (cn->jlist) {	/* since we are clearing the bh, we MUST dec nonzerolen */
3659 						atomic_dec(&
3660 							   (cn->jlist->
3661 							    j_nonzerolen));
3662 					}
3663 					cn->bh = NULL;
3664 				}
3665 			}
3666 			cn = cn->hnext;
3667 		}
3668 	}
3669 
3670 	if (bh) {
3671 		put_bh(bh);	/* get_hash grabs the buffer */
3672 		if (atomic_read(&(bh->b_count)) < 0) {
3673 			reiserfs_warning(p_s_sb,
3674 					 "journal-2165: bh->b_count < 0");
3675 		}
3676 	}
3677 	return 0;
3678 }
3679 
3680 void reiserfs_update_inode_transaction(struct inode *inode)
3681 {
3682 	struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
3683 	REISERFS_I(inode)->i_jl = journal->j_current_jl;
3684 	REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
3685 }
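
/* Caller sketch (assumed; the real call sites are in the inode code outside
** this excerpt): after logging an inode change, record the transaction so a
** later fsync can commit just that one via reiserfs_commit_for_inode:
**
**	journal_mark_dirty(&th, inode->i_sb, bh);
**	reiserfs_update_inode_transaction(inode);
*/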
3686 
3687 /*
3688  * returns -1 on error, 0 if no commits/barriers were done and 1
3689  * if a transaction was actually committed and the barrier was done
3690  */
3691 static int __commit_trans_jl(struct inode *inode, unsigned long id,
3692 			     struct reiserfs_journal_list *jl)
3693 {
3694 	struct reiserfs_transaction_handle th;
3695 	struct super_block *sb = inode->i_sb;
3696 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3697 	int ret = 0;
3698 
3699 	/* is it from the current transaction, or from an unknown transaction? */
3700 	if (id == journal->j_trans_id) {
3701 		jl = journal->j_current_jl;
3702 		/* try to let other writers come in and grow this transaction */
3703 		let_transaction_grow(sb, id);
3704 		if (journal->j_trans_id != id) {
3705 			goto flush_commit_only;
3706 		}
3707 
3708 		ret = journal_begin(&th, sb, 1);
3709 		if (ret)
3710 			return ret;
3711 
3712 		/* someone might have ended this transaction while we joined */
3713 		if (journal->j_trans_id != id) {
3714 			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3715 						     1);
3716 			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
3717 			ret = journal_end(&th, sb, 1);
3718 			goto flush_commit_only;
3719 		}
3720 
3721 		ret = journal_end_sync(&th, sb, 1);
3722 		if (!ret)
3723 			ret = 1;
3724 
3725 	} else {
3726 		/* this gets tricky, we have to make sure the journal list in
3727 		 * the inode still exists.  We know the list is still around
3728 		 * if we've got a larger transaction id than the oldest list
3729 		 */
3730 	      flush_commit_only:
3731 		if (journal_list_still_alive(inode->i_sb, id)) {
3732 			/*
3733 			 * we only set ret to 1 when we know for sure
3734 			 * the barrier hasn't been started yet on the commit
3735 			 * block.
3736 			 */
3737 			if (atomic_read(&jl->j_commit_left) > 1)
3738 				ret = 1;
3739 			flush_commit_list(sb, jl, 1);
3740 			if (journal->j_errno)
3741 				ret = journal->j_errno;
3742 		}
3743 	}
3744 	/* otherwise the list is gone, and long since committed */
3745 	return ret;
3746 }
3747 
3748 int reiserfs_commit_for_inode(struct inode *inode)
3749 {
3750 	unsigned long id = REISERFS_I(inode)->i_trans_id;
3751 	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
3752 
3753 	/* for the whole inode, assume an unset id means it was
3754 	 * changed in the current transaction.  That is more conservative.
3755 	 */
3756 	if (!id || !jl) {
3757 		reiserfs_update_inode_transaction(inode);
3758 		id = REISERFS_I(inode)->i_trans_id;
3759 		/* jl will be updated in __commit_trans_jl */
3760 	}
3761 
3762 	return __commit_trans_jl(inode, id, jl);
3763 }
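
/* Usage sketch (hedged assumption: the fsync path lives outside this
** excerpt): a data-integrity sync only needs the transaction recorded in
** the inode, not the newest one:
**
**	int ret = reiserfs_commit_for_inode(inode);
**	(ret < 0 on error, 0 if nothing needed committing, 1 if a transaction
**	was committed and the barrier was done)
*/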
3764 
3765 void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
3766 				      struct buffer_head *bh)
3767 {
3768 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3769 	PROC_INFO_INC(p_s_sb, journal.restore_prepared);
3770 	if (!bh) {
3771 		return;
3772 	}
3773 	if (test_clear_buffer_journal_restore_dirty(bh) &&
3774 	    buffer_journal_dirty(bh)) {
3775 		struct reiserfs_journal_cnode *cn;
3776 		cn = get_journal_hash_dev(p_s_sb,
3777 					  journal->j_list_hash_table,
3778 					  bh->b_blocknr);
3779 		if (cn && can_dirty(cn)) {
3780 			set_buffer_journal_test(bh);
3781 			mark_buffer_dirty(bh);
3782 		}
3783 	}
3784 	clear_buffer_journal_prepared(bh);
3785 }
3786 
3787 extern struct tree_balance *cur_tb;
3788 /*
3789 ** before we can change a metadata block, we have to make sure it won't
3790 ** be written to disk while we are altering it.  So, we must:
3791 ** clean it, then
3792 ** wait on it.
3793 **
3794 */
3795 int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
3796 				 struct buffer_head *bh, int wait)
3797 {
3798 	PROC_INFO_INC(p_s_sb, journal.prepare);
3799 
3800 	if (test_set_buffer_locked(bh)) {
3801 		if (!wait)
3802 			return 0;
3803 		lock_buffer(bh);
3804 	}
3805 	set_buffer_journal_prepared(bh);
3806 	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
3807 		clear_buffer_journal_test(bh);
3808 		set_buffer_journal_restore_dirty(bh);
3809 	}
3810 	unlock_buffer(bh);
3811 	return 1;
3812 }
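
/* Usage sketch (this exact pairing appears several times above, e.g. in
** journal_end_sync): a buffer must be prepared before it is logged, so that
** writeback cannot race with the modification:
**
**	reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1);
**	journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
*/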
3813 
3814 static void flush_old_journal_lists(struct super_block *s)
3815 {
3816 	struct reiserfs_journal *journal = SB_JOURNAL(s);
3817 	struct reiserfs_journal_list *jl;
3818 	struct list_head *entry;
3819 	time_t now = get_seconds();
3820 
3821 	while (!list_empty(&journal->j_journal_list)) {
3822 		entry = journal->j_journal_list.next;
3823 		jl = JOURNAL_LIST_ENTRY(entry);
3824 		/* this check should always be run, to send old lists to disk */
3825 		if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
3826 			flush_used_journal_lists(s, jl);
3827 		} else {
3828 			break;
3829 		}
3830 	}
3831 }
3832 
3833 /*
3834 ** long and ugly.  If flush (FLUSH_ALL), will not return until all commit
3835 ** blocks and all real buffers in the trans are on disk.
3836 ** If wait_on_commit (WAIT), won't return until all commit blocks are on disk.
3837 **
3838 ** keep reading, there are comments as you go along
3839 **
3840 ** If the journal is aborted, we just clean up. Things like flushing
3841 ** journal lists, etc just won't happen.
3842 */
3843 static int do_journal_end(struct reiserfs_transaction_handle *th,
3844 			  struct super_block *p_s_sb, unsigned long nblocks,
3845 			  int flags)
3846 {
3847 	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3848 	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
3849 	struct reiserfs_journal_cnode *last_cn = NULL;
3850 	struct reiserfs_journal_desc *desc;
3851 	struct reiserfs_journal_commit *commit;
3852 	struct buffer_head *c_bh;	/* commit bh */
3853 	struct buffer_head *d_bh;	/* desc bh */
3854 	int cur_write_start = 0;	/* start index of current log write */
3855 	int old_start;
3856 	int i;
3857 	int flush = flags & FLUSH_ALL;
3858 	int wait_on_commit = flags & WAIT;
3859 	struct reiserfs_journal_list *jl, *temp_jl;
3860 	struct list_head *entry, *safe;
3861 	unsigned long jindex;
3862 	unsigned long commit_trans_id;
3863 	int trans_half;
3864 
3865 	BUG_ON(th->t_refcount > 1);
3866 	BUG_ON(!th->t_trans_id);
3867 
3868 	put_fs_excl();
3869 	current->journal_info = th->t_handle_save;
3870 	reiserfs_check_lock_depth(p_s_sb, "journal end");
3871 	if (journal->j_len == 0) {
3872 		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3873 					     1);
3874 		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3875 	}
3876 
3877 	lock_journal(p_s_sb);
3878 	if (journal->j_next_full_flush) {
3879 		flags |= FLUSH_ALL;
3880 		flush = 1;
3881 	}
3882 	if (journal->j_next_async_flush) {
3883 		flags |= COMMIT_NOW | WAIT;
3884 		wait_on_commit = 1;
3885 	}
3886 
3887 	/* check_journal_end runs with the journal locked, and unlocks it if it
3888 	 ** does not return 1.  it tells us if we should continue with the journal_end, or just return
3889 	 */
3890 	if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
3891 		p_s_sb->s_dirt = 1;
3892 		wake_queued_writers(p_s_sb);
3893 		reiserfs_async_progress_wait(p_s_sb);
3894 		goto out;
3895 	}
3896 
3897 	/* check_journal_end might set these, check again */
3898 	if (journal->j_next_full_flush) {
3899 		flush = 1;
3900 	}
3901 
3902 	/*
3903 	 ** j_must_wait means we have to flush the log blocks and the real blocks for
3904 	 ** this transaction
3905 	 */
3906 	if (journal->j_must_wait > 0) {
3907 		flush = 1;
3908 	}
3909 #ifdef REISERFS_PREALLOCATE
3910 	/* quota ops might need to nest, setup the journal_info pointer for them */
3911 	current->journal_info = th;
3912 	reiserfs_discard_all_prealloc(th);	/* it should not bring new blocks into
3913 						 * the transaction */
3914 	current->journal_info = th->t_handle_save;
3915 #endif
3916 
3917 	/* setup description block */
3918 	d_bh =
3919 	    journal_getblk(p_s_sb,
3920 			   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
3921 			   journal->j_start);
3922 	set_buffer_uptodate(d_bh);
3923 	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
3924 	memset(d_bh->b_data, 0, d_bh->b_size);
3925 	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
3926 	set_desc_trans_id(desc, journal->j_trans_id);
3927 
3928 	/* set up the commit block.  Keep it clean, and don't write it until everything else has been written */
3929 	c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
3930 			      ((journal->j_start + journal->j_len +
3931 				1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
3932 	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
3933 	memset(c_bh->b_data, 0, c_bh->b_size);
3934 	set_commit_trans_id(commit, journal->j_trans_id);
3935 	set_buffer_uptodate(c_bh);
3936 
3937 	/* init this journal list */
3938 	jl = journal->j_current_jl;
3939 
3940 	/* we lock the commit before doing anything because
3941 	 * we want to make sure nobody tries to run flush_commit_list until
3942 	 * the new transaction is fully setup, and we've already flushed the
3943 	 * the new transaction is fully set up, and we've already flushed the
3944 	 */
3945 	down(&jl->j_commit_lock);
3946 
3947 	/* save the transaction id in case we need to commit it later */
3948 	commit_trans_id = jl->j_trans_id;
3949 
3950 	atomic_set(&jl->j_older_commits_done, 0);
3951 	jl->j_trans_id = journal->j_trans_id;
3952 	jl->j_timestamp = journal->j_trans_start_time;
3953 	jl->j_commit_bh = c_bh;
3954 	jl->j_start = journal->j_start;
3955 	jl->j_len = journal->j_len;
3956 	atomic_set(&jl->j_nonzerolen, journal->j_len);
3957 	atomic_set(&jl->j_commit_left, journal->j_len + 2);
3958 	jl->j_realblock = NULL;
3959 
3960 	/* The ENTIRE FOR LOOP MUST NOT cause schedule to occur.
3961 	 ** for each real block, add it to the journal list hash and
3962 	 ** copy its block number into the real block index array in the desc or commit block
3963 	 */
3964 	trans_half = journal_trans_half(p_s_sb->s_blocksize);
3965 	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
3966 		if (buffer_journaled(cn->bh)) {
3967 			jl_cn = get_cnode(p_s_sb);
3968 			if (!jl_cn) {
3969 				reiserfs_panic(p_s_sb,
3970 					       "journal-1676, get_cnode returned NULL\n");
3971 			}
3972 			if (i == 0) {
3973 				jl->j_realblock = jl_cn;
3974 			}
3975 			jl_cn->prev = last_cn;
3976 			jl_cn->next = NULL;
3977 			if (last_cn) {
3978 				last_cn->next = jl_cn;
3979 			}
3980 			last_cn = jl_cn;
3981 			/* make sure the block we are trying to log is not a block
3982 			   of the journal or reserved area */
3983 
3984 			if (is_block_in_log_or_reserved_area
3985 			    (p_s_sb, cn->bh->b_blocknr)) {
3986 				reiserfs_panic(p_s_sb,
3987 					       "journal-2332: Trying to log block %lu, which is a log block\n",
3988 					       cn->bh->b_blocknr);
3989 			}
3990 			jl_cn->blocknr = cn->bh->b_blocknr;
3991 			jl_cn->state = 0;
3992 			jl_cn->sb = p_s_sb;
3993 			jl_cn->bh = cn->bh;
3994 			jl_cn->jlist = jl;
3995 			insert_journal_hash(journal->j_list_hash_table, jl_cn);
3996 			if (i < trans_half) {
3997 				desc->j_realblock[i] =
3998 				    cpu_to_le32(cn->bh->b_blocknr);
3999 			} else {
4000 				commit->j_realblock[i - trans_half] =
4001 				    cpu_to_le32(cn->bh->b_blocknr);
4002 			}
4003 		} else {
4004 			i--;
4005 		}
4006 	}
4007 	set_desc_trans_len(desc, journal->j_len);
4008 	set_desc_mount_id(desc, journal->j_mount_id);
4009 	set_desc_trans_id(desc, journal->j_trans_id);
4010 	set_commit_trans_len(commit, journal->j_len);
4011 
4012 	/* special check in case all buffers in the journal were marked for not logging */
4013 	if (journal->j_len == 0) {
4014 		BUG();
4015 	}
4016 
4017 	/* we're about to dirty all the log blocks, mark the description block
4018 	 * dirty now too.  Don't mark the commit block dirty until all the
4019 	 * others are on disk
4020 	 */
4021 	mark_buffer_dirty(d_bh);
4022 
4023 	/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
4024 	cur_write_start = journal->j_start;
4025 	cn = journal->j_first;
4026 	jindex = 1;		/* start at one so we don't get the desc again */
4027 	while (cn) {
4028 		clear_buffer_journal_new(cn->bh);
4029 		/* copy all the real blocks into log area.  dirty log blocks */
4030 		if (buffer_journaled(cn->bh)) {
4031 			struct buffer_head *tmp_bh;
4032 			char *addr;
4033 			struct page *page;
4034 			tmp_bh =
4035 			    journal_getblk(p_s_sb,
4036 					   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
4037 					   ((cur_write_start +
4038 					     jindex) %
4039 					    SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
4040 			set_buffer_uptodate(tmp_bh);
4041 			page = cn->bh->b_page;
4042 			addr = kmap(page);
4043 			memcpy(tmp_bh->b_data,
4044 			       addr + offset_in_page(cn->bh->b_data),
4045 			       cn->bh->b_size);
4046 			kunmap(page);
4047 			mark_buffer_dirty(tmp_bh);
4048 			jindex++;
4049 			set_buffer_journal_dirty(cn->bh);
4050 			clear_buffer_journaled(cn->bh);
4051 		} else {
4052 			/* JDirty cleared sometime during transaction.  don't log this one */
4053 			reiserfs_warning(p_s_sb,
4054 					 "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!");
4055 			brelse(cn->bh);
4056 		}
4057 		next = cn->next;
4058 		free_cnode(p_s_sb, cn);
4059 		cn = next;
4060 		cond_resched();
4061 	}
4062 
4063 	/* we are done with both the c_bh and d_bh, but
4064 	 ** c_bh must be written after all the other log blocks,
4065 	 ** so we dirty/release c_bh in flush_commit_list, with commit_left <= 1.
4066 	 */
4067 
4068 	journal->j_current_jl = alloc_journal_list(p_s_sb);
4069 
4070 	/* now it is safe to insert this transaction on the main list */
4071 	list_add_tail(&jl->j_list, &journal->j_journal_list);
4072 	list_add_tail(&jl->j_working_list, &journal->j_working_list);
4073 	journal->j_num_work_lists++;
4074 
4075 	/* reset journal values for the next transaction */
4076 	old_start = journal->j_start;
4077 	journal->j_start =
4078 	    (journal->j_start + journal->j_len +
4079 	     2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
4080 	atomic_set(&(journal->j_wcount), 0);
4081 	journal->j_bcount = 0;
4082 	journal->j_last = NULL;
4083 	journal->j_first = NULL;
4084 	journal->j_len = 0;
4085 	journal->j_trans_start_time = 0;
4086 	journal->j_trans_id++;
4087 	journal->j_current_jl->j_trans_id = journal->j_trans_id;
4088 	journal->j_must_wait = 0;
4089 	journal->j_len_alloc = 0;
4090 	journal->j_next_full_flush = 0;
4091 	journal->j_next_async_flush = 0;
4092 	init_journal_hash(p_s_sb);
4093 
4094 	// make sure reiserfs_add_jh sees the new current_jl before we
4095 	// write out the tails
4096 	smp_mb();
4097 
4098 	/* tail conversion targets have to hit the disk before we end the
4099 	 * transaction.  Otherwise a later transaction might repack the tail
4100 	 * before this transaction commits, leaving the data block unflushed and
4101 	 * clean, if we crash before the later transaction commits, the data block
4102 	 * clean; if we crash before the later transaction commits, the data block
4103 	 */
4104 	if (!list_empty(&jl->j_tail_bh_list)) {
4105 		unlock_kernel();
4106 		write_ordered_buffers(&journal->j_dirty_buffers_lock,
4107 				      journal, jl, &jl->j_tail_bh_list);
4108 		lock_kernel();
4109 	}
4110 	if (!list_empty(&jl->j_tail_bh_list))
4111 		BUG();
4112 	up(&jl->j_commit_lock);
4113 
4114 	/* honor the flush wishes from the caller.  simple commits can
4115 	 ** be done outside the journal lock; they are done below
4116 	 **
4117 	 ** if we don't flush the commit list right now, we put it into
4118 	 ** the work queue so the people waiting on the async progress work
4119 	 ** queue don't wait for this proc to flush journal lists and such.
4120 	 */
4121 	if (flush) {
4122 		flush_commit_list(p_s_sb, jl, 1);
4123 		flush_journal_list(p_s_sb, jl, 1);
4124 	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
4125 		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
4126 
4127 	/* if the next transaction has any chance of wrapping, flush
4128 	 ** transactions that might get overwritten.  If any journal lists are very
4129 	 ** old, flush them as well.
4130 	 */
4131       first_jl:
4132 	list_for_each_safe(entry, safe, &journal->j_journal_list) {
4133 		temp_jl = JOURNAL_LIST_ENTRY(entry);
4134 		if (journal->j_start <= temp_jl->j_start) {
4135 			if ((journal->j_start + journal->j_trans_max + 1) >=
4136 			    temp_jl->j_start) {
4137 				flush_used_journal_lists(p_s_sb, temp_jl);
4138 				goto first_jl;
4139 			} else if ((journal->j_start +
4140 				    journal->j_trans_max + 1) <
4141 				   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4142 				/* if we don't cross into the next transaction and we don't
4143 				 * wrap, there is no way we can overlap any later transactions;
4144 				 * break now
4145 				 */
4146 				break;
4147 			}
4148 		} else if ((journal->j_start +
4149 			    journal->j_trans_max + 1) >
4150 			   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4151 			if (((journal->j_start + journal->j_trans_max + 1) %
4152 			     SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
4153 			    temp_jl->j_start) {
4154 				flush_used_journal_lists(p_s_sb, temp_jl);
4155 				goto first_jl;
4156 			} else {
4157 				/* we don't overlap anything from our start to the end of the
4158 				 * log, and our wrapped portion doesn't overlap anything at
4159 				 * the start of the log.  We can break
4160 				 */
4161 				break;
4162 			}
4163 		}
4164 	}
4165 	flush_old_journal_lists(p_s_sb);
4166 
4167 	journal->j_current_jl->j_list_bitmap =
4168 	    get_list_bitmap(p_s_sb, journal->j_current_jl);
4169 
4170 	if (!(journal->j_current_jl->j_list_bitmap)) {
4171 		reiserfs_panic(p_s_sb,
4172 			       "journal-1996: do_journal_end, could not get a list bitmap\n");
4173 	}
4174 
4175 	atomic_set(&(journal->j_jlock), 0);
4176 	unlock_journal(p_s_sb);
4177 	/* wake up any body waiting to join. */
4178 	/* wake up anybody waiting to join. */
4179 	wake_up(&(journal->j_join_wait));
4180 
4181 	if (!flush && wait_on_commit &&
4182 	    journal_list_still_alive(p_s_sb, commit_trans_id)) {
4183 		flush_commit_list(p_s_sb, jl, 1);
4184 	}
4185       out:
4186 	reiserfs_check_lock_depth(p_s_sb, "journal end2");
4187 
4188 	memset(th, 0, sizeof(*th));
4189 	/* Re-set th->t_super, so we can properly keep track of how many
4190 	 * persistent transactions there are. We need to do this so that if this
4191 	 * call is part of a failed restart_transaction, we can free it later */
4192 	th->t_super = p_s_sb;
4193 
4194 	return journal->j_errno;
4195 }
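
/* Layout recap (illustrative summary of the function above, not new
** behavior): a transaction of length j_len occupies j_len + 2 log blocks:
** the desc block at j_start, the copied real blocks, and the commit block.
** The next transaction therefore begins at:
**
**	(journal->j_start + journal->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)
*/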
4196 
4197 static void __reiserfs_journal_abort_hard(struct super_block *sb)
4198 {
4199 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
4200 	if (test_bit(J_ABORTED, &journal->j_state))
4201 		return;
4202 
4203 	printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
4204 	       reiserfs_bdevname(sb));
4205 
4206 	sb->s_flags |= MS_RDONLY;
4207 	set_bit(J_ABORTED, &journal->j_state);
4208 
4209 #ifdef CONFIG_REISERFS_CHECK
4210 	dump_stack();
4211 #endif
4212 }
4213 
4214 static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
4215 {
4216 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
4217 	if (test_bit(J_ABORTED, &journal->j_state))
4218 		return;
4219 
4220 	if (!journal->j_errno)
4221 		journal->j_errno = errno;
4222 
4223 	__reiserfs_journal_abort_hard(sb);
4224 }
4225 
4226 void reiserfs_journal_abort(struct super_block *sb, int errno)
4227 {
4228 	return __reiserfs_journal_abort_soft(sb, errno);
4229 }
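
/* Usage sketch (hedged; the callers live outside this excerpt, and -EIO is
** only an example errno): on a fatal error the rest of reiserfs is expected
** to abort the journal, which records j_errno, sets J_ABORTED, and flips the
** mount read-only:
**
**	reiserfs_journal_abort(sb, -EIO);
*/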
4230