xref: /openbmc/linux/fs/reiserfs/journal.c (revision 9d56dd3b083a3bec56e9da35ce07baca81030b03)
1 /*
2 ** Write ahead logging implementation copyright Chris Mason 2000
3 **
4 ** The background commits make this code very interrelated, and
5 ** overly complex.  I need to rethink things a bit.... The major players:
6 **
7 ** journal_begin -- call with the number of blocks you expect to log.
8 **                  If the current transaction is too
9 ** 		    old, it will block until the current transaction is
10 ** 		    finished, and then start a new one.
11 **		    Usually, your transaction will get joined in with
12 **                  previous ones for speed.
13 **
14 ** journal_join  -- same as journal_begin, but won't block on the current
15 **                  transaction regardless of age.  Don't ever call
16 **                  this.  Ever.  There are only two places it should be
17 **                  called from, and they are both inside this file.
18 **
19 ** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
20 **                       that might make them get sent to disk
21 **                       and then marks them BH_JDirty.  Puts the buffer head
22 **                       into the current transaction hash.
23 **
24 ** journal_end -- if the current transaction is batchable, it does nothing
25 **                   otherwise, it could do an async/synchronous commit, or
26 **                   a full flush of all log and real blocks in the
27 **                   transaction.
28 **
29 ** flush_old_commits -- if the current transaction is too old, it is ended and
30 **                      commit blocks are sent to disk.  Forces commit blocks
31 **                      to disk for all backgrounded commits that have been
32 **                      around too long.
33 **		     -- Note, if you call this as an immediate flush from
34 **		        within kupdate, it will ignore the immediate flag
35 */
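
/*
** A typical caller sequence, as a rough sketch (not a verbatim caller;
** error handling is omitted and the nblocks value is illustrative):
**
**	struct reiserfs_transaction_handle th;
**
**	if (!journal_begin(&th, sb, JOURNAL_PER_BALANCE_CNT)) {
**		reiserfs_prepare_for_journal(sb, bh, 1);
**		... modify bh->b_data ...
**		journal_mark_dirty(&th, sb, bh);
**		journal_end(&th, sb, JOURNAL_PER_BALANCE_CNT);
**	}
*/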
36 
37 #include <linux/time.h>
38 #include <linux/semaphore.h>
39 #include <linux/vmalloc.h>
40 #include <linux/reiserfs_fs.h>
41 #include <linux/kernel.h>
42 #include <linux/errno.h>
43 #include <linux/fcntl.h>
44 #include <linux/stat.h>
45 #include <linux/string.h>
46 #include <linux/smp_lock.h>
47 #include <linux/buffer_head.h>
48 #include <linux/workqueue.h>
49 #include <linux/writeback.h>
50 #include <linux/blkdev.h>
51 #include <linux/backing-dev.h>
52 #include <linux/uaccess.h>
53 
54 #include <asm/system.h>
55 
56 /* gets a struct reiserfs_journal_list * from a list head */
57 #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
58                                j_list))
59 #define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
60                                j_working_list))
61 
62 /* the number of mounted filesystems.  This is used to decide when to
63 ** start and kill the commit workqueue
64 */
65 static int reiserfs_mounted_fs_count;
66 
67 static struct workqueue_struct *commit_wq;
68 
69 #define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
70 				   structs at 4k */
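/* Where 1018 comes from (a sketch of the arithmetic, assuming the 4k
 * on-disk layout in reiserfs_fs.h): the desc block spends 12 bytes on
 * its three __le32 header fields and reserves the last 12 bytes of the
 * block for its magic, so (4096 - 24) / 4 = 1018 __le32 block-number
 * slots remain; the commit block has at least that much room as well.
 */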
71 #define BUFNR 64		/* read ahead */
72 
73 /* cnode state bits.  Move these into reiserfs_fs.h */
74 
75 #define BLOCK_FREED 2		/* this block was freed, and can't be written.  */
76 #define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */
77 
78 #define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
79 #define BLOCK_DIRTIED 5
80 
81 /* journal list state bits */
82 #define LIST_TOUCHED 1
83 #define LIST_DIRTY   2
84 #define LIST_COMMIT_PENDING  4	/* someone will commit this list */
85 
86 /* flags for do_journal_end */
87 #define FLUSH_ALL   1		/* flush commit and real blocks */
88 #define COMMIT_NOW  2		/* end and commit this transaction */
89 #define WAIT        4		/* wait for the log blocks to hit the disk */
90 
91 static int do_journal_end(struct reiserfs_transaction_handle *,
92 			  struct super_block *, unsigned long nblocks,
93 			  int flags);
94 static int flush_journal_list(struct super_block *s,
95 			      struct reiserfs_journal_list *jl, int flushall);
96 static int flush_commit_list(struct super_block *s,
97 			     struct reiserfs_journal_list *jl, int flushall);
98 static int can_dirty(struct reiserfs_journal_cnode *cn);
99 static int journal_join(struct reiserfs_transaction_handle *th,
100 			struct super_block *sb, unsigned long nblocks);
101 static int release_journal_dev(struct super_block *super,
102 			       struct reiserfs_journal *journal);
103 static int dirty_one_transaction(struct super_block *s,
104 				 struct reiserfs_journal_list *jl);
105 static void flush_async_commits(struct work_struct *work);
106 static void queue_log_writer(struct super_block *s);
107 
108 /* values for join in do_journal_begin_r */
109 enum {
110 	JBEGIN_REG = 0,		/* regular journal begin */
111 	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
112 	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
113 };
114 
115 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
116 			      struct super_block *sb,
117 			      unsigned long nblocks, int join);
118 
119 static void init_journal_hash(struct super_block *sb)
120 {
121 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
122 	memset(journal->j_hash_table, 0,
123 	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
124 }
125 
126 /*
127 ** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
128 ** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
129 ** more details.
130 */
131 static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
132 {
133 	if (bh) {
134 		clear_buffer_dirty(bh);
135 		clear_buffer_journal_test(bh);
136 	}
137 	return 0;
138 }
139 
140 static void disable_barrier(struct super_block *s)
141 {
142 	REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
143 	printk(KERN_WARNING "reiserfs: disabling flush barriers on %s\n",
144 	       reiserfs_bdevname(s));
145 }
146 
147 static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
148 							 *sb)
149 {
150 	struct reiserfs_bitmap_node *bn;
151 	static int id;
152 
153 	bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
154 	if (!bn) {
155 		return NULL;
156 	}
157 	bn->data = kzalloc(sb->s_blocksize, GFP_NOFS);
158 	if (!bn->data) {
159 		kfree(bn);
160 		return NULL;
161 	}
162 	bn->id = id++;
163 	INIT_LIST_HEAD(&bn->list);
164 	return bn;
165 }
166 
167 static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb)
168 {
169 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
170 	struct reiserfs_bitmap_node *bn = NULL;
171 	struct list_head *entry = journal->j_bitmap_nodes.next;
172 
173 	journal->j_used_bitmap_nodes++;
174       repeat:
175 
176 	if (entry != &journal->j_bitmap_nodes) {
177 		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
178 		list_del(entry);
179 		memset(bn->data, 0, sb->s_blocksize);
180 		journal->j_free_bitmap_nodes--;
181 		return bn;
182 	}
183 	bn = allocate_bitmap_node(sb);
184 	if (!bn) {
185 		yield();
186 		goto repeat;
187 	}
188 	return bn;
189 }
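/* Note that get_bitmap_node() never fails: if the free list is empty
 * and kmalloc keeps returning NULL, it just yield()s and retries, so
 * callers never have to handle an allocation error here.
 */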
190 static inline void free_bitmap_node(struct super_block *sb,
191 				    struct reiserfs_bitmap_node *bn)
192 {
193 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
194 	journal->j_used_bitmap_nodes--;
195 	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
196 		kfree(bn->data);
197 		kfree(bn);
198 	} else {
199 		list_add(&bn->list, &journal->j_bitmap_nodes);
200 		journal->j_free_bitmap_nodes++;
201 	}
202 }
203 
204 static void allocate_bitmap_nodes(struct super_block *sb)
205 {
206 	int i;
207 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
208 	struct reiserfs_bitmap_node *bn = NULL;
209 	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
210 		bn = allocate_bitmap_node(sb);
211 		if (bn) {
212 			list_add(&bn->list, &journal->j_bitmap_nodes);
213 			journal->j_free_bitmap_nodes++;
214 		} else {
215 			break;	/* this is ok, we'll try again when more are needed */
216 		}
217 	}
218 }
219 
220 static int set_bit_in_list_bitmap(struct super_block *sb,
221 				  b_blocknr_t block,
222 				  struct reiserfs_list_bitmap *jb)
223 {
224 	unsigned int bmap_nr = block / (sb->s_blocksize << 3);
225 	unsigned int bit_nr = block % (sb->s_blocksize << 3);
226 
227 	if (!jb->bitmaps[bmap_nr]) {
228 		jb->bitmaps[bmap_nr] = get_bitmap_node(sb);
229 	}
230 	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
231 	return 0;
232 }
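
/* Worked example with hypothetical numbers: for a 4k block size there
 * are 4096 << 3 == 32768 bits per bitmap node, so block 40000 maps to
 * bmap_nr 40000 / 32768 == 1 and bit_nr 40000 % 32768 == 7232.
 */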
233 
234 static void cleanup_bitmap_list(struct super_block *sb,
235 				struct reiserfs_list_bitmap *jb)
236 {
237 	int i;
238 	if (jb->bitmaps == NULL)
239 		return;
240 
241 	for (i = 0; i < reiserfs_bmap_count(sb); i++) {
242 		if (jb->bitmaps[i]) {
243 			free_bitmap_node(sb, jb->bitmaps[i]);
244 			jb->bitmaps[i] = NULL;
245 		}
246 	}
247 }
248 
249 /*
250 ** only call this on FS unmount.
251 */
252 static int free_list_bitmaps(struct super_block *sb,
253 			     struct reiserfs_list_bitmap *jb_array)
254 {
255 	int i;
256 	struct reiserfs_list_bitmap *jb;
257 	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
258 		jb = jb_array + i;
259 		jb->journal_list = NULL;
260 		cleanup_bitmap_list(sb, jb);
261 		vfree(jb->bitmaps);
262 		jb->bitmaps = NULL;
263 	}
264 	return 0;
265 }
266 
267 static int free_bitmap_nodes(struct super_block *sb)
268 {
269 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
270 	struct list_head *next = journal->j_bitmap_nodes.next;
271 	struct reiserfs_bitmap_node *bn;
272 
273 	while (next != &journal->j_bitmap_nodes) {
274 		bn = list_entry(next, struct reiserfs_bitmap_node, list);
275 		list_del(next);
276 		kfree(bn->data);
277 		kfree(bn);
278 		next = journal->j_bitmap_nodes.next;
279 		journal->j_free_bitmap_nodes--;
280 	}
281 
282 	return 0;
283 }
284 
285 /*
286 ** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
287 ** jb_array is the array to be filled in.
288 */
289 int reiserfs_allocate_list_bitmaps(struct super_block *sb,
290 				   struct reiserfs_list_bitmap *jb_array,
291 				   unsigned int bmap_nr)
292 {
293 	int i;
294 	int failed = 0;
295 	struct reiserfs_list_bitmap *jb;
296 	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);
297 
298 	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
299 		jb = jb_array + i;
300 		jb->journal_list = NULL;
301 		jb->bitmaps = vmalloc(mem);
302 		if (!jb->bitmaps) {
303 			reiserfs_warning(sb, "clm-2000", "unable to "
304 					 "allocate bitmaps for journal lists");
305 			failed = 1;
306 			break;
307 		}
308 		memset(jb->bitmaps, 0, mem);
309 	}
310 	if (failed) {
311 		free_list_bitmaps(sb, jb_array);
312 		return -1;
313 	}
314 	return 0;
315 }
316 
317 /*
318 ** find an available list bitmap.  If you can't find one, flush a commit list
319 ** and try again
320 */
321 static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
322 						    struct reiserfs_journal_list
323 						    *jl)
324 {
325 	int i, j;
326 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
327 	struct reiserfs_list_bitmap *jb = NULL;
328 
329 	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
330 		i = journal->j_list_bitmap_index;
331 		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
332 		jb = journal->j_list_bitmap + i;
333 		if (journal->j_list_bitmap[i].journal_list) {
334 			flush_commit_list(sb,
335 					  journal->j_list_bitmap[i].
336 					  journal_list, 1);
337 			if (!journal->j_list_bitmap[i].journal_list) {
338 				break;
339 			}
340 		} else {
341 			break;
342 		}
343 	}
344 	if (jb->journal_list) {	/* double check to make sure it flushed correctly */
345 		return NULL;
346 	}
347 	jb->journal_list = jl;
348 	return jb;
349 }
350 
351 /*
352 ** allocates a new chunk of X nodes, and links them all together as a list.
353 ** Uses the cnode->next and cnode->prev pointers.
354 ** Returns NULL on failure.
355 */
356 static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
357 {
358 	struct reiserfs_journal_cnode *head;
359 	int i;
360 	if (num_cnodes <= 0) {
361 		return NULL;
362 	}
363 	head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
364 	if (!head) {
365 		return NULL;
366 	}
367 	memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
368 	head[0].prev = NULL;
369 	head[0].next = head + 1;
370 	for (i = 1; i < num_cnodes; i++) {
371 		head[i].prev = head + (i - 1);
372 		head[i].next = head + (i + 1);	/* the last one is overwritten after the loop */
373 	}
374 	head[num_cnodes - 1].next = NULL;
375 	return head;
376 }
377 
378 /*
379 ** pulls a cnode off the free list, or returns NULL on failure
380 */
381 static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
382 {
383 	struct reiserfs_journal_cnode *cn;
384 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
385 
386 	reiserfs_check_lock_depth(sb, "get_cnode");
387 
388 	if (journal->j_cnode_free <= 0) {
389 		return NULL;
390 	}
391 	journal->j_cnode_used++;
392 	journal->j_cnode_free--;
393 	cn = journal->j_cnode_free_list;
394 	if (!cn) {
395 		return cn;
396 	}
397 	if (cn->next) {
398 		cn->next->prev = NULL;
399 	}
400 	journal->j_cnode_free_list = cn->next;
401 	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
402 	return cn;
403 }
404 
405 /*
406 ** returns a cnode to the free list
407 */
408 static void free_cnode(struct super_block *sb,
409 		       struct reiserfs_journal_cnode *cn)
410 {
411 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
412 
413 	reiserfs_check_lock_depth(sb, "free_cnode");
414 
415 	journal->j_cnode_used--;
416 	journal->j_cnode_free++;
417 	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
418 	cn->next = journal->j_cnode_free_list;
419 	if (journal->j_cnode_free_list) {
420 		journal->j_cnode_free_list->prev = cn;
421 	}
422 	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
423 	journal->j_cnode_free_list = cn;
424 }
425 
426 static void clear_prepared_bits(struct buffer_head *bh)
427 {
428 	clear_buffer_journal_prepared(bh);
429 	clear_buffer_journal_restore_dirty(bh);
430 }
431 
432 /* return a cnode with same dev, block number and size in table, or null if not found */
433 static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
434 								  super_block
435 								  *sb,
436 								  struct
437 								  reiserfs_journal_cnode
438 								  **table,
439 								  long bl)
440 {
441 	struct reiserfs_journal_cnode *cn;
442 	cn = journal_hash(table, sb, bl);
443 	while (cn) {
444 		if (cn->blocknr == bl && cn->sb == sb)
445 			return cn;
446 		cn = cn->hnext;
447 	}
448 	return NULL;
449 }
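
/* journal_hash() (a macro from reiserfs_fs.h) picks a bucket from a
 * hash of the (sb, block) pair; collisions are chained through
 * cn->hnext/cn->hprev, which is why the lookup above walks hnext until
 * it finds an exact (blocknr, sb) match.
 */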
450 
451 /*
452 ** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
453 ** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
454 ** being overwritten by a replay after crashing.
455 **
456 ** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
457 ** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
458 ** sure you never write the block without logging it.
459 **
460 ** next_zero_bit is a suggestion about the next block to try for find_forward.
461 ** when bl is rejected because it is set in a journal list bitmap, we search
462 ** for the next zero bit in the bitmap that rejected bl.  Then, we return that
463 ** through next_zero_bit for find_forward to try.
464 **
465 ** Just because we return something in next_zero_bit does not mean we won't
466 ** reject it on the next call to reiserfs_in_journal
467 **
468 */
469 int reiserfs_in_journal(struct super_block *sb,
470 			unsigned int bmap_nr, int bit_nr, int search_all,
471 			b_blocknr_t * next_zero_bit)
472 {
473 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
474 	struct reiserfs_journal_cnode *cn;
475 	struct reiserfs_list_bitmap *jb;
476 	int i;
477 	unsigned long bl;
478 
479 	*next_zero_bit = 0;	/* always start this at zero. */
480 
481 	PROC_INFO_INC(sb, journal.in_journal);
482 	/* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
483 	 ** If we crash before the transaction that freed it commits, this transaction won't
484 	 ** have committed either, and the block will never be written
485 	 */
486 	if (search_all) {
487 		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
488 			PROC_INFO_INC(sb, journal.in_journal_bitmap);
489 			jb = journal->j_list_bitmap + i;
490 			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
491 			    test_bit(bit_nr,
492 				     (unsigned long *)jb->bitmaps[bmap_nr]->
493 				     data)) {
494 				*next_zero_bit =
495 				    find_next_zero_bit((unsigned long *)
496 						       (jb->bitmaps[bmap_nr]->
497 							data),
498 						       sb->s_blocksize << 3,
499 						       bit_nr + 1);
500 				return 1;
501 			}
502 		}
503 	}
504 
505 	bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
506 	/* is it in any old transactions? */
507 	if (search_all
508 	    && (cn =
509 		get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
510 		return 1;
511 	}
512 
513 	/* is it in the current transaction?  This should never happen */
514 	if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
515 		BUG();
516 		return 1;
517 	}
518 
519 	PROC_INFO_INC(sb, journal.in_journal_reusable);
520 	/* safe for reuse */
521 	return 0;
522 }
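
/* An illustrative (not verbatim) sketch of how the block allocator in
 * fs/reiserfs/bitmap.c consumes this while hunting for a free block:
 *
 *	b_blocknr_t next;
 *
 *	// still pinned by the log?  skip ahead and keep scanning
 *	if (reiserfs_in_journal(s, bmap_n, i, 1, &next)) {
 *		i = next;
 *		continue;
 *	}
 */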
523 
524 /* insert cn into table
525 */
526 static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
527 				       struct reiserfs_journal_cnode *cn)
528 {
529 	struct reiserfs_journal_cnode *cn_orig;
530 
531 	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
532 	cn->hnext = cn_orig;
533 	cn->hprev = NULL;
534 	if (cn_orig) {
535 		cn_orig->hprev = cn;
536 	}
537 	journal_hash(table, cn->sb, cn->blocknr) = cn;
538 }
539 
540 /* lock the current transaction */
541 static inline void lock_journal(struct super_block *sb)
542 {
543 	PROC_INFO_INC(sb, journal.lock_journal);
544 
545 	reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
546 }
547 
548 /* unlock the current transaction */
549 static inline void unlock_journal(struct super_block *sb)
550 {
551 	mutex_unlock(&SB_JOURNAL(sb)->j_mutex);
552 }
553 
554 static inline void get_journal_list(struct reiserfs_journal_list *jl)
555 {
556 	jl->j_refcount++;
557 }
558 
559 static inline void put_journal_list(struct super_block *s,
560 				    struct reiserfs_journal_list *jl)
561 {
562 	if (jl->j_refcount < 1) {
563 		reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d",
564 			       jl->j_trans_id, jl->j_refcount);
565 	}
566 	if (--jl->j_refcount == 0)
567 		kfree(jl);
568 }
569 
570 /*
571 ** This used to be much more involved, and I'm keeping it just in case things get ugly again.
572 ** It gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
573 ** transaction.
574 */
575 static void cleanup_freed_for_journal_list(struct super_block *sb,
576 					   struct reiserfs_journal_list *jl)
577 {
578 
579 	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
580 	if (jb) {
581 		cleanup_bitmap_list(sb, jb);
582 	}
583 	jl->j_list_bitmap->journal_list = NULL;
584 	jl->j_list_bitmap = NULL;
585 }
586 
587 static int journal_list_still_alive(struct super_block *s,
588 				    unsigned int trans_id)
589 {
590 	struct reiserfs_journal *journal = SB_JOURNAL(s);
591 	struct list_head *entry = &journal->j_journal_list;
592 	struct reiserfs_journal_list *jl;
593 
594 	if (!list_empty(entry)) {
595 		jl = JOURNAL_LIST_ENTRY(entry->next);
596 		if (jl->j_trans_id <= trans_id) {
597 			return 1;
598 		}
599 	}
600 	return 0;
601 }
602 
603 /*
604  * If page->mapping was null, we failed to truncate this page for
605  * some reason.  Most likely because it was truncated after being
606  * logged via data=journal.
607  *
608  * This does a check to see if the buffer belongs to one of these
609  * lost pages before doing the final put_bh.  If page->mapping was
610  * null, it tries to free buffers on the page, which should make the
611  * final page_cache_release drop the page from the lru.
612  */
613 static void release_buffer_page(struct buffer_head *bh)
614 {
615 	struct page *page = bh->b_page;
616 	if (!page->mapping && trylock_page(page)) {
617 		page_cache_get(page);
618 		put_bh(bh);
619 		if (!page->mapping)
620 			try_to_free_buffers(page);
621 		unlock_page(page);
622 		page_cache_release(page);
623 	} else {
624 		put_bh(bh);
625 	}
626 }
627 
628 static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
629 {
630 	char b[BDEVNAME_SIZE];
631 
632 	if (buffer_journaled(bh)) {
633 		reiserfs_warning(NULL, "clm-2084",
634 				 "pinned buffer %lu:%s sent to disk",
635 				 bh->b_blocknr, bdevname(bh->b_bdev, b));
636 	}
637 	if (uptodate)
638 		set_buffer_uptodate(bh);
639 	else
640 		clear_buffer_uptodate(bh);
641 
642 	unlock_buffer(bh);
643 	release_buffer_page(bh);
644 }
645 
646 static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
647 {
648 	if (uptodate)
649 		set_buffer_uptodate(bh);
650 	else
651 		clear_buffer_uptodate(bh);
652 	unlock_buffer(bh);
653 	put_bh(bh);
654 }
655 
656 static void submit_logged_buffer(struct buffer_head *bh)
657 {
658 	get_bh(bh);
659 	bh->b_end_io = reiserfs_end_buffer_io_sync;
660 	clear_buffer_journal_new(bh);
661 	clear_buffer_dirty(bh);
662 	if (!test_clear_buffer_journal_test(bh))
663 		BUG();
664 	if (!buffer_uptodate(bh))
665 		BUG();
666 	submit_bh(WRITE, bh);
667 }
668 
669 static void submit_ordered_buffer(struct buffer_head *bh)
670 {
671 	get_bh(bh);
672 	bh->b_end_io = reiserfs_end_ordered_io;
673 	clear_buffer_dirty(bh);
674 	if (!buffer_uptodate(bh))
675 		BUG();
676 	submit_bh(WRITE, bh);
677 }
678 
679 static int submit_barrier_buffer(struct buffer_head *bh)
680 {
681 	get_bh(bh);
682 	bh->b_end_io = reiserfs_end_ordered_io;
683 	clear_buffer_dirty(bh);
684 	if (!buffer_uptodate(bh))
685 		BUG();
686 	return submit_bh(WRITE_BARRIER, bh);
687 }
688 
689 static void check_barrier_completion(struct super_block *s,
690 				     struct buffer_head *bh)
691 {
692 	if (buffer_eopnotsupp(bh)) {
693 		clear_buffer_eopnotsupp(bh);
694 		disable_barrier(s);
695 		set_buffer_uptodate(bh);
696 		set_buffer_dirty(bh);
697 		reiserfs_write_unlock(s);
698 		sync_dirty_buffer(bh);
699 		reiserfs_write_lock(s);
700 	}
701 }
702 
703 #define CHUNK_SIZE 32
704 struct buffer_chunk {
705 	struct buffer_head *bh[CHUNK_SIZE];
706 	int nr;
707 };
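
/* A buffer_chunk batches up to CHUNK_SIZE buffer heads so the block
 * layer sees bursts of submissions instead of single buffers; the
 * write_chunk()/write_ordered_chunk() helpers below drain and reset it.
 */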
708 
709 static void write_chunk(struct buffer_chunk *chunk)
710 {
711 	int i;
712 	get_fs_excl();
713 	for (i = 0; i < chunk->nr; i++) {
714 		submit_logged_buffer(chunk->bh[i]);
715 	}
716 	chunk->nr = 0;
717 	put_fs_excl();
718 }
719 
720 static void write_ordered_chunk(struct buffer_chunk *chunk)
721 {
722 	int i;
723 	get_fs_excl();
724 	for (i = 0; i < chunk->nr; i++) {
725 		submit_ordered_buffer(chunk->bh[i]);
726 	}
727 	chunk->nr = 0;
728 	put_fs_excl();
729 }
730 
731 static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
732 			spinlock_t * lock, void (fn) (struct buffer_chunk *))
733 {
734 	int ret = 0;
735 	BUG_ON(chunk->nr >= CHUNK_SIZE);
736 	chunk->bh[chunk->nr++] = bh;
737 	if (chunk->nr >= CHUNK_SIZE) {
738 		ret = 1;
739 		if (lock)
740 			spin_unlock(lock);
741 		fn(chunk);
742 		if (lock)
743 			spin_lock(lock);
744 	}
745 	return ret;
746 }
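
/* Note the locking dance in add_to_chunk(): when the chunk fills, the
 * caller's spinlock is dropped around fn(chunk), because submitting
 * buffers can sleep.  A return of 1 therefore also means the lock was
 * released and reacquired, so any cached state may be stale.
 */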
747 
748 static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
749 static struct reiserfs_jh *alloc_jh(void)
750 {
751 	struct reiserfs_jh *jh;
752 	while (1) {
753 		jh = kmalloc(sizeof(*jh), GFP_NOFS);
754 		if (jh) {
755 			atomic_inc(&nr_reiserfs_jh);
756 			return jh;
757 		}
758 		yield();
759 	}
760 }
761 
762 /*
763  * we want to free the jh when the buffer has been written
764  * and waited on
765  */
766 void reiserfs_free_jh(struct buffer_head *bh)
767 {
768 	struct reiserfs_jh *jh;
769 
770 	jh = bh->b_private;
771 	if (jh) {
772 		bh->b_private = NULL;
773 		jh->bh = NULL;
774 		list_del_init(&jh->list);
775 		kfree(jh);
776 		if (atomic_read(&nr_reiserfs_jh) <= 0)
777 			BUG();
778 		atomic_dec(&nr_reiserfs_jh);
779 		put_bh(bh);
780 	}
781 }
782 
783 static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
784 			   int tail)
785 {
786 	struct reiserfs_jh *jh;
787 
788 	if (bh->b_private) {
789 		spin_lock(&j->j_dirty_buffers_lock);
790 		if (!bh->b_private) {
791 			spin_unlock(&j->j_dirty_buffers_lock);
792 			goto no_jh;
793 		}
794 		jh = bh->b_private;
795 		list_del_init(&jh->list);
796 	} else {
797 	      no_jh:
798 		get_bh(bh);
799 		jh = alloc_jh();
800 		spin_lock(&j->j_dirty_buffers_lock);
801 		/* buffer must be locked for __add_jh, should be able to have
802 		 * two adds at the same time
803 		 */
804 		BUG_ON(bh->b_private);
805 		jh->bh = bh;
806 		bh->b_private = jh;
807 	}
808 	jh->jl = j->j_current_jl;
809 	if (tail)
810 		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
811 	else {
812 		list_add_tail(&jh->list, &jh->jl->j_bh_list);
813 	}
814 	spin_unlock(&j->j_dirty_buffers_lock);
815 	return 0;
816 }
817 
818 int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
819 {
820 	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
821 }
822 int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
823 {
824 	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
825 }
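
/* These two wrappers are how ordered data and packed tails hook into
 * the journal: buffers queued on j_bh_list are written back by
 * write_ordered_buffers() before the commit block is allowed to hit
 * disk (see flush_commit_list), while j_tail_bh_list is flushed
 * separately when the transaction ends.
 */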
826 
827 #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
828 static int write_ordered_buffers(spinlock_t * lock,
829 				 struct reiserfs_journal *j,
830 				 struct reiserfs_journal_list *jl,
831 				 struct list_head *list)
832 {
833 	struct buffer_head *bh;
834 	struct reiserfs_jh *jh;
835 	int ret = j->j_errno;
836 	struct buffer_chunk chunk;
837 	struct list_head tmp;
838 	INIT_LIST_HEAD(&tmp);
839 
840 	chunk.nr = 0;
841 	spin_lock(lock);
842 	while (!list_empty(list)) {
843 		jh = JH_ENTRY(list->next);
844 		bh = jh->bh;
845 		get_bh(bh);
846 		if (!trylock_buffer(bh)) {
847 			if (!buffer_dirty(bh)) {
848 				list_move(&jh->list, &tmp);
849 				goto loop_next;
850 			}
851 			spin_unlock(lock);
852 			if (chunk.nr)
853 				write_ordered_chunk(&chunk);
854 			wait_on_buffer(bh);
855 			cond_resched();
856 			spin_lock(lock);
857 			goto loop_next;
858 		}
859 		/* in theory, dirty non-uptodate buffers should never get here,
860 		 * but the upper layer io error paths still have a few quirks.
861 		 * Handle them here as gracefully as we can
862 		 */
863 		if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
864 			clear_buffer_dirty(bh);
865 			ret = -EIO;
866 		}
867 		if (buffer_dirty(bh)) {
868 			list_move(&jh->list, &tmp);
869 			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
870 		} else {
871 			reiserfs_free_jh(bh);
872 			unlock_buffer(bh);
873 		}
874 	      loop_next:
875 		put_bh(bh);
876 		cond_resched_lock(lock);
877 	}
878 	if (chunk.nr) {
879 		spin_unlock(lock);
880 		write_ordered_chunk(&chunk);
881 		spin_lock(lock);
882 	}
883 	while (!list_empty(&tmp)) {
884 		jh = JH_ENTRY(tmp.prev);
885 		bh = jh->bh;
886 		get_bh(bh);
887 		reiserfs_free_jh(bh);
888 
889 		if (buffer_locked(bh)) {
890 			spin_unlock(lock);
891 			wait_on_buffer(bh);
892 			spin_lock(lock);
893 		}
894 		if (!buffer_uptodate(bh)) {
895 			ret = -EIO;
896 		}
897 		/* ugly interaction with invalidatepage here.
898 		 * reiserfs_invalidate_page will pin any buffer that has a valid
899 		 * journal head from an older transaction.  If someone else sets
900 		 * our buffer dirty after we write it in the first loop, and
901 		 * then someone truncates the page away, nobody will ever write
902 		 * the buffer. We're safe if we write the page one last time
903 		 * after freeing the journal header.
904 		 */
905 		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
906 			spin_unlock(lock);
907 			ll_rw_block(WRITE, 1, &bh);
908 			spin_lock(lock);
909 		}
910 		put_bh(bh);
911 		cond_resched_lock(lock);
912 	}
913 	spin_unlock(lock);
914 	return ret;
915 }
916 
917 static int flush_older_commits(struct super_block *s,
918 			       struct reiserfs_journal_list *jl)
919 {
920 	struct reiserfs_journal *journal = SB_JOURNAL(s);
921 	struct reiserfs_journal_list *other_jl;
922 	struct reiserfs_journal_list *first_jl;
923 	struct list_head *entry;
924 	unsigned int trans_id = jl->j_trans_id;
925 	unsigned int other_trans_id;
926 	unsigned int first_trans_id;
927 
928       find_first:
929 	/*
930 	 * first we walk backwards to find the oldest uncommitted transaction
931 	 */
932 	first_jl = jl;
933 	entry = jl->j_list.prev;
934 	while (1) {
935 		other_jl = JOURNAL_LIST_ENTRY(entry);
936 		if (entry == &journal->j_journal_list ||
937 		    atomic_read(&other_jl->j_older_commits_done))
938 			break;
939 
940 		first_jl = other_jl;
941 		entry = other_jl->j_list.prev;
942 	}
943 
944 	/* if we didn't find any older uncommitted transactions, return now */
945 	if (first_jl == jl) {
946 		return 0;
947 	}
948 
949 	first_trans_id = first_jl->j_trans_id;
950 
951 	entry = &first_jl->j_list;
952 	while (1) {
953 		other_jl = JOURNAL_LIST_ENTRY(entry);
954 		other_trans_id = other_jl->j_trans_id;
955 
956 		if (other_trans_id < trans_id) {
957 			if (atomic_read(&other_jl->j_commit_left) != 0) {
958 				flush_commit_list(s, other_jl, 0);
959 
960 				/* list we were called with is gone, return */
961 				if (!journal_list_still_alive(s, trans_id))
962 					return 1;
963 
964 				/* the one we just flushed is gone, this means all
965 				 * older lists are also gone, so first_jl is no longer
966 				 * valid either.  Go back to the beginning.
967 				 */
968 				if (!journal_list_still_alive
969 				    (s, other_trans_id)) {
970 					goto find_first;
971 				}
972 			}
973 			entry = entry->next;
974 			if (entry == &journal->j_journal_list)
975 				return 0;
976 		} else {
977 			return 0;
978 		}
979 	}
980 	return 0;
981 }
982 
983 static int reiserfs_async_progress_wait(struct super_block *s)
984 {
985 	DEFINE_WAIT(wait);
986 	struct reiserfs_journal *j = SB_JOURNAL(s);
987 
988 	if (atomic_read(&j->j_async_throttle)) {
989 		reiserfs_write_unlock(s);
990 		congestion_wait(BLK_RW_ASYNC, HZ / 10);
991 		reiserfs_write_lock(s);
992 	}
993 
994 	return 0;
995 }
996 
997 /*
998 ** if this journal list still has commit blocks unflushed, send them to disk.
999 **
1000 ** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
1001 ** Before the commit block can be written, every other log block must be safely on disk
1002 **
1003 */
1004 static int flush_commit_list(struct super_block *s,
1005 			     struct reiserfs_journal_list *jl, int flushall)
1006 {
1007 	int i;
1008 	b_blocknr_t bn;
1009 	struct buffer_head *tbh = NULL;
1010 	unsigned int trans_id = jl->j_trans_id;
1011 	struct reiserfs_journal *journal = SB_JOURNAL(s);
1012 	int barrier = 0;
1013 	int retval = 0;
1014 	int write_len;
1015 
1016 	reiserfs_check_lock_depth(s, "flush_commit_list");
1017 
1018 	if (atomic_read(&jl->j_older_commits_done)) {
1019 		return 0;
1020 	}
1021 
1022 	get_fs_excl();
1023 
1024 	/* before we can put our commit blocks on disk, we have to make sure everyone older than
1025 	 ** us is on disk too
1026 	 */
1027 	BUG_ON(jl->j_len <= 0);
1028 	BUG_ON(trans_id == journal->j_trans_id);
1029 
1030 	get_journal_list(jl);
1031 	if (flushall) {
1032 		if (flush_older_commits(s, jl) == 1) {
1033 			/* list disappeared during flush_older_commits.  return */
1034 			goto put_jl;
1035 		}
1036 	}
1037 
1038 	/* make sure nobody is trying to flush this one at the same time */
1039 	reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);
1040 
1041 	if (!journal_list_still_alive(s, trans_id)) {
1042 		mutex_unlock(&jl->j_commit_mutex);
1043 		goto put_jl;
1044 	}
1045 	BUG_ON(jl->j_trans_id == 0);
1046 
1047 	/* this commit is done, exit */
1048 	if (atomic_read(&(jl->j_commit_left)) <= 0) {
1049 		if (flushall) {
1050 			atomic_set(&(jl->j_older_commits_done), 1);
1051 		}
1052 		mutex_unlock(&jl->j_commit_mutex);
1053 		goto put_jl;
1054 	}
1055 
1056 	if (!list_empty(&jl->j_bh_list)) {
1057 		int ret;
1058 
1059 		/*
1060 		 * We might sleep in numerous places inside
1061 		 * write_ordered_buffers. Relax the write lock.
1062 		 */
1063 		reiserfs_write_unlock(s);
1064 		ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
1065 					    journal, jl, &jl->j_bh_list);
1066 		if (ret < 0 && retval == 0)
1067 			retval = ret;
1068 		reiserfs_write_lock(s);
1069 	}
1070 	BUG_ON(!list_empty(&jl->j_bh_list));
1071 	/*
1072 	 * for the description block and all the log blocks, submit any buffers
1073 	 * that haven't already reached the disk.  Try to write at least 256
1074 	 * log blocks. later on, we will only wait on blocks that correspond
1075 	 * to this transaction, but while we're unplugging we might as well
1076 	 * get a chunk of data on there.
1077 	 */
1078 	atomic_inc(&journal->j_async_throttle);
1079 	write_len = jl->j_len + 1;
1080 	if (write_len < 256)
1081 		write_len = 256;
1082 	for (i = 0; i < write_len; i++) {
1083 		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
1084 		    SB_ONDISK_JOURNAL_SIZE(s);
1085 		tbh = journal_find_get_block(s, bn);
1086 		if (tbh) {
1087 			if (buffer_dirty(tbh)) {
1088 				reiserfs_write_unlock(s);
1089 				ll_rw_block(WRITE, 1, &tbh);
1090 				reiserfs_write_lock(s);
1091 			}
1092 			put_bh(tbh);
1093 		}
1094 	}
1095 	atomic_dec(&journal->j_async_throttle);
1096 
1097 	/* wait on everything written so far before writing the commit
1098 	 * if we are in barrier mode, send the commit down now
1099 	 */
1100 	barrier = reiserfs_barrier_flush(s);
1101 
1102 	/* we're skipping the commit if there's an error */
1103 	if (retval || reiserfs_is_journal_aborted(journal))
1104 		barrier = 0;
1105 	if (barrier) {
1106 		int ret;
1107 		lock_buffer(jl->j_commit_bh);
1108 		ret = submit_barrier_buffer(jl->j_commit_bh);
1109 		if (ret == -EOPNOTSUPP) {
1110 			set_buffer_uptodate(jl->j_commit_bh);
1111 			disable_barrier(s);
1112 			barrier = 0;
1113 		}
1114 	}
1115 	for (i = 0; i < (jl->j_len + 1); i++) {
1116 		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
1117 		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
1118 		tbh = journal_find_get_block(s, bn);
1119 
1120 		reiserfs_write_unlock(s);
1121 		wait_on_buffer(tbh);
1122 		reiserfs_write_lock(s);
1123 		/* since we're using ll_rw_block above, it might have skipped
1124 		 * over a locked buffer.  Double check here; strictly redundant,
1125 		 * since sync_dirty_buffer() checks buffer_dirty() itself.
1126 		 */
1127 		if (buffer_dirty(tbh)) {
1128 			reiserfs_write_unlock(s);
1129 			sync_dirty_buffer(tbh);
1130 			reiserfs_write_lock(s);
1131 		}
1132 		if (unlikely(!buffer_uptodate(tbh))) {
1133 #ifdef CONFIG_REISERFS_CHECK
1134 			reiserfs_warning(s, "journal-601",
1135 					 "buffer write failed");
1136 #endif
1137 			retval = -EIO;
1138 		}
1139 		put_bh(tbh);	/* once for journal_find_get_block */
1140 		put_bh(tbh);	/* once due to original getblk in do_journal_end */
1141 		atomic_dec(&(jl->j_commit_left));
1142 	}
1143 
1144 	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);
1145 
1146 	if (!barrier) {
1147 		/* If there was a write error in the journal - we can't commit
1148 		 * this transaction - it will be invalid and, if successful,
1149 		 * will just end up propagating the write error out to
1150 		 * the file system. */
1151 		if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
1152 			if (buffer_dirty(jl->j_commit_bh))
1153 				BUG();
1154 			mark_buffer_dirty(jl->j_commit_bh);
1155 			reiserfs_write_unlock(s);
1156 			sync_dirty_buffer(jl->j_commit_bh);
1157 			reiserfs_write_lock(s);
1158 		}
1159 	} else {
1160 		reiserfs_write_unlock(s);
1161 		wait_on_buffer(jl->j_commit_bh);
1162 		reiserfs_write_lock(s);
1163 	}
1164 
1165 	check_barrier_completion(s, jl->j_commit_bh);
1166 
1167 	/* If there was a write error in the journal - we can't commit this
1168 	 * transaction - it will be invalid and, if successful, will just end
1169 	 * up propagating the write error out to the filesystem. */
1170 	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
1171 #ifdef CONFIG_REISERFS_CHECK
1172 		reiserfs_warning(s, "journal-615", "buffer write failed");
1173 #endif
1174 		retval = -EIO;
1175 	}
1176 	bforget(jl->j_commit_bh);
1177 	if (journal->j_last_commit_id != 0 &&
1178 	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
1179 		reiserfs_warning(s, "clm-2200", "last commit %u, current %u",
1180 				 journal->j_last_commit_id, jl->j_trans_id);
1181 	}
1182 	journal->j_last_commit_id = jl->j_trans_id;
1183 
1184 	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
1185 	cleanup_freed_for_journal_list(s, jl);
1186 
1187 	retval = retval ? retval : journal->j_errno;
1188 
1189 	/* mark the metadata dirty */
1190 	if (!retval)
1191 		dirty_one_transaction(s, jl);
1192 	atomic_dec(&(jl->j_commit_left));
1193 
1194 	if (flushall) {
1195 		atomic_set(&(jl->j_older_commits_done), 1);
1196 	}
1197 	mutex_unlock(&jl->j_commit_mutex);
1198       put_jl:
1199 	put_journal_list(s, jl);
1200 
1201 	if (retval)
1202 		reiserfs_abort(s, retval, "Journal write error in %s",
1203 			       __func__);
1204 	put_fs_excl();
1205 	return retval;
1206 }
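
/* On-disk picture after a successful commit, as a sketch: the
 * transaction occupies journal offsets (j_start .. j_start + j_len + 1)
 * mod SB_ONDISK_JOURNAL_SIZE -- a desc block, j_len logged blocks, and
 * finally the commit block, which is written only after everything
 * before it is safely down.  Replay will only trust the transaction if
 * the desc and commit blocks agree.
 */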
1207 
1208 /*
1209 ** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
1210 ** returns NULL if it can't find anything
1211 */
1212 static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
1213 							  reiserfs_journal_cnode
1214 							  *cn)
1215 {
1216 	struct super_block *sb = cn->sb;
1217 	b_blocknr_t blocknr = cn->blocknr;
1218 
1219 	cn = cn->hprev;
1220 	while (cn) {
1221 		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
1222 			return cn->jlist;
1223 		}
1224 		cn = cn->hprev;
1225 	}
1226 	return NULL;
1227 }
1228 
1229 static int newer_jl_done(struct reiserfs_journal_cnode *cn)
1230 {
1231 	struct super_block *sb = cn->sb;
1232 	b_blocknr_t blocknr = cn->blocknr;
1233 
1234 	cn = cn->hprev;
1235 	while (cn) {
1236 		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
1237 		    atomic_read(&cn->jlist->j_commit_left) != 0)
1238 			return 0;
1239 		cn = cn->hprev;
1240 	}
1241 	return 1;
1242 }
1243 
1244 static void remove_journal_hash(struct super_block *,
1245 				struct reiserfs_journal_cnode **,
1246 				struct reiserfs_journal_list *, unsigned long,
1247 				int);
1248 
1249 /*
1250 ** once all the real blocks have been flushed, it is safe to remove them from the
1251 ** journal list for this transaction.  Aside from freeing the cnode, this also allows the
1252 ** block to be reallocated for data blocks if it had been deleted.
1253 */
1254 static void remove_all_from_journal_list(struct super_block *sb,
1255 					 struct reiserfs_journal_list *jl,
1256 					 int debug)
1257 {
1258 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
1259 	struct reiserfs_journal_cnode *cn, *last;
1260 	cn = jl->j_realblock;
1261 
1262 	/* which is better, to lock once around the whole loop, or
1263 	 ** to lock for each call to remove_journal_hash?
1264 	 */
1265 	while (cn) {
1266 		if (cn->blocknr != 0) {
1267 			if (debug) {
1268 				reiserfs_warning(sb, "reiserfs-2201",
1269 						 "block %u, bh is %d, state %ld",
1270 						 cn->blocknr, cn->bh ? 1 : 0,
1271 						 cn->state);
1272 			}
1273 			cn->state = 0;
1274 			remove_journal_hash(sb, journal->j_list_hash_table,
1275 					    jl, cn->blocknr, 1);
1276 		}
1277 		last = cn;
1278 		cn = cn->next;
1279 		free_cnode(sb, last);
1280 	}
1281 	jl->j_realblock = NULL;
1282 }
1283 
1284 /*
1285 ** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
1286 ** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
1287 ** releasing blocks in this transaction for reuse as data blocks.
1288 ** called by flush_journal_list, before it calls remove_all_from_journal_list
1289 **
1290 */
1291 static int _update_journal_header_block(struct super_block *sb,
1292 					unsigned long offset,
1293 					unsigned int trans_id)
1294 {
1295 	struct reiserfs_journal_header *jh;
1296 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
1297 
1298 	if (reiserfs_is_journal_aborted(journal))
1299 		return -EIO;
1300 
1301 	if (trans_id >= journal->j_last_flush_trans_id) {
1302 		if (buffer_locked((journal->j_header_bh))) {
1303 			reiserfs_write_unlock(sb);
1304 			wait_on_buffer((journal->j_header_bh));
1305 			reiserfs_write_lock(sb);
1306 			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
1307 #ifdef CONFIG_REISERFS_CHECK
1308 				reiserfs_warning(sb, "journal-699",
1309 						 "buffer write failed");
1310 #endif
1311 				return -EIO;
1312 			}
1313 		}
1314 		journal->j_last_flush_trans_id = trans_id;
1315 		journal->j_first_unflushed_offset = offset;
1316 		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
1317 							b_data);
1318 		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
1319 		jh->j_first_unflushed_offset = cpu_to_le32(offset);
1320 		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
1321 
1322 		if (reiserfs_barrier_flush(sb)) {
1323 			int ret;
1324 			lock_buffer(journal->j_header_bh);
1325 			ret = submit_barrier_buffer(journal->j_header_bh);
1326 			if (ret == -EOPNOTSUPP) {
1327 				set_buffer_uptodate(journal->j_header_bh);
1328 				disable_barrier(sb);
1329 				goto sync;
1330 			}
1331 			reiserfs_write_unlock(sb);
1332 			wait_on_buffer(journal->j_header_bh);
1333 			reiserfs_write_lock(sb);
1334 			check_barrier_completion(sb, journal->j_header_bh);
1335 		} else {
1336 		      sync:
1337 			set_buffer_dirty(journal->j_header_bh);
1338 			reiserfs_write_unlock(sb);
1339 			sync_dirty_buffer(journal->j_header_bh);
1340 			reiserfs_write_lock(sb);
1341 		}
1342 		if (!buffer_uptodate(journal->j_header_bh)) {
1343 			reiserfs_warning(sb, "journal-837",
1344 					 "IO error during journal replay");
1345 			return -EIO;
1346 		}
1347 	}
1348 	return 0;
1349 }
1350 
1351 static int update_journal_header_block(struct super_block *sb,
1352 				       unsigned long offset,
1353 				       unsigned int trans_id)
1354 {
1355 	return _update_journal_header_block(sb, offset, trans_id);
1356 }
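
/* Example of the offset arithmetic the flush path feeds in here: for a
 * transaction starting at j_start with j_len logged blocks, the first
 * offset that no longer needs replay is
 * (j_start + j_len + 2) % SB_ONDISK_JOURNAL_SIZE -- one desc block,
 * j_len data blocks, and one commit block past the start.
 */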
1357 
1358 /*
1359 ** flush any and all journal lists older than you are.
1360 ** Can only be called from flush_journal_list.
1361 */
1362 static int flush_older_journal_lists(struct super_block *sb,
1363 				     struct reiserfs_journal_list *jl)
1364 {
1365 	struct list_head *entry;
1366 	struct reiserfs_journal_list *other_jl;
1367 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
1368 	unsigned int trans_id = jl->j_trans_id;
1369 
1370 	/* we know we are the only ones flushing things, no extra race
1371 	 * protection is required.
1372 	 */
1373       restart:
1374 	entry = journal->j_journal_list.next;
1375 	/* Did we wrap? */
1376 	if (entry == &journal->j_journal_list)
1377 		return 0;
1378 	other_jl = JOURNAL_LIST_ENTRY(entry);
1379 	if (other_jl->j_trans_id < trans_id) {
1380 		BUG_ON(other_jl->j_refcount <= 0);
1381 		/* do not flush all */
1382 		flush_journal_list(sb, other_jl, 0);
1383 
1384 		/* other_jl is now deleted from the list */
1385 		goto restart;
1386 	}
1387 	return 0;
1388 }
1389 
1390 static void del_from_work_list(struct super_block *s,
1391 			       struct reiserfs_journal_list *jl)
1392 {
1393 	struct reiserfs_journal *journal = SB_JOURNAL(s);
1394 	if (!list_empty(&jl->j_working_list)) {
1395 		list_del_init(&jl->j_working_list);
1396 		journal->j_num_work_lists--;
1397 	}
1398 }
1399 
1400 /* flush a journal list, both commit and real blocks
1401 **
1402 ** always set flushall to 1, unless you are calling from inside
1403 ** flush_journal_list
1404 **
1405 ** IMPORTANT.  This can only be called while there are no journal writers,
1406 ** and the journal is locked.  That means it can only be called from
1407 ** do_journal_end, or by journal_release
1408 */
1409 static int flush_journal_list(struct super_block *s,
1410 			      struct reiserfs_journal_list *jl, int flushall)
1411 {
1412 	struct reiserfs_journal_list *pjl;
1413 	struct reiserfs_journal_cnode *cn, *last;
1414 	int count;
1415 	int was_jwait = 0;
1416 	int was_dirty = 0;
1417 	struct buffer_head *saved_bh;
1418 	unsigned long j_len_saved = jl->j_len;
1419 	struct reiserfs_journal *journal = SB_JOURNAL(s);
1420 	int err = 0;
1421 
1422 	BUG_ON(j_len_saved <= 0);
1423 
1424 	if (atomic_read(&journal->j_wcount) != 0) {
1425 		reiserfs_warning(s, "clm-2048", "called with wcount %d",
1426 				 atomic_read(&journal->j_wcount));
1427 	}
1428 	BUG_ON(jl->j_trans_id == 0);
1429 
1430 	/* if flushall == 0, the lock is already held */
1431 	if (flushall) {
1432 		reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
1433 	} else if (mutex_trylock(&journal->j_flush_mutex)) {
1434 		BUG();
1435 	}
1436 
1437 	count = 0;
1438 	if (j_len_saved > journal->j_trans_max) {
1439 		reiserfs_panic(s, "journal-715", "length is %lu, trans id %u",
1440 			       j_len_saved, jl->j_trans_id);
1441 		return 0;
1442 	}
1443 
1444 	get_fs_excl();
1445 
1446 	/* if all the work is already done, get out of here */
1447 	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
1448 	    atomic_read(&(jl->j_commit_left)) <= 0) {
1449 		goto flush_older_and_return;
1450 	}
1451 
1452 	/* start by putting the commit list on disk.  This will also flush
1453 	 ** the commit lists of any older transactions
1454 	 */
1455 	flush_commit_list(s, jl, 1);
1456 
1457 	if (!(jl->j_state & LIST_DIRTY)
1458 	    && !reiserfs_is_journal_aborted(journal))
1459 		BUG();
1460 
1461 	/* are we done now? */
1462 	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
1463 	    atomic_read(&(jl->j_commit_left)) <= 0) {
1464 		goto flush_older_and_return;
1465 	}
1466 
1467 	/* loop through each cnode, see if we need to write it,
1468 	 ** or wait on a more recent transaction, or just ignore it
1469 	 */
1470 	if (atomic_read(&(journal->j_wcount)) != 0) {
1471 		reiserfs_panic(s, "journal-844", "journal list is flushing, "
1472 			       "wcount is not 0");
1473 	}
1474 	cn = jl->j_realblock;
1475 	while (cn) {
1476 		was_jwait = 0;
1477 		was_dirty = 0;
1478 		saved_bh = NULL;
1479 		/* blocknr of 0 is no longer in the hash, ignore it */
1480 		if (cn->blocknr == 0) {
1481 			goto free_cnode;
1482 		}
1483 
1484 		/* This transaction failed commit. Don't write out to the disk */
1485 		if (!(jl->j_state & LIST_DIRTY))
1486 			goto free_cnode;
1487 
1488 		pjl = find_newer_jl_for_cn(cn);
1489 		/* the order is important here.  We check pjl to make sure we
1490 		 ** don't clear BH_JDirty_wait if we aren't the one writing this
1491 		 ** block to disk
1492 		 */
1493 		if (!pjl && cn->bh) {
1494 			saved_bh = cn->bh;
1495 
1496 			/* we do this to make sure nobody releases the buffer while
1497 			 ** we are working with it
1498 			 */
1499 			get_bh(saved_bh);
1500 
1501 			if (buffer_journal_dirty(saved_bh)) {
1502 				BUG_ON(!can_dirty(cn));
1503 				was_jwait = 1;
1504 				was_dirty = 1;
1505 			} else if (can_dirty(cn)) {
1506 				/* everything with !pjl && jwait should be writable */
1507 				BUG();
1508 			}
1509 		}
1510 
1511 		/* if someone has this block in a newer transaction, just make
1512 		 ** sure they are committed, and don't try writing it to disk
1513 		 */
1514 		if (pjl) {
1515 			if (atomic_read(&pjl->j_commit_left))
1516 				flush_commit_list(s, pjl, 1);
1517 			goto free_cnode;
1518 		}
1519 
1520 		/* bh == NULL when the block got to disk on its own, OR,
1521 		 ** the block got freed in a future transaction
1522 		 */
1523 		if (saved_bh == NULL) {
1524 			goto free_cnode;
1525 		}
1526 
1527 		/* this should never happen.  kupdate_one_transaction has this list
1528 		 ** locked while it works, so we should never see a buffer here that
1529 		 ** is not marked JDirty_wait
1530 		 */
1531 		if ((!was_jwait) && !buffer_locked(saved_bh)) {
1532 			reiserfs_warning(s, "journal-813",
1533 					 "BAD! buffer %llu %cdirty %cjwait, "
1534 					 "not in a newer transaction",
1535 					 (unsigned long long)saved_bh->
1536 					 b_blocknr, was_dirty ? ' ' : '!',
1537 					 was_jwait ? ' ' : '!');
1538 		}
1539 		if (was_dirty) {
1540 			/* we inc again because saved_bh gets decremented at free_cnode */
1541 			get_bh(saved_bh);
1542 			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
1543 			lock_buffer(saved_bh);
1544 			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
1545 			if (buffer_dirty(saved_bh))
1546 				submit_logged_buffer(saved_bh);
1547 			else
1548 				unlock_buffer(saved_bh);
1549 			count++;
1550 		} else {
1551 			reiserfs_warning(s, "clm-2082",
1552 					 "Unable to flush buffer %llu in %s",
1553 					 (unsigned long long)saved_bh->
1554 					 b_blocknr, __func__);
1555 		}
1556 	      free_cnode:
1557 		last = cn;
1558 		cn = cn->next;
1559 		if (saved_bh) {
1560 			/* we incremented this to keep others from taking the buffer head away */
1561 			put_bh(saved_bh);
1562 			if (atomic_read(&(saved_bh->b_count)) < 0) {
1563 				reiserfs_warning(s, "journal-945",
1564 						 "saved_bh->b_count < 0");
1565 			}
1566 		}
1567 	}
1568 	if (count > 0) {
1569 		cn = jl->j_realblock;
1570 		while (cn) {
1571 			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
1572 				if (!cn->bh) {
1573 					reiserfs_panic(s, "journal-1011",
1574 						       "cn->bh is NULL");
1575 				}
1576 
1577 				reiserfs_write_unlock(s);
1578 				wait_on_buffer(cn->bh);
1579 				reiserfs_write_lock(s);
1580 
1581 				if (!cn->bh) {
1582 					reiserfs_panic(s, "journal-1012",
1583 						       "cn->bh is NULL");
1584 				}
1585 				if (unlikely(!buffer_uptodate(cn->bh))) {
1586 #ifdef CONFIG_REISERFS_CHECK
1587 					reiserfs_warning(s, "journal-949",
1588 							 "buffer write failed");
1589 #endif
1590 					err = -EIO;
1591 				}
1592 				/* note, we must clear the JDirty_wait bit after the up-to-date
1593 				 ** check, otherwise we race against our flushpage routine
1594 				 */
1595 				BUG_ON(!test_clear_buffer_journal_dirty
1596 				       (cn->bh));
1597 
1598 				/* drop one ref for us */
1599 				put_bh(cn->bh);
1600 				/* drop one ref for journal_mark_dirty */
1601 				release_buffer_page(cn->bh);
1602 			}
1603 			cn = cn->next;
1604 		}
1605 	}
1606 
1607 	if (err)
1608 		reiserfs_abort(s, -EIO,
1609 			       "Write error while pushing transaction to disk in %s",
1610 			       __func__);
1611       flush_older_and_return:
1612 
1613 	/* before we can update the journal header block, we _must_ flush all
1614 	 ** real blocks from all older transactions to disk.  This is because
1615 	 ** once the header block is updated, this transaction will not be
1616 	 ** replayed after a crash
1617 	 */
1618 	if (flushall) {
1619 		flush_older_journal_lists(s, jl);
1620 	}
1621 
1622 	err = journal->j_errno;
1623 	/* before we can remove everything from the hash tables for this
1624 	 ** transaction, we must make sure it can never be replayed
1625 	 **
1626 	 ** since we are only called from do_journal_end, we know for sure there
1627 	 ** are no allocations going on while we are flushing journal lists.  So,
1628 	 ** we only need to update the journal header block for the last list
1629 	 ** being flushed
1630 	 */
1631 	if (!err && flushall) {
1632 		err =
1633 		    update_journal_header_block(s,
1634 						(jl->j_start + jl->j_len +
1635 						 2) % SB_ONDISK_JOURNAL_SIZE(s),
1636 						jl->j_trans_id);
1637 		if (err)
1638 			reiserfs_abort(s, -EIO,
1639 				       "Write error while updating journal header in %s",
1640 				       __func__);
1641 	}
1642 	remove_all_from_journal_list(s, jl, 0);
1643 	list_del_init(&jl->j_list);
1644 	journal->j_num_lists--;
1645 	del_from_work_list(s, jl);
1646 
1647 	if (journal->j_last_flush_id != 0 &&
1648 	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
1649 		reiserfs_warning(s, "clm-2201", "last flush %u, current %u",
1650 				 journal->j_last_flush_id, jl->j_trans_id);
1651 	}
1652 	journal->j_last_flush_id = jl->j_trans_id;
1653 
1654 	/* not strictly required since we are freeing the list, but it should
1655 	 * help find code using dead lists later on
1656 	 */
1657 	jl->j_len = 0;
1658 	atomic_set(&(jl->j_nonzerolen), 0);
1659 	jl->j_start = 0;
1660 	jl->j_realblock = NULL;
1661 	jl->j_commit_bh = NULL;
1662 	jl->j_trans_id = 0;
1663 	jl->j_state = 0;
1664 	put_journal_list(s, jl);
1665 	if (flushall)
1666 		mutex_unlock(&journal->j_flush_mutex);
1667 	put_fs_excl();
1668 	return err;
1669 }
1670 
1671 static int test_transaction(struct super_block *s,
1672                             struct reiserfs_journal_list *jl)
1673 {
1674 	struct reiserfs_journal_cnode *cn;
1675 
1676 	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
1677 		return 1;
1678 
1679 	cn = jl->j_realblock;
1680 	while (cn) {
1681 		/* if the blocknr == 0, this has been cleared from the hash,
1682 		 ** skip it
1683 		 */
1684 		if (cn->blocknr == 0) {
1685 			goto next;
1686 		}
1687 		if (cn->bh && !newer_jl_done(cn))
1688 			return 0;
1689 	      next:
1690 		cn = cn->next;
1691 		cond_resched();
1692 	}
1693 	return 0;
1694 }
1695 
1696 static int write_one_transaction(struct super_block *s,
1697 				 struct reiserfs_journal_list *jl,
1698 				 struct buffer_chunk *chunk)
1699 {
1700 	struct reiserfs_journal_cnode *cn;
1701 	int ret = 0;
1702 
1703 	jl->j_state |= LIST_TOUCHED;
1704 	del_from_work_list(s, jl);
1705 	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
1706 		return 0;
1707 	}
1708 
1709 	cn = jl->j_realblock;
1710 	while (cn) {
1711 		/* if the blocknr == 0, this has been cleared from the hash,
1712 		 ** skip it
1713 		 */
1714 		if (cn->blocknr == 0) {
1715 			goto next;
1716 		}
1717 		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
1718 			struct buffer_head *tmp_bh;
1719 			/* we can race against journal_mark_freed when we try
1720 			 * to lock_buffer(cn->bh), so we have to inc the buffer
1721 			 * count, and recheck things after locking
1722 			 */
1723 			tmp_bh = cn->bh;
1724 			get_bh(tmp_bh);
1725 			lock_buffer(tmp_bh);
1726 			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
1727 				if (!buffer_journal_dirty(tmp_bh) ||
1728 				    buffer_journal_prepared(tmp_bh))
1729 					BUG();
1730 				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
1731 				ret++;
1732 			} else {
1733 				/* note, cn->bh might be null now */
1734 				unlock_buffer(tmp_bh);
1735 			}
1736 			put_bh(tmp_bh);
1737 		}
1738 	      next:
1739 		cn = cn->next;
1740 		cond_resched();
1741 	}
1742 	return ret;
1743 }
1744 
1745 /* used by flush_commit_list */
1746 static int dirty_one_transaction(struct super_block *s,
1747 				 struct reiserfs_journal_list *jl)
1748 {
1749 	struct reiserfs_journal_cnode *cn;
1750 	struct reiserfs_journal_list *pjl;
1751 	int ret = 0;
1752 
1753 	jl->j_state |= LIST_DIRTY;
1754 	cn = jl->j_realblock;
1755 	while (cn) {
1756 		/* look for a more recent transaction that logged this
1757 		 ** buffer.  Only the most recent transaction with a buffer in
1758 		 ** it is allowed to send that buffer to disk
1759 		 */
1760 		pjl = find_newer_jl_for_cn(cn);
1761 		if (!pjl && cn->blocknr && cn->bh
1762 		    && buffer_journal_dirty(cn->bh)) {
1763 			BUG_ON(!can_dirty(cn));
1764 			/* if the buffer is prepared, it will either be logged
1765 			 * or restored.  If restored, we need to make sure
1766 			 * it actually gets marked dirty
1767 			 */
1768 			clear_buffer_journal_new(cn->bh);
1769 			if (buffer_journal_prepared(cn->bh)) {
1770 				set_buffer_journal_restore_dirty(cn->bh);
1771 			} else {
1772 				set_buffer_journal_test(cn->bh);
1773 				mark_buffer_dirty(cn->bh);
1774 			}
1775 		}
1776 		cn = cn->next;
1777 	}
1778 	return ret;
1779 }
1780 
1781 static int kupdate_transactions(struct super_block *s,
1782 				struct reiserfs_journal_list *jl,
1783 				struct reiserfs_journal_list **next_jl,
1784 				unsigned int *next_trans_id,
1785 				int num_blocks, int num_trans)
1786 {
1787 	int ret = 0;
1788 	int written = 0;
1789 	int transactions_flushed = 0;
1790 	unsigned int orig_trans_id = jl->j_trans_id;
1791 	struct buffer_chunk chunk;
1792 	struct list_head *entry;
1793 	struct reiserfs_journal *journal = SB_JOURNAL(s);
1794 	chunk.nr = 0;
1795 
1796 	reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
1797 	if (!journal_list_still_alive(s, orig_trans_id)) {
1798 		goto done;
1799 	}
1800 
1801 	/* we've got j_flush_mutex held, nobody is going to delete any
1802 	 * of these lists out from underneath us
1803 	 */
1804 	while ((num_trans && transactions_flushed < num_trans) ||
1805 	       (!num_trans && written < num_blocks)) {
1806 
1807 		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
1808 		    atomic_read(&jl->j_commit_left)
1809 		    || !(jl->j_state & LIST_DIRTY)) {
1810 			del_from_work_list(s, jl);
1811 			break;
1812 		}
1813 		ret = write_one_transaction(s, jl, &chunk);
1814 
1815 		if (ret < 0)
1816 			goto done;
1817 		transactions_flushed++;
1818 		written += ret;
1819 		entry = jl->j_list.next;
1820 
1821 		/* did we wrap? */
1822 		if (entry == &journal->j_journal_list) {
1823 			break;
1824 		}
1825 		jl = JOURNAL_LIST_ENTRY(entry);
1826 
1827 		/* don't bother with older transactions */
1828 		if (jl->j_trans_id <= orig_trans_id)
1829 			break;
1830 	}
1831 	if (chunk.nr) {
1832 		write_chunk(&chunk);
1833 	}
1834 
1835       done:
1836 	mutex_unlock(&journal->j_flush_mutex);
1837 	return ret;
1838 }
1839 
1840 /* O_SYNC and fsync heavy applications tend to use up
1841 ** all the journal list slots with tiny transactions.  These
1842 ** trigger lots and lots of calls to update the header block, which
1843 ** adds seeks and slows things down.
1844 **
1845 ** This function tries to clear out a large chunk of the journal lists
1846 ** at once, which makes everything faster since only the newest journal
1847 ** list updates the header block
1848 */
1849 static int flush_used_journal_lists(struct super_block *s,
1850 				    struct reiserfs_journal_list *jl)
1851 {
1852 	unsigned long len = 0;
1853 	unsigned long cur_len;
1854 	int ret;
1855 	int i;
1856 	int limit = 256;
1857 	struct reiserfs_journal_list *tjl;
1858 	struct reiserfs_journal_list *flush_jl;
1859 	unsigned int trans_id;
1860 	struct reiserfs_journal *journal = SB_JOURNAL(s);
1861 
1862 	flush_jl = tjl = jl;
1863 
1864 	/* in data logging mode, try harder to flush a lot of blocks */
1865 	if (reiserfs_data_log(s))
1866 		limit = 1024;
1867 	/* flush for 256 transactions or limit blocks, whichever comes first */
1868 	for (i = 0; i < 256 && len < limit; i++) {
1869 		if (atomic_read(&tjl->j_commit_left) ||
1870 		    tjl->j_trans_id < jl->j_trans_id) {
1871 			break;
1872 		}
1873 		cur_len = atomic_read(&tjl->j_nonzerolen);
1874 		if (cur_len > 0) {
1875 			tjl->j_state &= ~LIST_TOUCHED;
1876 		}
1877 		len += cur_len;
1878 		flush_jl = tjl;
1879 		if (tjl->j_list.next == &journal->j_journal_list)
1880 			break;
1881 		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1882 	}
1883 	/* try to find a group of blocks we can flush across all the
1884 	 ** transactions, but only bother if we've actually spanned
1885 	 ** across multiple lists
1886 	 */
1887 	if (flush_jl != jl) {
1888 		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1889 	}
1890 	flush_journal_list(s, flush_jl, 1);
1891 	return 0;
1892 }
1893 
1894 /*
1895 ** removes any nodes in table matching the given block number and super block.
1896 ** only touches the hnext and hprev pointers.
1897 */
1898 void remove_journal_hash(struct super_block *sb,
1899 			 struct reiserfs_journal_cnode **table,
1900 			 struct reiserfs_journal_list *jl,
1901 			 unsigned long block, int remove_freed)
1902 {
1903 	struct reiserfs_journal_cnode *cur;
1904 	struct reiserfs_journal_cnode **head;
1905 
1906 	head = &(journal_hash(table, sb, block));
1907 	if (!head) {
1908 		return;
1909 	}
1910 	cur = *head;
1911 	while (cur) {
1912 		if (cur->blocknr == block && cur->sb == sb
1913 		    && (jl == NULL || jl == cur->jlist)
1914 		    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
1915 			if (cur->hnext) {
1916 				cur->hnext->hprev = cur->hprev;
1917 			}
1918 			if (cur->hprev) {
1919 				cur->hprev->hnext = cur->hnext;
1920 			} else {
1921 				*head = cur->hnext;
1922 			}
1923 			cur->blocknr = 0;
1924 			cur->sb = NULL;
1925 			cur->state = 0;
1926 			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
1927 				atomic_dec(&(cur->jlist->j_nonzerolen));
1928 			cur->bh = NULL;
1929 			cur->jlist = NULL;
1930 		}
1931 		cur = cur->hnext;
1932 	}
1933 }
1934 
1935 static void free_journal_ram(struct super_block *sb)
1936 {
1937 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
1938 	kfree(journal->j_current_jl);
1939 	journal->j_num_lists--;
1940 
1941 	vfree(journal->j_cnode_free_orig);
1942 	free_list_bitmaps(sb, journal->j_list_bitmap);
1943 	free_bitmap_nodes(sb);	/* must be after free_list_bitmaps */
1944 	if (journal->j_header_bh) {
1945 		brelse(journal->j_header_bh);
1946 	}
1947 	/* j_header_bh is on the journal dev, make sure not to release the journal
1948 	 * dev until we brelse j_header_bh
1949 	 */
1950 	release_journal_dev(sb, journal);
1951 	vfree(journal);
1952 }
1953 
1954 /*
1955 ** call on unmount.  Only set error to 1 if you haven't made your way out
1956 ** of read_super() yet.  Any other caller must keep error at 0.
1957 */
1958 static int do_journal_release(struct reiserfs_transaction_handle *th,
1959 			      struct super_block *sb, int error)
1960 {
1961 	struct reiserfs_transaction_handle myth;
1962 	int flushed = 0;
1963 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
1964 
1965 	/* we only want to flush out transactions if we were called with error == 0
1966 	 */
1967 	if (!error && !(sb->s_flags & MS_RDONLY)) {
1968 		/* end the current trans */
1969 		BUG_ON(!th->t_trans_id);
1970 		do_journal_end(th, sb, 10, FLUSH_ALL);
1971 
1972 		/* make sure something gets logged to force our way into the flush code */
1973 		if (!journal_join(&myth, sb, 1)) {
1974 			reiserfs_prepare_for_journal(sb,
1975 						     SB_BUFFER_WITH_SB(sb),
1976 						     1);
1977 			journal_mark_dirty(&myth, sb,
1978 					   SB_BUFFER_WITH_SB(sb));
1979 			do_journal_end(&myth, sb, 1, FLUSH_ALL);
1980 			flushed = 1;
1981 		}
1982 	}
1983 
1984 	/* this also catches errors during the do_journal_end above */
1985 	if (!error && reiserfs_is_journal_aborted(journal)) {
1986 		memset(&myth, 0, sizeof(myth));
1987 		if (!journal_join_abort(&myth, sb, 1)) {
1988 			reiserfs_prepare_for_journal(sb,
1989 						     SB_BUFFER_WITH_SB(sb),
1990 						     1);
1991 			journal_mark_dirty(&myth, sb,
1992 					   SB_BUFFER_WITH_SB(sb));
1993 			do_journal_end(&myth, sb, 1, FLUSH_ALL);
1994 		}
1995 	}
1996 
1997 	reiserfs_mounted_fs_count--;
1998 	/* wait for all commits to finish */
1999 	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
2000 
2001 	/*
2002 	 * We must release the write lock here because
2003 	 * the workqueue job (flush_async_commit) needs this lock
2004 	 */
2005 	reiserfs_write_unlock(sb);
2006 	flush_workqueue(commit_wq);
2007 
2008 	if (!reiserfs_mounted_fs_count) {
2009 		destroy_workqueue(commit_wq);
2010 		commit_wq = NULL;
2011 	}
2012 
2013 	free_journal_ram(sb);
2014 
2015 	reiserfs_write_lock(sb);
2016 
2017 	return 0;
2018 }
2019 
2020 /*
2021 ** call on unmount.  flush all journal trans, release all alloc'd ram
2022 */
2023 int journal_release(struct reiserfs_transaction_handle *th,
2024 		    struct super_block *sb)
2025 {
2026 	return do_journal_release(th, sb, 0);
2027 }
2028 
2029 /*
2030 ** only call from an error condition inside reiserfs_read_super!
2031 */
2032 int journal_release_error(struct reiserfs_transaction_handle *th,
2033 			  struct super_block *sb)
2034 {
2035 	return do_journal_release(th, sb, 1);
2036 }
2037 
2038 /* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
2039 static int journal_compare_desc_commit(struct super_block *sb,
2040 				       struct reiserfs_journal_desc *desc,
2041 				       struct reiserfs_journal_commit *commit)
2042 {
2043 	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
2044 	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
2045 	    get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
2046 	    get_commit_trans_len(commit) <= 0) {
2047 		return 1;
2048 	}
2049 	return 0;
2050 }
2051 
2052 /* returns 0 if it did not find a description block
2053 ** returns -1 if it found a corrupt commit block
2054 ** returns 1 if both desc and commit were valid
2055 */
2056 static int journal_transaction_is_valid(struct super_block *sb,
2057 					struct buffer_head *d_bh,
2058 					unsigned int *oldest_invalid_trans_id,
2059 					unsigned long *newest_mount_id)
2060 {
2061 	struct reiserfs_journal_desc *desc;
2062 	struct reiserfs_journal_commit *commit;
2063 	struct buffer_head *c_bh;
2064 	unsigned long offset;
2065 
2066 	if (!d_bh)
2067 		return 0;
2068 
2069 	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2070 	if (get_desc_trans_len(desc) > 0
2071 	    && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
2072 		if (oldest_invalid_trans_id && *oldest_invalid_trans_id
2073 		    && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
2074 			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2075 				       "journal-986: transaction "
2076 				       "is valid returning because trans_id %d is greater than "
2077 				       "oldest_invalid %lu",
2078 				       get_desc_trans_id(desc),
2079 				       *oldest_invalid_trans_id);
2080 			return 0;
2081 		}
2082 		if (newest_mount_id
2083 		    && *newest_mount_id > get_desc_mount_id(desc)) {
2084 			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2085 				       "journal-1087: transaction "
2086 				       "is valid returning because mount_id %d is less than "
2087 				       "newest_mount_id %lu",
2088 				       get_desc_mount_id(desc),
2089 				       *newest_mount_id);
2090 			return -1;
2091 		}
2092 		if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
2093 			reiserfs_warning(sb, "journal-2018",
2094 					 "Bad transaction length %d "
2095 					 "encountered, ignoring transaction",
2096 					 get_desc_trans_len(desc));
2097 			return -1;
2098 		}
2099 		offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2100 
2101 		/* ok, we have a journal description block, let's see if the transaction was valid */
2102 		c_bh =
2103 		    journal_bread(sb,
2104 				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2105 				  ((offset + get_desc_trans_len(desc) +
2106 				    1) % SB_ONDISK_JOURNAL_SIZE(sb)));
2107 		if (!c_bh)
2108 			return 0;
2109 		commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2110 		if (journal_compare_desc_commit(sb, desc, commit)) {
2111 			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2112 				       "journal_transaction_is_valid, commit offset %ld had bad "
2113 				       "time %d or length %d",
2114 				       c_bh->b_blocknr -
2115 				       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2116 				       get_commit_trans_id(commit),
2117 				       get_commit_trans_len(commit));
2118 			brelse(c_bh);
2119 			if (oldest_invalid_trans_id) {
2120 				*oldest_invalid_trans_id =
2121 				    get_desc_trans_id(desc);
2122 				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2123 					       "journal-1004: "
2124 					       "transaction_is_valid setting oldest invalid trans_id "
2125 					       "to %d",
2126 					       get_desc_trans_id(desc));
2127 			}
2128 			return -1;
2129 		}
2130 		brelse(c_bh);
2131 		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2132 			       "journal-1006: found valid "
2133 			       "transaction start offset %llu, len %d id %d",
2134 			       d_bh->b_blocknr -
2135 			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2136 			       get_desc_trans_len(desc),
2137 			       get_desc_trans_id(desc));
2138 		return 1;
2139 	} else {
2140 		return 0;
2141 	}
2142 }
2143 
2144 static void brelse_array(struct buffer_head **heads, int num)
2145 {
2146 	int i;
2147 	for (i = 0; i < num; i++) {
2148 		brelse(heads[i]);
2149 	}
2150 }
2151 
2152 /*
2153 ** given the start, and values for the oldest acceptable transactions,
2154 ** this either reads in and replays a transaction, or returns because the transaction
2155 ** is invalid, or too old.
2156 */
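/*
** Illustrative layout sketch (inferred from the block reads below): a logged
** transaction of length L occupies L + 2 consecutive log blocks, wrapping
** modulo SB_ONDISK_JOURNAL_SIZE(sb):
**
**   [desc] [data 1] ... [data L] [commit]
**
** so the commit block sits at (trans_offset + L + 1) % journal size, and the
** next transaction, if any, begins at (trans_offset + L + 2) % journal size.
*/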
2157 static int journal_read_transaction(struct super_block *sb,
2158 				    unsigned long cur_dblock,
2159 				    unsigned long oldest_start,
2160 				    unsigned int oldest_trans_id,
2161 				    unsigned long newest_mount_id)
2162 {
2163 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
2164 	struct reiserfs_journal_desc *desc;
2165 	struct reiserfs_journal_commit *commit;
2166 	unsigned int trans_id = 0;
2167 	struct buffer_head *c_bh;
2168 	struct buffer_head *d_bh;
2169 	struct buffer_head **log_blocks = NULL;
2170 	struct buffer_head **real_blocks = NULL;
2171 	unsigned int trans_offset;
2172 	int i;
2173 	int trans_half;
2174 
2175 	d_bh = journal_bread(sb, cur_dblock);
2176 	if (!d_bh)
2177 		return 1;
2178 	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2179 	trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2180 	reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: "
2181 		       "journal_read_transaction, offset %llu, len %d mount_id %d",
2182 		       d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2183 		       get_desc_trans_len(desc), get_desc_mount_id(desc));
2184 	if (get_desc_trans_id(desc) < oldest_trans_id) {
2185 		reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: "
2186 			       "journal_read_trans skipping because %lu is too old",
2187 			       cur_dblock -
2188 			       SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2189 		brelse(d_bh);
2190 		return 1;
2191 	}
2192 	if (get_desc_mount_id(desc) != newest_mount_id) {
2193 		reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: "
2194 			       "journal_read_trans skipping because %d is != "
2195 			       "newest_mount_id %lu", get_desc_mount_id(desc),
2196 			       newest_mount_id);
2197 		brelse(d_bh);
2198 		return 1;
2199 	}
2200 	c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2201 			     ((trans_offset + get_desc_trans_len(desc) + 1) %
2202 			      SB_ONDISK_JOURNAL_SIZE(sb)));
2203 	if (!c_bh) {
2204 		brelse(d_bh);
2205 		return 1;
2206 	}
2207 	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2208 	if (journal_compare_desc_commit(sb, desc, commit)) {
2209 		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2210 			       "journal_read_transaction, "
2211 			       "commit offset %llu had bad time %d or length %d",
2212 			       c_bh->b_blocknr -
2213 			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2214 			       get_commit_trans_id(commit),
2215 			       get_commit_trans_len(commit));
2216 		brelse(c_bh);
2217 		brelse(d_bh);
2218 		return 1;
2219 	}
2220 	trans_id = get_desc_trans_id(desc);
2221 	/* now we know we've got a good transaction, and it was inside the valid time ranges */
2222 	log_blocks = kmalloc(get_desc_trans_len(desc) *
2223 			     sizeof(struct buffer_head *), GFP_NOFS);
2224 	real_blocks = kmalloc(get_desc_trans_len(desc) *
2225 			      sizeof(struct buffer_head *), GFP_NOFS);
2226 	if (!log_blocks || !real_blocks) {
2227 		brelse(c_bh);
2228 		brelse(d_bh);
2229 		kfree(log_blocks);
2230 		kfree(real_blocks);
2231 		reiserfs_warning(sb, "journal-1169",
2232 				 "kmalloc failed, unable to mount FS");
2233 		return -1;
2234 	}
2235 	/* get all the buffer heads */
2236 	trans_half = journal_trans_half(sb->s_blocksize);
2237 	for (i = 0; i < get_desc_trans_len(desc); i++) {
2238 		log_blocks[i] =
2239 		    journal_getblk(sb,
2240 				   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2241 				   (trans_offset + 1 +
2242 				    i) % SB_ONDISK_JOURNAL_SIZE(sb));
2243 		if (i < trans_half) {
2244 			real_blocks[i] =
2245 			    sb_getblk(sb,
2246 				      le32_to_cpu(desc->j_realblock[i]));
2247 		} else {
2248 			real_blocks[i] =
2249 			    sb_getblk(sb,
2250 				      le32_to_cpu(commit->
2251 						  j_realblock[i - trans_half]));
2252 		}
2253 		if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
2254 			reiserfs_warning(sb, "journal-1207",
2255 					 "REPLAY FAILURE fsck required! "
2256 					 "Block to replay is outside of "
2257 					 "filesystem");
2258 			goto abort_replay;
2259 		}
2260 		/* make sure we don't try to replay onto log or reserved area */
2261 		if (is_block_in_log_or_reserved_area
2262 		    (sb, real_blocks[i]->b_blocknr)) {
2263 			reiserfs_warning(sb, "journal-1204",
2264 					 "REPLAY FAILURE fsck required! "
2265 					 "Trying to replay onto a log block");
2266 		      abort_replay:
2267 			brelse_array(log_blocks, i);
2268 			brelse_array(real_blocks, i);
2269 			brelse(c_bh);
2270 			brelse(d_bh);
2271 			kfree(log_blocks);
2272 			kfree(real_blocks);
2273 			return -1;
2274 		}
2275 	}
2276 	/* read in the log blocks, memcpy to the corresponding real block */
2277 	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2278 	for (i = 0; i < get_desc_trans_len(desc); i++) {
2279 
2280 		reiserfs_write_unlock(sb);
2281 		wait_on_buffer(log_blocks[i]);
2282 		reiserfs_write_lock(sb);
2283 
2284 		if (!buffer_uptodate(log_blocks[i])) {
2285 			reiserfs_warning(sb, "journal-1212",
2286 					 "REPLAY FAILURE fsck required! "
2287 					 "buffer write failed");
2288 			brelse_array(log_blocks + i,
2289 				     get_desc_trans_len(desc) - i);
2290 			brelse_array(real_blocks, get_desc_trans_len(desc));
2291 			brelse(c_bh);
2292 			brelse(d_bh);
2293 			kfree(log_blocks);
2294 			kfree(real_blocks);
2295 			return -1;
2296 		}
2297 		memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2298 		       real_blocks[i]->b_size);
2299 		set_buffer_uptodate(real_blocks[i]);
2300 		brelse(log_blocks[i]);
2301 	}
2302 	/* flush out the real blocks */
2303 	for (i = 0; i < get_desc_trans_len(desc); i++) {
2304 		set_buffer_dirty(real_blocks[i]);
2305 		ll_rw_block(SWRITE, 1, real_blocks + i);
2306 	}
2307 	for (i = 0; i < get_desc_trans_len(desc); i++) {
2308 		wait_on_buffer(real_blocks[i]);
2309 		if (!buffer_uptodate(real_blocks[i])) {
2310 			reiserfs_warning(sb, "journal-1226",
2311 					 "REPLAY FAILURE, fsck required! "
2312 					 "buffer write failed");
2313 			brelse_array(real_blocks + i,
2314 				     get_desc_trans_len(desc) - i);
2315 			brelse(c_bh);
2316 			brelse(d_bh);
2317 			kfree(log_blocks);
2318 			kfree(real_blocks);
2319 			return -1;
2320 		}
2321 		brelse(real_blocks[i]);
2322 	}
2323 	cur_dblock =
2324 	    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2325 	    ((trans_offset + get_desc_trans_len(desc) +
2326 	      2) % SB_ONDISK_JOURNAL_SIZE(sb));
2327 	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2328 		       "journal-1095: setting journal " "start to offset %ld",
2329 		       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2330 
2331 	/* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2332 	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2333 	journal->j_last_flush_trans_id = trans_id;
2334 	journal->j_trans_id = trans_id + 1;
2335 	/* check for trans_id overflow */
2336 	if (journal->j_trans_id == 0)
2337 		journal->j_trans_id = 10;
2338 	brelse(c_bh);
2339 	brelse(d_bh);
2340 	kfree(log_blocks);
2341 	kfree(real_blocks);
2342 	return 0;
2343 }
2344 
2345 /* This function reads blocks of size bufsize, starting at block and going up
2346    to max_block (but no more than BUFNR blocks at a time). This proved to improve
2347    mounting speed on self-rebuilding raid5 arrays at least.
2348    Right now it is only used from journal code. But later we might use it
2349    from other places.
2350    Note: Do not use journal_getblk/sb_getblk functions here! */
2351 static struct buffer_head *reiserfs_breada(struct block_device *dev,
2352 					   b_blocknr_t block, int bufsize,
2353 					   b_blocknr_t max_block)
2354 {
2355 	struct buffer_head *bhlist[BUFNR];
2356 	unsigned int blocks = BUFNR;
2357 	struct buffer_head *bh;
2358 	int i, j;
2359 
2360 	bh = __getblk(dev, block, bufsize);
2361 	if (buffer_uptodate(bh))
2362 		return (bh);
2363 
2364 	if (block + BUFNR > max_block) {
2365 		blocks = max_block - block;
2366 	}
2367 	bhlist[0] = bh;
2368 	j = 1;
2369 	for (i = 1; i < blocks; i++) {
2370 		bh = __getblk(dev, block + i, bufsize);
2371 		if (buffer_uptodate(bh)) {
2372 			brelse(bh);
2373 			break;
2374 		} else
2375 			bhlist[j++] = bh;
2376 	}
2377 	ll_rw_block(READ, j, bhlist);
2378 	for (i = 1; i < j; i++)
2379 		brelse(bhlist[i]);
2380 	bh = bhlist[0];
2381 	wait_on_buffer(bh);
2382 	if (buffer_uptodate(bh))
2383 		return bh;
2384 	brelse(bh);
2385 	return NULL;
2386 }
2387 
2388 /*
2389 ** read and replay the log
2390 ** on a clean unmount, the journal header's next unflushed pointer will point to an invalid
2391 ** transaction.  This tests that before finding all the transactions in the log, which makes normal mount times fast.
2392 **
2393 ** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
2394 **
2395 ** On exit, it sets things up so the first transaction will work correctly.
2396 */
2397 static int journal_read(struct super_block *sb)
2398 {
2399 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
2400 	struct reiserfs_journal_desc *desc;
2401 	unsigned int oldest_trans_id = 0;
2402 	unsigned int oldest_invalid_trans_id = 0;
2403 	time_t start;
2404 	unsigned long oldest_start = 0;
2405 	unsigned long cur_dblock = 0;
2406 	unsigned long newest_mount_id = 9;
2407 	struct buffer_head *d_bh;
2408 	struct reiserfs_journal_header *jh;
2409 	int valid_journal_header = 0;
2410 	int replay_count = 0;
2411 	int continue_replay = 1;
2412 	int ret;
2413 	char b[BDEVNAME_SIZE];
2414 
2415 	cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2416 	reiserfs_info(sb, "checking transaction log (%s)\n",
2417 		      bdevname(journal->j_dev_bd, b));
2418 	start = get_seconds();
2419 
2420 	/* step 1, read in the journal header block.  Check the transaction it says
2421 	 ** is the first unflushed, and if that transaction is not valid,
2422 	 ** replay is done
2423 	 */
2424 	journal->j_header_bh = journal_bread(sb,
2425 					     SB_ONDISK_JOURNAL_1st_BLOCK(sb)
2426 					     + SB_ONDISK_JOURNAL_SIZE(sb));
2427 	if (!journal->j_header_bh) {
2428 		return 1;
2429 	}
2430 	jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
2431 	if (le32_to_cpu(jh->j_first_unflushed_offset) <
2432 	    SB_ONDISK_JOURNAL_SIZE(sb)
2433 	    && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2434 		oldest_start =
2435 		    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2436 		    le32_to_cpu(jh->j_first_unflushed_offset);
2437 		oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2438 		newest_mount_id = le32_to_cpu(jh->j_mount_id);
2439 		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2440 			       "journal-1153: found in "
2441 			       "header: first_unflushed_offset %d, last_flushed_trans_id "
2442 			       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2443 			       le32_to_cpu(jh->j_last_flush_trans_id));
2444 		valid_journal_header = 1;
2445 
2446 		/* now, we try to read the first unflushed offset.  If it is not valid,
2447 		 ** there is nothing more we can do, and it makes no sense to read
2448 		 ** through the whole log.
2449 		 */
2450 		d_bh =
2451 		    journal_bread(sb,
2452 				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2453 				  le32_to_cpu(jh->j_first_unflushed_offset));
2454 		ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
2455 		if (!ret) {
2456 			continue_replay = 0;
2457 		}
2458 		brelse(d_bh);
2459 		goto start_log_replay;
2460 	}
2461 
2462 	if (continue_replay && bdev_read_only(sb->s_bdev)) {
2463 		reiserfs_warning(sb, "clm-2076",
2464 				 "device is readonly, unable to replay log");
2465 		return -1;
2466 	}
2467 
2468 	/* ok, there are transactions that need to be replayed.  start with the first log block, find
2469 	 ** all the valid transactions, and pick out the oldest.
2470 	 */
2471 	while (continue_replay
2472 	       && cur_dblock <
2473 	       (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2474 		SB_ONDISK_JOURNAL_SIZE(sb))) {
2475 		/* Note that it is required for blocksize of primary fs device and journal
2476 		   device to be the same */
2477 		d_bh =
2478 		    reiserfs_breada(journal->j_dev_bd, cur_dblock,
2479 				    sb->s_blocksize,
2480 				    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2481 				    SB_ONDISK_JOURNAL_SIZE(sb));
2482 		ret =
2483 		    journal_transaction_is_valid(sb, d_bh,
2484 						 &oldest_invalid_trans_id,
2485 						 &newest_mount_id);
2486 		if (ret == 1) {
2487 			desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2488 			if (oldest_start == 0) {	/* init all oldest_ values */
2489 				oldest_trans_id = get_desc_trans_id(desc);
2490 				oldest_start = d_bh->b_blocknr;
2491 				newest_mount_id = get_desc_mount_id(desc);
2492 				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2493 					       "journal-1179: Setting "
2494 					       "oldest_start to offset %llu, trans_id %lu",
2495 					       oldest_start -
2496 					       SB_ONDISK_JOURNAL_1st_BLOCK
2497 					       (sb), oldest_trans_id);
2498 			} else if (oldest_trans_id > get_desc_trans_id(desc)) {
2499 				/* one we just read was older */
2500 				oldest_trans_id = get_desc_trans_id(desc);
2501 				oldest_start = d_bh->b_blocknr;
2502 				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2503 					       "journal-1180: Resetting "
2504 					       "oldest_start to offset %lu, trans_id %lu",
2505 					       oldest_start -
2506 					       SB_ONDISK_JOURNAL_1st_BLOCK
2507 					       (sb), oldest_trans_id);
2508 			}
2509 			if (newest_mount_id < get_desc_mount_id(desc)) {
2510 				newest_mount_id = get_desc_mount_id(desc);
2511 				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2512 					       "journal-1299: Setting "
2513 					       "newest_mount_id to %d",
2514 					       get_desc_mount_id(desc));
2515 			}
2516 			cur_dblock += get_desc_trans_len(desc) + 2;
2517 		} else {
2518 			cur_dblock++;
2519 		}
2520 		brelse(d_bh);
2521 	}
2522 
2523       start_log_replay:
2524 	cur_dblock = oldest_start;
2525 	if (oldest_trans_id) {
2526 		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2527 			       "journal-1206: Starting replay "
2528 			       "from offset %llu, trans_id %lu",
2529 			       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2530 			       oldest_trans_id);
2531 
2532 	}
2533 	replay_count = 0;
2534 	while (continue_replay && oldest_trans_id > 0) {
2535 		ret =
2536 		    journal_read_transaction(sb, cur_dblock, oldest_start,
2537 					     oldest_trans_id, newest_mount_id);
2538 		if (ret < 0) {
2539 			return ret;
2540 		} else if (ret != 0) {
2541 			break;
2542 		}
2543 		cur_dblock =
2544 		    SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
2545 		replay_count++;
2546 		if (cur_dblock == oldest_start)
2547 			break;
2548 	}
2549 
2550 	if (oldest_trans_id == 0) {
2551 		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2552 			       "journal-1225: No valid " "transactions found");
2553 	}
2554 	/* j_start does not get set correctly if we don't replay any transactions.
2555 	 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
2556 	 ** copy the trans_id from the header
2557 	 */
2558 	if (valid_journal_header && replay_count == 0) {
2559 		journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2560 		journal->j_trans_id =
2561 		    le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2562 		/* check for trans_id overflow */
2563 		if (journal->j_trans_id == 0)
2564 			journal->j_trans_id = 10;
2565 		journal->j_last_flush_trans_id =
2566 		    le32_to_cpu(jh->j_last_flush_trans_id);
2567 		journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2568 	} else {
2569 		journal->j_mount_id = newest_mount_id + 1;
2570 	}
2571 	reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2572 		       "newest_mount_id to %lu", journal->j_mount_id);
2573 	journal->j_first_unflushed_offset = journal->j_start;
2574 	if (replay_count > 0) {
2575 		reiserfs_info(sb,
2576 			      "replayed %d transactions in %lu seconds\n",
2577 			      replay_count, get_seconds() - start);
2578 	}
2579 	if (!bdev_read_only(sb->s_bdev) &&
2580 	    _update_journal_header_block(sb, journal->j_start,
2581 					 journal->j_last_flush_trans_id)) {
2582 		/* replay failed, caller must call free_journal_ram and abort
2583 		 ** the mount
2584 		 */
2585 		return -1;
2586 	}
2587 	return 0;
2588 }
2589 
2590 static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2591 {
2592 	struct reiserfs_journal_list *jl;
2593 	jl = kzalloc(sizeof(struct reiserfs_journal_list),
2594 		     GFP_NOFS | __GFP_NOFAIL);
2595 	INIT_LIST_HEAD(&jl->j_list);
2596 	INIT_LIST_HEAD(&jl->j_working_list);
2597 	INIT_LIST_HEAD(&jl->j_tail_bh_list);
2598 	INIT_LIST_HEAD(&jl->j_bh_list);
2599 	mutex_init(&jl->j_commit_mutex);
2600 	SB_JOURNAL(s)->j_num_lists++;
2601 	get_journal_list(jl);
2602 	return jl;
2603 }
2604 
2605 static void journal_list_init(struct super_block *sb)
2606 {
2607 	SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
2608 }
2609 
2610 static int release_journal_dev(struct super_block *super,
2611 			       struct reiserfs_journal *journal)
2612 {
2613 	int result;
2614 
2615 	result = 0;
2616 
2617 	if (journal->j_dev_bd != NULL) {
2618 		if (journal->j_dev_bd->bd_dev != super->s_dev)
2619 			bd_release(journal->j_dev_bd);
2620 		result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
2621 		journal->j_dev_bd = NULL;
2622 	}
2623 
2624 	if (result != 0) {
2625 		reiserfs_warning(super, "sh-457",
2626 				 "Cannot release journal device: %i", result);
2627 	}
2628 	return result;
2629 }
2630 
2631 static int journal_init_dev(struct super_block *super,
2632 			    struct reiserfs_journal *journal,
2633 			    const char *jdev_name)
2634 {
2635 	int result;
2636 	dev_t jdev;
2637 	fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE;
2638 	char b[BDEVNAME_SIZE];
2639 
2640 	result = 0;
2641 
2642 	journal->j_dev_bd = NULL;
2643 	jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2644 	    new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2645 
2646 	if (bdev_read_only(super->s_bdev))
2647 		blkdev_mode = FMODE_READ;
2648 
2649 	/* no "jdev" option given: the journal is on the host device or the device recorded in the super block */
2650 	if ((!jdev_name || !jdev_name[0])) {
2651 		journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
2652 		journal->j_dev_mode = blkdev_mode;
2653 		if (IS_ERR(journal->j_dev_bd)) {
2654 			result = PTR_ERR(journal->j_dev_bd);
2655 			journal->j_dev_bd = NULL;
2656 			reiserfs_warning(super, "sh-458",
2657 					 "cannot init journal device '%s': %i",
2658 					 __bdevname(jdev, b), result);
2659 			return result;
2660 		} else if (jdev != super->s_dev) {
2661 			result = bd_claim(journal->j_dev_bd, journal);
2662 			if (result) {
2663 				blkdev_put(journal->j_dev_bd, blkdev_mode);
2664 				return result;
2665 			}
2666 
2667 			set_blocksize(journal->j_dev_bd, super->s_blocksize);
2668 		}
2669 
2670 		return 0;
2671 	}
2672 
2673 	journal->j_dev_mode = blkdev_mode;
2674 	journal->j_dev_bd = open_bdev_exclusive(jdev_name,
2675 						blkdev_mode, journal);
2676 	if (IS_ERR(journal->j_dev_bd)) {
2677 		result = PTR_ERR(journal->j_dev_bd);
2678 		journal->j_dev_bd = NULL;
2679 		reiserfs_warning(super,
2680 				 "journal_init_dev: Cannot open '%s': %i",
2681 				 jdev_name, result);
2682 		return result;
2683 	}
2684 
2685 	set_blocksize(journal->j_dev_bd, super->s_blocksize);
2686 	reiserfs_info(super,
2687 		      "journal_init_dev: journal device: %s\n",
2688 		      bdevname(journal->j_dev_bd, b));
2689 	return 0;
2690 }
2691 
2692 /**
2693  * When creating/tuning a file system user can assign some
2694  * journal params within boundaries which depend on the ratio
2695  * blocksize/standard_blocksize.
2696  *
2697  * For blocks >= standard_blocksize the transaction size should
2698  * be no less than JOURNAL_TRANS_MIN_DEFAULT, and no more
2699  * than JOURNAL_TRANS_MAX_DEFAULT.
2700  *
2701  * For blocks < standard_blocksize these boundaries should be
2702  * decreased proportionally.
2703  */
2704 #define REISERFS_STANDARD_BLKSIZE (4096)
2705 
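/*
** Worked example (illustrative only): with a 1k blocksize the ratio is
** REISERFS_STANDARD_BLKSIZE / 1024 = 4, so a user-supplied j_trans_max
** must lie between JOURNAL_TRANS_MIN_DEFAULT / 4 and
** JOURNAL_TRANS_MAX_DEFAULT / 4 (the journal size / j_trans_max ratio is
** checked as well), and j_max_batch must equal
** j_trans_max * JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT.
*/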
2706 static int check_advise_trans_params(struct super_block *sb,
2707 				     struct reiserfs_journal *journal)
2708 {
2709         if (journal->j_trans_max) {
2710 	        /* Non-default journal params.
2711 		   Do sanity check for them. */
2712 	        int ratio = 1;
2713 		if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
2714 		        ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;
2715 
2716 		if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
2717 		    journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
2718 		    SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
2719 		    JOURNAL_MIN_RATIO) {
2720 			reiserfs_warning(sb, "sh-462",
2721 					 "bad transaction max size (%u). "
2722 					 "FSCK?", journal->j_trans_max);
2723 			return 1;
2724 		}
2725 		if (journal->j_max_batch != (journal->j_trans_max) *
2726 		        JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) {
2727 			reiserfs_warning(sb, "sh-463",
2728 					 "bad transaction max batch (%u). "
2729 					 "FSCK?", journal->j_max_batch);
2730 			return 1;
2731 		}
2732 	} else {
2733 		/* Default journal params.
2734                    The file system was created by an old version
2735 		   of mkreiserfs, so some fields contain zeros,
2736 		   and we need to advise proper values for them */
2737 		if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
2738 			reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
2739 					 sb->s_blocksize);
2740 			return 1;
2741 		}
2742 		journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2743 		journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2744 		journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2745 	}
2746 	return 0;
2747 }
2748 
2749 /*
2750 ** must be called once on fs mount.  calls journal_read for you
2751 */
2752 int journal_init(struct super_block *sb, const char *j_dev_name,
2753 		 int old_format, unsigned int commit_max_age)
2754 {
2755 	int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2;
2756 	struct buffer_head *bhjh;
2757 	struct reiserfs_super_block *rs;
2758 	struct reiserfs_journal_header *jh;
2759 	struct reiserfs_journal *journal;
2760 	struct reiserfs_journal_list *jl;
2761 	char b[BDEVNAME_SIZE];
2762 	int ret;
2763 
2764 	/*
2765 	 * Unlock here to avoid various RECLAIM-FS-ON <-> IN-RECLAIM-FS
2766 	 * dependency inversion warnings.
2767 	 */
2768 	reiserfs_write_unlock(sb);
2769 	journal = SB_JOURNAL(sb) = vmalloc(sizeof(struct reiserfs_journal));
2770 	if (!journal) {
2771 		reiserfs_warning(sb, "journal-1256",
2772 				 "unable to get memory for journal structure");
2773 		reiserfs_write_lock(sb);
2774 		return 1;
2775 	}
2776 	memset(journal, 0, sizeof(struct reiserfs_journal));
2777 	INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2778 	INIT_LIST_HEAD(&journal->j_prealloc_list);
2779 	INIT_LIST_HEAD(&journal->j_working_list);
2780 	INIT_LIST_HEAD(&journal->j_journal_list);
2781 	journal->j_persistent_trans = 0;
2782 	ret = reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
2783 					   reiserfs_bmap_count(sb));
2784 	reiserfs_write_lock(sb);
2785 	if (ret)
2786 		goto free_and_return;
2787 
2788 	allocate_bitmap_nodes(sb);
2789 
2790 	/* reserved for journal area support */
2791 	SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ?
2792 						 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2793 						 / sb->s_blocksize +
2794 						 reiserfs_bmap_count(sb) +
2795 						 1 :
2796 						 REISERFS_DISK_OFFSET_IN_BYTES /
2797 						 sb->s_blocksize + 2);
2798 
2799 	/* Sanity check to see if the standard journal fits within the first
2800 	   bitmap block (relevant for small blocksizes) */
2801 	if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
2802 	    (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
2803 	     SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
2804 		reiserfs_warning(sb, "journal-1393",
2805 				 "journal does not fit for area addressed "
2806 				 "by first of bitmap blocks. It starts at "
2807 				 "%u and its size is %u. Block size %ld",
2808 				 SB_JOURNAL_1st_RESERVED_BLOCK(sb),
2809 				 SB_ONDISK_JOURNAL_SIZE(sb),
2810 				 sb->s_blocksize);
2811 		goto free_and_return;
2812 	}
2813 
2814 	/*
2815 	 * We need to unlock here to avoid creating the following
2816 	 * dependency:
2817 	 * reiserfs_lock -> sysfs_mutex
2818 	 * Because the reiserfs mmap path creates the following dependency:
2819 	 * mm->mmap -> reiserfs_lock, hence we have
2820 	 * mm->mmap -> reiserfs_lock ->sysfs_mutex
2821 	 * This would end up in a circular dependency with the sysfs readdir path
2822 	 * which does sysfs_mutex -> mm->mmap_sem
2823 	 * This is fine because the reiserfs lock is useless in mount path,
2824 	 * at least until we call journal_begin. We keep it for paranoid
2825 	 * reasons.
2826 	 */
2827 	reiserfs_write_unlock(sb);
2828 	if (journal_init_dev(sb, journal, j_dev_name) != 0) {
2829 		reiserfs_write_lock(sb);
2830 		reiserfs_warning(sb, "sh-462",
2831 				 "unable to initialize journal device");
2832 		goto free_and_return;
2833 	}
2834 	reiserfs_write_lock(sb);
2835 
2836 	rs = SB_DISK_SUPER_BLOCK(sb);
2837 
2838 	/* read journal header */
2839 	bhjh = journal_bread(sb,
2840 			     SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2841 			     SB_ONDISK_JOURNAL_SIZE(sb));
2842 	if (!bhjh) {
2843 		reiserfs_warning(sb, "sh-459",
2844 				 "unable to read journal header");
2845 		goto free_and_return;
2846 	}
2847 	jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2848 
2849 	/* make sure that the journal matches the super block */
2850 	if (is_reiserfs_jr(rs)
2851 	    && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2852 		sb_jp_journal_magic(rs))) {
2853 		reiserfs_warning(sb, "sh-460",
2854 				 "journal header magic %x (device %s) does "
2855 				 "not match to magic found in super block %x",
2856 				 jh->jh_journal.jp_journal_magic,
2857 				 bdevname(journal->j_dev_bd, b),
2858 				 sb_jp_journal_magic(rs));
2859 		brelse(bhjh);
2860 		goto free_and_return;
2861 	}
2862 
2863 	journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2864 	journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2865 	journal->j_max_commit_age =
2866 	    le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2867 	journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2868 
2869 	if (check_advise_trans_params(sb, journal) != 0)
2870 	        goto free_and_return;
2871 	journal->j_default_max_commit_age = journal->j_max_commit_age;
2872 
2873 	if (commit_max_age != 0) {
2874 		journal->j_max_commit_age = commit_max_age;
2875 		journal->j_max_trans_age = commit_max_age;
2876 	}
2877 
2878 	reiserfs_info(sb, "journal params: device %s, size %u, "
2879 		      "journal first block %u, max trans len %u, max batch %u, "
2880 		      "max commit age %u, max trans age %u\n",
2881 		      bdevname(journal->j_dev_bd, b),
2882 		      SB_ONDISK_JOURNAL_SIZE(sb),
2883 		      SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2884 		      journal->j_trans_max,
2885 		      journal->j_max_batch,
2886 		      journal->j_max_commit_age, journal->j_max_trans_age);
2887 
2888 	brelse(bhjh);
2889 
2890 	journal->j_list_bitmap_index = 0;
2891 	journal_list_init(sb);
2892 
2893 	memset(journal->j_list_hash_table, 0,
2894 	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2895 
2896 	INIT_LIST_HEAD(&journal->j_dirty_buffers);
2897 	spin_lock_init(&journal->j_dirty_buffers_lock);
2898 
2899 	journal->j_start = 0;
2900 	journal->j_len = 0;
2901 	journal->j_len_alloc = 0;
2902 	atomic_set(&(journal->j_wcount), 0);
2903 	atomic_set(&(journal->j_async_throttle), 0);
2904 	journal->j_bcount = 0;
2905 	journal->j_trans_start_time = 0;
2906 	journal->j_last = NULL;
2907 	journal->j_first = NULL;
2908 	init_waitqueue_head(&(journal->j_join_wait));
2909 	mutex_init(&journal->j_mutex);
2910 	mutex_init(&journal->j_flush_mutex);
2911 
2912 	journal->j_trans_id = 10;
2913 	journal->j_mount_id = 10;
2914 	journal->j_state = 0;
2915 	atomic_set(&(journal->j_jlock), 0);
2916 	journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2917 	journal->j_cnode_free_orig = journal->j_cnode_free_list;
2918 	journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2919 	journal->j_cnode_used = 0;
2920 	journal->j_must_wait = 0;
2921 
2922 	if (journal->j_cnode_free == 0) {
2923 		reiserfs_warning(sb, "journal-2004", "Journal cnode memory "
2924 		                 "allocation failed (%ld bytes). Journal is "
2925 		                 "too large for available memory. Usually "
2926 		                 "this is due to a journal that is too large.",
2927 		                 sizeof (struct reiserfs_journal_cnode) * num_cnodes);
2928         	goto free_and_return;
2929 	}
2930 
2931 	init_journal_hash(sb);
2932 	jl = journal->j_current_jl;
2933 	jl->j_list_bitmap = get_list_bitmap(sb, jl);
2934 	if (!jl->j_list_bitmap) {
2935 		reiserfs_warning(sb, "journal-2005",
2936 				 "get_list_bitmap failed for journal list 0");
2937 		goto free_and_return;
2938 	}
2939 	if (journal_read(sb) < 0) {
2940 		reiserfs_warning(sb, "reiserfs-2006",
2941 				 "Replay Failure, unable to mount");
2942 		goto free_and_return;
2943 	}
2944 
2945 	reiserfs_mounted_fs_count++;
2946 	if (reiserfs_mounted_fs_count <= 1) {
2947 		reiserfs_write_unlock(sb);
2948 		commit_wq = create_workqueue("reiserfs");
2949 		reiserfs_write_lock(sb);
2950 	}
2951 
2952 	INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
2953 	journal->j_work_sb = sb;
2954 	return 0;
2955       free_and_return:
2956 	free_journal_ram(sb);
2957 	return 1;
2958 }
2959 
2960 /*
2961 ** test for a polite end of the current transaction.  Used by file_write, and should
2962 ** be used by delete to make sure they don't write more than can fit inside a single
2963 ** transaction
2964 */
2965 int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2966 				   int new_alloc)
2967 {
2968 	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2969 	time_t now = get_seconds();
2970 	/* cannot restart while nested */
2971 	BUG_ON(!th->t_trans_id);
2972 	if (th->t_refcount > 1)
2973 		return 0;
2974 	if (journal->j_must_wait > 0 ||
2975 	    (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2976 	    atomic_read(&(journal->j_jlock)) ||
2977 	    (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2978 	    journal->j_cnode_free < (journal->j_trans_max * 3)) {
2979 		return 1;
2980 	}
2981 	/* protected by the BKL here */
2982 	journal->j_len_alloc += new_alloc;
2983 	th->t_blocks_allocated += new_alloc;
2984 	return 0;
2985 }
2986 
2987 /* this must be called inside a transaction, and requires the
2988 ** kernel_lock to be held
2989 */
2990 void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2991 {
2992 	struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2993 	BUG_ON(!th->t_trans_id);
2994 	journal->j_must_wait = 1;
2995 	set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2996 	return;
2997 }
2998 
2999 /* this must be called without a transaction started, and does not
3000 ** require BKL
3001 */
3002 void reiserfs_allow_writes(struct super_block *s)
3003 {
3004 	struct reiserfs_journal *journal = SB_JOURNAL(s);
3005 	clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
3006 	wake_up(&journal->j_join_wait);
3007 }
3008 
3009 /* this must be called without a transaction started, and does not
3010 ** require BKL
3011 */
3012 void reiserfs_wait_on_write_block(struct super_block *s)
3013 {
3014 	struct reiserfs_journal *journal = SB_JOURNAL(s);
3015 	wait_event(journal->j_join_wait,
3016 		   !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
3017 }
3018 
3019 static void queue_log_writer(struct super_block *s)
3020 {
3021 	wait_queue_t wait;
3022 	struct reiserfs_journal *journal = SB_JOURNAL(s);
3023 	set_bit(J_WRITERS_QUEUED, &journal->j_state);
3024 
3025 	/*
3026 	 * we don't want to use wait_event here because
3027 	 * we only want to wait once.
3028 	 */
3029 	init_waitqueue_entry(&wait, current);
3030 	add_wait_queue(&journal->j_join_wait, &wait);
3031 	set_current_state(TASK_UNINTERRUPTIBLE);
3032 	if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
3033 		reiserfs_write_unlock(s);
3034 		schedule();
3035 		reiserfs_write_lock(s);
3036 	}
3037 	__set_current_state(TASK_RUNNING);
3038 	remove_wait_queue(&journal->j_join_wait, &wait);
3039 }
3040 
3041 static void wake_queued_writers(struct super_block *s)
3042 {
3043 	struct reiserfs_journal *journal = SB_JOURNAL(s);
3044 	if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
3045 		wake_up(&journal->j_join_wait);
3046 }
3047 
3048 static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
3049 {
3050 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3051 	unsigned long bcount = journal->j_bcount;
3052 	while (1) {
3053 		reiserfs_write_unlock(sb);
3054 		schedule_timeout_uninterruptible(1);
3055 		reiserfs_write_lock(sb);
3056 		journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
3057 		while ((atomic_read(&journal->j_wcount) > 0 ||
3058 			atomic_read(&journal->j_jlock)) &&
3059 		       journal->j_trans_id == trans_id) {
3060 			queue_log_writer(sb);
3061 		}
3062 		if (journal->j_trans_id != trans_id)
3063 			break;
3064 		if (bcount == journal->j_bcount)
3065 			break;
3066 		bcount = journal->j_bcount;
3067 	}
3068 }
3069 
3070 /* join == true if you must join an existing transaction.
3071 ** join == false if you can deal with waiting for others to finish
3072 **
3073 ** this will block until the transaction is joinable.  send the number of blocks you
3074 ** expect to use in nblocks.
3075 */
3076 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
3077 			      struct super_block *sb, unsigned long nblocks,
3078 			      int join)
3079 {
3080 	time_t now = get_seconds();
3081 	unsigned int old_trans_id;
3082 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3083 	struct reiserfs_transaction_handle myth;
3084 	int sched_count = 0;
3085 	int retval;
3086 
3087 	reiserfs_check_lock_depth(sb, "journal_begin");
3088 	BUG_ON(nblocks > journal->j_trans_max);
3089 
3090 	PROC_INFO_INC(sb, journal.journal_being);
3091 	/* set here for journal_join */
3092 	th->t_refcount = 1;
3093 	th->t_super = sb;
3094 
3095       relock:
3096 	lock_journal(sb);
3097 	if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
3098 		unlock_journal(sb);
3099 		retval = journal->j_errno;
3100 		goto out_fail;
3101 	}
3102 	journal->j_bcount++;
3103 
3104 	if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
3105 		unlock_journal(sb);
3106 		reiserfs_write_unlock(sb);
3107 		reiserfs_wait_on_write_block(sb);
3108 		reiserfs_write_lock(sb);
3109 		PROC_INFO_INC(sb, journal.journal_relock_writers);
3110 		goto relock;
3111 	}
3112 	now = get_seconds();
3113 
3114 	/* if there is no room in the journal OR
3115 	 ** if this transaction is too old and we weren't called joinable, wait for it to finish before beginning.
3116 	 ** We don't sleep if there aren't other writers
3117 	 */
3118 
3119 	if ((!join && journal->j_must_wait > 0) ||
3120 	    (!join
3121 	     && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
3122 	    || (!join && atomic_read(&journal->j_wcount) > 0
3123 		&& journal->j_trans_start_time > 0
3124 		&& (now - journal->j_trans_start_time) >
3125 		journal->j_max_trans_age) || (!join
3126 					      && atomic_read(&journal->j_jlock))
3127 	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
3128 
3129 		old_trans_id = journal->j_trans_id;
3130 		unlock_journal(sb);	/* allow others to finish this transaction */
3131 
3132 		if (!join && (journal->j_len_alloc + nblocks + 2) >=
3133 		    journal->j_max_batch &&
3134 		    ((journal->j_len + nblocks + 2) * 100) <
3135 		    (journal->j_len_alloc * 75)) {
3136 			if (atomic_read(&journal->j_wcount) > 10) {
3137 				sched_count++;
3138 				queue_log_writer(sb);
3139 				goto relock;
3140 			}
3141 		}
3142 		/* don't mess with joining the transaction if all we have to do is
3143 		 * wait for someone else to do a commit
3144 		 */
3145 		if (atomic_read(&journal->j_jlock)) {
3146 			while (journal->j_trans_id == old_trans_id &&
3147 			       atomic_read(&journal->j_jlock)) {
3148 				queue_log_writer(sb);
3149 			}
3150 			goto relock;
3151 		}
3152 		retval = journal_join(&myth, sb, 1);
3153 		if (retval)
3154 			goto out_fail;
3155 
3156 		/* someone might have ended the transaction while we joined */
3157 		if (old_trans_id != journal->j_trans_id) {
3158 			retval = do_journal_end(&myth, sb, 1, 0);
3159 		} else {
3160 			retval = do_journal_end(&myth, sb, 1, COMMIT_NOW);
3161 		}
3162 
3163 		if (retval)
3164 			goto out_fail;
3165 
3166 		PROC_INFO_INC(sb, journal.journal_relock_wcount);
3167 		goto relock;
3168 	}
3169 	/* we are the first writer, set trans_id */
3170 	if (journal->j_trans_start_time == 0) {
3171 		journal->j_trans_start_time = get_seconds();
3172 	}
3173 	atomic_inc(&(journal->j_wcount));
3174 	journal->j_len_alloc += nblocks;
3175 	th->t_blocks_logged = 0;
3176 	th->t_blocks_allocated = nblocks;
3177 	th->t_trans_id = journal->j_trans_id;
3178 	unlock_journal(sb);
3179 	INIT_LIST_HEAD(&th->t_list);
3180 	get_fs_excl();
3181 	return 0;
3182 
3183       out_fail:
3184 	memset(th, 0, sizeof(*th));
3185 	/* Re-set th->t_super, so we can properly keep track of how many
3186 	 * persistent transactions there are. We need to do this so if this
3187 	 * call is part of a failed restart_transaction, we can free it later */
3188 	th->t_super = sb;
3189 	return retval;
3190 }
3191 
3192 struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3193 								    super_block
3194 								    *s,
3195 								    int nblocks)
3196 {
3197 	int ret;
3198 	struct reiserfs_transaction_handle *th;
3199 
3200 	/* if we're nesting into an existing transaction, it will be
3201 	 ** persistent on its own
3202 	 */
3203 	if (reiserfs_transaction_running(s)) {
3204 		th = current->journal_info;
3205 		th->t_refcount++;
3206 		BUG_ON(th->t_refcount < 2);
3207 
3208 		return th;
3209 	}
3210 	th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
3211 	if (!th)
3212 		return NULL;
3213 	ret = journal_begin(th, s, nblocks);
3214 	if (ret) {
3215 		kfree(th);
3216 		return NULL;
3217 	}
3218 
3219 	SB_JOURNAL(s)->j_persistent_trans++;
3220 	return th;
3221 }
3222 
3223 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3224 {
3225 	struct super_block *s = th->t_super;
3226 	int ret = 0;
3227 	if (th->t_trans_id)
3228 		ret = journal_end(th, th->t_super, th->t_blocks_allocated);
3229 	else
3230 		ret = -EIO;
3231 	if (th->t_refcount == 0) {
3232 		SB_JOURNAL(s)->j_persistent_trans--;
3233 		kfree(th);
3234 	}
3235 	return ret;
3236 }
3237 
3238 static int journal_join(struct reiserfs_transaction_handle *th,
3239 			struct super_block *sb, unsigned long nblocks)
3240 {
3241 	struct reiserfs_transaction_handle *cur_th = current->journal_info;
3242 
3243 	/* this keeps do_journal_end from NULLing out the current->journal_info
3244 	 ** pointer
3245 	 */
3246 	th->t_handle_save = cur_th;
3247 	BUG_ON(cur_th && cur_th->t_refcount > 1);
3248 	return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);
3249 }
3250 
3251 int journal_join_abort(struct reiserfs_transaction_handle *th,
3252 		       struct super_block *sb, unsigned long nblocks)
3253 {
3254 	struct reiserfs_transaction_handle *cur_th = current->journal_info;
3255 
3256 	/* this keeps do_journal_end from NULLing out the current->journal_info
3257 	 ** pointer
3258 	 */
3259 	th->t_handle_save = cur_th;
3260 	BUG_ON(cur_th && cur_th->t_refcount > 1);
3261 	return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);
3262 }
3263 
3264 int journal_begin(struct reiserfs_transaction_handle *th,
3265 		  struct super_block *sb, unsigned long nblocks)
3266 {
3267 	struct reiserfs_transaction_handle *cur_th = current->journal_info;
3268 	int ret;
3269 
3270 	th->t_handle_save = NULL;
3271 	if (cur_th) {
3272 		/* we are nesting into the current transaction */
3273 		if (cur_th->t_super == sb) {
3274 			BUG_ON(!cur_th->t_refcount);
3275 			cur_th->t_refcount++;
3276 			memcpy(th, cur_th, sizeof(*th));
3277 			if (th->t_refcount <= 1)
3278 				reiserfs_warning(sb, "reiserfs-2005",
3279 						 "BAD: refcount <= 1, but "
3280 						 "journal_info != 0");
3281 			return 0;
3282 		} else {
3283 			/* we've ended up with a handle from a different filesystem.
3284 			 ** save it and restore on journal_end.  This should never
3285 			 ** really happen...
3286 			 */
3287 			reiserfs_warning(sb, "clm-2100",
3288 					 "nesting into a different FS");
3289 			th->t_handle_save = current->journal_info;
3290 			current->journal_info = th;
3291 		}
3292 	} else {
3293 		current->journal_info = th;
3294 	}
3295 	ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
3296 	BUG_ON(current->journal_info != th);
3297 
3298 	/* I guess this boils down to being the reciprocal of clm-2100 above.
3299 	 * If do_journal_begin_r fails, we need to put it back, since journal_end
3300 	 * won't be called to do it. */
3301 	if (ret)
3302 		current->journal_info = th->t_handle_save;
3303 	else
3304 		BUG_ON(!th->t_refcount);
3305 
3306 	return ret;
3307 }
3308 
3309 /*
3310 ** puts bh into the current transaction.  If it was already there, it is reordered: the
3311 ** old pointers are removed from the hash and new ones put in (to make sure replay happens in the right order).
3312 **
3313 ** if it was dirty, cleans and files onto the clean list.  I can't let it be dirty again until the
3314 ** transaction is committed.
3315 **
3316 ** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to j_len + JOURNAL_PER_BALANCE_CNT.
3317 */
3318 int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3319 		       struct super_block *sb, struct buffer_head *bh)
3320 {
3321 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3322 	struct reiserfs_journal_cnode *cn = NULL;
3323 	int count_already_incd = 0;
3324 	int prepared = 0;
3325 	BUG_ON(!th->t_trans_id);
3326 
3327 	PROC_INFO_INC(sb, journal.mark_dirty);
3328 	if (th->t_trans_id != journal->j_trans_id) {
3329 		reiserfs_panic(th->t_super, "journal-1577",
3330 			       "handle trans id %ld != current trans id %ld",
3331 			       th->t_trans_id, journal->j_trans_id);
3332 	}
3333 
3334 	sb->s_dirt = 1;
3335 
3336 	prepared = test_clear_buffer_journal_prepared(bh);
3337 	clear_buffer_journal_restore_dirty(bh);
3338 	/* already in this transaction, we are done */
3339 	if (buffer_journaled(bh)) {
3340 		PROC_INFO_INC(sb, journal.mark_dirty_already);
3341 		return 0;
3342 	}
3343 
3344 	/* this must be turned into a panic instead of a warning.  We can't allow
3345 	 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
3346 	 ** could get to disk too early.  NOT GOOD.
3347 	 */
3348 	if (!prepared || buffer_dirty(bh)) {
3349 		reiserfs_warning(sb, "journal-1777",
3350 				 "buffer %llu bad state "
3351 				 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3352 				 (unsigned long long)bh->b_blocknr,
3353 				 prepared ? ' ' : '!',
3354 				 buffer_locked(bh) ? ' ' : '!',
3355 				 buffer_dirty(bh) ? ' ' : '!',
3356 				 buffer_journal_dirty(bh) ? ' ' : '!');
3357 	}
3358 
3359 	if (atomic_read(&(journal->j_wcount)) <= 0) {
3360 		reiserfs_warning(sb, "journal-1409",
3361 				 "returning because j_wcount was %d",
3362 				 atomic_read(&(journal->j_wcount)));
3363 		return 1;
3364 	}
3365 	/* this error means I've screwed up, and we've overflowed the transaction.
3366 	 ** Nothing can be done here, except make the FS readonly or panic.
3367 	 */
3368 	if (journal->j_len >= journal->j_trans_max) {
3369 		reiserfs_panic(th->t_super, "journal-1413",
3370 			       "j_len (%lu) is too big",
3371 			       journal->j_len);
3372 	}
3373 
3374 	if (buffer_journal_dirty(bh)) {
3375 		count_already_incd = 1;
3376 		PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
3377 		clear_buffer_journal_dirty(bh);
3378 	}
3379 
3380 	if (journal->j_len > journal->j_len_alloc) {
3381 		journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3382 	}
3383 
3384 	set_buffer_journaled(bh);
3385 
3386 	/* now put this guy on the end */
3387 	if (!cn) {
3388 		cn = get_cnode(sb);
3389 		if (!cn) {
3390 			reiserfs_panic(sb, "journal-4", "get_cnode failed!");
3391 		}
3392 
3393 		if (th->t_blocks_logged == th->t_blocks_allocated) {
3394 			th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3395 			journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3396 		}
3397 		th->t_blocks_logged++;
3398 		journal->j_len++;
3399 
3400 		cn->bh = bh;
3401 		cn->blocknr = bh->b_blocknr;
3402 		cn->sb = sb;
3403 		cn->jlist = NULL;
3404 		insert_journal_hash(journal->j_hash_table, cn);
3405 		if (!count_already_incd) {
3406 			get_bh(bh);
3407 		}
3408 	}
3409 	cn->next = NULL;
3410 	cn->prev = journal->j_last;
3411 	cn->bh = bh;
3412 	if (journal->j_last) {
3413 		journal->j_last->next = cn;
3414 		journal->j_last = cn;
3415 	} else {
3416 		journal->j_first = cn;
3417 		journal->j_last = cn;
3418 	}
3419 	return 0;
3420 }
3421 
3422 int journal_end(struct reiserfs_transaction_handle *th,
3423 		struct super_block *sb, unsigned long nblocks)
3424 {
3425 	if (!current->journal_info && th->t_refcount > 1)
3426 		reiserfs_warning(sb, "REISER-NESTING",
3427 				 "th NULL, refcount %d", th->t_refcount);
3428 
3429 	if (!th->t_trans_id) {
3430 		WARN_ON(1);
3431 		return -EIO;
3432 	}
3433 
3434 	th->t_refcount--;
3435 	if (th->t_refcount > 0) {
3436 		struct reiserfs_transaction_handle *cur_th =
3437 		    current->journal_info;
3438 
3439 		/* we aren't allowed to close a nested transaction on a different
3440 		 ** filesystem from the one in the task struct
3441 		 */
3442 		BUG_ON(cur_th->t_super != th->t_super);
3443 
3444 		if (th != cur_th) {
3445 			memcpy(current->journal_info, th, sizeof(*th));
3446 			th->t_trans_id = 0;
3447 		}
3448 		return 0;
3449 	} else {
3450 		return do_journal_end(th, sb, nblocks, 0);
3451 	}
3452 }
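
/*
 * A rough sketch of the nesting behaviour above (illustrative only; both
 * handles must belong to the same filesystem):
 *
 *	journal_begin(&th, sb, 10);	refcount on the handle becomes 1
 *	journal_begin(&th2, sb, 10);	joins th, refcount becomes 2
 *	journal_end(&th2, sb, 10);	refcount 1, nothing is committed
 *	journal_end(&th, sb, 10);	refcount 0, do_journal_end() runs
 */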
3453 
3454 /* removes a buffer from the current transaction, brelse-ing it and decrementing any counters.
3455 ** also files the removed buffer directly onto the clean list
3456 **
3457 ** called by journal_mark_freed when a block has been deleted
3458 **
3459 ** returns 1 if it cleaned and brelse'd the buffer, 0 otherwise
3460 */
3461 static int remove_from_transaction(struct super_block *sb,
3462 				   b_blocknr_t blocknr, int already_cleaned)
3463 {
3464 	struct buffer_head *bh;
3465 	struct reiserfs_journal_cnode *cn;
3466 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3467 	int ret = 0;
3468 
3469 	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
3470 	if (!cn || !cn->bh) {
3471 		return ret;
3472 	}
3473 	bh = cn->bh;
3474 	if (cn->prev) {
3475 		cn->prev->next = cn->next;
3476 	}
3477 	if (cn->next) {
3478 		cn->next->prev = cn->prev;
3479 	}
3480 	if (cn == journal->j_first) {
3481 		journal->j_first = cn->next;
3482 	}
3483 	if (cn == journal->j_last) {
3484 		journal->j_last = cn->prev;
3485 	}
3486 	if (bh)
3487 		remove_journal_hash(sb, journal->j_hash_table, NULL,
3488 				    bh->b_blocknr, 0);
3489 	clear_buffer_journaled(bh);	/* don't log this one */
3490 
3491 	if (!already_cleaned) {
3492 		clear_buffer_journal_dirty(bh);
3493 		clear_buffer_dirty(bh);
3494 		clear_buffer_journal_test(bh);
3495 		put_bh(bh);
3496 		if (atomic_read(&(bh->b_count)) < 0) {
3497 			reiserfs_warning(sb, "journal-1752",
3498 					 "b_count < 0");
3499 		}
3500 		ret = 1;
3501 	}
3502 	journal->j_len--;
3503 	journal->j_len_alloc--;
3504 	free_cnode(sb, cn);
3505 	return ret;
3506 }
3507 
3508 /*
3509 ** a cnode in a journal list can only be dirtied if all of the
3510 ** transactions that include it have been committed to disk.
3511 ** this checks through each transaction, and returns 1 if you are allowed to dirty,
3512 ** and 0 if you aren't
3513 **
3514 ** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
3515 ** blocks for a given transaction on disk
3516 **
3517 */
3518 static int can_dirty(struct reiserfs_journal_cnode *cn)
3519 {
3520 	struct super_block *sb = cn->sb;
3521 	b_blocknr_t blocknr = cn->blocknr;
3522 	struct reiserfs_journal_cnode *cur = cn->hprev;
3523 	int can_dirty = 1;
3524 
3525 	/* first test hprev.  These are all newer than cn, so any node here
3526 	 ** with the same block number and dev means this node can't be sent
3527 	 ** to disk right now.
3528 	 */
3529 	while (cur && can_dirty) {
3530 		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3531 		    cur->blocknr == blocknr) {
3532 			can_dirty = 0;
3533 		}
3534 		cur = cur->hprev;
3535 	}
3536 	/* then test hnext.  These are all older than cn.  As long as they
3537 	 ** are committed to the log, it is safe to write cn to disk
3538 	 */
3539 	cur = cn->hnext;
3540 	while (cur && can_dirty) {
3541 		if (cur->jlist && cur->jlist->j_len > 0 &&
3542 		    atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
3543 		    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3544 			can_dirty = 0;
3545 		}
3546 		cur = cur->hnext;
3547 	}
3548 	return can_dirty;
3549 }
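
/*
 * An illustrative picture of the chains walked above.  Suppose block B
 * was logged in transactions T10, T11 and T12; then for the cnode that
 * belongs to T11:
 *
 *	newest                                oldest
 *	cn(T12,B) <-hprev- cn(T11,B) -hnext-> cn(T10,B)
 *
 * cn(T11,B) may be dirtied only when no newer cnode for B exists and
 * every older transaction holding B has finished committing its log.
 */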
3550 
3551 /* syncs the commit blocks, but does not force the real buffers to disk.
3552 ** It will wait until the current transaction is done/committed before returning.
3553 */
3554 int journal_end_sync(struct reiserfs_transaction_handle *th,
3555 		     struct super_block *sb, unsigned long nblocks)
3556 {
3557 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3558 
3559 	BUG_ON(!th->t_trans_id);
3560 	/* syncing while nested is very, very bad */
3561 	BUG_ON(th->t_refcount > 1);
3562 	if (journal->j_len == 0) {
3563 		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3564 					     1);
3565 		journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
3566 	}
3567 	return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
3568 }
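
/*
 * Sketch of a typical caller (illustrative; a real caller must check the
 * return value of journal_begin):
 *
 *	struct reiserfs_transaction_handle th;
 *
 *	if (!journal_begin(&th, sb, 1))
 *		journal_end_sync(&th, sb, 1);
 *
 * On return the transaction's commit block is on disk.
 */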
3569 
3570 /*
3571 ** writeback the pending async commits to disk
3572 */
3573 static void flush_async_commits(struct work_struct *work)
3574 {
3575 	struct reiserfs_journal *journal =
3576 		container_of(work, struct reiserfs_journal, j_work.work);
3577 	struct super_block *sb = journal->j_work_sb;
3578 	struct reiserfs_journal_list *jl;
3579 	struct list_head *entry;
3580 
3581 	reiserfs_write_lock(sb);
3582 	if (!list_empty(&journal->j_journal_list)) {
3583 		/* last entry is the youngest, commit it and you get everything */
3584 		entry = journal->j_journal_list.prev;
3585 		jl = JOURNAL_LIST_ENTRY(entry);
3586 		flush_commit_list(sb, jl, 1);
3587 	}
3588 	reiserfs_write_unlock(sb);
3589 }
3590 
3591 /*
3592 ** flushes any old transactions to disk
3593 ** ends the current transaction if it is too old
3594 */
3595 int reiserfs_flush_old_commits(struct super_block *sb)
3596 {
3597 	time_t now;
3598 	struct reiserfs_transaction_handle th;
3599 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3600 
3601 	now = get_seconds();
3602 	/* safety check so we don't flush while we are replaying the log during
3603 	 * mount
3604 	 */
3605 	if (list_empty(&journal->j_journal_list)) {
3606 		return 0;
3607 	}
3608 
3609 	/* check the current transaction.  If there are no writers, and it is
3610 	 * too old, finish it, and force the commit blocks to disk
3611 	 */
3612 	if (atomic_read(&journal->j_wcount) <= 0 &&
3613 	    journal->j_trans_start_time > 0 &&
3614 	    journal->j_len > 0 &&
3615 	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3616 		if (!journal_join(&th, sb, 1)) {
3617 			reiserfs_prepare_for_journal(sb,
3618 						     SB_BUFFER_WITH_SB(sb),
3619 						     1);
3620 			journal_mark_dirty(&th, sb,
3621 					   SB_BUFFER_WITH_SB(sb));
3622 
3623 			/* we're only called from kreiserfsd; it makes no sense to queue
3624 			 ** an async commit just so kreiserfsd can do it later
3625 			 */
3626 			do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
3627 		}
3628 	}
3629 	return sb->s_dirt;
3630 }
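
/*
 * This is meant to be driven periodically.  A plausible hook (a sketch;
 * the exact call site lives outside this file) is the superblock
 * write-out path:
 *
 *	if (sb->s_dirt)
 *		reiserfs_flush_old_commits(sb);
 */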
3631 
3632 /*
3633 ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
3634 **
3635 ** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
3636 ** the writers are done.  By the time it wakes up, the transaction it was called on has already ended, so it just
3637 ** flushes the commit list and returns 0.
3638 **
3639 ** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
3640 **
3641 ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
3642 */
3643 static int check_journal_end(struct reiserfs_transaction_handle *th,
3644 			     struct super_block *sb, unsigned long nblocks,
3645 			     int flags)
3646 {
3647 
3648 	time_t now;
3649 	int flush = flags & FLUSH_ALL;
3650 	int commit_now = flags & COMMIT_NOW;
3651 	int wait_on_commit = flags & WAIT;
3652 	struct reiserfs_journal_list *jl;
3653 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3654 
3655 	BUG_ON(!th->t_trans_id);
3656 
3657 	if (th->t_trans_id != journal->j_trans_id) {
3658 		reiserfs_panic(th->t_super, "journal-1577",
3659 			       "handle trans id %ld != current trans id %ld",
3660 			       th->t_trans_id, journal->j_trans_id);
3661 	}
3662 
3663 	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
3664 	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed.  unmounting might not call begin */
3665 		atomic_dec(&(journal->j_wcount));
3666 	}
3667 
3668 	/* BUG: deal with the case where j_len is 0 but blocks freed earlier still need to be released.
3669 	 ** That will be dealt with by the next transaction that actually writes something, but it should be taken
3670 	 ** care of in this trans
3671 	 */
3672 	BUG_ON(journal->j_len == 0);
3673 
3674 	/* if wcount > 0, and we are called with flush or commit_now,
3675 	 ** we wait on j_join_wait.  We will wake up when the last writer has
3676 	 ** finished the transaction, and started it on its way to the disk.
3677 	 ** Then, we flush the commit or journal list, and just return 0
3678 	 ** because the rest of journal end was already done for this transaction.
3679 	 */
3680 	if (atomic_read(&(journal->j_wcount)) > 0) {
3681 		if (flush || commit_now) {
3682 			unsigned trans_id;
3683 
3684 			jl = journal->j_current_jl;
3685 			trans_id = jl->j_trans_id;
3686 			if (wait_on_commit)
3687 				jl->j_state |= LIST_COMMIT_PENDING;
3688 			atomic_set(&(journal->j_jlock), 1);
3689 			if (flush) {
3690 				journal->j_next_full_flush = 1;
3691 			}
3692 			unlock_journal(sb);
3693 
3694 			/* sleep while the current transaction is still j_jlocked */
3695 			while (journal->j_trans_id == trans_id) {
3696 				if (atomic_read(&journal->j_jlock)) {
3697 					queue_log_writer(sb);
3698 				} else {
3699 					lock_journal(sb);
3700 					if (journal->j_trans_id == trans_id) {
3701 						atomic_set(&(journal->j_jlock),
3702 							   1);
3703 					}
3704 					unlock_journal(sb);
3705 				}
3706 			}
3707 			BUG_ON(journal->j_trans_id == trans_id);
3708 
3709 			if (commit_now
3710 			    && journal_list_still_alive(sb, trans_id)
3711 			    && wait_on_commit) {
3712 				flush_commit_list(sb, jl, 1);
3713 			}
3714 			return 0;
3715 		}
3716 		unlock_journal(sb);
3717 		return 0;
3718 	}
3719 
3720 	/* deal with old transactions where we are the last writers */
3721 	now = get_seconds();
3722 	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3723 		commit_now = 1;
3724 		journal->j_next_async_flush = 1;
3725 	}
3726 	/* don't batch when someone is waiting on j_join_wait */
3727 	/* don't batch when syncing the commit or flushing the whole trans */
3728 	if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
3729 	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
3730 	    && journal->j_len_alloc < journal->j_max_batch
3731 	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
3732 		journal->j_bcount++;
3733 		unlock_journal(sb);
3734 		return 0;
3735 	}
3736 
3737 	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
3738 		reiserfs_panic(sb, "journal-003",
3739 			       "j_start (%ld) is too high",
3740 			       journal->j_start);
3741 	}
3742 	return 1;
3743 }
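
/*
 * A worked example of the batching test above, with hypothetical
 * numbers: say j_max_batch = 900, j_trans_max = 1024, j_len = 200,
 * nobody waiting on j_join_wait and j_cnode_free well above 3072
 * (3 * j_trans_max).  Then a plain journal_end just bumps j_bcount,
 * unlocks the journal and returns 0, so the transaction keeps batching.
 * Only when one of those limits trips (or flush/commit_now is set) does
 * this return 1 and let do_journal_end write the transaction out.
 */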
3744 
3745 /*
3746 ** Does all the work that makes deleting blocks safe.
3747 ** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
3748 **
3749 ** otherwise:
3750 ** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
3751 ** before this transaction has finished.
3752 **
3753 ** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
3754 ** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
3755 ** the block can't be reallocated yet.
3756 **
3757 ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
3758 */
3759 int journal_mark_freed(struct reiserfs_transaction_handle *th,
3760 		       struct super_block *sb, b_blocknr_t blocknr)
3761 {
3762 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3763 	struct reiserfs_journal_cnode *cn = NULL;
3764 	struct buffer_head *bh = NULL;
3765 	struct reiserfs_list_bitmap *jb = NULL;
3766 	int cleaned = 0;
3767 	BUG_ON(!th->t_trans_id);
3768 
3769 	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
3770 	if (cn && cn->bh) {
3771 		bh = cn->bh;
3772 		get_bh(bh);
3773 	}
3774 	/* if it is journal new, we just remove it from this transaction */
3775 	if (bh && buffer_journal_new(bh)) {
3776 		clear_buffer_journal_new(bh);
3777 		clear_prepared_bits(bh);
3778 		reiserfs_clean_and_file_buffer(bh);
3779 		cleaned = remove_from_transaction(sb, blocknr, cleaned);
3780 	} else {
3781 		/* set the bit for this block in the journal bitmap for this transaction */
3782 		jb = journal->j_current_jl->j_list_bitmap;
3783 		if (!jb) {
3784 			reiserfs_panic(sb, "journal-1702",
3785 				       "journal_list_bitmap is NULL");
3786 		}
3787 		set_bit_in_list_bitmap(sb, blocknr, jb);
3788 
3789 		/* Note, the entire while loop is not allowed to schedule.  */
3790 
3791 		if (bh) {
3792 			clear_prepared_bits(bh);
3793 			reiserfs_clean_and_file_buffer(bh);
3794 		}
3795 		cleaned = remove_from_transaction(sb, blocknr, cleaned);
3796 
3797 		/* find all older transactions with this block, and make sure they don't try to write it out */
3798 		cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
3799 					  blocknr);
3800 		while (cn) {
3801 			if (sb == cn->sb && blocknr == cn->blocknr) {
3802 				set_bit(BLOCK_FREED, &cn->state);
3803 				if (cn->bh) {
3804 					if (!cleaned) {
3805 						/* remove_from_transaction will brelse the buffer if it was
3806 						 ** in the current trans
3807 						 */
3808 						clear_buffer_journal_dirty(cn->
3809 									   bh);
3810 						clear_buffer_dirty(cn->bh);
3811 						clear_buffer_journal_test(cn->
3812 									  bh);
3813 						cleaned = 1;
3814 						put_bh(cn->bh);
3815 						if (atomic_read
3816 						    (&(cn->bh->b_count)) < 0) {
3817 							reiserfs_warning(sb,
3818 								 "journal-2138",
3819 								 "cn->bh->b_count < 0");
3820 						}
3821 					}
3822 					if (cn->jlist) {	/* since we are clearing the bh, we MUST dec nonzerolen */
3823 						atomic_dec(&
3824 							   (cn->jlist->
3825 							    j_nonzerolen));
3826 					}
3827 					cn->bh = NULL;
3828 				}
3829 			}
3830 			cn = cn->hnext;
3831 		}
3832 	}
3833 
3834 	if (bh)
3835 		release_buffer_page(bh); /* get_hash grabs the buffer */
3836 	return 0;
3837 }
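
/*
 * Sketch of the expected call site when the allocator frees a block
 * (illustrative; bitmap_bh and the bitmap update are placeholders for
 * whatever the block allocator really does):
 *
 *	reiserfs_prepare_for_journal(sb, bitmap_bh, 1);
 *	... clear the block's bit in the on-disk bitmap ...
 *	journal_mark_dirty(th, sb, bitmap_bh);
 *	journal_mark_freed(th, sb, blocknr);
 */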
3838 
3839 void reiserfs_update_inode_transaction(struct inode *inode)
3840 {
3841 	struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
3842 	REISERFS_I(inode)->i_jl = journal->j_current_jl;
3843 	REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
3844 }
3845 
3846 /*
3847  * returns -1 on error, 0 if no commits/barriers were done and 1
3848  * if a transaction was actually committed and the barrier was done
3849  */
3850 static int __commit_trans_jl(struct inode *inode, unsigned long id,
3851 			     struct reiserfs_journal_list *jl)
3852 {
3853 	struct reiserfs_transaction_handle th;
3854 	struct super_block *sb = inode->i_sb;
3855 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3856 	int ret = 0;
3857 
3858 	/* is it from the current transaction, or from an unknown transaction? */
3859 	if (id == journal->j_trans_id) {
3860 		jl = journal->j_current_jl;
3861 		/* try to let other writers come in and grow this transaction */
3862 		let_transaction_grow(sb, id);
3863 		if (journal->j_trans_id != id) {
3864 			goto flush_commit_only;
3865 		}
3866 
3867 		ret = journal_begin(&th, sb, 1);
3868 		if (ret)
3869 			return ret;
3870 
3871 		/* someone might have ended this transaction while we joined */
3872 		if (journal->j_trans_id != id) {
3873 			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3874 						     1);
3875 			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
3876 			ret = journal_end(&th, sb, 1);
3877 			goto flush_commit_only;
3878 		}
3879 
3880 		ret = journal_end_sync(&th, sb, 1);
3881 		if (!ret)
3882 			ret = 1;
3883 
3884 	} else {
3885 		/* this gets tricky; we have to make sure the journal list in
3886 		 * the inode still exists.  We know the list is still around
3887 		 * if we've got a larger transaction id than the oldest list
3888 		 */
3889 	      flush_commit_only:
3890 		if (journal_list_still_alive(inode->i_sb, id)) {
3891 			/*
3892 			 * we only set ret to 1 when we know for sure
3893 			 * the barrier hasn't been started yet on the commit
3894 			 * block.
3895 			 */
3896 			if (atomic_read(&jl->j_commit_left) > 1)
3897 				ret = 1;
3898 			flush_commit_list(sb, jl, 1);
3899 			if (journal->j_errno)
3900 				ret = journal->j_errno;
3901 		}
3902 	}
3903 	/* otherwise the list is gone, and long since committed */
3904 	return ret;
3905 }
3906 
3907 int reiserfs_commit_for_inode(struct inode *inode)
3908 {
3909 	unsigned int id = REISERFS_I(inode)->i_trans_id;
3910 	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
3911 
3912 	/* for the whole inode, assume an unset id or jl means it was
3913 	 * changed in the current transaction.  More conservative
3914 	 */
3915 	if (!id || !jl) {
3916 		reiserfs_update_inode_transaction(inode);
3917 		id = REISERFS_I(inode)->i_trans_id;
3918 		/* jl will be updated in __commit_trans_jl */
3919 	}
3920 
3921 	return __commit_trans_jl(inode, id, jl);
3922 }
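
/*
 * Return-value sketch for a caller such as an fsync path (assumed
 * caller, not code from this file):
 *
 *	ret = reiserfs_commit_for_inode(inode);
 *	if (ret < 0)
 *		return ret;	error from the journal
 *
 * ret == 0 means nothing needed committing; ret == 1 means a commit and
 * barrier actually hit the disk.
 */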
3923 
3924 void reiserfs_restore_prepared_buffer(struct super_block *sb,
3925 				      struct buffer_head *bh)
3926 {
3927 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
3928 	PROC_INFO_INC(sb, journal.restore_prepared);
3929 	if (!bh) {
3930 		return;
3931 	}
3932 	if (test_clear_buffer_journal_restore_dirty(bh) &&
3933 	    buffer_journal_dirty(bh)) {
3934 		struct reiserfs_journal_cnode *cn;
3935 		cn = get_journal_hash_dev(sb,
3936 					  journal->j_list_hash_table,
3937 					  bh->b_blocknr);
3938 		if (cn && can_dirty(cn)) {
3939 			set_buffer_journal_test(bh);
3940 			mark_buffer_dirty(bh);
3941 		}
3942 	}
3943 	clear_buffer_journal_prepared(bh);
3944 }
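
/*
 * This is the undo for reiserfs_prepare_for_journal().  A sketch of the
 * pairing (the predicate is hypothetical):
 *
 *	reiserfs_prepare_for_journal(sb, bh, 1);
 *	if (decided_not_to_log(bh))
 *		reiserfs_restore_prepared_buffer(sb, bh);
 *	else
 *		journal_mark_dirty(th, sb, bh);
 */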
3945 
3946 extern struct tree_balance *cur_tb;
3947 /*
3948 ** before we can change a metadata block, we have to make sure it won't
3949 ** be written to disk while we are altering it.  So, we must:
3950 ** clean it
3951 ** wait on it.
3952 **
3953 */
3954 int reiserfs_prepare_for_journal(struct super_block *sb,
3955 				 struct buffer_head *bh, int wait)
3956 {
3957 	PROC_INFO_INC(sb, journal.prepare);
3958 
3959 	if (!trylock_buffer(bh)) {
3960 		if (!wait)
3961 			return 0;
3962 		lock_buffer(bh);
3963 	}
3964 	set_buffer_journal_prepared(bh);
3965 	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
3966 		clear_buffer_journal_test(bh);
3967 		set_buffer_journal_restore_dirty(bh);
3968 	}
3969 	unlock_buffer(bh);
3970 	return 1;
3971 }
3972 
3973 static void flush_old_journal_lists(struct super_block *s)
3974 {
3975 	struct reiserfs_journal *journal = SB_JOURNAL(s);
3976 	struct reiserfs_journal_list *jl;
3977 	struct list_head *entry;
3978 	time_t now = get_seconds();
3979 
3980 	while (!list_empty(&journal->j_journal_list)) {
3981 		entry = journal->j_journal_list.next;
3982 		jl = JOURNAL_LIST_ENTRY(entry);
3983 		/* this check should always be run, to send old lists to disk */
3984 		if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
3985 		    atomic_read(&jl->j_commit_left) == 0 &&
3986 		    test_transaction(s, jl)) {
3987 			flush_used_journal_lists(s, jl);
3988 		} else {
3989 			break;
3990 		}
3991 	}
3992 }
3993 
3994 /*
3995 ** long and ugly.  If flush, will not return until all commit
3996 ** blocks and all real buffers in the trans are on disk.
3997 ** If wait_on_commit (the WAIT flag), won't return until all commit blocks are on disk.
3998 **
3999 ** keep reading, there are comments as you go along
4000 **
4001 ** If the journal is aborted, we just clean up. Things like flushing
4002 ** journal lists just won't happen.
4003 */
4004 static int do_journal_end(struct reiserfs_transaction_handle *th,
4005 			  struct super_block *sb, unsigned long nblocks,
4006 			  int flags)
4007 {
4008 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
4009 	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
4010 	struct reiserfs_journal_cnode *last_cn = NULL;
4011 	struct reiserfs_journal_desc *desc;
4012 	struct reiserfs_journal_commit *commit;
4013 	struct buffer_head *c_bh;	/* commit bh */
4014 	struct buffer_head *d_bh;	/* desc bh */
4015 	int cur_write_start = 0;	/* start index of current log write */
4016 	int old_start;
4017 	int i;
4018 	int flush;
4019 	int wait_on_commit;
4020 	struct reiserfs_journal_list *jl, *temp_jl;
4021 	struct list_head *entry, *safe;
4022 	unsigned long jindex;
4023 	unsigned int commit_trans_id;
4024 	int trans_half;
4025 
4026 	BUG_ON(th->t_refcount > 1);
4027 	BUG_ON(!th->t_trans_id);
4028 
4029 	/* protect flush_older_commits from making mistakes if the
4030            transaction ID counter overflows.  */
4031 	if (th->t_trans_id == ~0U)
4032 		flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
4033 	flush = flags & FLUSH_ALL;
4034 	wait_on_commit = flags & WAIT;
4035 
4036 	put_fs_excl();
4037 	current->journal_info = th->t_handle_save;
4038 	reiserfs_check_lock_depth(sb, "journal end");
4039 	if (journal->j_len == 0) {
4040 		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
4041 					     1);
4042 		journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
4043 	}
4044 
4045 	lock_journal(sb);
4046 	if (journal->j_next_full_flush) {
4047 		flags |= FLUSH_ALL;
4048 		flush = 1;
4049 	}
4050 	if (journal->j_next_async_flush) {
4051 		flags |= COMMIT_NOW | WAIT;
4052 		wait_on_commit = 1;
4053 	}
4054 
4055 	/* check_journal_end locks the journal, and unlocks if it does not return 1.
4056 	 ** It tells us whether we should continue with the journal_end, or just return
4057 	 */
4058 	if (!check_journal_end(th, sb, nblocks, flags)) {
4059 		sb->s_dirt = 1;
4060 		wake_queued_writers(sb);
4061 		reiserfs_async_progress_wait(sb);
4062 		goto out;
4063 	}
4064 
4065 	/* check_journal_end might set these, check again */
4066 	if (journal->j_next_full_flush) {
4067 		flush = 1;
4068 	}
4069 
4070 	/*
4071 	 ** j_must_wait means we have to flush the log blocks, and the real blocks for
4072 	 ** this transaction
4073 	 */
4074 	if (journal->j_must_wait > 0) {
4075 		flush = 1;
4076 	}
4077 #ifdef REISERFS_PREALLOCATE
4078 	/* quota ops might need to nest; set up the journal_info pointer for them
4079 	 * and raise the refcount so that it is > 0. */
4080 	current->journal_info = th;
4081 	th->t_refcount++;
4082 	reiserfs_discard_all_prealloc(th);	/* it should not involve new blocks into
4083 						 * the transaction */
4084 	th->t_refcount--;
4085 	current->journal_info = th->t_handle_save;
4086 #endif
4087 
4088 	/* set up the description block */
4089 	d_bh =
4090 	    journal_getblk(sb,
4091 			   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
4092 			   journal->j_start);
4093 	set_buffer_uptodate(d_bh);
4094 	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
4095 	memset(d_bh->b_data, 0, d_bh->b_size);
4096 	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
4097 	set_desc_trans_id(desc, journal->j_trans_id);
4098 
4099 	/* set up the commit block.  Don't write this one (keep it clean, too) until everything else is written */
4100 	c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
4101 			      ((journal->j_start + journal->j_len +
4102 				1) % SB_ONDISK_JOURNAL_SIZE(sb)));
4103 	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
4104 	memset(c_bh->b_data, 0, c_bh->b_size);
4105 	set_commit_trans_id(commit, journal->j_trans_id);
4106 	set_buffer_uptodate(c_bh);
4107 
4108 	/* init this journal list */
4109 	jl = journal->j_current_jl;
4110 
4111 	/* we lock the commit before doing anything because
4112 	 * we want to make sure nobody tries to run flush_commit_list until
4113 	 * the new transaction is fully setup, and we've already flushed the
4114 	 * ordered bh list
4115 	 */
4116 	reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);
4117 
4118 	/* save the transaction id in case we need to commit it later */
4119 	commit_trans_id = jl->j_trans_id;
4120 
4121 	atomic_set(&jl->j_older_commits_done, 0);
4122 	jl->j_trans_id = journal->j_trans_id;
4123 	jl->j_timestamp = journal->j_trans_start_time;
4124 	jl->j_commit_bh = c_bh;
4125 	jl->j_start = journal->j_start;
4126 	jl->j_len = journal->j_len;
4127 	atomic_set(&jl->j_nonzerolen, journal->j_len);
4128 	atomic_set(&jl->j_commit_left, journal->j_len + 2);
4129 	jl->j_realblock = NULL;
4130 
4131 	/* The ENTIRE FOR LOOP MUST not cause schedule to occur.
4132 	 ** for each real block, add it to the journal list hash, and
4133 	 ** copy it into the real block index array in the commit or desc block
4134 	 */
4135 	trans_half = journal_trans_half(sb->s_blocksize);
4136 	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
4137 		if (buffer_journaled(cn->bh)) {
4138 			jl_cn = get_cnode(sb);
4139 			if (!jl_cn) {
4140 				reiserfs_panic(sb, "journal-1676",
4141 					       "get_cnode returned NULL");
4142 			}
4143 			if (i == 0) {
4144 				jl->j_realblock = jl_cn;
4145 			}
4146 			jl_cn->prev = last_cn;
4147 			jl_cn->next = NULL;
4148 			if (last_cn) {
4149 				last_cn->next = jl_cn;
4150 			}
4151 			last_cn = jl_cn;
4152 			/* make sure the block we are trying to log is not a block
4153 			   of the journal or the reserved area */
4154 
4155 			if (is_block_in_log_or_reserved_area
4156 			    (sb, cn->bh->b_blocknr)) {
4157 				reiserfs_panic(sb, "journal-2332",
4158 					       "Trying to log block %lu, "
4159 					       "which is a log block",
4160 					       cn->bh->b_blocknr);
4161 			}
4162 			jl_cn->blocknr = cn->bh->b_blocknr;
4163 			jl_cn->state = 0;
4164 			jl_cn->sb = sb;
4165 			jl_cn->bh = cn->bh;
4166 			jl_cn->jlist = jl;
4167 			insert_journal_hash(journal->j_list_hash_table, jl_cn);
4168 			if (i < trans_half) {
4169 				desc->j_realblock[i] =
4170 				    cpu_to_le32(cn->bh->b_blocknr);
4171 			} else {
4172 				commit->j_realblock[i - trans_half] =
4173 				    cpu_to_le32(cn->bh->b_blocknr);
4174 			}
4175 		} else {
4176 			i--;
4177 		}
4178 	}
4179 	set_desc_trans_len(desc, journal->j_len);
4180 	set_desc_mount_id(desc, journal->j_mount_id);
4181 	set_desc_trans_id(desc, journal->j_trans_id);
4182 	set_commit_trans_len(commit, journal->j_len);
4183 
4184 	/* special check in case all buffers in the journal were marked for not logging */
4185 	BUG_ON(journal->j_len == 0);
4186 
4187 	/* we're about to dirty all the log blocks, so mark the description block
4188 	 * dirty now too.  Don't mark the commit block dirty until all the
4189 	 * others are on disk
4190 	 */
4191 	mark_buffer_dirty(d_bh);
4192 
4193 	/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
4194 	cur_write_start = journal->j_start;
4195 	cn = journal->j_first;
4196 	jindex = 1;		/* start at one so we don't get the desc again */
4197 	while (cn) {
4198 		clear_buffer_journal_new(cn->bh);
4199 		/* copy all the real blocks into the log area, dirtying the log blocks */
4200 		if (buffer_journaled(cn->bh)) {
4201 			struct buffer_head *tmp_bh;
4202 			char *addr;
4203 			struct page *page;
4204 			tmp_bh =
4205 			    journal_getblk(sb,
4206 					   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
4207 					   ((cur_write_start +
4208 					     jindex) %
4209 					    SB_ONDISK_JOURNAL_SIZE(sb)));
4210 			set_buffer_uptodate(tmp_bh);
4211 			page = cn->bh->b_page;
4212 			addr = kmap(page);
4213 			memcpy(tmp_bh->b_data,
4214 			       addr + offset_in_page(cn->bh->b_data),
4215 			       cn->bh->b_size);
4216 			kunmap(page);
4217 			mark_buffer_dirty(tmp_bh);
4218 			jindex++;
4219 			set_buffer_journal_dirty(cn->bh);
4220 			clear_buffer_journaled(cn->bh);
4221 		} else {
4222 			/* JDirty cleared sometime during transaction.  don't log this one */
4223 			reiserfs_warning(sb, "journal-2048",
4224 					 "BAD, buffer in journal hash, "
4225 					 "but not JDirty!");
4226 			brelse(cn->bh);
4227 		}
4228 		next = cn->next;
4229 		free_cnode(sb, cn);
4230 		cn = next;
4231 		reiserfs_write_unlock(sb);
4232 		cond_resched();
4233 		reiserfs_write_lock(sb);
4234 	}
4235 
4236 	/* we are done  with both the c_bh and d_bh, but
4237 	 ** c_bh must be written after all other commit blocks,
4238 	 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
4239 	 */
4240 
4241 	journal->j_current_jl = alloc_journal_list(sb);
4242 
4243 	/* now it is safe to insert this transaction on the main list */
4244 	list_add_tail(&jl->j_list, &journal->j_journal_list);
4245 	list_add_tail(&jl->j_working_list, &journal->j_working_list);
4246 	journal->j_num_work_lists++;
4247 
4248 	/* reset journal values for the next transaction */
4249 	old_start = journal->j_start;
4250 	journal->j_start =
4251 	    (journal->j_start + journal->j_len +
4252 	     2) % SB_ONDISK_JOURNAL_SIZE(sb);
4253 	atomic_set(&(journal->j_wcount), 0);
4254 	journal->j_bcount = 0;
4255 	journal->j_last = NULL;
4256 	journal->j_first = NULL;
4257 	journal->j_len = 0;
4258 	journal->j_trans_start_time = 0;
4259 	/* check for trans_id overflow */
4260 	if (++journal->j_trans_id == 0)
4261 		journal->j_trans_id = 10;
4262 	journal->j_current_jl->j_trans_id = journal->j_trans_id;
4263 	journal->j_must_wait = 0;
4264 	journal->j_len_alloc = 0;
4265 	journal->j_next_full_flush = 0;
4266 	journal->j_next_async_flush = 0;
4267 	init_journal_hash(sb);
4268 
4269 	/* make sure reiserfs_add_jh sees the new current_jl before we
4270 	 * write out the tails */
4271 	smp_mb();
4272 
4273 	/* tail conversion targets have to hit the disk before we end the
4274 	 * transaction.  Otherwise a later transaction might repack the tail
4275 	 * before this transaction commits, leaving the data block unflushed and
4276 	 * clean; if we crash before the later transaction commits, the data block
4277 	 * is lost.
4278 	 */
4279 	if (!list_empty(&jl->j_tail_bh_list)) {
4280 		reiserfs_write_unlock(sb);
4281 		write_ordered_buffers(&journal->j_dirty_buffers_lock,
4282 				      journal, jl, &jl->j_tail_bh_list);
4283 		reiserfs_write_lock(sb);
4284 	}
4285 	BUG_ON(!list_empty(&jl->j_tail_bh_list));
4286 	mutex_unlock(&jl->j_commit_mutex);
4287 
4288 	/* honor the flush wishes from the caller.  Simple commits can
4289 	 ** be done outside the journal lock; they are done below
4290 	 **
4291 	 ** if we don't flush the commit list right now, we put it into
4292 	 ** the work queue so the people waiting on the async progress work
4293 	 ** queue don't wait for this proc to flush journal lists and such.
4294 	 */
4295 	if (flush) {
4296 		flush_commit_list(sb, jl, 1);
4297 		flush_journal_list(sb, jl, 1);
4298 	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
4299 		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
4300 
4301 	/* if the next transaction has any chance of wrapping, flush
4302 	 ** transactions that might get overwritten.  If any journal lists are very
4303 	 ** old, flush them as well.
4304 	 */
4305       first_jl:
4306 	list_for_each_safe(entry, safe, &journal->j_journal_list) {
4307 		temp_jl = JOURNAL_LIST_ENTRY(entry);
4308 		if (journal->j_start <= temp_jl->j_start) {
4309 			if ((journal->j_start + journal->j_trans_max + 1) >=
4310 			    temp_jl->j_start) {
4311 				flush_used_journal_lists(sb, temp_jl);
4312 				goto first_jl;
4313 			} else if ((journal->j_start +
4314 				    journal->j_trans_max + 1) <
4315 				   SB_ONDISK_JOURNAL_SIZE(sb)) {
4316 				/* if we don't cross into the next transaction and we don't
4317 				 * wrap, there is no way we can overlap any later transactions,
4318 				 * so break now
4319 				 */
4320 				break;
4321 			}
4322 		} else if ((journal->j_start +
4323 			    journal->j_trans_max + 1) >
4324 			   SB_ONDISK_JOURNAL_SIZE(sb)) {
4325 			if (((journal->j_start + journal->j_trans_max + 1) %
4326 			     SB_ONDISK_JOURNAL_SIZE(sb)) >=
4327 			    temp_jl->j_start) {
4328 				flush_used_journal_lists(sb, temp_jl);
4329 				goto first_jl;
4330 			} else {
4331 				/* we don't overlap anything from our start to the end of the
4332 				 * log, and our wrapped portion doesn't overlap anything at
4333 				 * the start of the log.  We can break
4334 				 */
4335 				break;
4336 			}
4337 		}
4338 	}
4339 	flush_old_journal_lists(sb);
4340 
4341 	journal->j_current_jl->j_list_bitmap =
4342 	    get_list_bitmap(sb, journal->j_current_jl);
4343 
4344 	if (!(journal->j_current_jl->j_list_bitmap)) {
4345 		reiserfs_panic(sb, "journal-1996",
4346 			       "could not get a list bitmap");
4347 	}
4348 
4349 	atomic_set(&(journal->j_jlock), 0);
4350 	unlock_journal(sb);
4351 	/* wake up anybody waiting to join. */
4352 	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
4353 	wake_up(&(journal->j_join_wait));
4354 
4355 	if (!flush && wait_on_commit &&
4356 	    journal_list_still_alive(sb, commit_trans_id)) {
4357 		flush_commit_list(sb, jl, 1);
4358 	}
4359       out:
4360 	reiserfs_check_lock_depth(sb, "journal end2");
4361 
4362 	memset(th, 0, sizeof(*th));
4363 	/* Re-set th->t_super, so we can properly keep track of how many
4364 	 * persistent transactions there are. We need to do this so if this
4365 	 * call is part of a failed restart_transaction, we can free it later */
4366 	th->t_super = sb;
4367 
4368 	return journal->j_errno;
4369 }
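
/*
 * Flag combinations passed to do_journal_end in this file, for quick
 * reference:
 *
 *	0				plain journal_end, may batch
 *	COMMIT_NOW | WAIT		journal_end_sync and
 *					reiserfs_flush_old_commits
 *	FLUSH_ALL | COMMIT_NOW | WAIT	forced when the transaction id
 *					is about to wrap
 */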
4370 
4371 /* Set the file system read only and refuse new transactions */
4372 void reiserfs_abort_journal(struct super_block *sb, int errno)
4373 {
4374 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
4375 	if (test_bit(J_ABORTED, &journal->j_state))
4376 		return;
4377 
4378 	if (!journal->j_errno)
4379 		journal->j_errno = errno;
4380 
4381 	sb->s_flags |= MS_RDONLY;
4382 	set_bit(J_ABORTED, &journal->j_state);
4383 
4384 #ifdef CONFIG_REISERFS_CHECK
4385 	dump_stack();
4386 #endif
4387 }
4388 
4389