1 /*
2  * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for licensing and copyright details
3  */
4 
5 #include <linux/reiserfs_fs.h>
6 
7 #include <linux/slab.h>
8 #include <linux/interrupt.h>
9 #include <linux/sched.h>
10 #include <linux/bug.h>
11 #include <linux/workqueue.h>
12 #include <asm/unaligned.h>
13 #include <linux/bitops.h>
14 #include <linux/proc_fs.h>
15 #include <linux/buffer_head.h>
16 
17 /* the 32 bit compat definitions with int argument */
18 #define REISERFS_IOC32_UNPACK		_IOW(0xCD, 1, int)
19 #define REISERFS_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
20 #define REISERFS_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
21 #define REISERFS_IOC32_GETVERSION	FS_IOC32_GETVERSION
22 #define REISERFS_IOC32_SETVERSION	FS_IOC32_SETVERSION
23 
24 struct reiserfs_journal_list;
25 
26 /** bitmasks for i_flags field in reiserfs-specific part of inode */
27 typedef enum {
28     /** this says which key format all items (except stat data) of
29       an object use.  If this is set, the format is 3.6, otherwise
30       3.5 */
31 	i_item_key_version_mask = 0x0001,
32     /** If this is unset, object has 3.5 stat data, otherwise, it has
33       3.6 stat data with 64bit size, 32bit nlink etc. */
34 	i_stat_data_version_mask = 0x0002,
35     /** file might need tail packing on close */
36 	i_pack_on_close_mask = 0x0004,
37     /** don't pack tail of file */
38 	i_nopack_mask = 0x0008,
39     /** If one of these is set, a "safe link" was created for this file during
40       truncate or unlink. Safe links are used to avoid leaking disk
41       space after a crash with some files open but already unlinked. */
42 	i_link_saved_unlink_mask = 0x0010,
43 	i_link_saved_truncate_mask = 0x0020,
44 	i_has_xattr_dir = 0x0040,
45 	i_data_log = 0x0080,
46 } reiserfs_inode_flags;
47 
48 struct reiserfs_inode_info {
49 	__u32 i_key[4];		/* key is still 4 32 bit integers */
50     /** transient inode flags that are never stored on disk. Bitmasks
51       for this field are defined above. */
52 	__u32 i_flags;
53 
54 	__u32 i_first_direct_byte;	// offset of first byte stored in direct item.
55 
56 	/* copy of persistent inode flags read from sd_attrs. */
57 	__u32 i_attrs;
58 
59 	int i_prealloc_block;	/* first unused block of a sequence of unused blocks */
60 	int i_prealloc_count;	/* length of that sequence */
61 	struct list_head i_prealloc_list;	/* per-transaction list of inodes which
62 						 * have preallocated blocks */
63 
64 	unsigned new_packing_locality:1;	/* new packing locality is created; new blocks
65 						 * for the contents of this directory should be
66 						 * displaced */
67 
68 	/* we use these for fsync or O_SYNC to decide which transaction
69 	 ** needs to be committed in order for this inode to be properly
70 	 ** flushed */
71 	unsigned int i_trans_id;
72 	struct reiserfs_journal_list *i_jl;
73 	atomic_t openers;
74 	struct mutex tailpack;
75 #ifdef CONFIG_REISERFS_FS_XATTR
76 	struct rw_semaphore i_xattr_sem;
77 #endif
78 	struct inode vfs_inode;
79 };
80 
81 typedef enum {
82 	reiserfs_attrs_cleared = 0x00000001,
83 } reiserfs_super_block_flags;
84 
85 /* struct reiserfs_super_block accessors/mutators
86  * since this is a disk structure, it will always be in
87  * little endian format. */
88 #define sb_block_count(sbp)         (le32_to_cpu((sbp)->s_v1.s_block_count))
89 #define set_sb_block_count(sbp,v)   ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
90 #define sb_free_blocks(sbp)         (le32_to_cpu((sbp)->s_v1.s_free_blocks))
91 #define set_sb_free_blocks(sbp,v)   ((sbp)->s_v1.s_free_blocks = cpu_to_le32(v))
92 #define sb_root_block(sbp)          (le32_to_cpu((sbp)->s_v1.s_root_block))
93 #define set_sb_root_block(sbp,v)    ((sbp)->s_v1.s_root_block = cpu_to_le32(v))
94 
95 #define sb_jp_journal_1st_block(sbp)  \
96               (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_1st_block))
97 #define set_sb_jp_journal_1st_block(sbp,v) \
98               ((sbp)->s_v1.s_journal.jp_journal_1st_block = cpu_to_le32(v))
99 #define sb_jp_journal_dev(sbp) \
100               (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_dev))
101 #define set_sb_jp_journal_dev(sbp,v) \
102               ((sbp)->s_v1.s_journal.jp_journal_dev = cpu_to_le32(v))
103 #define sb_jp_journal_size(sbp) \
104               (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_size))
105 #define set_sb_jp_journal_size(sbp,v) \
106               ((sbp)->s_v1.s_journal.jp_journal_size = cpu_to_le32(v))
107 #define sb_jp_journal_trans_max(sbp) \
108               (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_trans_max))
109 #define set_sb_jp_journal_trans_max(sbp,v) \
110               ((sbp)->s_v1.s_journal.jp_journal_trans_max = cpu_to_le32(v))
111 #define sb_jp_journal_magic(sbp) \
112               (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_magic))
113 #define set_sb_jp_journal_magic(sbp,v) \
114               ((sbp)->s_v1.s_journal.jp_journal_magic = cpu_to_le32(v))
115 #define sb_jp_journal_max_batch(sbp) \
116               (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_batch))
117 #define set_sb_jp_journal_max_batch(sbp,v) \
118               ((sbp)->s_v1.s_journal.jp_journal_max_batch = cpu_to_le32(v))
119 #define sb_jp_journal_max_commit_age(sbp) \
120               (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_commit_age))
121 #define set_sb_jp_journal_max_commit_age(sbp,v) \
122               ((sbp)->s_v1.s_journal.jp_journal_max_commit_age = cpu_to_le32(v))
123 
124 #define sb_blocksize(sbp)          (le16_to_cpu((sbp)->s_v1.s_blocksize))
125 #define set_sb_blocksize(sbp,v)    ((sbp)->s_v1.s_blocksize = cpu_to_le16(v))
126 #define sb_oid_maxsize(sbp)        (le16_to_cpu((sbp)->s_v1.s_oid_maxsize))
127 #define set_sb_oid_maxsize(sbp,v)  ((sbp)->s_v1.s_oid_maxsize = cpu_to_le16(v))
128 #define sb_oid_cursize(sbp)        (le16_to_cpu((sbp)->s_v1.s_oid_cursize))
129 #define set_sb_oid_cursize(sbp,v)  ((sbp)->s_v1.s_oid_cursize = cpu_to_le16(v))
130 #define sb_umount_state(sbp)       (le16_to_cpu((sbp)->s_v1.s_umount_state))
131 #define set_sb_umount_state(sbp,v) ((sbp)->s_v1.s_umount_state = cpu_to_le16(v))
132 #define sb_fs_state(sbp)           (le16_to_cpu((sbp)->s_v1.s_fs_state))
133 #define set_sb_fs_state(sbp,v)     ((sbp)->s_v1.s_fs_state = cpu_to_le16(v))
134 #define sb_hash_function_code(sbp) \
135               (le32_to_cpu((sbp)->s_v1.s_hash_function_code))
136 #define set_sb_hash_function_code(sbp,v) \
137               ((sbp)->s_v1.s_hash_function_code = cpu_to_le32(v))
138 #define sb_tree_height(sbp)        (le16_to_cpu((sbp)->s_v1.s_tree_height))
139 #define set_sb_tree_height(sbp,v)  ((sbp)->s_v1.s_tree_height = cpu_to_le16(v))
140 #define sb_bmap_nr(sbp)            (le16_to_cpu((sbp)->s_v1.s_bmap_nr))
141 #define set_sb_bmap_nr(sbp,v)      ((sbp)->s_v1.s_bmap_nr = cpu_to_le16(v))
142 #define sb_version(sbp)            (le16_to_cpu((sbp)->s_v1.s_version))
143 #define set_sb_version(sbp,v)      ((sbp)->s_v1.s_version = cpu_to_le16(v))
144 
145 #define sb_mnt_count(sbp)	   (le16_to_cpu((sbp)->s_mnt_count))
146 #define set_sb_mnt_count(sbp, v)   ((sbp)->s_mnt_count = cpu_to_le16(v))
147 
148 #define sb_reserved_for_journal(sbp) \
149               (le16_to_cpu((sbp)->s_v1.s_reserved_for_journal))
150 #define set_sb_reserved_for_journal(sbp,v) \
151               ((sbp)->s_v1.s_reserved_for_journal = cpu_to_le16(v))
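
/*
 * Usage sketch (illustrative only, not an in-tree helper): the accessors
 * above hide all of the endian conversion, so callers manipulate the
 * on-disk super block purely in cpu byte order on any host, e.g.
 *
 *	set_sb_free_blocks(sbp, sb_free_blocks(sbp) - allocated);
 *	set_sb_block_count(sbp, sb_block_count(sbp) + added);
 *
 * where "allocated" and "added" are hypothetical cpu-order counters.
 */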
152 
153 /* LOGGING -- */
154 
155 /* These all interrelate for performance.
156 **
157 ** If the journal block count is smaller than n transactions, you lose speed.
158 ** I don't know what n is yet, I'm guessing 8-16.
159 **
160 ** typical transaction size depends on the application, how often fsync is
161 ** called, and how many metadata blocks you dirty in a 30 second period.
162 ** The more small files (<16k) you use, the larger your transactions will
163 ** be.
164 **
165 ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
166 ** to wrap, which slows things down.  If you need high speed meta data updates, the journal should be big enough
167 ** to prevent wrapping before dirty meta blocks get to disk.
168 **
169 ** If the batch max is smaller than the transaction max, you'll waste space at the end of the journal
170 ** because journal_end sets the next transaction to start at 0 if the next transaction has any chance of wrapping.
171 **
172 ** The larger the batch max age, the better the speed, and the more metadata changes you'll lose after a crash.
173 **
174 */
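
/* Rough worked example (assuming the usual defaults of roughly 8192 journal
** blocks and a 1024 block transaction max; exact defaults are not defined in
** this section): at most ~8 maximal transactions fit before the log wraps,
** right at the low end of the 8-16 guess above.  A smaller journal or a
** larger transaction max makes wrapping, and the forced flushes that come
** with it, more frequent.
*/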
175 
176 /* don't mess with these for a while */
177 				/* we have a node size define somewhere in reiserfs_fs.h. -Hans */
178 #define JOURNAL_BLOCK_SIZE  4096	/* BUG gotta get rid of this */
179 #define JOURNAL_MAX_CNODE   1500	/* max cnodes to allocate. */
180 #define JOURNAL_HASH_SIZE 8192
181 #define JOURNAL_NUM_BITMAPS 5	/* number of copies of the bitmaps to have floating.  Must be >= 2 */
182 
183 /* One of these for every block in every transaction
184 ** Each one is in two hash tables.  First, a hash of the current transaction, and after journal_end, a
185 ** hash of all the in memory transactions.
186 ** next and prev are used by the current transaction (journal_hash).
187 ** hnext and hprev are used by journal_list_hash.  If a block is in more than one transaction, the journal_list_hash
188 ** links it in multiple times.  This allows flush_journal_list to remove just the cnode belonging
189 ** to a given transaction.
190 */
191 struct reiserfs_journal_cnode {
192 	struct buffer_head *bh;	/* real buffer head */
193 	struct super_block *sb;	/* dev of real buffer head */
194 	__u32 blocknr;		/* block number of real buffer head, == 0 when buffer on disk */
195 	unsigned long state;
196 	struct reiserfs_journal_list *jlist;	/* journal list this cnode lives in */
197 	struct reiserfs_journal_cnode *next;	/* next in transaction list */
198 	struct reiserfs_journal_cnode *prev;	/* prev in transaction list */
199 	struct reiserfs_journal_cnode *hprev;	/* prev in hash list */
200 	struct reiserfs_journal_cnode *hnext;	/* next in hash list */
201 };
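
/*
 * Illustrative sketch only (not the in-tree lookup code): walking the hnext
 * chain of a journal_list_hash bucket and matching on (sb, blocknr) plus the
 * owning journal list is what lets a caller such as flush_journal_list pick
 * out just the cnode belonging to its transaction when the same block sits
 * in several transactions.
 */
static inline struct reiserfs_journal_cnode *
example_find_cnode_in_chain(struct reiserfs_journal_cnode *head,
			    struct super_block *sb, __u32 blocknr,
			    struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;

	for (cn = head; cn != NULL; cn = cn->hnext)
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist == jl)
			return cn;
	return NULL;
}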
202 
203 struct reiserfs_bitmap_node {
204 	int id;
205 	char *data;
206 	struct list_head list;
207 };
208 
209 struct reiserfs_list_bitmap {
210 	struct reiserfs_journal_list *journal_list;
211 	struct reiserfs_bitmap_node **bitmaps;
212 };
213 
214 /*
215 ** one of these for each transaction.  The most important part here is the j_realblock.
216 ** this list of cnodes is used to hash all the blocks in all the commits, to mark all the
217 ** real buffer heads dirty once all the commits hit the disk,
218 ** and to make sure every real block in a transaction is on disk before allowing the log area
219 ** to be overwritten */
220 struct reiserfs_journal_list {
221 	unsigned long j_start;
222 	unsigned long j_state;
223 	unsigned long j_len;
224 	atomic_t j_nonzerolen;
225 	atomic_t j_commit_left;
226 	atomic_t j_older_commits_done;	/* all commits older than this on disk */
227 	struct mutex j_commit_mutex;
228 	unsigned int j_trans_id;
229 	time_t j_timestamp;
230 	struct reiserfs_list_bitmap *j_list_bitmap;
231 	struct buffer_head *j_commit_bh;	/* commit buffer head */
232 	struct reiserfs_journal_cnode *j_realblock;
233 	struct reiserfs_journal_cnode *j_freedlist;	/* list of buffers that were freed during this trans.  free each of these on flush */
234 	/* time ordered list of all active transactions */
235 	struct list_head j_list;
236 
237 	/* time ordered list of all transactions we haven't tried to flush yet */
238 	struct list_head j_working_list;
239 
240 	/* list of tail conversion targets in need of flush before commit */
241 	struct list_head j_tail_bh_list;
242 	/* list of data=ordered buffers in need of flush before commit */
243 	struct list_head j_bh_list;
244 	int j_refcount;
245 };
246 
247 struct reiserfs_journal {
248 	struct buffer_head **j_ap_blocks;	/* journal blocks on disk */
249 	struct reiserfs_journal_cnode *j_last;	/* newest journal block */
250 	struct reiserfs_journal_cnode *j_first;	/*  oldest journal block.  start here for traverse */
251 
252 	struct block_device *j_dev_bd;
253 	fmode_t j_dev_mode;
254 	int j_1st_reserved_block;	/* first block on s_dev of the area reserved for the journal */
255 
256 	unsigned long j_state;
257 	unsigned int j_trans_id;
258 	unsigned long j_mount_id;
259 	unsigned long j_start;	/* start of current waiting commit (index into j_ap_blocks) */
260 	unsigned long j_len;	/* length of current waiting commit */
261 	unsigned long j_len_alloc;	/* number of buffers requested by journal_begin() */
262 	atomic_t j_wcount;	/* count of writers for current commit */
263 	unsigned long j_bcount;	/* batch count. allows turning X transactions into 1 */
264 	unsigned long j_first_unflushed_offset;	/* first unflushed transactions offset */
265 	unsigned j_last_flush_trans_id;	/* last fully flushed journal timestamp */
266 	struct buffer_head *j_header_bh;
267 
268 	time_t j_trans_start_time;	/* time this transaction started */
269 	struct mutex j_mutex;
270 	struct mutex j_flush_mutex;
271 	wait_queue_head_t j_join_wait;	/* wait for current transaction to finish before starting new one */
272 	atomic_t j_jlock;	/* lock for j_join_wait */
273 	int j_list_bitmap_index;	/* number of next list bitmap to use */
274 	int j_must_wait;	/* no more journal begins allowed. MUST sleep on j_join_wait */
275 	int j_next_full_flush;	/* next journal_end will flush all journal list */
276 	int j_next_async_flush;	/* next journal_end will flush all async commits */
277 
278 	int j_cnode_used;	/* number of cnodes on the used list */
279 	int j_cnode_free;	/* number of cnodes on the free list */
280 
281 	unsigned int j_trans_max;	/* max number of blocks in a transaction.  */
282 	unsigned int j_max_batch;	/* max number of blocks to batch into a trans */
283 	unsigned int j_max_commit_age;	/* in seconds, how old can an async commit be */
284 	unsigned int j_max_trans_age;	/* in seconds, how old can a transaction be */
285 	unsigned int j_default_max_commit_age;	/* the default for the max commit age */
286 
287 	struct reiserfs_journal_cnode *j_cnode_free_list;
288 	struct reiserfs_journal_cnode *j_cnode_free_orig;	/* orig pointer returned from vmalloc */
289 
290 	struct reiserfs_journal_list *j_current_jl;
291 	int j_free_bitmap_nodes;
292 	int j_used_bitmap_nodes;
293 
294 	int j_num_lists;	/* total number of active transactions */
295 	int j_num_work_lists;	/* number that need attention from kreiserfsd */
296 
297 	/* debugging to make sure things are flushed in order */
298 	unsigned int j_last_flush_id;
299 
300 	/* debugging to make sure things are committed in order */
301 	unsigned int j_last_commit_id;
302 
303 	struct list_head j_bitmap_nodes;
304 	struct list_head j_dirty_buffers;
305 	spinlock_t j_dirty_buffers_lock;	/* protects j_dirty_buffers */
306 
307 	/* list of all active transactions */
308 	struct list_head j_journal_list;
309 	/* lists that haven't been touched by writeback attempts */
310 	struct list_head j_working_list;
311 
312 	struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS];	/* array of bitmaps to record the deleted blocks */
313 	struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE];	/* hash table for real buffer heads in current trans */
314 	struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE];	/* hash table for all the real buffer heads in all
315 										   the transactions */
316 	struct list_head j_prealloc_list;	/* list of inodes which have preallocated blocks */
317 	int j_persistent_trans;
318 	unsigned long j_max_trans_size;
319 	unsigned long j_max_batch_size;
320 
321 	int j_errno;
322 
323 	/* when flushing ordered buffers, throttle new ordered writers */
324 	struct delayed_work j_work;
325 	struct super_block *j_work_sb;
326 	atomic_t j_async_throttle;
327 };
328 
329 enum journal_state_bits {
330 	J_WRITERS_BLOCKED = 1,	/* set when new writers not allowed */
331 	J_WRITERS_QUEUED,	/* set when log is full due to too many writers */
332 	J_ABORTED,		/* set when log is aborted */
333 };
334 
335 #define JOURNAL_DESC_MAGIC "ReIsErLB"	/* ick.  magic string to find desc blocks in the journal */
336 
337 typedef __u32(*hashf_t) (const signed char *, int);
338 
339 struct reiserfs_bitmap_info {
340 	__u32 free_count;
341 };
342 
343 struct proc_dir_entry;
344 
345 #if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
346 typedef unsigned long int stat_cnt_t;
347 typedef struct reiserfs_proc_info_data {
348 	spinlock_t lock;
349 	int exiting;
350 	int max_hash_collisions;
351 
352 	stat_cnt_t breads;
353 	stat_cnt_t bread_miss;
354 	stat_cnt_t search_by_key;
355 	stat_cnt_t search_by_key_fs_changed;
356 	stat_cnt_t search_by_key_restarted;
357 
358 	stat_cnt_t insert_item_restarted;
359 	stat_cnt_t paste_into_item_restarted;
360 	stat_cnt_t cut_from_item_restarted;
361 	stat_cnt_t delete_solid_item_restarted;
362 	stat_cnt_t delete_item_restarted;
363 
364 	stat_cnt_t leaked_oid;
365 	stat_cnt_t leaves_removable;
366 
367 	/* balances per level. Use explicit 5 as MAX_HEIGHT is not visible yet. */
368 	stat_cnt_t balance_at[5];	/* XXX */
369 	/* sbk == search_by_key */
370 	stat_cnt_t sbk_read_at[5];	/* XXX */
371 	stat_cnt_t sbk_fs_changed[5];
372 	stat_cnt_t sbk_restarted[5];
373 	stat_cnt_t items_at[5];	/* XXX */
374 	stat_cnt_t free_at[5];	/* XXX */
375 	stat_cnt_t can_node_be_removed[5];	/* XXX */
376 	long int lnum[5];	/* XXX */
377 	long int rnum[5];	/* XXX */
378 	long int lbytes[5];	/* XXX */
379 	long int rbytes[5];	/* XXX */
380 	stat_cnt_t get_neighbors[5];
381 	stat_cnt_t get_neighbors_restart[5];
382 	stat_cnt_t need_l_neighbor[5];
383 	stat_cnt_t need_r_neighbor[5];
384 
385 	stat_cnt_t free_block;
386 	struct __scan_bitmap_stats {
387 		stat_cnt_t call;
388 		stat_cnt_t wait;
389 		stat_cnt_t bmap;
390 		stat_cnt_t retry;
391 		stat_cnt_t in_journal_hint;
392 		stat_cnt_t in_journal_nohint;
393 		stat_cnt_t stolen;
394 	} scan_bitmap;
395 	struct __journal_stats {
396 		stat_cnt_t in_journal;
397 		stat_cnt_t in_journal_bitmap;
398 		stat_cnt_t in_journal_reusable;
399 		stat_cnt_t lock_journal;
400 		stat_cnt_t lock_journal_wait;
401 		stat_cnt_t journal_being;
402 		stat_cnt_t journal_relock_writers;
403 		stat_cnt_t journal_relock_wcount;
404 		stat_cnt_t mark_dirty;
405 		stat_cnt_t mark_dirty_already;
406 		stat_cnt_t mark_dirty_notjournal;
407 		stat_cnt_t restore_prepared;
408 		stat_cnt_t prepare;
409 		stat_cnt_t prepare_retry;
410 	} journal;
411 } reiserfs_proc_info_data_t;
412 #else
413 typedef struct reiserfs_proc_info_data {
414 } reiserfs_proc_info_data_t;
415 #endif
416 
417 /* reiserfs union of in-core super block data */
418 struct reiserfs_sb_info {
419 	struct buffer_head *s_sbh;	/* Buffer containing the super block */
420 	/* both the comment and the choice of
421 	   name are unclear for s_rs -Hans */
422 	struct reiserfs_super_block *s_rs;	/* Pointer to the super block in the buffer */
423 	struct reiserfs_bitmap_info *s_ap_bitmap;
424 	struct reiserfs_journal *s_journal;	/* pointer to journal information */
425 	unsigned short s_mount_state;	/* reiserfs state (valid, invalid) */
426 
427 	/* Serialize writers access, replace the old bkl */
428 	struct mutex lock;
429 	/* Owner of the lock (can be recursive) */
430 	struct task_struct *lock_owner;
431 	/* Depth of the lock, start from -1 like the bkl */
432 	int lock_depth;
433 
434 	/* Comment? -Hans */
435 	void (*end_io_handler) (struct buffer_head *, int);
436 	hashf_t s_hash_function;	/* pointer to function which is used
437 					   to sort names in directory. Set on
438 					   mount */
439 	unsigned long s_mount_opt;	/* reiserfs's mount options are set
440 					   here (currently - NOTAIL, NOLOG,
441 					   REPLAYONLY) */
442 
443 	struct {		/* This is a structure that describes block allocator options */
444 		unsigned long bits;	/* Bitfield for enable/disable kind of options */
445 		unsigned long large_file_size;	/* file size (in blocks) at which we start considering a file large */
446 		int border;	/* percentage of the disk the border takes */
447 		int preallocmin;	/* Minimal file size (in blocks) starting from which we do preallocations */
448 		int preallocsize;	/* Number of blocks we try to prealloc when file
449 					   reaches preallocmin size (in blocks) or
450 					   prealloc_list is empty. */
451 	} s_alloc_options;
452 
453 	/* Comment? -Hans */
454 	wait_queue_head_t s_wait;
455 	/* To be obsoleted soon by per buffer seals.. -Hans */
456 	atomic_t s_generation_counter;	// increased by one every time the
457 	// tree gets re-balanced
458 	unsigned long s_properties;	/* File system properties. Currently holds
459 					   on-disk FS format */
460 
461 	/* session statistics */
462 	int s_disk_reads;
463 	int s_disk_writes;
464 	int s_fix_nodes;
465 	int s_do_balance;
466 	int s_unneeded_left_neighbor;
467 	int s_good_search_by_key_reada;
468 	int s_bmaps;
469 	int s_bmaps_without_search;
470 	int s_direct2indirect;
471 	int s_indirect2direct;
472 	/* set up when it's ok for reiserfs_read_inode2() to read from
473 	   disk inode with nlink==0. Currently this is only used during
474 	   finish_unfinished() processing at mount time */
475 	int s_is_unlinked_ok;
476 	reiserfs_proc_info_data_t s_proc_info_data;
477 	struct proc_dir_entry *procdir;
478 	int reserved_blocks;	/* amount of blocks reserved for further allocations */
479 	spinlock_t bitmap_lock;	/* this lock is now only used to protect the reserved_blocks variable */
480 	struct dentry *priv_root;	/* root of /.reiserfs_priv */
481 	struct dentry *xattr_root;	/* root of /.reiserfs_priv/xattrs */
482 	int j_errno;
483 
484 	int work_queued;              /* non-zero if delayed work is queued */
485 	struct delayed_work old_work; /* old transactions flush delayed work */
486 	spinlock_t old_work_lock;     /* protects old_work and work_queued */
487 
488 #ifdef CONFIG_QUOTA
489 	char *s_qf_names[MAXQUOTAS];
490 	int s_jquota_fmt;
491 #endif
492 	char *s_jdev;		/* Stored jdev for mount option showing */
493 #ifdef CONFIG_REISERFS_CHECK
494 
495 	struct tree_balance *cur_tb;	/*
496 					 * Detects whether more than one
497 					 * copy of tb exists per superblock
498 					 * as a means of checking whether
499 					 * do_balance is executing concurrently
500 					 * against another tree reader/writer
501 					 * on a same mount point.
502 					 */
503 #endif
504 };
505 
506 /* Definitions of reiserfs on-disk properties: */
507 #define REISERFS_3_5 0
508 #define REISERFS_3_6 1
509 #define REISERFS_OLD_FORMAT 2
510 
511 enum reiserfs_mount_options {
512 /* Mount options */
513 	REISERFS_LARGETAIL,	/* large tails will be created in a session */
514 	REISERFS_SMALLTAIL,	/* small (for files less than block size) tails will be created in a session */
515 	REPLAYONLY,		/* replay journal and return 0. Used by fsck */
516 	REISERFS_CONVERT,	/* -o conv: causes conversion of the old
517 				   format super block to the new
518 				   format. If not specified, an old
519 				   partition will be dealt with in the
520 				   manner of 3.5.x */
521 
522 /* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
523 ** reiserfs disks from 3.5.19 or earlier.  99% of the time, this option
524 ** is not required.  If the normal autodetection code can't determine which
525 ** hash to use (because both hashes had the same value for a file)
526 ** use this option to force a specific hash.  It won't allow you to override
527 ** the existing hash on the FS, so if you have a tea hash disk, and mount
528 ** with -o hash=rupasov, the mount will fail.
529 */
530 	FORCE_TEA_HASH,		/* try to force tea hash on mount */
531 	FORCE_RUPASOV_HASH,	/* try to force rupasov hash on mount */
532 	FORCE_R5_HASH,		/* try to force r5 hash on mount */
533 	FORCE_HASH_DETECT,	/* try to detect hash function on mount */
534 
535 	REISERFS_DATA_LOG,
536 	REISERFS_DATA_ORDERED,
537 	REISERFS_DATA_WRITEBACK,
538 
539 /* used for testing experimental features; makes benchmarking new
540    features with and without them more convenient; should never be used by
541    users in any code shipped to users (ideally) */
542 
543 	REISERFS_NO_BORDER,
544 	REISERFS_NO_UNHASHED_RELOCATION,
545 	REISERFS_HASHED_RELOCATION,
546 	REISERFS_ATTRS,
547 	REISERFS_XATTRS_USER,
548 	REISERFS_POSIXACL,
549 	REISERFS_EXPOSE_PRIVROOT,
550 	REISERFS_BARRIER_NONE,
551 	REISERFS_BARRIER_FLUSH,
552 
553 	/* Actions on error */
554 	REISERFS_ERROR_PANIC,
555 	REISERFS_ERROR_RO,
556 	REISERFS_ERROR_CONTINUE,
557 
558 	REISERFS_USRQUOTA,	/* User quota option specified */
559 	REISERFS_GRPQUOTA,	/* Group quota option specified */
560 
561 	REISERFS_TEST1,
562 	REISERFS_TEST2,
563 	REISERFS_TEST3,
564 	REISERFS_TEST4,
565 	REISERFS_UNSUPPORTED_OPT,
566 };
567 
568 #define reiserfs_r5_hash(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_R5_HASH))
569 #define reiserfs_rupasov_hash(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_RUPASOV_HASH))
570 #define reiserfs_tea_hash(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_TEA_HASH))
571 #define reiserfs_hash_detect(s) (REISERFS_SB(s)->s_mount_opt & (1 << FORCE_HASH_DETECT))
572 #define reiserfs_no_border(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_NO_BORDER))
573 #define reiserfs_no_unhashed_relocation(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_NO_UNHASHED_RELOCATION))
574 #define reiserfs_hashed_relocation(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_HASHED_RELOCATION))
575 #define reiserfs_test4(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_TEST4))
576 
577 #define have_large_tails(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_LARGETAIL))
578 #define have_small_tails(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_SMALLTAIL))
579 #define replay_only(s) (REISERFS_SB(s)->s_mount_opt & (1 << REPLAYONLY))
580 #define reiserfs_attrs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_ATTRS))
581 #define old_format_only(s) (REISERFS_SB(s)->s_properties & (1 << REISERFS_3_5))
582 #define convert_reiserfs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_CONVERT))
583 #define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG))
584 #define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED))
585 #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK))
586 #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER))
587 #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL))
588 #define reiserfs_expose_privroot(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_EXPOSE_PRIVROOT))
589 #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s))
590 #define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE))
591 #define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH))
592 
593 #define reiserfs_error_panic(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_ERROR_PANIC))
594 #define reiserfs_error_ro(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_ERROR_RO))
595 
596 void reiserfs_file_buffer(struct buffer_head *bh, int list);
597 extern struct file_system_type reiserfs_fs_type;
598 int reiserfs_resize(struct super_block *, unsigned long);
599 
600 #define CARRY_ON                0
601 #define SCHEDULE_OCCURRED       1
602 
603 #define SB_BUFFER_WITH_SB(s) (REISERFS_SB(s)->s_sbh)
604 #define SB_JOURNAL(s) (REISERFS_SB(s)->s_journal)
605 #define SB_JOURNAL_1st_RESERVED_BLOCK(s) (SB_JOURNAL(s)->j_1st_reserved_block)
606 #define SB_JOURNAL_LEN_FREE(s) (SB_JOURNAL(s)->j_journal_len_free)
607 #define SB_AP_BITMAP(s) (REISERFS_SB(s)->s_ap_bitmap)
608 
609 #define SB_DISK_JOURNAL_HEAD(s) (SB_JOURNAL(s)->j_header_bh->)
610 
611 /* A safe version of the "bdevname", which returns the "s_id" field of
612  * a superblock or else "Null superblock" if the super block is NULL.
613  */
614 static inline char *reiserfs_bdevname(struct super_block *s)
615 {
616 	return (s == NULL) ? "Null superblock" : s->s_id;
617 }
618 
619 #define reiserfs_is_journal_aborted(journal) (unlikely (__reiserfs_is_journal_aborted (journal)))
620 static inline int __reiserfs_is_journal_aborted(struct reiserfs_journal
621 						*journal)
622 {
623 	return test_bit(J_ABORTED, &journal->j_state);
624 }
625 
626 /*
627  * Locking primitives. The write lock is a per superblock
628  * special mutex that has properties close to the Big Kernel Lock
629  * which was used in the previous locking scheme.
630  */
631 void reiserfs_write_lock(struct super_block *s);
632 void reiserfs_write_unlock(struct super_block *s);
633 int reiserfs_write_lock_once(struct super_block *s);
634 void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
635 
636 #ifdef CONFIG_REISERFS_CHECK
637 void reiserfs_lock_check_recursive(struct super_block *s);
638 #else
639 static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
640 #endif
641 
642 /*
643  * Several mutexes depend on the write lock.
644  * However sometimes we want to relax the write lock while we hold
645  * these mutexes, according to the release/reacquire on schedule()
646  * properties of the Bkl that were used.
647  * Reiserfs performance and locking were based on this scheme.
648  * Now that the write lock is a mutex and not the bkl anymore, doing so
649  * may result in a deadlock:
650  *
651  * A acquire write_lock
652  * A acquire j_commit_mutex
653  * A release write_lock and wait for something
654  * B acquire write_lock
655  * B can't acquire j_commit_mutex and sleep
656  * A can't acquire write lock anymore
657  * deadlock
658  *
659  * What we do here is avoid such a deadlock by playing the same game
660  * as the Bkl: if we can't acquire a mutex that depends on the write lock,
661  * we release the write lock, wait a bit and then retry.
662  *
663  * The mutexes concerned by this hack are:
664  * - The commit mutex of a journal list
665  * - The flush mutex
666  * - The journal lock
667  * - The inode mutex
668  */
669 static inline void reiserfs_mutex_lock_safe(struct mutex *m,
670 			       struct super_block *s)
671 {
672 	reiserfs_lock_check_recursive(s);
673 	reiserfs_write_unlock(s);
674 	mutex_lock(m);
675 	reiserfs_write_lock(s);
676 }
677 
678 static inline void
679 reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
680 			       struct super_block *s)
681 {
682 	reiserfs_lock_check_recursive(s);
683 	reiserfs_write_unlock(s);
684 	mutex_lock_nested(m, subclass);
685 	reiserfs_write_lock(s);
686 }
687 
688 static inline void
689 reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
690 {
691 	reiserfs_lock_check_recursive(s);
692 	reiserfs_write_unlock(s);
693 	down_read(sem);
694 	reiserfs_write_lock(s);
695 }
696 
697 /*
698  * When we schedule, we usually want to also release the write lock,
699  * according to the previous bkl based locking scheme of reiserfs.
700  */
701 static inline void reiserfs_cond_resched(struct super_block *s)
702 {
703 	if (need_resched()) {
704 		reiserfs_write_unlock(s);
705 		schedule();
706 		reiserfs_write_lock(s);
707 	}
708 }
709 
710 struct fid;
711 
712 /* in reading the #defines, it may help to understand that they employ
713    the following abbreviations:
714 
715    B = Buffer
716    I = Item header
717    H = Height within the tree (should be changed to LEV)
718    N = Number of the item in the node
719    STAT = stat data
720    DEH = Directory Entry Header
721    EC = Entry Count
722    E = Entry number
723    UL = Unsigned Long
724    BLKH = BLocK Header
725    UNFM = UNForMatted node
726    DC = Disk Child
727    P = Path
728 
729    These #defines are named by concatenating these abbreviations,
730    where first comes the arguments, and last comes the return value,
731    of the macro.
732 
733 */
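
/* For instance, a name such as B_BLK_HEAD reads with this scheme as: the
   argument is a Buffer, and the result is its BLocK Header. */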
734 
735 #define USE_INODE_GENERATION_COUNTER
736 
737 #define REISERFS_PREALLOCATE
738 #define DISPLACE_NEW_PACKING_LOCALITIES
739 #define PREALLOCATION_SIZE 9
740 
741 /* n must be power of 2 */
742 #define _ROUND_UP(x,n) (((x)+(n)-1u) & ~((n)-1u))
743 
744 // to be ok for alpha and others we have to align structures to 8 byte
745 // boundary.
746 // FIXME: do not replace 4 with anything else: there is code which relies on that
747 #define ROUND_UP(x) _ROUND_UP(x,8LL)
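
/* e.g. _ROUND_UP(13, 8) == 16 and _ROUND_UP(16, 8) == 16; the mask trick in
 * _ROUND_UP only works because n is a power of two. */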
748 
749 /* debug levels.  Right now, CONFIG_REISERFS_CHECK means print all debug
750 ** messages.
751 */
752 #define REISERFS_DEBUG_CODE 5	/* extra messages to help find/debug errors */
753 
754 void __reiserfs_warning(struct super_block *s, const char *id,
755 			 const char *func, const char *fmt, ...);
756 #define reiserfs_warning(s, id, fmt, args...) \
757 	 __reiserfs_warning(s, id, __func__, fmt, ##args)
758 /* assertions handling */
759 
760 /** always check a condition and panic if it's false. */
761 #define __RASSERT(cond, scond, format, args...)			\
762 do {									\
763 	if (!(cond))							\
764 		reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \
765 			       __FILE__ ":%i:%s: " format "\n",		\
766 			       in_interrupt() ? -1 : task_pid_nr(current), \
767 			       __LINE__, __func__ , ##args);		\
768 } while (0)
769 
770 #define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args)
771 
772 #if defined( CONFIG_REISERFS_CHECK )
773 #define RFALSE(cond, format, args...) __RASSERT(!(cond), "!(" #cond ")", format, ##args)
774 #else
775 #define RFALSE( cond, format, args... ) do {;} while( 0 )
776 #endif
777 
778 #define CONSTF __attribute_const__
779 /*
780  * Disk Data Structures
781  */
782 
783 /***************************************************************************/
784 /*                             SUPER BLOCK                                 */
785 /***************************************************************************/
786 
787 /*
788  * Structure of the super block on disk, a version of which in RAM is often accessed as REISERFS_SB(s)->s_rs.
789  * The version in RAM is part of a larger structure containing fields never written to disk.
790  */
791 #define UNSET_HASH 0		// read_super will guess what hash names
792 		     // in directories were sorted with
793 #define TEA_HASH  1
794 #define YURA_HASH 2
795 #define R5_HASH   3
796 #define DEFAULT_HASH R5_HASH
797 
798 struct journal_params {
799 	__le32 jp_journal_1st_block;	/* where the journal starts on its
800 					 * device */
801 	__le32 jp_journal_dev;	/* journal device st_rdev */
802 	__le32 jp_journal_size;	/* size of the journal */
803 	__le32 jp_journal_trans_max;	/* max number of blocks in a transaction. */
804 	__le32 jp_journal_magic;	/* random value made on fs creation (this
805 					 * was sb_journal_block_count) */
806 	__le32 jp_journal_max_batch;	/* max number of blocks to batch into a
807 					 * trans */
808 	__le32 jp_journal_max_commit_age;	/* in seconds, how old can an async
809 						 * commit be */
810 	__le32 jp_journal_max_trans_age;	/* in seconds, how old can a transaction
811 						 * be */
812 };
813 
814 /* this is the super from 3.5.X, where X >= 10 */
815 struct reiserfs_super_block_v1 {
816 	__le32 s_block_count;	/* blocks count         */
817 	__le32 s_free_blocks;	/* free blocks count    */
818 	__le32 s_root_block;	/* root block number    */
819 	struct journal_params s_journal;
820 	__le16 s_blocksize;	/* block size */
821 	__le16 s_oid_maxsize;	/* max size of object id array, see
822 				 * get_objectid() commentary  */
823 	__le16 s_oid_cursize;	/* current size of object id array */
824 	__le16 s_umount_state;	/* this is set to 1 when the filesystem is
825 				 * cleanly unmounted, to 2 when it is not */
826 	char s_magic[10];	/* reiserfs magic string indicates that
827 				 * file system is reiserfs:
828 				 * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs" */
829 	__le16 s_fs_state;	/* used by fsck to mark which phase of
830 				 * rebuilding is done */
831 	__le32 s_hash_function_code;	/* indicates which hash function is used
832 					 * to sort names in a directory */
833 	__le16 s_tree_height;	/* height of disk tree */
834 	__le16 s_bmap_nr;	/* number of bitmap blocks needed to address
835 				 * each block of the file system */
836 	__le16 s_version;	/* this field is only reliable on filesystems
837 				 * with a non-standard journal */
838 	__le16 s_reserved_for_journal;	/* size in blocks of the journal area on the
839 					 * main device; we need to keep it after
840 					 * making an fs with a non-standard journal */
841 } __attribute__ ((__packed__));
842 
843 #define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1))
844 
845 /* this is the on disk super block */
846 struct reiserfs_super_block {
847 	struct reiserfs_super_block_v1 s_v1;
848 	__le32 s_inode_generation;
849 	__le32 s_flags;		/* Right now used only by inode-attributes, if enabled */
850 	unsigned char s_uuid[16];	/* filesystem unique identifier */
851 	unsigned char s_label[16];	/* filesystem volume label */
852 	__le16 s_mnt_count;		/* Count of mounts since last fsck */
853 	__le16 s_max_mnt_count;		/* Maximum mounts before check */
854 	__le32 s_lastcheck;		/* Timestamp of last fsck */
855 	__le32 s_check_interval;	/* Interval between checks */
856 	char s_unused[76];	/* zero filled by mkreiserfs and
857 				 * reiserfs_convert_objectid_map_v1()
858 				 * so any additions must be updated
859 				 * there as well. */
860 } __attribute__ ((__packed__));
861 
862 #define SB_SIZE (sizeof(struct reiserfs_super_block))
863 
864 #define REISERFS_VERSION_1 0
865 #define REISERFS_VERSION_2 2
866 
867 // on-disk super block fields converted to cpu form
868 #define SB_DISK_SUPER_BLOCK(s) (REISERFS_SB(s)->s_rs)
869 #define SB_V1_DISK_SUPER_BLOCK(s) (&(SB_DISK_SUPER_BLOCK(s)->s_v1))
870 #define SB_BLOCKSIZE(s) \
871         le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_blocksize))
872 #define SB_BLOCK_COUNT(s) \
873         le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_block_count))
874 #define SB_FREE_BLOCKS(s) \
875         le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks))
876 #define SB_REISERFS_MAGIC(s) \
877         (SB_V1_DISK_SUPER_BLOCK(s)->s_magic)
878 #define SB_ROOT_BLOCK(s) \
879         le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_root_block))
880 #define SB_TREE_HEIGHT(s) \
881         le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height))
882 #define SB_REISERFS_STATE(s) \
883         le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state))
884 #define SB_VERSION(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_version))
885 #define SB_BMAP_NR(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr))
886 
887 #define PUT_SB_BLOCK_COUNT(s, val) \
888    do { SB_V1_DISK_SUPER_BLOCK(s)->s_block_count = cpu_to_le32(val); } while (0)
889 #define PUT_SB_FREE_BLOCKS(s, val) \
890    do { SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks = cpu_to_le32(val); } while (0)
891 #define PUT_SB_ROOT_BLOCK(s, val) \
892    do { SB_V1_DISK_SUPER_BLOCK(s)->s_root_block = cpu_to_le32(val); } while (0)
893 #define PUT_SB_TREE_HEIGHT(s, val) \
894    do { SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height = cpu_to_le16(val); } while (0)
895 #define PUT_SB_REISERFS_STATE(s, val) \
896    do { SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state = cpu_to_le16(val); } while (0)
897 #define PUT_SB_VERSION(s, val) \
898    do { SB_V1_DISK_SUPER_BLOCK(s)->s_version = cpu_to_le16(val); } while (0)
899 #define PUT_SB_BMAP_NR(s, val) \
900    do { SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr = cpu_to_le16 (val); } while (0)
901 
902 #define SB_ONDISK_JP(s) (&SB_V1_DISK_SUPER_BLOCK(s)->s_journal)
903 #define SB_ONDISK_JOURNAL_SIZE(s) \
904          le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_size))
905 #define SB_ONDISK_JOURNAL_1st_BLOCK(s) \
906          le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_1st_block))
907 #define SB_ONDISK_JOURNAL_DEVICE(s) \
908          le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_dev))
909 #define SB_ONDISK_RESERVED_FOR_JOURNAL(s) \
910          le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_reserved_for_journal))
911 
912 #define is_block_in_log_or_reserved_area(s, block) \
913          block >= SB_JOURNAL_1st_RESERVED_BLOCK(s) \
914          && block < SB_JOURNAL_1st_RESERVED_BLOCK(s) +  \
915          ((!is_reiserfs_jr(SB_DISK_SUPER_BLOCK(s)) ? \
916          SB_ONDISK_JOURNAL_SIZE(s) + 1 : SB_ONDISK_RESERVED_FOR_JOURNAL(s)))
917 
918 int is_reiserfs_3_5(struct reiserfs_super_block *rs);
919 int is_reiserfs_3_6(struct reiserfs_super_block *rs);
920 int is_reiserfs_jr(struct reiserfs_super_block *rs);
921 
922 /* ReiserFS leaves the first 64k unused, so that partition labels have
923    enough space.  If someone wants to write a fancy bootloader that
924    needs more than 64k, let us know, and this will be increased in size.
925    This number must be larger than the largest block size on any
926    platform, or code will break.  -Hans */
927 #define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024)
928 #define REISERFS_FIRST_BLOCK unused_define
929 #define REISERFS_JOURNAL_OFFSET_IN_BYTES REISERFS_DISK_OFFSET_IN_BYTES
930 
931 /* the spot for the super in versions 3.5 - 3.5.10 (inclusive) */
932 #define REISERFS_OLD_DISK_OFFSET_IN_BYTES (8 * 1024)
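
/* e.g. with a 4KiB block size the current super block therefore sits in
 * block 16 (64KiB / 4KiB), while the old location above corresponds to
 * block 2 (8KiB / 4KiB). */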
933 
934 /* reiserfs internal error codes (used by search_by_key and fix_nodes) */
935 #define CARRY_ON      0
936 #define REPEAT_SEARCH -1
937 #define IO_ERROR      -2
938 #define NO_DISK_SPACE -3
939 #define NO_BALANCING_NEEDED  (-4)
940 #define NO_MORE_UNUSED_CONTIGUOUS_BLOCKS (-5)
941 #define QUOTA_EXCEEDED -6
942 
943 typedef __u32 b_blocknr_t;
944 typedef __le32 unp_t;
945 
946 struct unfm_nodeinfo {
947 	unp_t unfm_nodenum;
948 	unsigned short unfm_freespace;
949 };
950 
951 /* there are two formats of keys: 3.5 and 3.6
952  */
953 #define KEY_FORMAT_3_5 0
954 #define KEY_FORMAT_3_6 1
955 
956 /* there are two stat datas */
957 #define STAT_DATA_V1 0
958 #define STAT_DATA_V2 1
959 
960 static inline struct reiserfs_inode_info *REISERFS_I(const struct inode *inode)
961 {
962 	return container_of(inode, struct reiserfs_inode_info, vfs_inode);
963 }
964 
965 static inline struct reiserfs_sb_info *REISERFS_SB(const struct super_block *sb)
966 {
967 	return sb->s_fs_info;
968 }
969 
970 /* Don't trust REISERFS_SB(sb)->s_bmap_nr, it's a u16
971  * which overflows on large file systems. */
972 static inline __u32 reiserfs_bmap_count(struct super_block *sb)
973 {
974 	return (SB_BLOCK_COUNT(sb) - 1) / (sb->s_blocksize * 8) + 1;
975 }
976 
977 static inline int bmap_would_wrap(unsigned bmap_nr)
978 {
979 	return bmap_nr > ((1LL << 16) - 1);
980 }
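
/*
 * Worked example for the two helpers above: with 4KiB blocks one bitmap
 * block tracks 4096 * 8 = 32768 blocks, so a 2097152 block (8GiB)
 * filesystem needs (2097152 - 1) / 32768 + 1 = 64 bitmap blocks.  The
 * 16 bit on-disk s_bmap_nr only becomes a problem somewhere above 8TiB
 * (with 4KiB blocks), which is what bmap_would_wrap() checks for.
 */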
981 
982 /** this says which key version is used by all items (except stat data)
983     that the object consists of */
984 #define get_inode_item_key_version( inode )                                    \
985     ((REISERFS_I(inode)->i_flags & i_item_key_version_mask) ? KEY_FORMAT_3_6 : KEY_FORMAT_3_5)
986 
987 #define set_inode_item_key_version( inode, version )                           \
988          ({ if((version)==KEY_FORMAT_3_6)                                      \
989                 REISERFS_I(inode)->i_flags |= i_item_key_version_mask;      \
990             else                                                               \
991                 REISERFS_I(inode)->i_flags &= ~i_item_key_version_mask; })
992 
993 #define get_inode_sd_version(inode)                                            \
994     ((REISERFS_I(inode)->i_flags & i_stat_data_version_mask) ? STAT_DATA_V2 : STAT_DATA_V1)
995 
996 #define set_inode_sd_version(inode, version)                                   \
997          ({ if((version)==STAT_DATA_V2)                                        \
998                 REISERFS_I(inode)->i_flags |= i_stat_data_version_mask;     \
999             else                                                               \
1000                 REISERFS_I(inode)->i_flags &= ~i_stat_data_version_mask; })
1001 
1002 /* This is an aggressive tail suppression policy, I am hoping it
1003    improves our benchmarks. The principle behind it is that percentage
1004    space saving is what matters, not absolute space saving.  This is
1005    non-intuitive, but it helps to understand it if you consider that the
1006    cost to access 4 blocks is not much more than the cost to access 1
1007    block, if you have to do a seek and rotate.  A tail risks a
1008    non-linear disk access that is significant as a percentage of total
1009    time cost for a 4 block file and saves an amount of space that is
1010    less significant as a percentage of space, or so goes the hypothesis.
1011    -Hans */
1012 #define STORE_TAIL_IN_UNFM_S1(n_file_size,n_tail_size,n_block_size) \
1013 (\
1014   (!(n_tail_size)) || \
1015   (((n_tail_size) > MAX_DIRECT_ITEM_LEN(n_block_size)) || \
1016    ( (n_file_size) >= (n_block_size) * 4 ) || \
1017    ( ( (n_file_size) >= (n_block_size) * 3 ) && \
1018      ( (n_tail_size) >=   (MAX_DIRECT_ITEM_LEN(n_block_size))/4) ) || \
1019    ( ( (n_file_size) >= (n_block_size) * 2 ) && \
1020      ( (n_tail_size) >=   (MAX_DIRECT_ITEM_LEN(n_block_size))/2) ) || \
1021    ( ( (n_file_size) >= (n_block_size) ) && \
1022      ( (n_tail_size) >=   (MAX_DIRECT_ITEM_LEN(n_block_size) * 3)/4) ) ) \
1023 )
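
/* Reading the cascade above top to bottom, STORE_TAIL_IN_UNFM_S1() evaluates
 * true (i.e. do not keep a direct-item tail) when: there is no tail at all,
 * the tail will not fit in a direct item, the file is at least 4 blocks
 * long, or the file is 3, 2 or 1 blocks long and the tail is at least 1/4,
 * 1/2 or 3/4 of MAX_DIRECT_ITEM_LEN respectively. */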
1024 
1025 /* Another strategy for tails, this one means only create a tail if all the
1026    file would fit into one DIRECT item.
1027    Primary intention for this one is to increase performance by decreasing
1028    seeking.
1029 */
1030 #define STORE_TAIL_IN_UNFM_S2(n_file_size,n_tail_size,n_block_size) \
1031 (\
1032   (!(n_tail_size)) || \
1033   (((n_file_size) > MAX_DIRECT_ITEM_LEN(n_block_size)) ) \
1034 )
1035 
1036 /*
1037  * values for s_umount_state field
1038  */
1039 #define REISERFS_VALID_FS    1
1040 #define REISERFS_ERROR_FS    2
1041 
1042 //
1043 // there are 5 item types currently
1044 //
1045 #define TYPE_STAT_DATA 0
1046 #define TYPE_INDIRECT 1
1047 #define TYPE_DIRECT 2
1048 #define TYPE_DIRENTRY 3
1049 #define TYPE_MAXTYPE 3
1050 #define TYPE_ANY 15		// FIXME: comment is required
1051 
1052 /***************************************************************************/
1053 /*                       KEY & ITEM HEAD                                   */
1054 /***************************************************************************/
1055 
1056 //
1057 // directories use this key as well as old files
1058 //
1059 struct offset_v1 {
1060 	__le32 k_offset;
1061 	__le32 k_uniqueness;
1062 } __attribute__ ((__packed__));
1063 
1064 struct offset_v2 {
1065 	__le64 v;
1066 } __attribute__ ((__packed__));
1067 
1068 static inline __u16 offset_v2_k_type(const struct offset_v2 *v2)
1069 {
1070 	__u8 type = le64_to_cpu(v2->v) >> 60;
1071 	return (type <= TYPE_MAXTYPE) ? type : TYPE_ANY;
1072 }
1073 
1074 static inline void set_offset_v2_k_type(struct offset_v2 *v2, int type)
1075 {
1076 	v2->v =
1077 	    (v2->v & cpu_to_le64(~0ULL >> 4)) | cpu_to_le64((__u64) type << 60);
1078 }
1079 
1080 static inline loff_t offset_v2_k_offset(const struct offset_v2 *v2)
1081 {
1082 	return le64_to_cpu(v2->v) & (~0ULL >> 4);
1083 }
1084 
1085 static inline void set_offset_v2_k_offset(struct offset_v2 *v2, loff_t offset)
1086 {
1087 	offset &= (~0ULL >> 4);
1088 	v2->v = (v2->v & cpu_to_le64(15ULL << 60)) | cpu_to_le64(offset);
1089 }
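
/* Layout sketch for offset_v2: the low 60 bits of v hold the offset and the
 * top 4 bits the type, so e.g. offset 0x123 of a TYPE_INDIRECT (1) item is
 * stored, before cpu_to_le64(), as (1ULL << 60) | 0x123. */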
1090 
1091 /* Key of an item determines its location in the S+tree, and
1092    is composed of 4 components */
1093 struct reiserfs_key {
1094 	__le32 k_dir_id;	/* packing locality: by default parent
1095 				   directory object id */
1096 	__le32 k_objectid;	/* object identifier */
1097 	union {
1098 		struct offset_v1 k_offset_v1;
1099 		struct offset_v2 k_offset_v2;
1100 	} __attribute__ ((__packed__)) u;
1101 } __attribute__ ((__packed__));
1102 
1103 struct in_core_key {
1104 	__u32 k_dir_id;		/* packing locality: by default parent
1105 				   directory object id */
1106 	__u32 k_objectid;	/* object identifier */
1107 	__u64 k_offset;
1108 	__u8 k_type;
1109 };
1110 
1111 struct cpu_key {
1112 	struct in_core_key on_disk_key;
1113 	int version;
1114 	int key_length;		/* 3 in all cases but direct2indirect and
1115 				   indirect2direct conversion */
1116 };
1117 
1118 /* Our function for comparing keys can compare keys of different
1119    lengths.  It takes as a parameter the length of the keys it is to
1120    compare.  These defines are used in determining what is to be passed
1121    to it as that parameter. */
1122 #define REISERFS_FULL_KEY_LEN     4
1123 #define REISERFS_SHORT_KEY_LEN    2
1124 
1125 /* The result of the key compare */
1126 #define FIRST_GREATER 1
1127 #define SECOND_GREATER -1
1128 #define KEYS_IDENTICAL 0
1129 #define KEY_FOUND 1
1130 #define KEY_NOT_FOUND 0
1131 
1132 #define KEY_SIZE (sizeof(struct reiserfs_key))
1133 #define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32))
1134 
1135 /* return values for search_by_key and clones */
1136 #define ITEM_FOUND 1
1137 #define ITEM_NOT_FOUND 0
1138 #define ENTRY_FOUND 1
1139 #define ENTRY_NOT_FOUND 0
1140 #define DIRECTORY_NOT_FOUND -1
1141 #define REGULAR_FILE_FOUND -2
1142 #define DIRECTORY_FOUND -3
1143 #define BYTE_FOUND 1
1144 #define BYTE_NOT_FOUND 0
1145 #define FILE_NOT_FOUND -1
1146 
1147 #define POSITION_FOUND 1
1148 #define POSITION_NOT_FOUND 0
1149 
1150 // return values for reiserfs_find_entry and search_by_entry_key
1151 #define NAME_FOUND 1
1152 #define NAME_NOT_FOUND 0
1153 #define GOTO_PREVIOUS_ITEM 2
1154 #define NAME_FOUND_INVISIBLE 3
1155 
1156 /*  Everything in the filesystem is stored as a set of items.  The
1157     item head contains the key of the item, its free space (for
1158     indirect items) and specifies the location of the item itself
1159     within the block.  */
1160 
1161 struct item_head {
1162 	/* Everything in the tree is found by searching for it based on
1163 	 * its key.*/
1164 	struct reiserfs_key ih_key;
1165 	union {
1166 		/* The free space in the last unformatted node of an
1167 		   indirect item if this is an indirect item.  This
1168 		   equals 0xFFFF iff this is a direct item or stat data
1169 		   item. Note that the key, not this field, is used to
1170 		   determine the item type, and thus which field this
1171 		   union contains. */
1172 		__le16 ih_free_space_reserved;
1173 		/* Iff this is a directory item, this field equals the
1174 		   number of directory entries in the directory item. */
1175 		__le16 ih_entry_count;
1176 	} __attribute__ ((__packed__)) u;
1177 	__le16 ih_item_len;	/* total size of the item body */
1178 	__le16 ih_item_location;	/* an offset to the item body
1179 					 * within the block */
1180 	__le16 ih_version;	/* 0 for all old items, 2 for new
1181 				   ones. The highest bit is set by fsck
1182 				   temporarily and cleared when it is
1183 				   done */
1184 } __attribute__ ((__packed__));
1185 /* size of item header     */
1186 #define IH_SIZE (sizeof(struct item_head))
1187 
1188 #define ih_free_space(ih)            le16_to_cpu((ih)->u.ih_free_space_reserved)
1189 #define ih_version(ih)               le16_to_cpu((ih)->ih_version)
1190 #define ih_entry_count(ih)           le16_to_cpu((ih)->u.ih_entry_count)
1191 #define ih_location(ih)              le16_to_cpu((ih)->ih_item_location)
1192 #define ih_item_len(ih)              le16_to_cpu((ih)->ih_item_len)
1193 
1194 #define put_ih_free_space(ih, val)   do { (ih)->u.ih_free_space_reserved = cpu_to_le16(val); } while(0)
1195 #define put_ih_version(ih, val)      do { (ih)->ih_version = cpu_to_le16(val); } while (0)
1196 #define put_ih_entry_count(ih, val)  do { (ih)->u.ih_entry_count = cpu_to_le16(val); } while (0)
1197 #define put_ih_location(ih, val)     do { (ih)->ih_item_location = cpu_to_le16(val); } while (0)
1198 #define put_ih_item_len(ih, val)     do { (ih)->ih_item_len = cpu_to_le16(val); } while (0)
1199 
1200 #define unreachable_item(ih) (ih_version(ih) & (1 << 15))
1201 
1202 #define get_ih_free_space(ih) (ih_version (ih) == KEY_FORMAT_3_6 ? 0 : ih_free_space (ih))
1203 #define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == KEY_FORMAT_3_6) ? 0 : (val)))
1204 
1205 /* these operate on indirect items, where you've got an array of ints
1206 ** at a possibly unaligned location.  These are a noop on ia32
1207 **
1208 ** p is the array of __u32, i is the index into the array, v is the value
1209 ** to store there.
1210 */
1211 #define get_block_num(p, i) get_unaligned_le32((p) + (i))
1212 #define put_block_num(p, i, v) put_unaligned_le32((v), (p) + (i))
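
/* e.g. a hedged sketch of walking the "count" pointers of an indirect item
 * body "item" (names are illustrative):
 *
 *	for (i = 0; i < count; i++)
 *		blocknr = get_block_num((__le32 *)item, i);
 */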
1213 
1214 //
1215 // in old version uniqueness field shows key type
1216 //
1217 #define V1_SD_UNIQUENESS 0
1218 #define V1_INDIRECT_UNIQUENESS 0xfffffffe
1219 #define V1_DIRECT_UNIQUENESS 0xffffffff
1220 #define V1_DIRENTRY_UNIQUENESS 500
1221 #define V1_ANY_UNIQUENESS 555	// FIXME: comment is required
1222 
1223 //
1224 // here are conversion routines
1225 //
1226 static inline int uniqueness2type(__u32 uniqueness) CONSTF;
1227 static inline int uniqueness2type(__u32 uniqueness)
1228 {
1229 	switch ((int)uniqueness) {
1230 	case V1_SD_UNIQUENESS:
1231 		return TYPE_STAT_DATA;
1232 	case V1_INDIRECT_UNIQUENESS:
1233 		return TYPE_INDIRECT;
1234 	case V1_DIRECT_UNIQUENESS:
1235 		return TYPE_DIRECT;
1236 	case V1_DIRENTRY_UNIQUENESS:
1237 		return TYPE_DIRENTRY;
1238 	case V1_ANY_UNIQUENESS:
1239 	default:
1240 		return TYPE_ANY;
1241 	}
1242 }
1243 
1244 static inline __u32 type2uniqueness(int type) CONSTF;
1245 static inline __u32 type2uniqueness(int type)
1246 {
1247 	switch (type) {
1248 	case TYPE_STAT_DATA:
1249 		return V1_SD_UNIQUENESS;
1250 	case TYPE_INDIRECT:
1251 		return V1_INDIRECT_UNIQUENESS;
1252 	case TYPE_DIRECT:
1253 		return V1_DIRECT_UNIQUENESS;
1254 	case TYPE_DIRENTRY:
1255 		return V1_DIRENTRY_UNIQUENESS;
1256 	case TYPE_ANY:
1257 	default:
1258 		return V1_ANY_UNIQUENESS;
1259 	}
1260 }
1261 
1262 //
1263 // key is a pointer to the on-disk key, which is stored in little endian;
1264 // the result is in cpu order.  There is no way to get the version of an
1265 // object from its key, so the version must be provided to these defines.
1266 //
1267 static inline loff_t le_key_k_offset(int version,
1268 				     const struct reiserfs_key *key)
1269 {
1270 	return (version == KEY_FORMAT_3_5) ?
1271 	    le32_to_cpu(key->u.k_offset_v1.k_offset) :
1272 	    offset_v2_k_offset(&(key->u.k_offset_v2));
1273 }
1274 
1275 static inline loff_t le_ih_k_offset(const struct item_head *ih)
1276 {
1277 	return le_key_k_offset(ih_version(ih), &(ih->ih_key));
1278 }
1279 
1280 static inline loff_t le_key_k_type(int version, const struct reiserfs_key *key)
1281 {
1282 	return (version == KEY_FORMAT_3_5) ?
1283 	    uniqueness2type(le32_to_cpu(key->u.k_offset_v1.k_uniqueness)) :
1284 	    offset_v2_k_type(&(key->u.k_offset_v2));
1285 }
1286 
1287 static inline loff_t le_ih_k_type(const struct item_head *ih)
1288 {
1289 	return le_key_k_type(ih_version(ih), &(ih->ih_key));
1290 }
1291 
1292 static inline void set_le_key_k_offset(int version, struct reiserfs_key *key,
1293 				       loff_t offset)
1294 {
1295 	(version == KEY_FORMAT_3_5) ? (void)(key->u.k_offset_v1.k_offset = cpu_to_le32(offset)) :	/* jdm check */
1296 	    (void)(set_offset_v2_k_offset(&(key->u.k_offset_v2), offset));
1297 }
1298 
1299 static inline void set_le_ih_k_offset(struct item_head *ih, loff_t offset)
1300 {
1301 	set_le_key_k_offset(ih_version(ih), &(ih->ih_key), offset);
1302 }
1303 
1304 static inline void set_le_key_k_type(int version, struct reiserfs_key *key,
1305 				     int type)
1306 {
1307 	(version == KEY_FORMAT_3_5) ?
1308 	    (void)(key->u.k_offset_v1.k_uniqueness =
1309 		   cpu_to_le32(type2uniqueness(type)))
1310 	    : (void)(set_offset_v2_k_type(&(key->u.k_offset_v2), type));
1311 }
1312 
1313 static inline void set_le_ih_k_type(struct item_head *ih, int type)
1314 {
1315 	set_le_key_k_type(ih_version(ih), &(ih->ih_key), type);
1316 }
1317 
1318 static inline int is_direntry_le_key(int version, struct reiserfs_key *key)
1319 {
1320 	return le_key_k_type(version, key) == TYPE_DIRENTRY;
1321 }
1322 
1323 static inline int is_direct_le_key(int version, struct reiserfs_key *key)
1324 {
1325 	return le_key_k_type(version, key) == TYPE_DIRECT;
1326 }
1327 
1328 static inline int is_indirect_le_key(int version, struct reiserfs_key *key)
1329 {
1330 	return le_key_k_type(version, key) == TYPE_INDIRECT;
1331 }
1332 
1333 static inline int is_statdata_le_key(int version, struct reiserfs_key *key)
1334 {
1335 	return le_key_k_type(version, key) == TYPE_STAT_DATA;
1336 }
1337 
1338 //
1339 // item header has version.
1340 //
1341 static inline int is_direntry_le_ih(struct item_head *ih)
1342 {
1343 	return is_direntry_le_key(ih_version(ih), &ih->ih_key);
1344 }
1345 
1346 static inline int is_direct_le_ih(struct item_head *ih)
1347 {
1348 	return is_direct_le_key(ih_version(ih), &ih->ih_key);
1349 }
1350 
1351 static inline int is_indirect_le_ih(struct item_head *ih)
1352 {
1353 	return is_indirect_le_key(ih_version(ih), &ih->ih_key);
1354 }
1355 
1356 static inline int is_statdata_le_ih(struct item_head *ih)
1357 {
1358 	return is_statdata_le_key(ih_version(ih), &ih->ih_key);
1359 }
1360 
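/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how the le key helpers above are typically combined to read and rewrite
 * the key stored in an item head.  The function name is hypothetical.
 */
static inline void example_shift_direct_offset(struct item_head *ih,
					       loff_t delta)
{
	if (le_ih_k_type(ih) == TYPE_DIRECT)
		set_le_ih_k_offset(ih, le_ih_k_offset(ih) + delta);
}
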
1361 //
1362 // key is a pointer to a cpu key; the result is in cpu order
1363 //
1364 static inline loff_t cpu_key_k_offset(const struct cpu_key *key)
1365 {
1366 	return key->on_disk_key.k_offset;
1367 }
1368 
1369 static inline loff_t cpu_key_k_type(const struct cpu_key *key)
1370 {
1371 	return key->on_disk_key.k_type;
1372 }
1373 
1374 static inline void set_cpu_key_k_offset(struct cpu_key *key, loff_t offset)
1375 {
1376 	key->on_disk_key.k_offset = offset;
1377 }
1378 
1379 static inline void set_cpu_key_k_type(struct cpu_key *key, int type)
1380 {
1381 	key->on_disk_key.k_type = type;
1382 }
1383 
1384 static inline void cpu_key_k_offset_dec(struct cpu_key *key)
1385 {
1386 	key->on_disk_key.k_offset--;
1387 }
1388 
1389 #define is_direntry_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRENTRY)
1390 #define is_direct_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRECT)
1391 #define is_indirect_cpu_key(key) (cpu_key_k_type (key) == TYPE_INDIRECT)
1392 #define is_statdata_cpu_key(key) (cpu_key_k_type (key) == TYPE_STAT_DATA)
1393 
1394 /* are these used ? */
1395 #define is_direntry_cpu_ih(ih) (is_direntry_cpu_key (&((ih)->ih_key)))
1396 #define is_direct_cpu_ih(ih) (is_direct_cpu_key (&((ih)->ih_key)))
1397 #define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key)))
1398 #define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key)))
1399 
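/*
 * Illustrative sketch (editor's addition): a cpu key is already in cpu
 * order, so it can be adjusted directly through the setters above without
 * any endian conversion.  The function name is hypothetical.
 */
static inline void example_advance_cpu_key(struct cpu_key *key, loff_t bytes)
{
	set_cpu_key_k_offset(key, cpu_key_k_offset(key) + bytes);
}
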
1400 #define I_K_KEY_IN_ITEM(ih, key, n_blocksize) \
1401     (!COMP_SHORT_KEYS(ih, key) && \
1402 	  I_OFF_BYTE_IN_ITEM(ih, k_offset(key), n_blocksize))
1403 
1404 /* maximal length of item */
1405 #define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE)
1406 #define MIN_ITEM_LEN 1
1407 
1408 /* object identifier for root dir */
1409 #define REISERFS_ROOT_OBJECTID 2
1410 #define REISERFS_ROOT_PARENT_OBJECTID 1
1411 
1412 extern struct reiserfs_key root_key;
1413 
1414 /*
1415  * Picture represents a leaf of the S+tree
1416  *  ______________________________________________________
1417  * |      |  Array of     |                   |           |
1418  * |Block |  Object-Item  |      F r e e      |  Objects- |
1419  * | head |  Headers      |     S p a c e     |   Items   |
1420  * |______|_______________|___________________|___________|
1421  */
1422 
1423 /* Header of a disk block.  More precisely, header of a formatted leaf
1424    or internal node, and not the header of an unformatted node. */
1425 struct block_head {
1426 	__le16 blk_level;	/* Level of a block in the tree. */
1427 	__le16 blk_nr_item;	/* Number of keys/items in a block. */
1428 	__le16 blk_free_space;	/* Block free space in bytes. */
1429 	__le16 blk_reserved;
1430 	/* dump this in v4/planA */
1431 	struct reiserfs_key blk_right_delim_key;	/* kept only for compatibility */
1432 };
1433 
1434 #define BLKH_SIZE                     (sizeof(struct block_head))
1435 #define blkh_level(p_blkh)            (le16_to_cpu((p_blkh)->blk_level))
1436 #define blkh_nr_item(p_blkh)          (le16_to_cpu((p_blkh)->blk_nr_item))
1437 #define blkh_free_space(p_blkh)       (le16_to_cpu((p_blkh)->blk_free_space))
1438 #define blkh_reserved(p_blkh)         (le16_to_cpu((p_blkh)->blk_reserved))
1439 #define set_blkh_level(p_blkh,val)    ((p_blkh)->blk_level = cpu_to_le16(val))
1440 #define set_blkh_nr_item(p_blkh,val)  ((p_blkh)->blk_nr_item = cpu_to_le16(val))
1441 #define set_blkh_free_space(p_blkh,val) ((p_blkh)->blk_free_space = cpu_to_le16(val))
1442 #define set_blkh_reserved(p_blkh,val) ((p_blkh)->blk_reserved = cpu_to_le16(val))
1443 #define blkh_right_delim_key(p_blkh)  ((p_blkh)->blk_right_delim_key)
1444 #define set_blkh_right_delim_key(p_blkh,val)  ((p_blkh)->blk_right_delim_key = val)
1445 
1446 /*
1447  * values for blk_level field of the struct block_head
1448  */
1449 
1450 #define FREE_LEVEL 0		/* when node gets removed from the tree its
1451 				   blk_level is set to FREE_LEVEL. It is then
1452 				   used to see whether the node is still in the
1453 				   tree */
1454 
1455 #define DISK_LEAF_NODE_LEVEL  1	/* Leaf node level. */
1456 
1457 /* Given the buffer head of a formatted node, resolve to the block head of that node. */
1458 #define B_BLK_HEAD(bh)			((struct block_head *)((bh)->b_data))
1459 /* Number of items that are in buffer. */
1460 #define B_NR_ITEMS(bh)			(blkh_nr_item(B_BLK_HEAD(bh)))
1461 #define B_LEVEL(bh)			(blkh_level(B_BLK_HEAD(bh)))
1462 #define B_FREE_SPACE(bh)		(blkh_free_space(B_BLK_HEAD(bh)))
1463 
1464 #define PUT_B_NR_ITEMS(bh, val)		do { set_blkh_nr_item(B_BLK_HEAD(bh), val); } while (0)
1465 #define PUT_B_LEVEL(bh, val)		do { set_blkh_level(B_BLK_HEAD(bh), val); } while (0)
1466 #define PUT_B_FREE_SPACE(bh, val)	do { set_blkh_free_space(B_BLK_HEAD(bh), val); } while (0)
1467 
1468 /* Get right delimiting key. -- little endian */
1469 #define B_PRIGHT_DELIM_KEY(bh)		(&(blk_right_delim_key(B_BLK_HEAD(bh))))
1470 
1471 /* Does the buffer contain a disk leaf. */
1472 #define B_IS_ITEMS_LEVEL(bh)		(B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL)
1473 
1474 /* Does the buffer contain a disk internal node */
1475 #define B_IS_KEYS_LEVEL(bh)      (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \
1476 					    && B_LEVEL(bh) <= MAX_HEIGHT)
1477 
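/*
 * Illustrative sketch (editor's addition): a formatted node is interrogated
 * through its block head, e.g. to ask whether it is a leaf and, if so, how
 * many items it currently holds.  The function name is hypothetical.
 */
static inline int example_leaf_item_count(const struct buffer_head *bh)
{
	return B_IS_ITEMS_LEVEL(bh) ? B_NR_ITEMS(bh) : -1;
}
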
1478 /***************************************************************************/
1479 /*                             STAT DATA                                   */
1480 /***************************************************************************/
1481 
1482 //
1483 // old stat data is 32 bytes long. We distinguish the new one by its
1484 // different size
1485 //
1486 struct stat_data_v1 {
1487 	__le16 sd_mode;		/* file type, permissions */
1488 	__le16 sd_nlink;	/* number of hard links */
1489 	__le16 sd_uid;		/* owner */
1490 	__le16 sd_gid;		/* group */
1491 	__le32 sd_size;		/* file size */
1492 	__le32 sd_atime;	/* time of last access */
1493 	__le32 sd_mtime;	/* time file was last modified  */
1494 	__le32 sd_ctime;	/* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
1495 	union {
1496 		__le32 sd_rdev;
1497 		__le32 sd_blocks;	/* number of blocks file uses */
1498 	} __attribute__ ((__packed__)) u;
1499 	__le32 sd_first_direct_byte;	/* first byte of file which is stored
1500 					   in a direct item: except that if it
1501 					   equals 1 it is a symlink and if it
1502 					   equals ~(__u32)0 there is no
1503 					   direct item.  The existence of this
1504 					   field really grates on me. Let's
1505 					   replace it with a macro based on
1506 					   sd_size and our tail suppression
1507 					   policy.  Someday.  -Hans */
1508 } __attribute__ ((__packed__));
1509 
1510 #define SD_V1_SIZE              (sizeof(struct stat_data_v1))
1511 #define stat_data_v1(ih)        (ih_version (ih) == KEY_FORMAT_3_5)
1512 #define sd_v1_mode(sdp)         (le16_to_cpu((sdp)->sd_mode))
1513 #define set_sd_v1_mode(sdp,v)   ((sdp)->sd_mode = cpu_to_le16(v))
1514 #define sd_v1_nlink(sdp)        (le16_to_cpu((sdp)->sd_nlink))
1515 #define set_sd_v1_nlink(sdp,v)  ((sdp)->sd_nlink = cpu_to_le16(v))
1516 #define sd_v1_uid(sdp)          (le16_to_cpu((sdp)->sd_uid))
1517 #define set_sd_v1_uid(sdp,v)    ((sdp)->sd_uid = cpu_to_le16(v))
1518 #define sd_v1_gid(sdp)          (le16_to_cpu((sdp)->sd_gid))
1519 #define set_sd_v1_gid(sdp,v)    ((sdp)->sd_gid = cpu_to_le16(v))
1520 #define sd_v1_size(sdp)         (le32_to_cpu((sdp)->sd_size))
1521 #define set_sd_v1_size(sdp,v)   ((sdp)->sd_size = cpu_to_le32(v))
1522 #define sd_v1_atime(sdp)        (le32_to_cpu((sdp)->sd_atime))
1523 #define set_sd_v1_atime(sdp,v)  ((sdp)->sd_atime = cpu_to_le32(v))
1524 #define sd_v1_mtime(sdp)        (le32_to_cpu((sdp)->sd_mtime))
1525 #define set_sd_v1_mtime(sdp,v)  ((sdp)->sd_mtime = cpu_to_le32(v))
1526 #define sd_v1_ctime(sdp)        (le32_to_cpu((sdp)->sd_ctime))
1527 #define set_sd_v1_ctime(sdp,v)  ((sdp)->sd_ctime = cpu_to_le32(v))
1528 #define sd_v1_rdev(sdp)         (le32_to_cpu((sdp)->u.sd_rdev))
1529 #define set_sd_v1_rdev(sdp,v)   ((sdp)->u.sd_rdev = cpu_to_le32(v))
1530 #define sd_v1_blocks(sdp)       (le32_to_cpu((sdp)->u.sd_blocks))
1531 #define set_sd_v1_blocks(sdp,v) ((sdp)->u.sd_blocks = cpu_to_le32(v))
1532 #define sd_v1_first_direct_byte(sdp) \
1533                                 (le32_to_cpu((sdp)->sd_first_direct_byte))
1534 #define set_sd_v1_first_direct_byte(sdp,v) \
1535                                 ((sdp)->sd_first_direct_byte = cpu_to_le32(v))
1536 
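/*
 * Illustrative sketch (editor's addition): v1 stat data fields are little
 * endian on disk and are read through the accessors above; this uses the
 * sd_first_direct_byte convention documented in the struct.  The function
 * name is hypothetical.
 */
static inline int example_sd_v1_has_direct_item(const struct stat_data_v1 *sdp)
{
	return sd_v1_first_direct_byte(sdp) != ~(__u32) 0;
}
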
1537 /* inode flags stored in sd_attrs (nee sd_reserved) */
1538 
1539 /* we want common flags to have the same values as in ext2,
1540    so chattr(1) will work without problems */
1541 #define REISERFS_IMMUTABLE_FL FS_IMMUTABLE_FL
1542 #define REISERFS_APPEND_FL    FS_APPEND_FL
1543 #define REISERFS_SYNC_FL      FS_SYNC_FL
1544 #define REISERFS_NOATIME_FL   FS_NOATIME_FL
1545 #define REISERFS_NODUMP_FL    FS_NODUMP_FL
1546 #define REISERFS_SECRM_FL     FS_SECRM_FL
1547 #define REISERFS_UNRM_FL      FS_UNRM_FL
1548 #define REISERFS_COMPR_FL     FS_COMPR_FL
1549 #define REISERFS_NOTAIL_FL    FS_NOTAIL_FL
1550 
1551 /* persistent flags that file inherits from the parent directory */
1552 #define REISERFS_INHERIT_MASK ( REISERFS_IMMUTABLE_FL |	\
1553 				REISERFS_SYNC_FL |	\
1554 				REISERFS_NOATIME_FL |	\
1555 				REISERFS_NODUMP_FL |	\
1556 				REISERFS_SECRM_FL |	\
1557 				REISERFS_COMPR_FL |	\
1558 				REISERFS_NOTAIL_FL )
1559 
1560 /* Stat Data on disk (reiserfs version of UFS disk inode minus the
1561    address blocks) */
1562 struct stat_data {
1563 	__le16 sd_mode;		/* file type, permissions */
1564 	__le16 sd_attrs;	/* persistent inode flags */
1565 	__le32 sd_nlink;	/* number of hard links */
1566 	__le64 sd_size;		/* file size */
1567 	__le32 sd_uid;		/* owner */
1568 	__le32 sd_gid;		/* group */
1569 	__le32 sd_atime;	/* time of last access */
1570 	__le32 sd_mtime;	/* time file was last modified  */
1571 	__le32 sd_ctime;	/* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
1572 	__le32 sd_blocks;
1573 	union {
1574 		__le32 sd_rdev;
1575 		__le32 sd_generation;
1576 		//__le32 sd_first_direct_byte;
1577 		/* first byte of file which is stored in a
1578 		   direct item: except that if it equals 1
1579 		   it is a symlink and if it equals
1580 		   ~(__u32)0 there is no direct item.  The
1581 		   existence of this field really grates
1582 		   on me. Let's replace it with a macro
1583 		   based on sd_size and our tail
1584 		   suppression policy? */
1585 	} __attribute__ ((__packed__)) u;
1586 } __attribute__ ((__packed__));
1587 //
1588 // this is 44 bytes long
1589 //
1590 #define SD_SIZE (sizeof(struct stat_data))
1591 #define SD_V2_SIZE              SD_SIZE
1592 #define stat_data_v2(ih)        (ih_version (ih) == KEY_FORMAT_3_6)
1593 #define sd_v2_mode(sdp)         (le16_to_cpu((sdp)->sd_mode))
1594 #define set_sd_v2_mode(sdp,v)   ((sdp)->sd_mode = cpu_to_le16(v))
1595 /* sd_reserved */
1596 /* set_sd_reserved */
1597 #define sd_v2_nlink(sdp)        (le32_to_cpu((sdp)->sd_nlink))
1598 #define set_sd_v2_nlink(sdp,v)  ((sdp)->sd_nlink = cpu_to_le32(v))
1599 #define sd_v2_size(sdp)         (le64_to_cpu((sdp)->sd_size))
1600 #define set_sd_v2_size(sdp,v)   ((sdp)->sd_size = cpu_to_le64(v))
1601 #define sd_v2_uid(sdp)          (le32_to_cpu((sdp)->sd_uid))
1602 #define set_sd_v2_uid(sdp,v)    ((sdp)->sd_uid = cpu_to_le32(v))
1603 #define sd_v2_gid(sdp)          (le32_to_cpu((sdp)->sd_gid))
1604 #define set_sd_v2_gid(sdp,v)    ((sdp)->sd_gid = cpu_to_le32(v))
1605 #define sd_v2_atime(sdp)        (le32_to_cpu((sdp)->sd_atime))
1606 #define set_sd_v2_atime(sdp,v)  ((sdp)->sd_atime = cpu_to_le32(v))
1607 #define sd_v2_mtime(sdp)        (le32_to_cpu((sdp)->sd_mtime))
1608 #define set_sd_v2_mtime(sdp,v)  ((sdp)->sd_mtime = cpu_to_le32(v))
1609 #define sd_v2_ctime(sdp)        (le32_to_cpu((sdp)->sd_ctime))
1610 #define set_sd_v2_ctime(sdp,v)  ((sdp)->sd_ctime = cpu_to_le32(v))
1611 #define sd_v2_blocks(sdp)       (le32_to_cpu((sdp)->sd_blocks))
1612 #define set_sd_v2_blocks(sdp,v) ((sdp)->sd_blocks = cpu_to_le32(v))
1613 #define sd_v2_rdev(sdp)         (le32_to_cpu((sdp)->u.sd_rdev))
1614 #define set_sd_v2_rdev(sdp,v)   ((sdp)->u.sd_rdev = cpu_to_le32(v))
1615 #define sd_v2_generation(sdp)   (le32_to_cpu((sdp)->u.sd_generation))
1616 #define set_sd_v2_generation(sdp,v) ((sdp)->u.sd_generation = cpu_to_le32(v))
1617 #define sd_v2_attrs(sdp)         (le16_to_cpu((sdp)->sd_attrs))
1618 #define set_sd_v2_attrs(sdp,v)   ((sdp)->sd_attrs = cpu_to_le16(v))
1619 
1620 /***************************************************************************/
1621 /*                      DIRECTORY STRUCTURE                                */
1622 /***************************************************************************/
1623 /*
1624    Picture represents the structure of directory items
1625    ________________________________________________
1626    |  Array of     |   |     |        |       |   |
1627    | directory     |N-1| N-2 | ....   |   1st |0th|
1628    | entry headers |   |     |        |       |   |
1629    |_______________|___|_____|________|_______|___|
1630                     <----   directory entries         ------>
1631 
1632  The first directory item has k_offset component 1. We always store "."
1633  and ".." in one item and never split them into different
1634  items.  This makes, among other things, the code for removing
1635  directories simpler. */
1636 #define SD_OFFSET  0
1637 #define SD_UNIQUENESS 0
1638 #define DOT_OFFSET 1
1639 #define DOT_DOT_OFFSET 2
1640 #define DIRENTRY_UNIQUENESS 500
1641 
1642 /* */
1643 #define FIRST_ITEM_OFFSET 1
1644 
1645 /*
1646    Q: How do we get the key of the object an entry points to from the entry?
1647 
1648    A: Each directory entry has a header. The header's deh_dir_id and deh_objectid
1649       fields form the key of the object the entry points to. */
1650 
1651 /* NOT IMPLEMENTED:
1652    Directory will someday contain stat data of object */
1653 
1654 struct reiserfs_de_head {
1655 	__le32 deh_offset;	/* third component of the directory entry key */
1656 	__le32 deh_dir_id;	/* objectid of the parent directory of the object, that is referenced
1657 				   by directory entry */
1658 	__le32 deh_objectid;	/* objectid of the object, that is referenced by directory entry */
1659 	__le16 deh_location;	/* offset of name in the whole item */
1660 	__le16 deh_state;	/* whether 1) entry contains stat data (for future), and 2) whether
1661 				   entry is hidden (unlinked) */
1662 } __attribute__ ((__packed__));
1663 #define DEH_SIZE                  sizeof(struct reiserfs_de_head)
1664 #define deh_offset(p_deh)         (le32_to_cpu((p_deh)->deh_offset))
1665 #define deh_dir_id(p_deh)         (le32_to_cpu((p_deh)->deh_dir_id))
1666 #define deh_objectid(p_deh)       (le32_to_cpu((p_deh)->deh_objectid))
1667 #define deh_location(p_deh)       (le16_to_cpu((p_deh)->deh_location))
1668 #define deh_state(p_deh)          (le16_to_cpu((p_deh)->deh_state))
1669 
1670 #define put_deh_offset(p_deh,v)   ((p_deh)->deh_offset = cpu_to_le32((v)))
1671 #define put_deh_dir_id(p_deh,v)   ((p_deh)->deh_dir_id = cpu_to_le32((v)))
1672 #define put_deh_objectid(p_deh,v) ((p_deh)->deh_objectid = cpu_to_le32((v)))
1673 #define put_deh_location(p_deh,v) ((p_deh)->deh_location = cpu_to_le16((v)))
1674 #define put_deh_state(p_deh,v)    ((p_deh)->deh_state = cpu_to_le16((v)))
1675 
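/*
 * Illustrative sketch (editor's addition): as the Q/A comment above says,
 * the key of the object an entry points to is (deh_dir_id, deh_objectid),
 * read here in cpu order.  The function name is hypothetical.
 */
static inline void example_deh_object_key(const struct reiserfs_de_head *deh,
					  __u32 *dir_id, __u32 *objectid)
{
	*dir_id = deh_dir_id(deh);
	*objectid = deh_objectid(deh);
}
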
1676 /* empty directory contains two entries "." and ".." and their headers */
1677 #define EMPTY_DIR_SIZE \
1678 (DEH_SIZE * 2 + ROUND_UP (strlen (".")) + ROUND_UP (strlen ("..")))
1679 
1680 /* old format directories have this size when empty */
1681 #define EMPTY_DIR_SIZE_V1 (DEH_SIZE * 2 + 3)
1682 
1683 #define DEH_Statdata 0		/* not used now */
1684 #define DEH_Visible 2
1685 
1686 /* 64 bit systems (and the S/390) need to be aligned explicitly -jdm */
1687 #if BITS_PER_LONG == 64 || defined(__s390__) || defined(__hppa__)
1688 #   define ADDR_UNALIGNED_BITS  (3)
1689 #endif
1690 
1691 /* These are only used to manipulate deh_state.
1692  * Because of this, we'll use the little-endian bit helpers
1693  * (__test_and_set_bit_le and friends) */
1694 #ifdef ADDR_UNALIGNED_BITS
1695 
1696 #   define aligned_address(addr)           ((void *)((long)(addr) & ~((1UL << ADDR_UNALIGNED_BITS) - 1)))
1697 #   define unaligned_offset(addr)          (((int)((long)(addr) & ((1 << ADDR_UNALIGNED_BITS) - 1))) << 3)
1698 
1699 #   define set_bit_unaligned(nr, addr)	\
1700 	__test_and_set_bit_le((nr) + unaligned_offset(addr), aligned_address(addr))
1701 #   define clear_bit_unaligned(nr, addr)	\
1702 	__test_and_clear_bit_le((nr) + unaligned_offset(addr), aligned_address(addr))
1703 #   define test_bit_unaligned(nr, addr)	\
1704 	test_bit_le((nr) + unaligned_offset(addr), aligned_address(addr))
1705 
1706 #else
1707 
1708 #   define set_bit_unaligned(nr, addr)	__test_and_set_bit_le(nr, addr)
1709 #   define clear_bit_unaligned(nr, addr)	__test_and_clear_bit_le(nr, addr)
1710 #   define test_bit_unaligned(nr, addr)	test_bit_le(nr, addr)
1711 
1712 #endif
1713 
1714 #define mark_de_with_sd(deh)        set_bit_unaligned (DEH_Statdata, &((deh)->deh_state))
1715 #define mark_de_without_sd(deh)     clear_bit_unaligned (DEH_Statdata, &((deh)->deh_state))
1716 #define mark_de_visible(deh)	    set_bit_unaligned (DEH_Visible, &((deh)->deh_state))
1717 #define mark_de_hidden(deh)	    clear_bit_unaligned (DEH_Visible, &((deh)->deh_state))
1718 
1719 #define de_with_sd(deh)		    test_bit_unaligned (DEH_Statdata, &((deh)->deh_state))
1720 #define de_visible(deh)	    	    test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
1721 #define de_hidden(deh)	    	    !test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
1722 
1723 extern void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid,
1724 				   __le32 par_dirid, __le32 par_objid);
1725 extern void make_empty_dir_item(char *body, __le32 dirid, __le32 objid,
1726 				__le32 par_dirid, __le32 par_objid);
1727 
1728 /* array of the entry headers */
1729  /* get item body */
1730 #define B_I_PITEM(bh,ih) ( (bh)->b_data + ih_location(ih) )
1731 #define B_I_DEH(bh,ih) ((struct reiserfs_de_head *)(B_I_PITEM(bh,ih)))
1732 
1733 /* length of a directory entry in a directory item. This helper
1734    calculates the length of the i-th directory entry using the entry
1735    locations from the directory entry headers. When it calculates the
1736    length of the 0th entry, it uses the length of the whole item in
1737    place of the entry location of the non-existent following entry.
1738    See the picture above. */
1739 /*
1740 #define I_DEH_N_ENTRY_LENGTH(ih,deh,i) \
1741 ((i) ? (deh_location((deh)-1) - deh_location((deh))) : (ih_item_len((ih)) - deh_location((deh))))
1742 */
1743 static inline int entry_length(const struct buffer_head *bh,
1744 			       const struct item_head *ih, int pos_in_item)
1745 {
1746 	struct reiserfs_de_head *deh;
1747 
1748 	deh = B_I_DEH(bh, ih) + pos_in_item;
1749 	if (pos_in_item)
1750 		return deh_location(deh - 1) - deh_location(deh);
1751 
1752 	return ih_item_len(ih) - deh_location(deh);
1753 }
1754 
1755 /* number of entries in the directory item, depends on ENTRY_COUNT being at the start of directory dynamic data. */
1756 #define I_ENTRY_COUNT(ih) (ih_entry_count((ih)))
1757 
1758 /* name by bh, ih and entry_num */
1759 #define B_I_E_NAME(bh,ih,entry_num) ((char *)(bh->b_data + ih_location(ih) + deh_location(B_I_DEH(bh,ih)+(entry_num))))
1760 
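/*
 * Illustrative sketch (editor's addition): walking the entry headers of a
 * directory item with the macros above and skipping hidden (unlinked)
 * entries.  The function name is hypothetical.
 */
static inline int example_count_visible_entries(const struct buffer_head *bh,
						const struct item_head *ih)
{
	struct reiserfs_de_head *deh = B_I_DEH(bh, ih);
	int i, nr = 0;

	for (i = 0; i < I_ENTRY_COUNT(ih); i++)
		if (de_visible(deh + i))
			nr++;
	return nr;
}
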
1761 // two entries per block (at least)
1762 #define REISERFS_MAX_NAME(block_size) 255
1763 
1764 /* this structure is used for operations on directory entries. It is
1765    not a disk structure. */
1766 /* When reiserfs_find_entry or search_by_entry_key find a directory
1767    entry, they return a filled reiserfs_dir_entry structure */
1768 struct reiserfs_dir_entry {
1769 	struct buffer_head *de_bh;
1770 	int de_item_num;
1771 	struct item_head *de_ih;
1772 	int de_entry_num;
1773 	struct reiserfs_de_head *de_deh;
1774 	int de_entrylen;
1775 	int de_namelen;
1776 	char *de_name;
1777 	unsigned long *de_gen_number_bit_string;
1778 
1779 	__u32 de_dir_id;
1780 	__u32 de_objectid;
1781 
1782 	struct cpu_key de_entry_key;
1783 };
1784 
1785 /* these defines are useful when a particular member of a reiserfs_dir_entry is needed */
1786 
1787 /* pointer to file name, stored in entry */
1788 #define B_I_DEH_ENTRY_FILE_NAME(bh,ih,deh) (B_I_PITEM (bh, ih) + deh_location(deh))
1789 
1790 /* length of name */
1791 #define I_DEH_N_ENTRY_FILE_NAME_LENGTH(ih,deh,entry_num) \
1792 (I_DEH_N_ENTRY_LENGTH (ih, deh, entry_num) - (de_with_sd (deh) ? SD_SIZE : 0))
1793 
1794 /* hash value occupies bits from 7 up to 30 */
1795 #define GET_HASH_VALUE(offset) ((offset) & 0x7fffff80LL)
1796 /* generation number occupies 7 bits starting from 0 up to 6 */
1797 #define GET_GENERATION_NUMBER(offset) ((offset) & 0x7fLL)
1798 #define MAX_GENERATION_NUMBER  127
1799 
1800 #define SET_GENERATION_NUMBER(offset,gen_number) (GET_HASH_VALUE(offset)|(gen_number))
1801 
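/*
 * Illustrative sketch (editor's addition): a directory entry offset packs
 * the hash value and a 7 bit generation number, so a hash collision is
 * handled by bumping the generation while keeping the hash.  The function
 * name and the 0 return value for "no free generation" are assumptions.
 */
static inline loff_t example_next_same_hash_offset(loff_t offset)
{
	int gen = GET_GENERATION_NUMBER(offset);

	if (gen >= MAX_GENERATION_NUMBER)
		return 0;
	return SET_GENERATION_NUMBER(offset, gen + 1);
}
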
1802 /*
1803  * Picture represents an internal node of the reiserfs tree
1804  *  ______________________________________________________
1805  * |      |  Array of     |  Array of         |  Free     |
1806  * |block |    keys       |  pointers         | space     |
1807  * | head |      N        |      N+1          |           |
1808  * |______|_______________|___________________|___________|
1809  */
1810 
1811 /***************************************************************************/
1812 /*                      DISK CHILD                                         */
1813 /***************************************************************************/
1814 /* Disk child pointer: The pointer from an internal node of the tree
1815    to a node that is on disk. */
1816 struct disk_child {
1817 	__le32 dc_block_number;	/* Disk child's block number. */
1818 	__le16 dc_size;		/* Disk child's used space.   */
1819 	__le16 dc_reserved;
1820 };
1821 
1822 #define DC_SIZE (sizeof(struct disk_child))
1823 #define dc_block_number(dc_p)	(le32_to_cpu((dc_p)->dc_block_number))
1824 #define dc_size(dc_p)		(le16_to_cpu((dc_p)->dc_size))
1825 #define put_dc_block_number(dc_p, val)   do { (dc_p)->dc_block_number = cpu_to_le32(val); } while(0)
1826 #define put_dc_size(dc_p, val)   do { (dc_p)->dc_size = cpu_to_le16(val); } while(0)
1827 
1828 /* Get disk child by buffer header and position in the tree node. */
1829 #define B_N_CHILD(bh, n_pos)  ((struct disk_child *)\
1830 ((bh)->b_data + BLKH_SIZE + B_NR_ITEMS(bh) * KEY_SIZE + DC_SIZE * (n_pos)))
1831 
1832 /* Get disk child number by buffer header and position in the tree node. */
1833 #define B_N_CHILD_NUM(bh, n_pos) (dc_block_number(B_N_CHILD(bh, n_pos)))
1834 #define PUT_B_N_CHILD_NUM(bh, n_pos, val) \
1835 				(put_dc_block_number(B_N_CHILD(bh, n_pos), val))
1836 
1837  /* maximal value of field child_size in structure disk_child */
1838  /* child size is the combined size of all items and their headers */
1839 #define MAX_CHILD_SIZE(bh) ((int)( (bh)->b_size - BLKH_SIZE ))
1840 
1841 /* amount of used space in buffer (not including block head) */
1842 #define B_CHILD_SIZE(cur) (MAX_CHILD_SIZE(cur)-(B_FREE_SPACE(cur)))
1843 
1844 /* max and min number of keys in internal node */
1845 #define MAX_NR_KEY(bh) ( (MAX_CHILD_SIZE(bh)-DC_SIZE)/(KEY_SIZE+DC_SIZE) )
1846 #define MIN_NR_KEY(bh)    (MAX_NR_KEY(bh)/2)
1847 
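/*
 * Illustrative sketch (editor's addition): an internal node with N keys has
 * N + 1 disk children; this fetches the block number of one of them in cpu
 * order.  The function name is hypothetical.
 */
static inline __u32 example_child_blocknr(const struct buffer_head *bh, int pos)
{
	if (pos < 0 || pos > B_NR_ITEMS(bh))
		return 0;
	return B_N_CHILD_NUM(bh, pos);
}
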
1848 /***************************************************************************/
1849 /*                      PATH STRUCTURES AND DEFINES                        */
1850 /***************************************************************************/
1851 
1852 /* Search_by_key fills up the path from the root to the leaf as it descends the tree looking for the
1853    key.  It uses reiserfs_bread to try to find buffers in the cache given their block number.  If it
1854    does not find them in the cache it reads them from disk.  For each node search_by_key finds using
1855    reiserfs_bread it then uses bin_search to look through that node.  bin_search will find the
1856    position of the block_number of the next node if it is looking through an internal node.  If it
1857    is looking through a leaf node bin_search will find the position of the item which has key either
1858    equal to given key, or which is the maximal key less than the given key. */
1859 
1860 struct path_element {
1861 	struct buffer_head *pe_buffer;	/* Pointer to the buffer at the path in the tree. */
1862 	int pe_position;	/* Position in the tree node which is placed in the */
1863 	/* buffer above.                                  */
1864 };
1865 
1866 #define MAX_HEIGHT 5		/* maximal height of a tree. don't change this without changing JOURNAL_PER_BALANCE_CNT */
1867 #define EXTENDED_MAX_HEIGHT         7	/* Must equal MAX_HEIGHT + FIRST_PATH_ELEMENT_OFFSET */
1868 #define FIRST_PATH_ELEMENT_OFFSET   2	/* Must be equal to at least 2. */
1869 
1870 #define ILLEGAL_PATH_ELEMENT_OFFSET 1	/* Must be equal to FIRST_PATH_ELEMENT_OFFSET - 1 */
1871 #define MAX_FEB_SIZE 6		/* this MUST be MAX_HEIGHT + 1. See about FEB below */
1872 
1873 /* We need to keep track of who the ancestors of nodes are.  When we
1874    perform a search we record which nodes were visited while
1875    descending the tree looking for the node we searched for. This list
1876    of nodes is called the path.  This information is used while
1877    performing balancing.  Note that this path information may become
1878    invalid, and this means we must check it when using it to see if it
1879    is still valid. You'll need to read search_by_key and the comments
1880    in it, especially about decrement_counters_in_path(), to understand
1881    this structure.
1882 
1883 Paths make the code so much harder to work with and debug.... An
1884 enormous number of bugs are due to them, and trying to write or modify
1885 code that uses them just makes my head hurt.  They are based on an
1886 excessive effort to avoid disturbing the precious VFS code.:-( The
1887 gods only know how we are going to SMP the code that uses them.
1888 znodes are the way! */
1889 
1890 #define PATH_READA	0x1	/* do read ahead */
1891 #define PATH_READA_BACK 0x2	/* read backwards */
1892 
1893 struct treepath {
1894 	int path_length;	/* Length of the path_elements array below. */
1895 	int reada;
1896 	struct path_element path_elements[EXTENDED_MAX_HEIGHT];	/* Array of the path elements.  */
1897 	int pos_in_item;
1898 };
1899 
1900 #define pos_in_item(path) ((path)->pos_in_item)
1901 
1902 #define INITIALIZE_PATH(var) \
1903 struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
1904 
1905 /* Get path element by path and path position. */
1906 #define PATH_OFFSET_PELEMENT(path, n_offset)  ((path)->path_elements + (n_offset))
1907 
1908 /* Get buffer header at the path by path and path position. */
1909 #define PATH_OFFSET_PBUFFER(path, n_offset)   (PATH_OFFSET_PELEMENT(path, n_offset)->pe_buffer)
1910 
1911 /* Get position in the element at the path by path and path position. */
1912 #define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position)
1913 
1914 #define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length))
1915 				/* you know, to the person who didn't
1916 				   write this the macro name does not
1917 				   at first suggest what it does.
1918 				   Maybe POSITION_FROM_PATH_END? Or
1919 				   maybe we should just focus on
1920 				   dumping paths... -Hans */
1921 #define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length))
1922 
1923 #define PATH_PITEM_HEAD(path)    B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path))
1924 
1925 /* in do_balance leaf has h == 0 in contrast with path structure,
1926    where root has level == 0. That is why we need these defines */
1927 #define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h))	/* tb->S[h] */
1928 #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1)	/* tb->F[h] or tb->S[0]->b_parent */
1929 #define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h))
1930 #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1)	/* tb->S[h]->b_item_order */
1931 
1932 #define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h))
1933 
1934 #define get_last_bh(path) PATH_PLAST_BUFFER(path)
1935 #define get_ih(path) PATH_PITEM_HEAD(path)
1936 #define get_item_pos(path) PATH_LAST_POSITION(path)
1937 #define get_item(path) ((void *)B_N_PITEM(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION (path)))
1938 #define item_moved(ih,path) comp_items(ih, path)
1939 #define path_changed(ih,path) comp_items (ih, path)
1940 
1941 /***************************************************************************/
1942 /*                       MISC                                              */
1943 /***************************************************************************/
1944 
1945 /* Size of pointer to the unformatted node. */
1946 #define UNFM_P_SIZE (sizeof(unp_t))
1947 #define UNFM_P_SHIFT 2
1948 
1949 // in the in-core inode the key is stored in le form
1950 #define INODE_PKEY(inode) ((struct reiserfs_key *)(REISERFS_I(inode)->i_key))
1951 
1952 #define MAX_UL_INT 0xffffffff
1953 #define MAX_INT    0x7ffffff
1954 #define MAX_US_INT 0xffff
1955 
1956 // reiserfs version 2 has a max offset of 60 bits. Version 1 has a 32 bit offset
1957 #define U32_MAX (~(__u32)0)
1958 
1959 static inline loff_t max_reiserfs_offset(struct inode *inode)
1960 {
1961 	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5)
1962 		return (loff_t) U32_MAX;
1963 
1964 	return (loff_t) ((~(__u64) 0) >> 4);
1965 }
1966 
1967 /*#define MAX_KEY_UNIQUENESS	MAX_UL_INT*/
1968 #define MAX_KEY_OBJECTID	MAX_UL_INT
1969 
1970 #define MAX_B_NUM  MAX_UL_INT
1971 #define MAX_FC_NUM MAX_US_INT
1972 
1973 /* the purpose is to detect overflow of an unsigned short */
1974 #define REISERFS_LINK_MAX (MAX_US_INT - 1000)
1975 
1976 /* The following defines are used in reiserfs_insert_item and reiserfs_append_item  */
1977 #define REISERFS_KERNEL_MEM		0	/* reiserfs kernel memory mode  */
1978 #define REISERFS_USER_MEM		1	/* reiserfs user memory mode            */
1979 
1980 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
1981 #define get_generation(s) atomic_read (&fs_generation(s))
1982 #define FILESYSTEM_CHANGED_TB(tb)  (get_generation((tb)->tb_sb) != (tb)->fs_gen)
1983 #define __fs_changed(gen,s) (gen != get_generation (s))
1984 #define fs_changed(gen,s)		\
1985 ({					\
1986 	reiserfs_cond_resched(s);	\
1987 	__fs_changed(gen, s);		\
1988 })
1989 
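/*
 * Editor's note (illustrative, not from the original header): the usual
 * idiom is to sample the generation counter before any step that can
 * schedule and to re-validate afterwards, roughly:
 *
 *	int gen = get_generation(s);
 *	... blocking work (I/O, memory allocation) ...
 *	if (fs_changed(gen, s))
 *		... the tree may have been rebalanced; any saved path or
 *		    item position must be re-checked or re-searched ...
 */
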
1990 /***************************************************************************/
1991 /*                  FIXATE NODES                                           */
1992 /***************************************************************************/
1993 
1994 #define VI_TYPE_LEFT_MERGEABLE 1
1995 #define VI_TYPE_RIGHT_MERGEABLE 2
1996 
1997 /* To make any changes in the tree we always first find node, that
1998    contains item to be changed/deleted or place to insert a new
1999    item. We call this node S. To do balancing we need to decide what
2000    we will shift to left/right neighbor, or to a new node, where new
2001    item will be etc. To make this analysis simpler we build virtual
2002    node. Virtual node is an array of items, that will replace items of
2003    node S. (For instance if we are going to delete an item, virtual
2004    node does not contain it). Virtual node keeps information about
2005    item sizes and types, mergeability of first and last items, sizes
2006    of all entries in directory item. We use this array of items when
2007    calculating what we can shift to neighbors and how many nodes we
2008    have to have if we do no shifting at all, if we shift to the left/right
2009    neighbor or to both. */
2010 struct virtual_item {
2011 	int vi_index;		// index in the array of item operations
2012 	unsigned short vi_type;	// left/right mergeability
2013 	unsigned short vi_item_len;	/* length of item that it will have after balancing */
2014 	struct item_head *vi_ih;
2015 	const char *vi_item;	// body of item (old or new)
2016 	const void *vi_new_data;	// NULL except in paste mode
2017 	void *vi_uarea;		// item specific area
2018 };
2019 
2020 struct virtual_node {
2021 	char *vn_free_ptr;	/* this is a pointer to the free space in the buffer */
2022 	unsigned short vn_nr_item;	/* number of items in virtual node */
2023 	short vn_size;		/* size of node , that node would have if it has unlimited size and no balancing is performed */
2024 	short vn_mode;		/* mode of balancing (paste, insert, delete, cut) */
2025 	short vn_affected_item_num;
2026 	short vn_pos_in_item;
2027 	struct item_head *vn_ins_ih;	/* item header of inserted item, 0 for other modes */
2028 	const void *vn_data;
2029 	struct virtual_item *vn_vi;	/* array of items (including a new one, excluding item to be deleted) */
2030 };
2031 
2032 /* used by directory items when creating virtual nodes */
2033 struct direntry_uarea {
2034 	int flags;
2035 	__u16 entry_count;
2036 	__u16 entry_sizes[1];
2037 } __attribute__ ((__packed__));
2038 
2039 /***************************************************************************/
2040 /*                  TREE BALANCE                                           */
2041 /***************************************************************************/
2042 
2043 /* This temporary structure is used in tree balance algorithms, and
2044    constructed as we go to the extent that its various parts are
2045    needed.  It contains arrays of nodes that can potentially be
2046    involved in the balancing of node S, and parameters that define how
2047    each of the nodes must be balanced.  Note that in these algorithms
2048    for balancing the worst case is to need to balance the current node
2049    S and the left and right neighbors and all of their parents plus
2050    create a new node.  We implement S1 balancing for the leaf nodes
2051    and S0 balancing for the internal nodes (S1 and S0 are defined in
2052    our papers.)*/
2053 
2054 #define MAX_FREE_BLOCK 7	/* size of the array of buffers to free at end of do_balance */
2055 
2056 /* maximum number of FEB blocknrs on a single level */
2057 #define MAX_AMOUNT_NEEDED 2
2058 
2059 /* someday somebody will prefix every field in this struct with tb_ */
2060 struct tree_balance {
2061 	int tb_mode;
2062 	int need_balance_dirty;
2063 	struct super_block *tb_sb;
2064 	struct reiserfs_transaction_handle *transaction_handle;
2065 	struct treepath *tb_path;
2066 	struct buffer_head *L[MAX_HEIGHT];	/* array of left neighbors of nodes in the path */
2067 	struct buffer_head *R[MAX_HEIGHT];	/* array of right neighbors of nodes in the path */
2068 	struct buffer_head *FL[MAX_HEIGHT];	/* array of fathers of the left  neighbors      */
2069 	struct buffer_head *FR[MAX_HEIGHT];	/* array of fathers of the right neighbors      */
2070 	struct buffer_head *CFL[MAX_HEIGHT];	/* array of common parents of center node and its left neighbor  */
2071 	struct buffer_head *CFR[MAX_HEIGHT];	/* array of common parents of center node and its right neighbor */
2072 
2073 	struct buffer_head *FEB[MAX_FEB_SIZE];	/* array of empty buffers. Number of buffers in array equals
2074 						   cur_blknum. */
2075 	struct buffer_head *used[MAX_FEB_SIZE];
2076 	struct buffer_head *thrown[MAX_FEB_SIZE];
2077 	int lnum[MAX_HEIGHT];	/* array of number of items which must be
2078 				   shifted to the left in order to balance the
2079 				   current node; for leaves includes item that
2080 				   will be partially shifted; for internal
2081 				   nodes, it is the number of child pointers
2082 				   rather than items. It includes the new item
2083 				   being created. The code sometimes subtracts
2084 				   one to get the number of wholly shifted
2085 				   items for other purposes. */
2086 	int rnum[MAX_HEIGHT];	/* substitute right for left in comment above */
2087 	int lkey[MAX_HEIGHT];	/* array indexed by height h mapping the key delimiting L[h] and
2088 				   S[h] to its item number within the node CFL[h] */
2089 	int rkey[MAX_HEIGHT];	/* substitute r for l in comment above */
2090 	int insert_size[MAX_HEIGHT];	/* the number of bytes by we are trying to add or remove from
2091 					   S[h]. A negative value means removing.  */
2092 	int blknum[MAX_HEIGHT];	/* number of nodes that will replace node S[h] after
2093 				   balancing on the level h of the tree.  If 0 then S is
2094 				   being deleted, if 1 then S is remaining and no new nodes
2095 				   are being created, if 2 or 3 then 1 or 2 new nodes is
2096 				   being created */
2097 
2098 	/* fields that are used only for balancing leaves of the tree */
2099 	int cur_blknum;		/* number of empty blocks having been already allocated                 */
2100 	int s0num;		/* number of items that fall into left most  node when S[0] splits     */
2101 	int s1num;		/* number of items that fall into first  new node when S[0] splits     */
2102 	int s2num;		/* number of items that fall into second new node when S[0] splits     */
2103 	int lbytes;		/* number of bytes which can flow to the left neighbor from the
2104 				   leftmost liquid item that cannot be shifted from S[0] entirely;
2105 				   if -1 then nothing will be partially shifted */
2106 	int rbytes;		/* number of bytes which will flow to the right neighbor from the
2107 				   rightmost liquid item that cannot be shifted from S[0] entirely;
2108 				   if -1 then nothing will be partially shifted */
2109 	int s1bytes;		/* number of bytes which flow to the first  new node when S[0] splits   */
2110 	/* note: if S[0] splits into 3 nodes, then items do not need to be cut  */
2111 	int s2bytes;
2112 	struct buffer_head *buf_to_free[MAX_FREE_BLOCK];	/* buffers which are to be freed after do_balance finishes by unfix_nodes */
2113 	char *vn_buf;		/* kmalloced memory. Used to create
2114 				   virtual node and keep map of
2115 				   dirtied bitmap blocks */
2116 	int vn_buf_size;	/* size of the vn_buf */
2117 	struct virtual_node *tb_vn;	/* VN starts after bitmap of bitmap blocks */
2118 
2119 	int fs_gen;		/* saved value of `reiserfs_generation' counter
2120 				   see FILESYSTEM_CHANGED() macro in reiserfs_fs.h */
2121 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
2122 	struct in_core_key key;	/* key pointer, to pass to block allocator or
2123 				   another low-level subsystem */
2124 #endif
2125 };
2126 
2127 /* These are modes of balancing */
2128 
2129 /* When inserting an item. */
2130 #define M_INSERT	'i'
2131 /* When inserting into (directories only) or appending onto an already
2132    existent item. */
2133 #define M_PASTE		'p'
2134 /* When deleting an item. */
2135 #define M_DELETE	'd'
2136 /* When truncating an item or removing an entry from a (directory) item. */
2137 #define M_CUT 		'c'
2138 
2139 /* used when balancing on the leaf level is skipped (in reiserfsck) */
2140 #define M_INTERNAL	'n'
2141 
2142 /* When further balancing is not needed, then do_balance does not need
2143    to be called. */
2144 #define M_SKIP_BALANCING 		's'
2145 #define M_CONVERT	'v'
2146 
2147 /* modes of leaf_move_items */
2148 #define LEAF_FROM_S_TO_L 0
2149 #define LEAF_FROM_S_TO_R 1
2150 #define LEAF_FROM_R_TO_L 2
2151 #define LEAF_FROM_L_TO_R 3
2152 #define LEAF_FROM_S_TO_SNEW 4
2153 
2154 #define FIRST_TO_LAST 0
2155 #define LAST_TO_FIRST 1
2156 
2157 /* used in do_balance for passing parent-of-node information that has
2158    been obtained from the tb struct */
2159 struct buffer_info {
2160 	struct tree_balance *tb;
2161 	struct buffer_head *bi_bh;
2162 	struct buffer_head *bi_parent;
2163 	int bi_position;
2164 };
2165 
2166 static inline struct super_block *sb_from_tb(struct tree_balance *tb)
2167 {
2168 	return tb ? tb->tb_sb : NULL;
2169 }
2170 
2171 static inline struct super_block *sb_from_bi(struct buffer_info *bi)
2172 {
2173 	return bi ? sb_from_tb(bi->tb) : NULL;
2174 }
2175 
2176 /* there are 4 types of items: stat data, directory item, indirect, direct.
2177 +--------------------+------------+---------------------+------------+
2178 |                    |  k_offset  |    k_uniqueness     | mergeable? |
2179 +--------------------+------------+---------------------+------------+
2180 |     stat data      |     0      |          0          |    no      |
2181 +--------------------+------------+---------------------+------------+
2182 | 1st directory item | DOT_OFFSET | DIRENTRY_UNIQUENESS |    no      |
2183 | non 1st directory  | hash value |                     |    yes     |
2184 |       item         |            |                     |            |
2185 +--------------------+------------+---------------------+------------+
2186 | indirect item      | offset + 1 | TYPE_INDIRECT       | if this is not the first indirect item of the object
2187 +--------------------+------------+---------------------+------------+
2188 | direct item        | offset + 1 | TYPE_DIRECT         | if this is not the first direct item of the object
2189 +--------------------+------------+---------------------+------------+
2190 */
2191 
2192 struct item_operations {
2193 	int (*bytes_number) (struct item_head * ih, int block_size);
2194 	void (*decrement_key) (struct cpu_key *);
2195 	int (*is_left_mergeable) (struct reiserfs_key * ih,
2196 				  unsigned long bsize);
2197 	void (*print_item) (struct item_head *, char *item);
2198 	void (*check_item) (struct item_head *, char *item);
2199 
2200 	int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
2201 			  int is_affected, int insert_size);
2202 	int (*check_left) (struct virtual_item * vi, int free,
2203 			   int start_skip, int end_skip);
2204 	int (*check_right) (struct virtual_item * vi, int free);
2205 	int (*part_size) (struct virtual_item * vi, int from, int to);
2206 	int (*unit_num) (struct virtual_item * vi);
2207 	void (*print_vi) (struct virtual_item * vi);
2208 };
2209 
2210 extern struct item_operations *item_ops[TYPE_ANY + 1];
2211 
2212 #define op_bytes_number(ih,bsize)                    item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
2213 #define op_is_left_mergeable(key,bsize)              item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
2214 #define op_print_item(ih,item)                       item_ops[le_ih_k_type (ih)]->print_item (ih, item)
2215 #define op_check_item(ih,item)                       item_ops[le_ih_k_type (ih)]->check_item (ih, item)
2216 #define op_create_vi(vn,vi,is_affected,insert_size)  item_ops[le_ih_k_type ((vi)->vi_ih)]->create_vi (vn,vi,is_affected,insert_size)
2217 #define op_check_left(vi,free,start_skip,end_skip) item_ops[(vi)->vi_index]->check_left (vi, free, start_skip, end_skip)
2218 #define op_check_right(vi,free)                      item_ops[(vi)->vi_index]->check_right (vi, free)
2219 #define op_part_size(vi,from,to)                     item_ops[(vi)->vi_index]->part_size (vi, from, to)
2220 #define op_unit_num(vi)				     item_ops[(vi)->vi_index]->unit_num (vi)
2221 #define op_print_vi(vi)                              item_ops[(vi)->vi_index]->print_vi (vi)
2222 
2223 #define COMP_SHORT_KEYS comp_short_keys
2224 
2225 /* number of blocks pointed to by the indirect item */
2226 #define I_UNFM_NUM(ih)	(ih_item_len(ih) / UNFM_P_SIZE)
2227 
2228 /* the used space within the unformatted node corresponding to pos within the item pointed to by ih */
2229 #define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size))
2230 
2231 /* number of bytes contained by the direct item or the unformatted nodes the indirect item points to */
2232 
2233 /* get the item header */
2234 #define B_N_PITEM_HEAD(bh,item_num) ( (struct item_head * )((bh)->b_data + BLKH_SIZE) + (item_num) )
2235 
2236 /* get key */
2237 #define B_N_PDELIM_KEY(bh,item_num) ( (struct reiserfs_key * )((bh)->b_data + BLKH_SIZE) + (item_num) )
2238 
2239 /* get the key */
2240 #define B_N_PKEY(bh,item_num) ( &(B_N_PITEM_HEAD(bh,item_num)->ih_key) )
2241 
2242 /* get item body */
2243 #define B_N_PITEM(bh,item_num) ( (bh)->b_data + ih_location(B_N_PITEM_HEAD((bh),(item_num))))
2244 
2245 /* get the stat data by the buffer header and the item order */
2246 #define B_N_STAT_DATA(bh,nr) \
2247 ( (struct stat_data *)((bh)->b_data + ih_location(B_N_PITEM_HEAD((bh),(nr))) ) )
2248 
2249     /* following defines use reiserfs buffer header and item header */
2250 
2251 /* get stat-data */
2252 #define B_I_STAT_DATA(bh, ih) ( (struct stat_data * )((bh)->b_data + ih_location(ih)) )
2253 
2254 // this is 3976 for size==4096
2255 #define MAX_DIRECT_ITEM_LEN(size) ((size) - BLKH_SIZE - 2*IH_SIZE - SD_SIZE - UNFM_P_SIZE)
2256 
2257 /* indirect items consist of entries which contain blocknrs, pos
2258    indicates which entry, and B_I_POS_UNFM_POINTER resolves to the
2259    blocknr contained by the entry pos points to */
2260 #define B_I_POS_UNFM_POINTER(bh,ih,pos) le32_to_cpu(*(((unp_t *)B_I_PITEM(bh,ih)) + (pos)))
2261 #define PUT_B_I_POS_UNFM_POINTER(bh,ih,pos, val) do {*(((unp_t *)B_I_PITEM(bh,ih)) + (pos)) = cpu_to_le32(val); } while (0)
2262 
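/*
 * Illustrative sketch (editor's addition): an indirect item is an array of
 * little-endian block numbers; I_UNFM_NUM gives its length and
 * B_I_POS_UNFM_POINTER reads one entry in cpu order.  The function name is
 * hypothetical.
 */
static inline __u32 example_unfm_blocknr(const struct buffer_head *bh,
					 const struct item_head *ih, int pos)
{
	if (pos < 0 || pos >= (int)I_UNFM_NUM(ih))
		return 0;
	return B_I_POS_UNFM_POINTER(bh, ih, pos);
}
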
2263 struct reiserfs_iget_args {
2264 	__u32 objectid;
2265 	__u32 dirid;
2266 };
2267 
2268 /***************************************************************************/
2269 /*                    FUNCTION DECLARATIONS                                */
2270 /***************************************************************************/
2271 
2272 #define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
2273 
2274 #define journal_trans_half(blocksize) \
2275 	((blocksize - sizeof (struct reiserfs_journal_desc) + sizeof (__u32) - 12) / sizeof (__u32))
2276 
2277 /* journal.c -- see journal.c for all the comments here */
2278 
2279 /* first block written in a commit.  */
2280 struct reiserfs_journal_desc {
2281 	__le32 j_trans_id;	/* id of commit */
2282 	__le32 j_len;		/* length of commit. len +1 is the commit block */
2283 	__le32 j_mount_id;	/* mount id of this trans */
2284 	__le32 j_realblock[1];	/* real locations for each block */
2285 };
2286 
2287 #define get_desc_trans_id(d)   le32_to_cpu((d)->j_trans_id)
2288 #define get_desc_trans_len(d)  le32_to_cpu((d)->j_len)
2289 #define get_desc_mount_id(d)   le32_to_cpu((d)->j_mount_id)
2290 
2291 #define set_desc_trans_id(d,val)       do { (d)->j_trans_id = cpu_to_le32 (val); } while (0)
2292 #define set_desc_trans_len(d,val)      do { (d)->j_len = cpu_to_le32 (val); } while (0)
2293 #define set_desc_mount_id(d,val)       do { (d)->j_mount_id = cpu_to_le32 (val); } while (0)
2294 
2295 /* last block written in a commit */
2296 struct reiserfs_journal_commit {
2297 	__le32 j_trans_id;	/* must match j_trans_id from the desc block */
2298 	__le32 j_len;		/* ditto */
2299 	__le32 j_realblock[1];	/* real locations for each block */
2300 };
2301 
2302 #define get_commit_trans_id(c) le32_to_cpu((c)->j_trans_id)
2303 #define get_commit_trans_len(c)        le32_to_cpu((c)->j_len)
2304 #define get_commit_mount_id(c) le32_to_cpu((c)->j_mount_id)
2305 
2306 #define set_commit_trans_id(c,val)     do { (c)->j_trans_id = cpu_to_le32 (val); } while (0)
2307 #define set_commit_trans_len(c,val)    do { (c)->j_len = cpu_to_le32 (val); } while (0)
2308 
2309 /* this header block gets written whenever a transaction is considered fully flushed, and is more recent than the
2310 ** last fully flushed transaction.  fully flushed means all the log blocks and all the real blocks are on disk,
2311 ** and this transaction does not need to be replayed.
2312 */
2313 struct reiserfs_journal_header {
2314 	__le32 j_last_flush_trans_id;	/* id of last fully flushed transaction */
2315 	__le32 j_first_unflushed_offset;	/* offset in the log of where to start replay after a crash */
2316 	__le32 j_mount_id;
2317 	/* 12 */ struct journal_params jh_journal;
2318 };
2319 
2320 /* biggest tunable defines are right here */
2321 #define JOURNAL_BLOCK_COUNT 8192	/* number of blocks in the journal */
2322 #define JOURNAL_TRANS_MAX_DEFAULT 1024	/* biggest possible single transaction, don't change for now (8/3/99) */
2323 #define JOURNAL_TRANS_MIN_DEFAULT 256
2324 #define JOURNAL_MAX_BATCH_DEFAULT   900	/* max blocks to batch into one transaction, don't make this any bigger than 900 */
2325 #define JOURNAL_MIN_RATIO 2
2326 #define JOURNAL_MAX_COMMIT_AGE 30
2327 #define JOURNAL_MAX_TRANS_AGE 30
2328 #define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9)
2329 #define JOURNAL_BLOCKS_PER_OBJECT(sb)  (JOURNAL_PER_BALANCE_CNT * 3 + \
2330 					 2 * (REISERFS_QUOTA_INIT_BLOCKS(sb) + \
2331 					      REISERFS_QUOTA_TRANS_BLOCKS(sb)))
2332 
2333 #ifdef CONFIG_QUOTA
2334 #define REISERFS_QUOTA_OPTS ((1 << REISERFS_USRQUOTA) | (1 << REISERFS_GRPQUOTA))
2335 /* We need to update data and inode (atime) */
2336 #define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? 2 : 0)
2337 /* 1 balancing, 1 bitmap, 1 data per write + stat data update */
2338 #define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \
2339 (DQUOT_INIT_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_INIT_REWRITE+1) : 0)
2340 /* same as with INIT */
2341 #define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \
2342 (DQUOT_DEL_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_DEL_REWRITE+1) : 0)
2343 #else
2344 #define REISERFS_QUOTA_TRANS_BLOCKS(s) 0
2345 #define REISERFS_QUOTA_INIT_BLOCKS(s) 0
2346 #define REISERFS_QUOTA_DEL_BLOCKS(s) 0
2347 #endif
2348 
2349 /* both of these can be as low as 1, or as high as you want.  The min is the
2350 ** number of 4k bitmap nodes preallocated on mount. New nodes are allocated
2351 ** as needed, and released when transactions are committed.  On release, if
2352 ** the current number of nodes is > max, the node is freed, otherwise,
2353 ** it is put on a free list for faster use later.
2354 */
2355 #define REISERFS_MIN_BITMAP_NODES 10
2356 #define REISERFS_MAX_BITMAP_NODES 100
2357 
2358 #define JBH_HASH_SHIFT 13	/* these are based on journal hash size of 8192 */
2359 #define JBH_HASH_MASK 8191
2360 
2361 #define _jhashfn(sb,block)	\
2362 	(((unsigned long)sb>>L1_CACHE_SHIFT) ^ \
2363 	 (((block)<<(JBH_HASH_SHIFT - 6)) ^ ((block) >> 13) ^ ((block) << (JBH_HASH_SHIFT - 12))))
2364 #define journal_hash(t,sb,block) ((t)[_jhashfn((sb),(block)) & JBH_HASH_MASK])
2365 
2366 // We need these to make journal.c code more readable
2367 #define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
2368 #define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
2369 #define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
2370 
2371 enum reiserfs_bh_state_bits {
2372 	BH_JDirty = BH_PrivateStart,	/* buffer is in current transaction */
2373 	BH_JDirty_wait,
2374 	BH_JNew,		/* disk block was taken off the free list before
2375 				 * being part of a finished transaction or written
2376 				 * to disk. Can be reused immediately. */
2377 	BH_JPrepared,
2378 	BH_JRestore_dirty,
2379 	BH_JTest,		// debugging only will go away
2380 };
2381 
2382 BUFFER_FNS(JDirty, journaled);
2383 TAS_BUFFER_FNS(JDirty, journaled);
2384 BUFFER_FNS(JDirty_wait, journal_dirty);
2385 TAS_BUFFER_FNS(JDirty_wait, journal_dirty);
2386 BUFFER_FNS(JNew, journal_new);
2387 TAS_BUFFER_FNS(JNew, journal_new);
2388 BUFFER_FNS(JPrepared, journal_prepared);
2389 TAS_BUFFER_FNS(JPrepared, journal_prepared);
2390 BUFFER_FNS(JRestore_dirty, journal_restore_dirty);
2391 TAS_BUFFER_FNS(JRestore_dirty, journal_restore_dirty);
2392 BUFFER_FNS(JTest, journal_test);
2393 TAS_BUFFER_FNS(JTest, journal_test);
2394 
2395 /*
2396 ** transaction handle which is passed around for all journal calls
2397 */
2398 struct reiserfs_transaction_handle {
2399 	struct super_block *t_super;	/* super for this FS when journal_begin was
2400 					   called; saves calls to reiserfs_get_super.
2401 					   Also used by nested transactions to make
2402 					   sure they are nesting on the right FS.
2403 					   _must_ be first in the handle.
2404 					 */
2405 	int t_refcount;
2406 	int t_blocks_logged;	/* number of blocks this writer has logged */
2407 	int t_blocks_allocated;	/* number of blocks this writer allocated */
2408 	unsigned int t_trans_id;	/* sanity check, equals the current trans id */
2409 	void *t_handle_save;	/* save existing current->journal_info */
2410 	unsigned displace_new_blocks:1;	/* if new block allocation occurs, that block
2411 					   should be displaced from others */
2412 	struct list_head t_list;
2413 };
2414 
2415 /* used to keep track of ordered and tail writes, attached to the buffer
2416  * head through b_journal_head.
2417  */
2418 struct reiserfs_jh {
2419 	struct reiserfs_journal_list *jl;
2420 	struct buffer_head *bh;
2421 	struct list_head list;
2422 };
2423 
2424 void reiserfs_free_jh(struct buffer_head *bh);
2425 int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh);
2426 int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh);
2427 int journal_mark_dirty(struct reiserfs_transaction_handle *,
2428 		       struct super_block *, struct buffer_head *bh);
2429 
2430 static inline int reiserfs_file_data_log(struct inode *inode)
2431 {
2432 	if (reiserfs_data_log(inode->i_sb) ||
2433 	    (REISERFS_I(inode)->i_flags & i_data_log))
2434 		return 1;
2435 	return 0;
2436 }
2437 
2438 static inline int reiserfs_transaction_running(struct super_block *s)
2439 {
2440 	struct reiserfs_transaction_handle *th = current->journal_info;
2441 	if (th && th->t_super == s)
2442 		return 1;
2443 	if (th && th->t_super == NULL)
2444 		BUG();
2445 	return 0;
2446 }
2447 
2448 static inline int reiserfs_transaction_free_space(struct reiserfs_transaction_handle *th)
2449 {
2450 	return th->t_blocks_allocated - th->t_blocks_logged;
2451 }
2452 
2453 struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
2454 								    super_block
2455 								    *,
2456 								    int count);
2457 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *);
2458 void reiserfs_vfs_truncate_file(struct inode *inode);
2459 int reiserfs_commit_page(struct inode *inode, struct page *page,
2460 			 unsigned from, unsigned to);
2461 void reiserfs_flush_old_commits(struct super_block *);
2462 int reiserfs_commit_for_inode(struct inode *);
2463 int reiserfs_inode_needs_commit(struct inode *);
2464 void reiserfs_update_inode_transaction(struct inode *);
2465 void reiserfs_wait_on_write_block(struct super_block *s);
2466 void reiserfs_block_writes(struct reiserfs_transaction_handle *th);
2467 void reiserfs_allow_writes(struct super_block *s);
2468 void reiserfs_check_lock_depth(struct super_block *s, char *caller);
2469 int reiserfs_prepare_for_journal(struct super_block *, struct buffer_head *bh,
2470 				 int wait);
2471 void reiserfs_restore_prepared_buffer(struct super_block *,
2472 				      struct buffer_head *bh);
2473 int journal_init(struct super_block *, const char *j_dev_name, int old_format,
2474 		 unsigned int);
2475 int journal_release(struct reiserfs_transaction_handle *, struct super_block *);
2476 int journal_release_error(struct reiserfs_transaction_handle *,
2477 			  struct super_block *);
2478 int journal_end(struct reiserfs_transaction_handle *, struct super_block *,
2479 		unsigned long);
2480 int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *,
2481 		     unsigned long);
2482 int journal_mark_freed(struct reiserfs_transaction_handle *,
2483 		       struct super_block *, b_blocknr_t blocknr);
2484 int journal_transaction_should_end(struct reiserfs_transaction_handle *, int);
2485 int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr,
2486 			 int bit_nr, int searchall, b_blocknr_t *next);
2487 int journal_begin(struct reiserfs_transaction_handle *,
2488 		  struct super_block *sb, unsigned long);
2489 int journal_join_abort(struct reiserfs_transaction_handle *,
2490 		       struct super_block *sb, unsigned long);
2491 void reiserfs_abort_journal(struct super_block *sb, int errno);
2492 void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
2493 int reiserfs_allocate_list_bitmaps(struct super_block *s,
2494 				   struct reiserfs_list_bitmap *, unsigned int);
2495 
2496 void reiserfs_schedule_old_flush(struct super_block *s);
2497 void add_save_link(struct reiserfs_transaction_handle *th,
2498 		   struct inode *inode, int truncate);
2499 int remove_save_link(struct inode *inode, int truncate);
2500 
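/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the usual shape of a small journaled update using the calls declared
 * above.  The function name and the block count passed to journal_begin
 * and journal_end are arbitrary, and error handling is abbreviated.
 */
static inline int example_journaled_update(struct super_block *s,
					   struct buffer_head *bh)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, s, JOURNAL_PER_BALANCE_CNT);
	if (err)
		return err;
	reiserfs_prepare_for_journal(s, bh, 1);
	/* ... modify bh->b_data here ... */
	journal_mark_dirty(&th, s, bh);
	return journal_end(&th, s, JOURNAL_PER_BALANCE_CNT);
}
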
2501 /* objectid.c */
2502 __u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th);
2503 void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
2504 			       __u32 objectid_to_release);
2505 int reiserfs_convert_objectid_map_v1(struct super_block *);
2506 
2507 /* stree.c */
2508 int B_IS_IN_TREE(const struct buffer_head *);
2509 extern void copy_item_head(struct item_head *to,
2510 			   const struct item_head *from);
2511 
2512 // the first key is in le (on-disk) form, the second is in cpu form
2513 extern int comp_short_keys(const struct reiserfs_key *le_key,
2514 			   const struct cpu_key *cpu_key);
2515 extern void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from);
2516 
2517 // both are in le form
2518 extern int comp_le_keys(const struct reiserfs_key *,
2519 			const struct reiserfs_key *);
2520 extern int comp_short_le_keys(const struct reiserfs_key *,
2521 			      const struct reiserfs_key *);
2522 
2523 //
2524 // get key version from on disk key - kludge
2525 //
2526 static inline int le_key_version(const struct reiserfs_key *key)
2527 {
2528 	int type;
2529 
2530 	type = offset_v2_k_type(&(key->u.k_offset_v2));
2531 	if (type != TYPE_DIRECT && type != TYPE_INDIRECT
2532 	    && type != TYPE_DIRENTRY)
2533 		return KEY_FORMAT_3_5;
2534 
2535 	return KEY_FORMAT_3_6;
2536 
2537 }
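
/*
 * Illustrative sketch, not part of the original header: a hypothetical helper
 * that checks an on-disk (little-endian) key against an in-core key and, on a
 * match, converts it to cpu form for further use.
 */
static inline int reiserfs_example_fetch_key(struct cpu_key *to,
					     const struct reiserfs_key *disk_key,
					     const struct cpu_key *of_object)
{
	/* comp_short_keys() compares only the dir_id/object_id part;
	 * note it takes the on-disk key first and the cpu key second */
	if (comp_short_keys(disk_key, of_object) != 0)
		return 0;
	le_key2cpu_key(to, disk_key);
	return 1;
}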
2538 
2539 static inline void copy_key(struct reiserfs_key *to,
2540 			    const struct reiserfs_key *from)
2541 {
2542 	memcpy(to, from, KEY_SIZE);
2543 }
2544 
2545 int comp_items(const struct item_head *stored_ih, const struct treepath *path);
2546 const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
2547 				    const struct super_block *sb);
2548 int search_by_key(struct super_block *, const struct cpu_key *,
2549 		  struct treepath *, int);
2550 #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL)
2551 int search_for_position_by_key(struct super_block *sb,
2552 			       const struct cpu_key *cpu_key,
2553 			       struct treepath *search_path);
2554 extern void decrement_bcount(struct buffer_head *bh);
2555 void decrement_counters_in_path(struct treepath *search_path);
2556 void pathrelse(struct treepath *search_path);
2557 int reiserfs_check_path(struct treepath *p);
2558 void pathrelse_and_restore(struct super_block *s, struct treepath *search_path);
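
/*
 * Illustrative sketch, not part of the original header: a hypothetical lookup
 * showing the usual pattern around search_by_key() -- every search leaves
 * buffer heads referenced in the path, so the caller must pathrelse() it.
 * (Real callers typically do this with the reiserfs write lock held.)
 */
static inline int reiserfs_example_item_exists(struct super_block *s,
					       const struct cpu_key *key)
{
	INITIALIZE_PATH(path);
	int found;

	found = (search_item(s, key, &path) == ITEM_FOUND);
	pathrelse(&path);
	return found;
}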
2559 
2560 int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
2561 			 struct treepath *path,
2562 			 const struct cpu_key *key,
2563 			 struct item_head *ih,
2564 			 struct inode *inode, const char *body);
2565 
2566 int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th,
2567 			     struct treepath *path,
2568 			     const struct cpu_key *key,
2569 			     struct inode *inode,
2570 			     const char *body, int paste_size);
2571 
2572 int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
2573 			   struct treepath *path,
2574 			   struct cpu_key *key,
2575 			   struct inode *inode,
2576 			   struct page *page, loff_t new_file_size);
2577 
2578 int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
2579 			 struct treepath *path,
2580 			 const struct cpu_key *key,
2581 			 struct inode *inode, struct buffer_head *un_bh);
2582 
2583 void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
2584 				struct inode *inode, struct reiserfs_key *key);
2585 int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
2586 			   struct inode *inode);
2587 int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
2588 			 struct inode *inode, struct page *,
2589 			 int update_timestamps);
2590 
2591 #define i_block_size(inode) ((inode)->i_sb->s_blocksize)
2592 #define file_size(inode) ((inode)->i_size)
2593 #define tail_size(inode) (file_size (inode) & (i_block_size (inode) - 1))
2594 
2595 #define tail_has_to_be_packed(inode) (have_large_tails ((inode)->i_sb)?\
2596 !STORE_TAIL_IN_UNFM_S1(file_size (inode), tail_size(inode), inode->i_sb->s_blocksize):have_small_tails ((inode)->i_sb)?!STORE_TAIL_IN_UNFM_S2(file_size (inode), tail_size(inode), inode->i_sb->s_blocksize):0 )
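
/*
 * Illustrative sketch, not part of the original header: tail_size() is just
 * the byte count past the last full block, e.g. a 5000-byte file on a
 * 4096-byte block size has a 5000 & 4095 == 904 byte tail.  The hypothetical
 * wrapper below only shows how the decision macro is meant to be used.
 */
static inline int reiserfs_example_should_pack_tail(struct inode *inode)
{
	/* nothing to pack if the file ends exactly on a block boundary */
	if (tail_size(inode) == 0)
		return 0;
	return tail_has_to_be_packed(inode);
}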
2597 
2598 void padd_item(char *item, int total_length, int length);
2599 
2600 /* inode.c */
2601 /* args for the create parameter of reiserfs_get_block */
2602 #define GET_BLOCK_NO_CREATE 0	/* don't create new blocks or convert tails */
2603 #define GET_BLOCK_CREATE 1	/* add anything you need to find the block */
2604 #define GET_BLOCK_NO_HOLE 2	/* return -ENOENT for file holes */
2605 #define GET_BLOCK_READ_DIRECT 4	/* read the tail if indirect item not found */
2606 #define GET_BLOCK_NO_IMUX     8	/* i_mutex is not held, don't preallocate */
2607 #define GET_BLOCK_NO_DANGLE   16	/* don't leave any transactions running */
2608 
2609 void reiserfs_read_locked_inode(struct inode *inode,
2610 				struct reiserfs_iget_args *args);
2611 int reiserfs_find_actor(struct inode *inode, void *p);
2612 int reiserfs_init_locked_inode(struct inode *inode, void *p);
2613 void reiserfs_evict_inode(struct inode *inode);
2614 int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc);
2615 int reiserfs_get_block(struct inode *inode, sector_t block,
2616 		       struct buffer_head *bh_result, int create);
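
/*
 * Illustrative sketch, not part of the original header: the GET_BLOCK_* values
 * above are bit flags for the create argument, so a hypothetical writer that
 * wants blocks allocated but no transaction left dangling could combine them.
 */
static inline int reiserfs_example_map_block(struct inode *inode, sector_t block,
					     struct buffer_head *bh_result)
{
	return reiserfs_get_block(inode, block, bh_result,
				  GET_BLOCK_CREATE | GET_BLOCK_NO_DANGLE);
}
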
2617 struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
2618 				     int fh_len, int fh_type);
2619 struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
2620 				     int fh_len, int fh_type);
2621 int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
2622 		       struct inode *parent);
2623 
2624 int reiserfs_truncate_file(struct inode *, int update_timestamps);
2625 void make_cpu_key(struct cpu_key *cpu_key, struct inode *inode, loff_t offset,
2626 		  int type, int key_length);
2627 void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
2628 		       int version,
2629 		       loff_t offset, int type, int length, int entry_count);
2630 struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key);
2631 
2632 struct reiserfs_security_handle;
2633 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
2634 		       struct inode *dir, umode_t mode,
2635 		       const char *symname, loff_t i_size,
2636 		       struct dentry *dentry, struct inode *inode,
2637 		       struct reiserfs_security_handle *security);
2638 
2639 void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
2640 			     struct inode *inode, loff_t size);
2641 
2642 static inline void reiserfs_update_sd(struct reiserfs_transaction_handle *th,
2643 				      struct inode *inode)
2644 {
2645 	reiserfs_update_sd_size(th, inode, inode->i_size);
2646 }
2647 
2648 void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
2649 void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
2650 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
2651 
2652 int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len);
2653 
2654 /* namei.c */
2655 void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
2656 int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
2657 			struct treepath *path, struct reiserfs_dir_entry *de);
2658 struct dentry *reiserfs_get_parent(struct dentry *);
2659 
2660 #ifdef CONFIG_REISERFS_PROC_INFO
2661 int reiserfs_proc_info_init(struct super_block *sb);
2662 int reiserfs_proc_info_done(struct super_block *sb);
2663 int reiserfs_proc_info_global_init(void);
2664 int reiserfs_proc_info_global_done(void);
2665 
2666 #define PROC_EXP( e )   e
2667 
2668 #define __PINFO( sb ) REISERFS_SB(sb) -> s_proc_info_data
2669 #define PROC_INFO_MAX( sb, field, value )								\
2670     __PINFO( sb ).field =												\
2671         max( REISERFS_SB( sb ) -> s_proc_info_data.field, value )
2672 #define PROC_INFO_INC( sb, field ) ( ++ ( __PINFO( sb ).field ) )
2673 #define PROC_INFO_ADD( sb, field, val ) ( __PINFO( sb ).field += ( val ) )
2674 #define PROC_INFO_BH_STAT( sb, bh, level )							\
2675     PROC_INFO_INC( sb, sbk_read_at[ ( level ) ] );						\
2676     PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) );	\
2677     PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) )
2678 #else
2679 static inline int reiserfs_proc_info_init(struct super_block *sb)
2680 {
2681 	return 0;
2682 }
2683 
2684 static inline int reiserfs_proc_info_done(struct super_block *sb)
2685 {
2686 	return 0;
2687 }
2688 
2689 static inline int reiserfs_proc_info_global_init(void)
2690 {
2691 	return 0;
2692 }
2693 
2694 static inline int reiserfs_proc_info_global_done(void)
2695 {
2696 	return 0;
2697 }
2698 
2699 #define PROC_EXP( e )
2700 #define VOID_V ( ( void ) 0 )
2701 #define PROC_INFO_MAX( sb, field, value ) VOID_V
2702 #define PROC_INFO_INC( sb, field ) VOID_V
2703 #define PROC_INFO_ADD( sb, field, val ) VOID_V
2704 #define PROC_INFO_BH_STAT(sb, bh, n_node_level) VOID_V
2705 #endif
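
/*
 * Illustrative sketch, not part of the original header: the PROC_INFO_* macros
 * compile to no-ops when CONFIG_REISERFS_PROC_INFO is off, so call sites cost
 * nothing in that configuration.  Note that PROC_INFO_BH_STAT() expands to
 * several statements when enabled, so it needs braces if used under an if.
 */
static inline void reiserfs_example_account_read(struct super_block *sb,
						 struct buffer_head *bh,
						 int level)
{
	if (bh) {
		PROC_INFO_BH_STAT(sb, bh, level);
	}
}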
2706 
2707 /* dir.c */
2708 extern const struct inode_operations reiserfs_dir_inode_operations;
2709 extern const struct inode_operations reiserfs_symlink_inode_operations;
2710 extern const struct inode_operations reiserfs_special_inode_operations;
2711 extern const struct file_operations reiserfs_dir_operations;
2712 int reiserfs_readdir_inode(struct inode *, struct dir_context *);
2713 
2714 /* tail_conversion.c */
2715 int direct2indirect(struct reiserfs_transaction_handle *, struct inode *,
2716 		    struct treepath *, struct buffer_head *, loff_t);
2717 int indirect2direct(struct reiserfs_transaction_handle *, struct inode *,
2718 		    struct page *, struct treepath *, const struct cpu_key *,
2719 		    loff_t, char *);
2720 void reiserfs_unmap_buffer(struct buffer_head *);
2721 
2722 /* file.c */
2723 extern const struct inode_operations reiserfs_file_inode_operations;
2724 extern const struct file_operations reiserfs_file_operations;
2725 extern const struct address_space_operations reiserfs_address_space_operations;
2726 
2727 /* fix_nodes.c */
2728 
2729 int fix_nodes(int n_op_mode, struct tree_balance *tb,
2730 	      struct item_head *ins_ih, const void *);
2731 void unfix_nodes(struct tree_balance *);
2732 
2733 /* prints.c */
2734 void __reiserfs_panic(struct super_block *s, const char *id,
2735 		      const char *function, const char *fmt, ...)
2736     __attribute__ ((noreturn));
2737 #define reiserfs_panic(s, id, fmt, args...) \
2738 	__reiserfs_panic(s, id, __func__, fmt, ##args)
2739 void __reiserfs_error(struct super_block *s, const char *id,
2740 		      const char *function, const char *fmt, ...);
2741 #define reiserfs_error(s, id, fmt, args...) \
2742 	 __reiserfs_error(s, id, __func__, fmt, ##args)
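
/*
 * Illustrative sketch, not part of the original header: a hypothetical error
 * path.  The second argument is a short identifier string; "example-1001" is
 * made up for this example, real callers use their own codes.
 */
static inline void reiserfs_example_report_bad_block(struct super_block *sb,
						     b_blocknr_t block)
{
	reiserfs_error(sb, "example-1001", "block %lu is out of range",
		       (unsigned long)block);
}
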
2743 void reiserfs_info(struct super_block *s, const char *fmt, ...);
2744 void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...);
2745 void print_indirect_item(struct buffer_head *bh, int item_num);
2746 void store_print_tb(struct tree_balance *tb);
2747 void print_cur_tb(char *mes);
2748 void print_de(struct reiserfs_dir_entry *de);
2749 void print_bi(struct buffer_info *bi, char *mes);
2750 #define PRINT_LEAF_ITEMS 1	/* print all items */
2751 #define PRINT_DIRECTORY_ITEMS 2	/* print directory items */
2752 #define PRINT_DIRECT_ITEMS 4	/* print contents of direct items */
2753 void print_block(struct buffer_head *bh, ...);
2754 void print_bmap(struct super_block *s, int silent);
2755 void print_bmap_block(int i, char *data, int size, int silent);
2756 /*void print_super_block (struct super_block * s, char * mes);*/
2757 void print_objectid_map(struct super_block *s);
2758 void print_block_head(struct buffer_head *bh, char *mes);
2759 void check_leaf(struct buffer_head *bh);
2760 void check_internal(struct buffer_head *bh);
2761 void print_statistics(struct super_block *s);
2762 char *reiserfs_hashname(int code);
2763 
2764 /* lbalance.c */
2765 int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num,
2766 		    int mov_bytes, struct buffer_head *Snew);
2767 int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes);
2768 int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes);
2769 void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first,
2770 		       int del_num, int del_bytes);
2771 void leaf_insert_into_buf(struct buffer_info *bi, int before,
2772 			  struct item_head *inserted_item_ih,
2773 			  const char *inserted_item_body, int zeros_number);
2774 void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
2775 			  int pos_in_item, int paste_size, const char *body,
2776 			  int zeros_number);
2777 void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
2778 			  int pos_in_item, int cut_size);
2779 void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
2780 			int new_entry_count, struct reiserfs_de_head *new_dehs,
2781 			const char *records, int paste_size);
2782 /* ibalance.c */
2783 int balance_internal(struct tree_balance *, int, int, struct item_head *,
2784 		     struct buffer_head **);
2785 
2786 /* do_balance.c */
2787 void do_balance_mark_leaf_dirty(struct tree_balance *tb,
2788 				struct buffer_head *bh, int flag);
2789 #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
2790 #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
2791 
2792 void do_balance(struct tree_balance *tb, struct item_head *ih,
2793 		const char *body, int flag);
2794 void reiserfs_invalidate_buffer(struct tree_balance *tb,
2795 				struct buffer_head *bh);
2796 
2797 int get_left_neighbor_position(struct tree_balance *tb, int h);
2798 int get_right_neighbor_position(struct tree_balance *tb, int h);
2799 void replace_key(struct tree_balance *tb, struct buffer_head *, int,
2800 		 struct buffer_head *, int);
2801 void make_empty_node(struct buffer_info *);
2802 struct buffer_head *get_FEB(struct tree_balance *);
2803 
2804 /* bitmap.c */
2805 
2806 /* this structure carries hints for the block allocator and acts as a container
2807  * for its arguments: inode, search path, transaction handle, etc. */
2808 struct __reiserfs_blocknr_hint {
2809 	struct inode *inode;	/* inode passed to the allocator when allocating unformatted nodes */
2810 	sector_t block;		/* file offset, in blocks */
2811 	struct in_core_key key;
2812 	struct treepath *path;	/* search path, used by the allocator to determine search_start
2813 				 * in various ways */
2814 	struct reiserfs_transaction_handle *th;	/* transaction handle, needed to log superblock and
2815 						 * bitmap block changes */
2816 	b_blocknr_t beg, end;
2817 	b_blocknr_t search_start;	/* a field used to transfer search start value (block number)
2818 					 * between different block allocator procedures
2819 					 * (determine_search_start() and others) */
2820 	int prealloc_size;	/* set in determine_prealloc_size(); used by the underlying
2821 				 * functions that do the actual allocation */
2822 
2823 	unsigned formatted_node:1;	/* the allocator uses different policies for getting disk space for
2824 					 * formatted/unformatted blocks with/without preallocation */
2825 	unsigned preallocate:1;
2826 };
2827 
2828 typedef struct __reiserfs_blocknr_hint reiserfs_blocknr_hint_t;
2829 
2830 int reiserfs_parse_alloc_options(struct super_block *, char *);
2831 void reiserfs_init_alloc_options(struct super_block *s);
2832 
2833 /*
2834  * given a directory, this will tell you what packing locality
2835  * to use for a new object underneath it.  The locality is returned
2836  * in disk byte order (le).
2837  */
2838 __le32 reiserfs_choose_packing(struct inode *dir);
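
/*
 * Illustrative sketch, not part of the original header: a hypothetical caller
 * that wants the locality as a cpu-order value converts it explicitly.
 */
static inline __u32 reiserfs_example_packing_locality(struct inode *dir)
{
	return le32_to_cpu(reiserfs_choose_packing(dir));
}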
2839 
2840 int reiserfs_init_bitmap_cache(struct super_block *sb);
2841 void reiserfs_free_bitmap_cache(struct super_block *sb);
2842 void reiserfs_cache_bitmap_metadata(struct super_block *sb, struct buffer_head *bh, struct reiserfs_bitmap_info *info);
2843 struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb, unsigned int bitmap);
2844 int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);
2845 void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *,
2846 			 b_blocknr_t, int for_unformatted);
2847 int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int,
2848 			       int);
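
/* the wrappers below fill in a block allocator hint for the common cases:
 * formatted tree nodes, unformatted data blocks, and (with REISERFS_PREALLOCATE)
 * unformatted blocks with preallocation */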
2849 static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
2850 					     b_blocknr_t * new_blocknrs,
2851 					     int amount_needed)
2852 {
2853 	reiserfs_blocknr_hint_t hint = {
2854 		.th = tb->transaction_handle,
2855 		.path = tb->tb_path,
2856 		.inode = NULL,
2857 		.key = tb->key,
2858 		.block = 0,
2859 		.formatted_node = 1
2860 	};
2861 	return reiserfs_allocate_blocknrs(&hint, new_blocknrs, amount_needed,
2862 					  0);
2863 }
2864 
2865 static inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
2866 					    *th, struct inode *inode,
2867 					    b_blocknr_t * new_blocknrs,
2868 					    struct treepath *path,
2869 					    sector_t block)
2870 {
2871 	reiserfs_blocknr_hint_t hint = {
2872 		.th = th,
2873 		.path = path,
2874 		.inode = inode,
2875 		.block = block,
2876 		.formatted_node = 0,
2877 		.preallocate = 0
2878 	};
2879 	return reiserfs_allocate_blocknrs(&hint, new_blocknrs, 1, 0);
2880 }
2881 
2882 #ifdef REISERFS_PREALLOCATE
2883 static inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle
2884 					     *th, struct inode *inode,
2885 					     b_blocknr_t * new_blocknrs,
2886 					     struct treepath *path,
2887 					     sector_t block)
2888 {
2889 	reiserfs_blocknr_hint_t hint = {
2890 		.th = th,
2891 		.path = path,
2892 		.inode = inode,
2893 		.block = block,
2894 		.formatted_node = 0,
2895 		.preallocate = 1
2896 	};
2897 	return reiserfs_allocate_blocknrs(&hint, new_blocknrs, 1, 0);
2898 }
2899 
2900 void reiserfs_discard_prealloc(struct reiserfs_transaction_handle *th,
2901 			       struct inode *inode);
2902 void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th);
2903 #endif
2904 
2905 /* hashes.c */
2906 __u32 keyed_hash(const signed char *msg, int len);
2907 __u32 yura_hash(const signed char *msg, int len);
2908 __u32 r5_hash(const signed char *msg, int len);
2909 
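/* reiserfs bitmaps are little-endian on disk, so the bitmap helpers wrap the
 * generic little-endian bit operations */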
2910 #define reiserfs_set_le_bit		__set_bit_le
2911 #define reiserfs_test_and_set_le_bit	__test_and_set_bit_le
2912 #define reiserfs_clear_le_bit		__clear_bit_le
2913 #define reiserfs_test_and_clear_le_bit	__test_and_clear_bit_le
2914 #define reiserfs_test_le_bit		test_bit_le
2915 #define reiserfs_find_next_zero_le_bit	find_next_zero_bit_le
2916 
2917 /* sometimes reiserfs_truncate may need to allocate a few new blocks
2918    to perform an indirect2direct conversion.  People tend to assume
2919    that truncate works without problems on a filesystem with no
2920    free disk space, and they may complain if it fails for lack of
2921    free disk space.  This spare space lets us not worry about that
2922    case.  500 is probably too much, but it should be absolutely
2923    safe */
2924 #define SPARE_SPACE 500
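
/*
 * Illustrative sketch, not part of the original header and not the allocator's
 * actual check: one way to honor the reserve is to treat the last SPARE_SPACE
 * blocks as unavailable to ordinary allocations.
 */
static inline int reiserfs_example_has_unreserved_blocks(struct super_block *s,
							 unsigned int needed)
{
	return SB_FREE_BLOCKS(s) >= needed + SPARE_SPACE;
}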
2925 
2926 /* prototypes from ioctl.c */
2927 long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
2928 long reiserfs_compat_ioctl(struct file *filp,
2929 		   unsigned int cmd, unsigned long arg);
2930 int reiserfs_unpack(struct inode *inode, struct file *filp);
2931