xref: /openbmc/linux/fs/reiserfs/objectid.c (revision 1da177e4)
1 /*
2  * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
3  */
4 
5 #include <linux/config.h>
6 #include <linux/string.h>
7 #include <linux/random.h>
8 #include <linux/time.h>
9 #include <linux/reiserfs_fs.h>
10 #include <linux/reiserfs_fs_sb.h>
11 
// Find where the objectid map starts inside the super block buffer.
// On an old-format filesystem only the (shorter) v1 super block header is
// present, so the map begins right after the v1 structure; otherwise it
// begins after the full super block.  Either way the map is an array of
// little-endian __u32 boundary values (see the big comment below).
#define objectid_map(s,rs) (old_format_only (s) ? \
                         (__u32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
			 (__u32 *)((rs) + 1))
16 
17 
18 #ifdef CONFIG_REISERFS_CHECK
19 
20 static void check_objectid_map (struct super_block * s, __u32 * map)
21 {
22     if (le32_to_cpu (map[0]) != 1)
23 	reiserfs_panic (s, "vs-15010: check_objectid_map: map corrupted: %lx",
24 			( long unsigned int ) le32_to_cpu (map[0]));
25 
26     // FIXME: add something else here
27 }
28 
29 #else
30 static void check_objectid_map (struct super_block * s, __u32 * map)
31 {;}
32 #endif
33 
34 
35 /* When we allocate objectids we allocate the first unused objectid.
36    Each sequence of objectids in use (the odd sequences) is followed
37    by a sequence of objectids not in use (the even sequences).  We
38    only need to record the last objectid in each of these sequences
39    (both the odd and even sequences) in order to fully define the
40    boundaries of the sequences.  A consequence of allocating the first
41    objectid not in use is that under most conditions this scheme is
42    extremely compact.  The exception is immediately after a sequence
43    of operations which deletes a large number of objects of
44    non-sequential objectids, and even then it will become compact
45    again as soon as more objects are created.  Note that many
46    interesting optimizations of layout could result from complicating
47    objectid assignment, but we have deferred making them for now. */
48 
49 
50 /* get unique object identifier */
51 __u32 reiserfs_get_unused_objectid (struct reiserfs_transaction_handle *th)
52 {
53     struct super_block * s = th->t_super;
54     struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
55     __u32 * map = objectid_map (s, rs);
56     __u32 unused_objectid;
57 
58     BUG_ON (!th->t_trans_id);
59 
60     check_objectid_map (s, map);
61 
62     reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
63                                 /* comment needed -Hans */
64     unused_objectid = le32_to_cpu (map[1]);
65     if (unused_objectid == U32_MAX) {
66 	reiserfs_warning (s, "%s: no more object ids", __FUNCTION__);
67 	reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s)) ;
68 	return 0;
69     }
70 
71     /* This incrementation allocates the first unused objectid. That
72        is to say, the first entry on the objectid map is the first
73        unused objectid, and by incrementing it we use it.  See below
74        where we check to see if we eliminated a sequence of unused
75        objectids.... */
76     map[1] = cpu_to_le32 (unused_objectid + 1);
77 
78     /* Now we check to see if we eliminated the last remaining member of
79        the first even sequence (and can eliminate the sequence by
80        eliminating its last objectid from oids), and can collapse the
81        first two odd sequences into one sequence.  If so, then the net
82        result is to eliminate a pair of objectids from oids.  We do this
83        by shifting the entire map to the left. */
84     if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
85 	memmove (map + 1, map + 3, (sb_oid_cursize(rs) - 3) * sizeof(__u32));
86         set_sb_oid_cursize( rs, sb_oid_cursize(rs) - 2 );
87     }
88 
89     journal_mark_dirty(th, s, SB_BUFFER_WITH_SB (s));
90     return unused_objectid;
91 }
92 
93 
/* Return objectid_to_release to the free pool by editing the on-disk
   objectid map in place.  The map is a sequence of boundaries delimiting
   alternating in-use / free runs (see the comment above
   reiserfs_get_unused_objectid); releasing an id either moves a boundary,
   collapses a pair of boundaries, or inserts a new pair.  Runs inside a
   transaction and journals the super block, which holds the map. */
void reiserfs_release_objectid (struct reiserfs_transaction_handle *th,
				__u32 objectid_to_release)
{
    struct super_block * s = th->t_super;
    struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
    __u32 * map = objectid_map (s, rs);
    int i = 0;

    BUG_ON (!th->t_trans_id);
    check_objectid_map (s, map);

    /* the map lives in the super block buffer; log it up front since the
       common paths below all modify it */
    reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
    journal_mark_dirty(th, s, SB_BUFFER_WITH_SB (s));

    /* start at the beginning of the objectid map (i = 0) and go to
       the end of it (i = disk_sb->s_oid_cursize).  Linear search is
       what we use, though it is possible that binary search would be
       more efficient after performing lots of deletions (which is
       when oids is large.)  We only check even i's. */
    while (i < sb_oid_cursize(rs)) {
	if (objectid_to_release == le32_to_cpu (map[i])) {
	    /* Released id is the first of an in-use run: bumping the
	       boundary moves it into the preceding free run. */
	    map[i] = cpu_to_le32 (le32_to_cpu (map[i]) + 1);

	    /* Did we unallocate the last member of an odd sequence, and can shrink oids? */
	    if (map[i] == map[i+1]) {
		/* shrink objectid map: the in-use run vanished, so merge
		   the free runs on either side by dropping this pair */
		memmove (map + i, map + i + 2,
			 (sb_oid_cursize(rs) - i - 2) * sizeof (__u32));
                set_sb_oid_cursize( rs, sb_oid_cursize(rs) - 2 );

		RFALSE( sb_oid_cursize(rs) < 2 ||
		        sb_oid_cursize(rs) > sb_oid_maxsize(rs),
		        "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
                        sb_oid_cursize(rs), sb_oid_maxsize(rs));
	    }
	    return;
	}

	/* released id falls strictly inside the in-use run [map[i], map[i+1]) */
	if (objectid_to_release > le32_to_cpu (map[i]) &&
	    objectid_to_release < le32_to_cpu (map[i + 1])) {
	    /* size of objectid map is not changed */
	    if (objectid_to_release + 1 == le32_to_cpu (map[i + 1])) {
		/* it is the last id of the run: shrink the run by one */
		map[i + 1] = cpu_to_le32 (le32_to_cpu (map[i + 1]) - 1);
		return;
	    }

            /* JDM comparing two little-endian values for equality -- safe */
	if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
		/* objectid map must be expanded, but there is no space:
		   the id stays marked in use ("leaks") rather than
		   corrupting the map */
		PROC_INFO_INC( s, leaked_oid );
		return;
	}

	    /* expand the objectid map: split the in-use run in two by
	       inserting a one-id free run [id, id+1) */
	    memmove (map + i + 3, map + i + 1,
		     (sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
	    map[i + 1] = cpu_to_le32 (objectid_to_release);
	    map[i + 2] = cpu_to_le32 (objectid_to_release + 1);
            set_sb_oid_cursize( rs, sb_oid_cursize(rs) + 2 );
	    return;
	}
	i += 2;
    }

    /* fell off the end: the id was not inside any in-use run */
    reiserfs_warning (s, "vs-15011: reiserfs_release_objectid: tried to free free object id (%lu)",
		      ( long unsigned ) objectid_to_release);
}
167 
168 
169 int reiserfs_convert_objectid_map_v1(struct super_block *s) {
170     struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK (s);
171     int cur_size = sb_oid_cursize(disk_sb);
172     int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2 ;
173     int old_max = sb_oid_maxsize(disk_sb);
174     struct reiserfs_super_block_v1 *disk_sb_v1 ;
175     __u32 *objectid_map, *new_objectid_map ;
176     int i ;
177 
178     disk_sb_v1=(struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
179     objectid_map = (__u32 *)(disk_sb_v1 + 1) ;
180     new_objectid_map = (__u32 *)(disk_sb + 1) ;
181 
182     if (cur_size > new_size) {
183 	/* mark everyone used that was listed as free at the end of the objectid
184 	** map
185 	*/
186 	objectid_map[new_size - 1] = objectid_map[cur_size - 1] ;
187 	set_sb_oid_cursize(disk_sb,new_size) ;
188     }
189     /* move the smaller objectid map past the end of the new super */
190     for (i = new_size - 1 ; i >= 0 ; i--) {
191         objectid_map[i + (old_max - new_size)] = objectid_map[i] ;
192     }
193 
194 
195     /* set the max size so we don't overflow later */
196     set_sb_oid_maxsize(disk_sb,new_size) ;
197 
198     /* Zero out label and generate random UUID */
199     memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label)) ;
200     generate_random_uuid(disk_sb->s_uuid);
201 
202     /* finally, zero out the unused chunk of the new super */
203     memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused)) ;
204     return 0 ;
205 }
206 
207