/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/string.h>
#include <linux/random.h>
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_fs_sb.h>

/* find where the objectid map starts */
#define objectid_map(s, rs) (old_format_only(s) ? \
			(__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) : \
			(__le32 *)((rs) + 1))

#ifdef CONFIG_REISERFS_CHECK

static void check_objectid_map(struct super_block *s, __le32 *map)
{
	if (le32_to_cpu(map[0]) != 1)
		reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
			       (unsigned long)le32_to_cpu(map[0]));

	/* FIXME: add something else here */
}

#else
static void check_objectid_map(struct super_block *s, __le32 *map)
{
}
#endif

/* When we allocate objectids, we allocate the first unused objectid.
   Each sequence of objectids in use (the odd sequences) is followed by
   a sequence of objectids not in use (the even sequences).  We only
   need to record the last objectid in each of these sequences (both
   the odd and even ones) to fully define their boundaries.  A
   consequence of allocating the first objectid not in use is that
   under most conditions this scheme is extremely compact.  The
   exception is immediately after a sequence of operations that deletes
   a large number of objects with non-sequential objectids, and even
   then the map becomes compact again as soon as more objects are
   created.  Note that many interesting layout optimizations could
   result from complicating objectid assignment, but we have deferred
   making them for now. */
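
/* An illustrative sketch of the encoding (example values, not taken
   from a real filesystem): with cur_size == 4 and a map of
   { 1, 5, 10, 15 }, objectids 1..4 and 10..14 are in use, 5..9 are
   free, and 15 is the smallest objectid never yet used.  Allocation
   returns 5 and bumps map[1] to 6; once map[1] reaches map[2], that
   free run is exhausted and the map collapses to { 1, 15 }. */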

/* get unique object identifier */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	__u32 unused_objectid;

	BUG_ON(!th->t_trans_id);

	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	/* map[1] holds the first unused objectid */
	unused_objectid = le32_to_cpu(map[1]);
	if (unused_objectid == U32_MAX) {
		reiserfs_warning(s, "reiserfs-15100", "no more object ids");
		reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
		return 0;
	}

	/* This increment allocates the first unused objectid.  That is
	   to say, map[1] is the first unused objectid, and by
	   incrementing it we use it.  See below, where we check whether
	   this allocation eliminated a sequence of unused objectids. */
	map[1] = cpu_to_le32(unused_objectid + 1);

	/* Now we check whether we eliminated the last remaining member of
	   the first even sequence (and so can collapse the first two odd
	   sequences into one).  If so, the net result is to drop a pair
	   of entries from the map, which we do by shifting the rest of
	   the map to the left. */
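	/* comparing two __le32 values for equality needs no byte swapping */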
	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
		memmove(map + 1, map + 3,
			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
		set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
	}

	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
	return unused_objectid;
}
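
/* Usage sketch (illustrative only; real callers live elsewhere, such
 * as the inode-creation path):
 *
 *	__u32 objectid = reiserfs_get_unused_objectid(th);
 *
 *	if (objectid == 0)
 *		(out of ids; the caller must fail the operation)
 *
 * A caller that fails later can hand the id back with
 * reiserfs_release_objectid(th, objectid).
 */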

/* makes the object identifier unused */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));

	/* Start at the beginning of the objectid map (i = 0) and go to
	   its end (i = disk_sb->s_oid_cursize).  We use a linear search,
	   though binary search might be more efficient after lots of
	   deletions (which is when the map is large).  Only even values
	   of i are checked, since each even entry starts an in-use run. */
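	/* An illustrative walk-through (example values only): with a map
	   of { 1, 5, 10, 15 }, releasing 10 bumps map[2] to 11 (first
	   branch below); releasing 14 instead lowers map[3] to 14 (the
	   boundary case of the second branch); and releasing 12 must
	   split the used run [10,15) by inserting the pair { 12, 13 },
	   growing the map to { 1, 5, 10, 12, 13, 15 }. */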
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This increment frees the objectid. */
			le32_add_cpu(&map[i], 1);

			/* Did we free the last member of an odd sequence,
			   so the map can shrink? */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			/* JDM: comparing two little-endian values for
			   equality is safe */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				/* objectid map must be expanded, but
				   there is no space */
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	reiserfs_error(s, "vs-15011", "tried to free an already free object id (%lu)",
		       (unsigned long)objectid_to_release);
}

int reiserfs_convert_objectid_map_v1(struct super_block *s)
{
	struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK(s);
	int cur_size = sb_oid_cursize(disk_sb);
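	/* slots available after the v2 super block, rounded down to an
	   even count so that used/free boundaries stay paired */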
	int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
	int old_max = sb_oid_maxsize(disk_sb);
	struct reiserfs_super_block_v1 *disk_sb_v1;
	__le32 *objectid_map, *new_objectid_map;
	int i;

	disk_sb_v1 =
	    (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
	objectid_map = (__le32 *)(disk_sb_v1 + 1);
	new_objectid_map = (__le32 *)(disk_sb + 1);

	if (cur_size > new_size) {
		/* The new map is smaller: mark as used everything that was
		   listed as free at the truncated tail of the old map by
		   copying the old map's final boundary down. */
		objectid_map[new_size - 1] = objectid_map[cur_size - 1];
		set_sb_oid_cursize(disk_sb, new_size);
	}
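	/* Illustrative sketch (made-up values): shrinking the map
	   { 1, 7, 9, 12, 14, 20 } from cur_size == 6 to new_size == 4
	   yields { 1, 7, 9, 20 }; ids 12..13, formerly free, are now
	   treated as used -- leaked, but never double-allocated. */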
	/* move the smaller objectid map past the end of the new super */
	for (i = new_size - 1; i >= 0; i--) {
		objectid_map[i + (old_max - new_size)] = objectid_map[i];
	}
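	/* Assuming old_max was computed from SB_SIZE_V1 the way new_size
	   is computed from SB_SIZE above, shifting every entry up by
	   (old_max - new_size) slots lands the map at new_objectid_map,
	   i.e. just past the larger v2 super block. */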

	/* set the max size so we don't overflow later */
	set_sb_oid_maxsize(disk_sb, new_size);

	/* zero out the label and generate a random UUID */
	memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label));
	generate_random_uuid(disk_sb->s_uuid);

	/* finally, zero out the unused chunk of the new super */
	memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused));
	return 0;
}