/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/string.h>
#include <linux/random.h>
#include <linux/time.h>
#include "reiserfs.h"

/*
 * Find where the objectid map starts: it lives in the superblock's
 * block, immediately after the (v1 or v2) super block structure itself.
 */
#define objectid_map(s,rs) (old_format_only (s) ? \
			 (__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
			 (__le32 *)((rs) + 1))

#ifdef CONFIG_REISERFS_CHECK

static void check_objectid_map(struct super_block *s, __le32 *map)
{
	/* map[0] must always be 1: the first run of used objectids
	   starts at objectid 1 (the root directory's parent id). */
	if (le32_to_cpu(map[0]) != 1)
		reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
			       (long unsigned int)le32_to_cpu(map[0]));

	/* FIXME: add more consistency checks here */
}

#else
static void check_objectid_map(struct super_block *s, __le32 *map)
{
}
#endif

/*
 * When we allocate objectids we allocate the first unused objectid.
 * Each sequence of objectids in use (the odd sequences) is followed by
 * a sequence of objectids not in use (the even sequences).  We only
 * need to record the last objectid in each of these sequences (both
 * the odd and even sequences) in order to fully define the boundaries
 * of the sequences.  A consequence of allocating the first objectid
 * not in use is that under most conditions this scheme is extremely
 * compact.  The exception is immediately after a sequence of
 * operations which deletes a large number of objects of
 * non-sequential objectids, and even then it will become compact
 * again as soon as more objects are created.  Note that many
 * interesting optimizations of layout could result from complicating
 * objectid assignment, but we have deferred making them for now.
 */
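
/*
 * Illustration (hypothetical map contents): each even-indexed entry is
 * the first objectid of a run in use, each odd-indexed entry the first
 * objectid of the free run that follows it, and map[0] is always 1.
 * For example,
 *
 *	map = { 1, 4, 7, 10 }
 *
 * encodes: objectids 1..3 in use, 4..6 free, 7..9 in use, and
 * everything from 10 upward free.  map[1] is therefore always the
 * first unused objectid, which is exactly what
 * reiserfs_get_unused_objectid() below hands out.
 */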

/* get unique object identifier */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	__u32 unused_objectid;

	BUG_ON(!th->t_trans_id);

	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	/* map[1] is the first objectid not yet in use */
	unused_objectid = le32_to_cpu(map[1]);
	if (unused_objectid == U32_MAX) {
		reiserfs_warning(s, "reiserfs-15100", "no more object ids");
		reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
		return 0;
	}

	/*
	 * This incrementation allocates the first unused objectid.  That
	 * is to say, the first entry on the objectid map is the first
	 * unused objectid, and by incrementing it we use it.  See below
	 * where we check to see if we eliminated a sequence of unused
	 * objectids....
	 */
	map[1] = cpu_to_le32(unused_objectid + 1);

	/*
	 * Now we check to see if we eliminated the last remaining member
	 * of the first even sequence (and can eliminate the sequence by
	 * eliminating its last objectid from oids), and can collapse the
	 * first two odd sequences into one sequence.  If so, then the
	 * net result is to eliminate a pair of objectids from oids.  We
	 * do this by shifting the entire map to the left.
	 */
	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
		memmove(map + 1, map + 3,
			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
		set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
	}
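
	/*
	 * Worked example (hypothetical contents): with map = { 1, 6, 7,
	 * 10 } the first free objectid is 6.  We return 6 and bump
	 * map[1] to 7; now map[1] == map[2], so the emptied free run is
	 * squeezed out above and the map collapses to { 1, 10 }
	 * (objectids 1..9 in use, 10 and up free).
	 */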

	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
	return unused_objectid;
}
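
/*
 * Usage sketch (hypothetical caller): a return value of 0 means the
 * objectid space is exhausted, so callers must treat 0 as failure.
 *
 *	__u32 oid = reiserfs_get_unused_objectid(th);
 *	if (!oid)
 *		return -ENOSPC;
 *
 * The error code above is illustrative; real callers choose their own.
 */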

/* makes object identifier unused */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));

	/*
	 * Start at the beginning of the objectid map (i = 0) and go to
	 * the end of it (i = disk_sb->s_oid_cursize).  Linear search is
	 * what we use, though it is possible that binary search would be
	 * more efficient after performing lots of deletions (which is
	 * when oids is large.)  We only check even i's.
	 */
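	/*
	 * Worked examples (hypothetical map { 1, 4, 8, 10 }, i.e.
	 * objectids 1..3 and 8..9 in use):
	 *
	 *	releasing 8 hits map[2], which becomes 9: the used run
	 *	    shrinks to just objectid 9;
	 *	releasing 9 falls between map[2] and map[3], and 9 + 1 ==
	 *	    map[3], so map[3] becomes 9: the run shrinks from the
	 *	    top;
	 *	releasing 2 falls between map[0] and map[1] and touches
	 *	    neither boundary, so the run is split and the map
	 *	    grows to { 1, 2, 3, 4, 8, 10 }.
	 */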
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/* This incrementation unallocates the objectid. */
			le32_add_cpu(&map[i], 1);

			/*
			 * Did we unallocate the last member of an odd
			 * sequence, and can shrink oids?  (Comparing two
			 * little-endian values for equality is safe.)
			 */
			if (map[i] == map[i + 1]) {
				/* shrink objectid map */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/* size of objectid map is not changed */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				/*
				 * The objectid map must be expanded, but
				 * there is no space: the objectid simply
				 * leaks (stays marked as used).
				 */
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	reiserfs_error(s, "vs-15011", "tried to free free object id (%lu)",
		       (long unsigned)objectid_to_release);
}

int reiserfs_convert_objectid_map_v1(struct super_block *s)
{
	struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK(s);
	int cur_size = sb_oid_cursize(disk_sb);
	/* number of map slots that fit after the v2 super block, rounded
	   down to an even count so run boundaries stay paired */
	int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
	int old_max = sb_oid_maxsize(disk_sb);
	struct reiserfs_super_block_v1 *disk_sb_v1;
	__le32 *objectid_map, *new_objectid_map;
	int i;

	disk_sb_v1 =
	    (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
	objectid_map = (__le32 *) (disk_sb_v1 + 1);
	new_objectid_map = (__le32 *) (disk_sb + 1);

	if (cur_size > new_size) {
		/*
		 * Mark everyone used that was listed as free at the end
		 * of the objectid map.  Truncating the map this way
		 * leaks those objectids, but keeps the map consistent.
		 */
		objectid_map[new_size - 1] = objectid_map[cur_size - 1];
		set_sb_oid_cursize(disk_sb, new_size);
	}
	/* move the smaller objectid map past the end of the new super */
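	/*
	 * Rough picture of the superblock's block (sizes illustrative,
	 * not to scale):
	 *
	 *	before:	| v1 super block | objectid map (old_max slots)  |
	 *	after:	| v2 super block        | objectid map (new_size) |
	 *
	 * Shifting every entry up by (old_max - new_size) slots keeps
	 * the end of the map in place while making room for the larger
	 * v2 super block, so the map comes to rest at new_objectid_map.
	 */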
	for (i = new_size - 1; i >= 0; i--) {
		objectid_map[i + (old_max - new_size)] = objectid_map[i];
	}

	/* set the max size so we don't overflow later */
	set_sb_oid_maxsize(disk_sb, new_size);

	/* Zero out label and generate random UUID */
	memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label));
	generate_random_uuid(disk_sb->s_uuid);

	/* finally, zero out the unused chunk of the new super */
	memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused));
	return 0;
}