xref: /openbmc/linux/fs/minix/itree_common.c (revision bc5aa3a0)
/* Generic part */

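/*
 * This file is not built on its own: itree_v1.c and itree_v2.c
 * #include it after defining the on-disk pointer type (block_t), the
 * tree geometry (DEPTH, DIRECT) and the helpers i_data(),
 * block_to_path(), block_to_cpu() and cpu_to_block().
 */

/*
 * One step in an indirect chain: p points at the slot holding a block
 * number, key caches the value read from it, and bh pins the buffer
 * containing the slot (NULL for slots embedded in the inode itself).
 */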
typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;

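/*
 * Serializes updates to the pointer chains, chiefly get_block()
 * against truncate(): readers revalidate a chain under the read lock,
 * writers splice or cut branches under the write lock.
 */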
static DEFINE_RWLOCK(pointers_lock);

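/*
 * Record one step of a chain: where the pointer lives, the value it
 * held when we looked, and the buffer (if any) it was read from.
 */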
static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

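/*
 * Nonzero if every cached key in [from, to] still matches the pointer
 * it was read from, i.e. no concurrent truncate moved the chain.
 */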
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

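/* First address past the pointers held in an indirect block's buffer. */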
static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char *)bh->b_data + bh->b_size);
}

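/*
 * Walk the chain of indirect blocks leading to a data block, recording
 * each step in chain[].  Returns NULL when the full depth was resolved
 * (chain[depth-1].key is the data block); otherwise it returns the last
 * valid step, with *err set to 0 on a hole, -EAGAIN if truncate tore
 * the chain down underneath us, or -EIO on a failed read.
 *
 * For instance, a singly-indirect lookup (depth == 2, offsets {7, 5},
 * here assuming DIRECT == 7) ends with
 *	chain[0] = { slot 7 of i_data, its key, NULL }
 *	chain[1] = { slot 5 of the indirect block, its key, that block's bh }
 */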
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

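/*
 * Allocate the missing part of a branch: num new blocks, the first of
 * them recorded in branch[0].key and each further one already linked
 * into its freshly zeroed parent.  The branch is built in isolation;
 * making it visible is splice_branch()'s job.  If the disk fills up
 * midway, everything allocated here is released and -ENOSPC returned.
 */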
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t *)bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}

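/*
 * Publish a branch built by alloc_branch() by storing its root into the
 * vacant slot that get_branch() stopped at.  The store happens under
 * the write lock so it cannot race with truncate; if the chain changed
 * or the slot got filled in the meantime, the new blocks are freed
 * again and -EAGAIN tells the caller to retry.
 */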
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that the place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with the atomic stuff, now do the rest of the housekeeping */

	inode->i_ctime = CURRENT_TIME_SEC;

	/* Did we splice it onto an indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}

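/*
 * Map a file block to a disk block, allocating the branch down to it if
 * @create is set.  This is the meat of the filesystem's get_block_t
 * callback; the including translation units merely wrap it, roughly
 * like this (a sketch, see itree_v1.c / itree_v2.c for the real
 * definitions):
 *
 *	int V1_minix_get_block(struct inode *inode, long block,
 *			       struct buffer_head *bh_result, int create)
 *	{
 *		return get_block(inode, block, bh_result, create);
 *	}
 */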
static inline int get_block(struct inode *inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * The indirect block might have been removed by truncate while we
	 * were reading it.  Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

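/* Nonzero if no pointer in [p, q) is in use. */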
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

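/*
 * Find the branch shared between the blocks that survive a truncation
 * and the blocks that go: walk down to the last level with a nonzero
 * offset, then back up past fully-empty indirect blocks.  Under the
 * write lock, detach the doomed subtree, returning its root in *top
 * (zero if there is none).  The returned chain tail names the indirect
 * blocks whose upper ends the caller must still free.
 */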
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k - 1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p = partial; p > chain && all_zeroes((block_t *)p->bh->b_data, p->p); p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

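/* Free the data blocks referenced by the slots in [p, q), zeroing each slot. */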
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}

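/*
 * Recursively free the subtrees rooted at the slots in [p, q), with
 * depth levels of indirection above the data blocks (depth == 0).
 */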
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head *bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t *)bh->b_data,
				      block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

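/*
 * Shrink the block tree to match i_size: zero the tail of the last
 * (partial) block, free the tail of the direct area or of the shared
 * branch found by find_shared(), then kill every fully-truncated
 * indirect subtree hanging off the inode.
 */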
static inline void truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}

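/*
 * Worst-case number of filesystem blocks occupied by a file of the
 * given size, counting the indirect blocks needed to map it.  A rough
 * worked example, assuming the V2 geometry (DIRECT == 7, 256 pointers
 * per 1K block): a 1 MiB file has 1024 data blocks; the first pass
 * charges (1024 - 7 + 255) / 256 = 4 indirect blocks, the second
 * (4 - 1 + 255) / 256 = 1 double-indirect block, 1029 blocks in all.
 */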
static inline unsigned nblocks(loff_t size, struct super_block *sb)
{
	int k = sb->s_blocksize_bits - 10;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += sb->s_blocksize/sizeof(block_t) - 1;
		blocks /= sb->s_blocksize/sizeof(block_t);
		res += blocks;
		direct = 1;
	}
	return res;
}
365