// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/sysv/itree.c
 *
 *  Handling of indirect blocks' trees.
 *  AV, Sep--Dec 2000
 */

#include <linux/buffer_head.h>
#include <linux/mount.h>
#include <linux/string.h>
#include "sysv.h"

enum {DIRECT = 10, DEPTH = 4};	/* Have triple indirect */

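/*
 * Mark an indirect block dirty against its owning inode, and write it
 * out immediately if the inode is flagged for synchronous updates.
 */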
static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	mark_buffer_dirty_inode(bh, inode);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
}

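/*
 * Translate a file-relative block number into a chain of offsets:
 * offsets[0] indexes the inode's i_data array, each further entry
 * indexes one level of indirect block.  Returns the depth of the
 * resulting chain, or 0 if the block number is out of range.
 */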
static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
{
	struct super_block *sb = inode->i_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	int ptrs_bits = sbi->s_ind_per_block_bits;
	unsigned long	indirect_blocks = sbi->s_ind_per_block,
			double_blocks = sbi->s_ind_per_block_2;
	int n = 0;

	if (block < 0) {
		printk("sysv_block_map: block < 0\n");
	} else if (block < DIRECT) {
		offsets[n++] = block;
	} else if ((block -= DIRECT) < indirect_blocks) {
		offsets[n++] = DIRECT;
		offsets[n++] = block;
	} else if ((block -= indirect_blocks) < double_blocks) {
		offsets[n++] = DIRECT+1;
		offsets[n++] = block >> ptrs_bits;
		offsets[n++] = block & (indirect_blocks - 1);
	} else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) {
		offsets[n++] = DIRECT+2;
		offsets[n++] = block >> (ptrs_bits * 2);
		offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1);
		offsets[n++] = block & (indirect_blocks - 1);
	} else {
		/* nothing */;
	}
	return n;
}

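/* Convert an on-disk zone pointer to a CPU-order device block number. */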
static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr)
{
	return sbi->s_block_base + fs32_to_cpu(sbi, nr);
}

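/*
 * One step of a path through the indirect tree: the location of the
 * pointer (p), the value it held when sampled (key), and the buffer
 * holding that pointer (NULL for pointers in the inode itself).
 */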
typedef struct {
	sysv_zone_t     *p;
	sysv_zone_t     key;
	struct buffer_head *bh;
} Indirect;

static DEFINE_RWLOCK(pointers_lock);

static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

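/*
 * Check, under pointers_lock, that none of the sampled pointers in
 * [from, to] have been changed (e.g. by a racing truncate) since we
 * read them.
 */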
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

static inline sysv_zone_t *block_end(struct buffer_head *bh)
{
	return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
}

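/*
 * Read the chain of indirect blocks leading to a data block.  Returns
 * NULL if the whole branch is in place, otherwise a pointer to the
 * first missing link; on error *err is set to -EIO (failed read of an
 * indirect block) or -EAGAIN (the chain changed under us, retry).
 */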
static Indirect *get_branch(struct inode *inode,
			    int depth,
			    int offsets[],
			    Indirect chain[],
			    int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	add_chain(chain, NULL, SYSV_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		int block = block_to_cpu(SYSV_SB(sb), p->key);
		bh = sb_bread(sb, block);
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

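/*
 * Allocate the num missing blocks of a branch, zeroing each new
 * indirect block and linking it to the next one before it goes to
 * disk.  The branch stays invisible to readers until splice_branch()
 * connects it; on failure everything is freed and -ENOSPC returned.
 */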
static int alloc_branch(struct inode *inode,
			int num,
			int *offsets,
			Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0;
	int i;

	branch[0].key = sysv_new_block(inode->i_sb);
	if (branch[0].key) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		int parent;
		/* Allocate the next block */
		branch[n].key = sysv_new_block(inode->i_sb);
		if (!branch[n].key)
			break;
		/*
		 * Get buffer_head for parent block, zero it out and set
		 * the pointer to new one, then send parent to disk.
		 */
		parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key);
		bh = sb_getblk(inode->i_sb, parent);
		if (!bh) {
			sysv_free_block(inode->i_sb, branch[n].key);
			break;
		}
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].bh = bh;
		branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		dirty_indirect(bh, inode);
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		sysv_free_block(inode->i_sb, branch[i].key);
	return -ENOSPC;
}

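/*
 * Make the new branch visible by storing its top into the still-vacant
 * slot found earlier by get_branch().  If the slot was filled or the
 * chain changed in the meantime, the branch is freed and -EAGAIN tells
 * the caller to retry the lookup.
 */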
static inline int splice_branch(struct inode *inode,
				Indirect chain[],
				Indirect *where,
				int num)
{
	int i;

	/* Verify that place we are splicing to is still there and vacant */
	write_lock(&pointers_lock);
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;
	*where->p = where->key;
	write_unlock(&pointers_lock);

	inode_set_ctime_current(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		dirty_indirect(where->bh, inode);

	if (IS_SYNC(inode))
		sysv_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		sysv_free_block(inode->i_sb, where[i].key);
	return -EAGAIN;
}

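/*
 * The get_block callback used by all sysv address space operations:
 * map a file block to a device block, allocating the missing part of
 * the branch when create is set, and retrying if truncate raced with us.
 */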
static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	struct super_block *sb = inode->i_sb;
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, iblock, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh_result, sb, block_to_cpu(SYSV_SB(sb),
					chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh_result);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

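/* Return 1 if the range [p, q) of zone pointers contains no allocated block. */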
static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

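/*
 * Find the last indirect block on the truncation path that must be
 * kept because earlier entries in it are still in use.  The unshared
 * rest of the branch is detached from the tree under pointers_lock and
 * its top is returned in *top for the caller to free at leisure.
 */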
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[],
				Indirect chain[],
				sysv_zone_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired a continuation since we looked at it -
	 * fine, it should all survive and the (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p = partial; p > chain && all_zeroes((sysv_zone_t*)p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

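/* Clear and free a range of data block pointers. */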
static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
{
	for ( ; p < q ; p++) {
		sysv_zone_t nr = *p;
		if (nr) {
			*p = 0;
			sysv_free_block(inode->i_sb, nr);
			mark_inode_dirty(inode);
		}
	}
}

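/*
 * Recursively free the subtrees rooted at the pointers in [p, q):
 * depth is the number of indirection levels left, so at depth 0 the
 * pointers refer to data blocks and free_data() finishes the job.
 */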
static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
{
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;

	if (depth--) {
		for ( ; p < q ; p++) {
			int block;
			sysv_zone_t nr = *p;
			if (!nr)
				continue;
			*p = 0;
			block = block_to_cpu(SYSV_SB(sb), nr);
			bh = sb_bread(sb, block);
			if (!bh)
				continue;
			free_branches(inode, (sysv_zone_t*)bh->b_data,
					block_end(bh), depth);
			bforget(bh);
			sysv_free_block(sb, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

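/*
 * Truncate the block tree to the current i_size: zero the tail of the
 * last partial block, detach and free the unshared part of the branch
 * at the truncation point, then drop every deeper (whole) subtree.
 */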
void sysv_truncate(struct inode *inode)
{
	sysv_zone_t *i_data = SYSV_I(inode)->i_data;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	sysv_zone_t nr = 0;
	int n;
	long iblock;
	unsigned blocksize;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;

	blocksize = inode->i_sb->s_blocksize;
	iblock = (inode->i_size + blocksize-1)
					>> inode->i_sb->s_blocksize_bits;

	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (n == 0)
		return;

	if (n == 1) {
		free_data(inode, i_data+offsets[0], i_data + DIRECT);
		goto do_indirects;
	}

	partial = find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			dirty_indirect(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		dirty_indirect(partial->bh, inode);
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees (== subtrees deeper than...) */
	while (n < DEPTH) {
		nr = i_data[DIRECT + n - 1];
		if (nr) {
			i_data[DIRECT + n - 1] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, n);
		}
		n++;
	}
	inode->i_mtime = inode_set_ctime_current(inode);
	if (IS_SYNC(inode))
		sysv_sync_inode(inode);
	else
		mark_inode_dirty(inode);
}

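/*
 * Estimate (assuming no holes) the number of blocks occupied by a file
 * of the given size, counting the indirect blocks on top of the data
 * blocks: each pass through the loop adds the pointer blocks of one
 * more indirection level.
 */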
static unsigned sysv_nblocks(struct super_block *s, loff_t size)
{
	struct sysv_sb_info *sbi = SYSV_SB(s);
	int ptrs_bits = sbi->s_ind_per_block_bits;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;

	blocks = (size + s->s_blocksize - 1) >> s->s_blocksize_bits;
	res = blocks;
	while (--i && blocks > direct) {
		blocks = ((blocks - direct - 1) >> ptrs_bits) + 1;
		res += blocks;
		direct = 1;
	}
	return res;
}

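/*
 * getattr: st_blocks is reported in 512-byte sectors, so scale the
 * filesystem-block count from sysv_nblocks() accordingly.
 */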
int sysv_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct super_block *s = path->dentry->d_sb;
	generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry),
			 stat);
	stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
	stat->blksize = s->s_blocksize;
	return 0;
}

static int sysv_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, get_block, wbc);
}

static int sysv_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, get_block);
}

int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, get_block);
}

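/*
 * A failed write may have instantiated blocks beyond the old i_size;
 * trim the page cache and the block tree back to the current size.
 */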
static void sysv_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		sysv_truncate(inode);
	}
}

static int sysv_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, get_block);
	if (unlikely(ret))
		sysv_write_failed(mapping, pos + len);

	return ret;
}

static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_block);
}

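/*
 * Address space operations, wiring the generic buffer-head helpers to
 * this filesystem's get_block().
 */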
const struct address_space_operations sysv_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = sysv_read_folio,
	.writepage = sysv_writepage,
	.write_begin = sysv_write_begin,
	.write_end = generic_write_end,
	.bmap = sysv_bmap
};
509