// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/anode.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  handling HPFS anode tree that contains file allocation info
 */

#include "hpfs_fn.h"

/* Find a sector in allocation tree */

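/*
 * Map file sector 'sec' to its disk sector by walking the allocation B+ tree
 * whose header is 'btree' (held in 'bh', which is released on every path).
 * When 'inode' is given, the extent that was hit is cached in hpfs_inode_info.
 * Returns the disk sector, or -1 on error.
 */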
secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
		   struct bplus_header *btree, unsigned sec,
		   struct buffer_head *bh)
{
	anode_secno a = -1;
	struct anode *anode;
	int i;
	int c1, c2 = 0;
	go_down:
	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
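	/* Internal node: descend into the first subtree keyed past 'sec'. */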
	if (bp_internal(btree)) {
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
				a = le32_to_cpu(btree->u.internal[i].down);
				brelse(bh);
				if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
				btree = &anode->btree;
				goto go_down;
			}
		hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
		brelse(bh);
		return -1;
	}
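	/*
	 * Leaf node: find the extent that covers 'sec', optionally cache the
	 * run in the in-memory inode, and return the matching disk sector.
	 */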
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
		    le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
			a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
			if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
				brelse(bh);
				return -1;
			}
			if (inode) {
				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
				hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
				hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
				hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
			}
			brelse(bh);
			return a;
		}
	hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
	brelse(bh);
	return -1;
}

/* Add a sector to tree */

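/*
 * Append one sector to the allocation tree rooted at 'node' ('fnod' says
 * whether node is an fnode or an anode).  'fsecno' is the file sector being
 * added and must directly follow the sectors already allocated.  An fnode's
 * btree holds up to 8 external (12-byte) or 12 internal (8-byte) entries,
 * an anode's up to 40 external or 60 internal, so full nodes are split on
 * the way.  Returns the newly allocated disk sector, or -1 on failure.
 */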
secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
{
	struct bplus_header *btree;
	struct anode *anode = NULL, *ranode = NULL;
	struct fnode *fnode;
	anode_secno a, na = -1, ra, up = -1;
	secno se;
	struct buffer_head *bh, *bh1, *bh2;
	int n;
	unsigned fs;
	int c1, c2 = 0;
	if (fnod) {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
		btree = &anode->btree;
	}
	a = node;
	go_down:
	if ((n = btree->n_used_nodes - 1) < -!!fnod) {
		hpfs_error(s, "anode %08x has no entries", a);
		brelse(bh);
		return -1;
	}
	if (bp_internal(btree)) {
		a = le32_to_cpu(btree->u.internal[n].down);
		btree->u.internal[n].file_secno = cpu_to_le32(-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
		if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
		btree = &anode->btree;
		goto go_down;
	}
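	/*
	 * We are at the rightmost leaf.  If it already has an extent, try to
	 * grow the last one in place by grabbing the disk sector that
	 * immediately follows it.
	 */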
	if (n >= 0) {
		if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
				le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
				fnod?'f':'a', node);
			brelse(bh);
			return -1;
		}
		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
			le32_add_cpu(&btree->u.external[n].length, 1);
			mark_buffer_dirty(bh);
			brelse(bh);
			return se;
		}
	} else {
		if (fsecno) {
			hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
			brelse(bh);
			return -1;
		}
		se = !fnod ? node : (node + 16384) & ~16383;
	}
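	/*
	 * Could not extend in place (or the file was empty): allocate a fresh
	 * sector near 'se', with forward preallocation scaled by the file size
	 * and clamped to [ALLOC_FWD_MIN, ALLOC_FWD_MAX].
	 */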
	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
		brelse(bh);
		return -1;
	}
	fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
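	/*
	 * If this leaf is full, allocate a new anode 'na'.  When the leaf is
	 * the fnode's own btree, its entries move into the new anode and the
	 * fnode btree becomes an internal node pointing at it; otherwise a
	 * second anode 'ra' is also allocated in case the root has to be
	 * split at the end.
	 */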
	if (!btree->n_free_nodes) {
		up = a != node ? le32_to_cpu(anode->up) : -1;
		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
			brelse(bh);
			hpfs_free_sectors(s, se, 1);
			return -1;
		}
		if (a == node && fnod) {
			anode->up = cpu_to_le32(node);
			anode->btree.flags |= BP_fnode_parent;
			anode->btree.n_used_nodes = btree->n_used_nodes;
			anode->btree.first_free = btree->first_free;
			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
			btree->flags |= BP_internal;
			btree->n_free_nodes = 11;
			btree->n_used_nodes = 1;
			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
			btree->u.internal[0].file_secno = cpu_to_le32(-1);
			btree->u.internal[0].down = cpu_to_le32(na);
			mark_buffer_dirty(bh);
		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
			brelse(bh);
			brelse(bh1);
			hpfs_free_sectors(s, se, 1);
			hpfs_free_sectors(s, na, 1);
			return -1;
		}
		brelse(bh);
		bh = bh1;
		btree = &anode->btree;
	}
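	/* Append the new one-sector extent at file position 'fs' to the leaf. */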
	btree->n_free_nodes--; n = btree->n_used_nodes++;
	le16_add_cpu(&btree->first_free, 12);
	btree->u.external[n].disk_secno = cpu_to_le32(se);
	btree->u.external[n].file_secno = cpu_to_le32(fs);
	btree->u.external[n].length = cpu_to_le32(1);
	mark_buffer_dirty(bh);
	brelse(bh);
	if ((a == node && fnod) || na == -1) return se;
	c2 = 0;
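	/*
	 * The new leaf 'na' is not linked into the tree yet: climb the 'up'
	 * chain and hook it into the first ancestor with a free slot.  Each
	 * full ancestor gets a new internal anode stacked on top of the
	 * detached subtree instead; if the root is reached without finding
	 * room, fall through to the root split below.
	 */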
	while (up != (anode_secno)-1) {
		struct anode *new_anode;
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
		if (up != node || !fnod) {
			if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
			btree = &anode->btree;
		} else {
			if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
			btree = &fnode->btree;
		}
		if (btree->n_free_nodes) {
			btree->n_free_nodes--; n = btree->n_used_nodes++;
			le16_add_cpu(&btree->first_free, 8);
			btree->u.internal[n].file_secno = cpu_to_le32(-1);
			btree->u.internal[n].down = cpu_to_le32(na);
			btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
			mark_buffer_dirty(bh);
			brelse(bh);
			brelse(bh2);
			hpfs_free_sectors(s, ra, 1);
			if ((anode = hpfs_map_anode(s, na, &bh))) {
				anode->up = cpu_to_le32(up);
				if (up == node && fnod)
					anode->btree.flags |= BP_fnode_parent;
				else
					anode->btree.flags &= ~BP_fnode_parent;
				mark_buffer_dirty(bh);
				brelse(bh);
			}
			return se;
		}
		up = up != node ? le32_to_cpu(anode->up) : -1;
		btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		a = na;
		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
			anode = new_anode;
			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
			anode->btree.flags |= BP_internal;
			anode->btree.n_used_nodes = 1;
			anode->btree.n_free_nodes = 59;
			anode->btree.first_free = cpu_to_le16(16);
			anode->btree.u.internal[0].down = cpu_to_le32(a);
			anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
			mark_buffer_dirty(bh);
			brelse(bh);
			if ((anode = hpfs_map_anode(s, a, &bh))) {
				anode->up = cpu_to_le32(na);
				mark_buffer_dirty(bh);
				brelse(bh);
			}
		} else na = a;
	}
	if ((anode = hpfs_map_anode(s, na, &bh))) {
		anode->up = cpu_to_le32(node);
		if (fnod)
			anode->btree.flags |= BP_fnode_parent;
		mark_buffer_dirty(bh);
		brelse(bh);
	}
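	/*
	 * No ancestor had room, so split the root: its current contents are
	 * copied into 'ranode', which becomes one child, and the root turns
	 * into an internal node with two entries pointing at ra and na.
	 */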
	if (!fnod) {
		if (!(anode = hpfs_map_anode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &anode->btree;
	} else {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &fnode->btree;
	}
	ranode->up = cpu_to_le32(node);
	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
	if (fnod)
		ranode->btree.flags |= BP_fnode_parent;
	ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
	if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
		struct anode *unode;
		if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
			unode->up = cpu_to_le32(ra);
			unode->btree.flags &= ~BP_fnode_parent;
			mark_buffer_dirty(bh1);
			brelse(bh1);
		}
	}
	btree->flags |= BP_internal;
	btree->n_free_nodes = fnod ? 10 : 58;
	btree->n_used_nodes = 2;
	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
	btree->u.internal[0].file_secno = cpu_to_le32(fs);
	btree->u.internal[0].down = cpu_to_le32(ra);
	btree->u.internal[1].file_secno = cpu_to_le32(-1);
	btree->u.internal[1].down = cpu_to_le32(na);
	mark_buffer_dirty(bh);
	brelse(bh);
	mark_buffer_dirty(bh2);
	brelse(bh2);
	return se;
}

/*
 * Remove allocation tree. Recursion would look much nicer but
 * I want to avoid it because it can cause stack overflow.
 */

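/*
 * Iterative depth-first walk: follow the down pointers to each leaf, free
 * the leaf's extents, then climb back via the anodes' up pointers, freeing
 * every anode once its subtree has been handled.
 */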
void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
{
	struct bplus_header *btree1 = btree;
	struct anode *anode = NULL;
	anode_secno ano = 0, oano;
	struct buffer_head *bh;
	int level = 0;
	int pos = 0;
	int i;
	int c1, c2 = 0;
	int d1, d2;
	go_down:
	d2 = 0;
	while (bp_internal(btree1)) {
		ano = le32_to_cpu(btree1->u.internal[pos].down);
		if (level) brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
				return;
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
		level++;
		pos = 0;
	}
	for (i = 0; i < btree1->n_used_nodes; i++)
		hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
	go_up:
	if (!level) return;
	brelse(bh);
	if (hpfs_sb(s)->sb_chk)
		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
	hpfs_free_sectors(s, ano, 1);
	oano = ano;
	ano = le32_to_cpu(anode->up);
	if (--level) {
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
	} else btree1 = btree;
	for (i = 0; i < btree1->n_used_nodes; i++) {
		if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
			if ((pos = i + 1) < btree1->n_used_nodes)
				goto go_down;
			else
				goto go_up;
		}
	}
	hpfs_error(s,
		   "reference to anode %08x not found in anode %08x "
		   "(probably bad up pointer)",
		   oano, level ? ano : -1);
	if (level)
		brelse(bh);
}

/* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */

static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
	return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
}

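/*
 * Read 'len' bytes of extended-attribute data into 'buf', starting at byte
 * offset 'pos'.  If 'ano' is set, 'a' is the root anode of the EA's
 * allocation tree and each 512-byte sector is found via anode_lookup();
 * otherwise the EA lives in consecutive sectors starting at 'a'.
 * Returns 0 on success, -1 on error.
 */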
int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
	    unsigned len, char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(buf, data + (pos & 0x1ff), l);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

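/*
 * Counterpart of hpfs_ea_read: the same sector-by-sector walk, but data is
 * copied from 'buf' into each mapped sector and the buffer is marked dirty.
 */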
int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
	     unsigned len, const char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(data + (pos & 0x1ff), buf, l);
		mark_buffer_dirty(bh);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

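/*
 * Free the disk space behind an extended attribute: tear down its anode
 * tree if it has one, otherwise free the consecutive run of sectors that
 * holds its 'len' bytes.
 */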
void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (ano) {
		if (!(anode = hpfs_map_anode(s, a, &bh))) return;
		hpfs_remove_btree(s, &anode->btree);
		brelse(bh);
		hpfs_free_sectors(s, a, 1);
	} else hpfs_free_sectors(s, a, (len + 511) >> 9);
}

/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */

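/*
 * Cut the allocation tree rooted at 'f' (an fnode if 'fno' is set, otherwise
 * an anode) down to its first 'secs' file sectors; everything beyond that is
 * freed.  secs == 0 empties the tree completely.
 */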
void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
	struct fnode *fnode;
	struct anode *anode;
	struct buffer_head *bh;
	struct bplus_header *btree;
	anode_secno node = f;
	int i, j, nodes;
	int c1, c2 = 0;
	if (fno) {
		if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, f, &bh))) return;
		btree = &anode->btree;
	}
	if (!secs) {
		hpfs_remove_btree(s, btree);
		if (fno) {
			btree->n_free_nodes = 8;
			btree->n_used_nodes = 0;
			btree->first_free = cpu_to_le16(8);
			btree->flags &= ~BP_internal;
			mark_buffer_dirty(bh);
		} else hpfs_free_sectors(s, f, 1);
		brelse(bh);
		return;
	}
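	/*
	 * Walk down the internal levels: keep the entry that covers the cut
	 * point, drop the subtrees to its right, and descend into it if it
	 * still needs trimming.
	 */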
	while (bp_internal(btree)) {
		nodes = btree->n_used_nodes + btree->n_free_nodes;
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
		brelse(bh);
		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
		return;
		f:
		for (j = i + 1; j < btree->n_used_nodes; j++)
			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
		btree->n_used_nodes = i + 1;
		btree->n_free_nodes = nodes - btree->n_used_nodes;
		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
		mark_buffer_dirty(bh);
		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
			brelse(bh);
			return;
		}
		node = le32_to_cpu(btree->u.internal[i].down);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
				return;
		if (!(anode = hpfs_map_anode(s, node, &bh))) return;
		btree = &anode->btree;
	}
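	/*
	 * At the leaf: shorten the extent that straddles 'secs' and free
	 * every extent past it.
	 */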
	nodes = btree->n_used_nodes + btree->n_free_nodes;
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
	brelse(bh);
	return;
	ff:
	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
		if (i) i--;
	}
	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
	}
	for (j = i + 1; j < btree->n_used_nodes; j++)
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
	btree->n_used_nodes = i + 1;
	btree->n_free_nodes = nodes - btree->n_used_nodes;
	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
	mark_buffer_dirty(bh);
	brelse(bh);
}

/* Remove file or directory and its eas - note that directory must
   be empty when this is called. */

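/*
 * Release the fnode's allocation tree (or directory dnode tree), its
 * indirectly stored and external EAs, and finally the fnode sector itself.
 */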
void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct extended_attribute *ea;
	struct extended_attribute *ea_end;
	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
	if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
	ea_end = fnode_end_ea(fnode);
	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
		if (ea_indirect(ea))
			hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
	brelse(bh);
	hpfs_free_sectors(s, fno, 1);
}