// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy(buf, kmap(*pagep) + off, l);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy(buf, kmap(*++pagep), l);
		kunmap(*pagep);
	}
}

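/* Read a big-endian 16-bit value from a node and return it in CPU order */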
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

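/* Read a single byte from a node */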
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

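/*
 * Read a complete key from a node.  Leaf keys, variable-size index
 * keys and attribute-tree keys carry a 16-bit length prefix; for all
 * other index keys the tree's fixed max_key_len is used.
 */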
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	hfs_bnode_read(node, key, off, key_len);
}

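/* Copy a specified range of bytes into the raw data of a node */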
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy(kmap(*pagep) + off, buf, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy(kmap(*++pagep), buf, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

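/* Write a 16-bit value to a node in big-endian byte order */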
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

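/* Zero a specified range of bytes in the raw data of a node */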
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memset(kmap(*pagep) + off, 0, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		l = min_t(int, len, PAGE_SIZE);
		memset(kmap(*++pagep), 0, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

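/*
 * Copy a range of bytes from one node to another.  If source and
 * destination share the same offset within a page the copy runs one
 * page at a time; otherwise it advances by whichever page-sized
 * chunk ends first.
 */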
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		l = min_t(int, len, PAGE_SIZE - src);
		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
		kunmap(*src_page);
		set_page_dirty(*dst_page);
		kunmap(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			src_ptr = kmap(*src_page) + src;
			dst_ptr = kmap(*dst_page) + dst;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

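/*
 * Move a range of bytes within a node, with memmove semantics: when
 * the destination lies above an overlapping source the copy is done
 * backwards, starting from the end of the range.
 */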
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				memmove(kmap(*dst_page), kmap(*src_page), src);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			memmove(kmap(*dst_page) + src,
				kmap(*src_page) + src, len);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(int, len, PAGE_SIZE - src);
			memmove(kmap(*dst_page) + src,
				kmap(*src_page) + src, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);

			while ((len -= l) != 0) {
				l = min_t(int, len, PAGE_SIZE);
				memmove(kmap(*++dst_page),
					kmap(*++src_page), l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
			}
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (PAGE_SIZE - src <
						PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

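/* Dump a node's descriptor and record offset table for debugging */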
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
					node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

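/*
 * Unlink a node from its siblings, rewriting the on-disk prev/next
 * links and updating the tree's leaf head/tail where necessary.
 */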
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

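/* Fold a 32-bit node number into an index into the node hash table */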
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

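/* Look up a node in the hash table by node number */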
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %u in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
			node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

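/*
 * Allocate a node, hash it and read its pages from the tree inode's
 * mapping.  If another caller hashed the same cnid first, wait until
 * that copy has finished loading and return it instead.
 */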
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %u in B*Tree\n",
		       cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

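/* Drop a node from the hash table; tree->hash_lock must be held */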
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
			node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

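/* Release the pages backing a node and free the node itself */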
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

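/*
 * Create a brand-new node and zero its contents.  The node number is
 * not expected to be hashed yet; if it is, that is reported as a bug
 * and the existing node returned.
 */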
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

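/* Take an extra reference to a node */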
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return tree->cnid == HFSPLUS_CAT_CNID &&
		volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}