/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memcpy(buf, kmap(*pagep) + off, l);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min(len, (int)PAGE_CACHE_SIZE);
		memcpy(buf, kmap(*++pagep), l);
		kunmap(*pagep);
	}
}

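/*
 * Read a big-endian 16-bit value from a node and return it in CPU
 * byte order.
 */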
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

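/* Read a single byte from a node */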
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

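/*
 * Read a key from a node into the caller's buffer.  Leaf records,
 * trees with variable-size index keys and the attributes tree store
 * an explicit big-endian key length in the first two bytes, so the
 * copy covers that length plus the length field itself; all other
 * index keys have the fixed size max_key_len.
 */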
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	hfs_bnode_read(node, key, off, key_len);
}

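/*
 * Copy a range of bytes from a caller buffer into the raw data of a
 * node, dirtying each page touched so that it gets written back.
 */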
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memcpy(kmap(*pagep) + off, buf, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min(len, (int)PAGE_CACHE_SIZE);
		memcpy(kmap(*++pagep), buf, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

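/* Write a 16-bit value to a node, converting it to big-endian on disk */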
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

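/* Zero a range of bytes in the raw data of a node */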
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memset(kmap(*pagep) + off, 0, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		l = min(len, (int)PAGE_CACHE_SIZE);
		memset(kmap(*++pagep), 0, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

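/*
 * Copy a range of bytes between two nodes.  If source and destination
 * start at the same offset within a page, the copy can proceed a page
 * at a time; otherwise it advances in chunks bounded by whichever page
 * boundary comes first.
 */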
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
	src &= ~PAGE_CACHE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
	dst &= ~PAGE_CACHE_MASK;

	if (src == dst) {
		l = min(len, (int)PAGE_CACHE_SIZE - src);
		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
		kunmap(*src_page);
		set_page_dirty(*dst_page);
		kunmap(*dst_page);

		while ((len -= l) != 0) {
			l = min(len, (int)PAGE_CACHE_SIZE);
			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			src_ptr = kmap(*src_page) + src;
			dst_ptr = kmap(*dst_page) + dst;
			if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
				l = PAGE_CACHE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_CACHE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

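/*
 * Move a range of bytes within a node with memmove() semantics: when
 * the destination lies above the source the copy walks backwards from
 * the end of the range so overlapping bytes are not clobbered.
 */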
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src = (src & ~PAGE_CACHE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst = (dst & ~PAGE_CACHE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				memmove(kmap(*dst_page), kmap(*src_page), src);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				len -= src;
				src = PAGE_CACHE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			memmove(kmap(*dst_page) + src,
				kmap(*src_page) + src, len);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (src < dst) {
					l = src;
					src = PAGE_CACHE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_CACHE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (dst == PAGE_CACHE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src &= ~PAGE_CACHE_MASK;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst &= ~PAGE_CACHE_MASK;

		if (src == dst) {
			l = min(len, (int)PAGE_CACHE_SIZE - src);
			memmove(kmap(*dst_page) + src,
				kmap(*src_page) + src, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);

			while ((len -= l) != 0) {
				l = min(len, (int)PAGE_CACHE_SIZE);
				memmove(kmap(*++dst_page),
					kmap(*++src_page), l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
			}
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (PAGE_CACHE_SIZE - src <
						PAGE_CACHE_SIZE - dst) {
					l = PAGE_CACHE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_CACHE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

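/*
 * Dump a node's descriptor and record offset table to the debug log.
 * The offset table grows down from the end of the node; for index
 * records the key length and the following child node id are printed
 * as well.
 */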
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
					node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

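/*
 * Unlink a node from the sibling chain at its level, updating the
 * on-disk prev/next pointers of its neighbours (or the tree's
 * leaf_head/leaf_tail for leaf nodes) and marking it deleted so that
 * hfs_bnode_put() will release it.
 */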
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

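/* Fold a node id into an index into the node hash table */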
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

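/*
 * Look up a node in the in-memory hash table.  The caller is expected
 * to hold tree->hash_lock; returns NULL if the node is not cached.
 */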
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
			node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

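/*
 * Allocate a node, insert it into the hash table and read its pages
 * from the tree inode's page cache.  If another task is already
 * creating the same node, the duplicate is freed and the existing
 * node is returned once it is fully set up.  On read failure the
 * node is returned with HFS_BNODE_ERROR set.
 */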
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_CACHE_SHIFT;
	node->page_offset = off & ~PAGE_CACHE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			page_cache_release(page);
			goto fail;
		}
		page_cache_release(page);
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

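/* Remove a node from the hash table; called with tree->hash_lock held */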
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
			node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

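	/*
	 * Sanity check the record offset table: offsets must be strictly
	 * increasing, 2-byte aligned and within the node, and each key
	 * must fit inside its record.
	 */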
	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

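/*
 * Free the in-memory node.  The page references are not dropped here
 * (note the #if 0): __hfs_bnode_create() already released them and
 * relies on the page cache to keep the pages alive.
 */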
void hfs_bnode_free(struct hfs_bnode *node)
{
#if 0
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			page_cache_release(node->page[i]);
#endif
	kfree(node);
}

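/*
 * Create a brand new node: it must not be hashed yet, and instead of
 * being read from disk its contents are simply zeroed.
 */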
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

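/* Take an extra reference on a node */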
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bnode_clear(node, 0,
				PAGE_CACHE_SIZE * tree->pages_per_bnode);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}