xref: /openbmc/linux/fs/ufs/dir.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
/*
 *  linux/fs/ufs/ufs_dir.c
 *
 * Copyright (C) 1996
 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
 * Laboratory for Computer Science Research Computing Facility
 * Rutgers, The State University of New Jersey
 *
 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
 *
 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
 *
 * Migration to usage of "page cache" on May 2006 by
 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/ufs_fs.h>

#include "swab.h"
#include "util.h"

/*
 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
 *
 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
 */
static inline int ufs_match(struct super_block *sb, int len,
		const char * const name, struct ufs_dir_entry * de)
{
	if (len != ufs_get_de_namlen(sb, de))
		return 0;
	if (!de->d_ino)
		return 0;
	return !memcmp(name, de->d_name, len);
}

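/*
 * Commit a range of a directory page that was prepared with
 * ->prepare_write(): bump the directory's i_version, then either write
 * the page out synchronously (for IS_DIRSYNC directories) or simply
 * unlock it.
 */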
static int ufs_commit_chunk(struct page *page, unsigned from, unsigned to)
{
	struct inode *dir = page->mapping->host;
	int err = 0;
	dir->i_version++;
	page->mapping->a_ops->commit_write(NULL, page, from, to);
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}

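/* Undo ufs_get_page(): drop the kmap and the page cache reference. */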
static inline void ufs_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

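/* Number of page cache pages spanned by the directory's i_size. */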
static inline unsigned long ufs_dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

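/*
 * Look up an entry by name and return its inode number, or 0 if the
 * name is not present in the directory.
 */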
ino_t ufs_inode_by_name(struct inode *dir, struct dentry *dentry)
{
	ino_t res = 0;
	struct ufs_dir_entry *de;
	struct page *page;

	de = ufs_find_entry(dir, dentry, &page);
	if (de) {
		res = fs32_to_cpu(dir->i_sb, de->d_ino);
		ufs_put_page(page);
	}
	return res;
}


/* Releases the page */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
		  struct page *page, struct inode *inode)
{
	unsigned from = (char *) de - (char *) page_address(page);
	unsigned to = from + fs16_to_cpu(dir->i_sb, de->d_reclen);
	int err;

	lock_page(page);
	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
	BUG_ON(err);
	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
	ufs_set_de_type(dir->i_sb, de, inode->i_mode);
	err = ufs_commit_chunk(page, from, to);
	ufs_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}


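/*
 * Sanity-check every directory entry on a freshly read page: record
 * lengths must be non-zero, 4-byte aligned, large enough for the name,
 * and must not cross a directory chunk (s_dirblksize) boundary, and
 * inode numbers must be within the filesystem's range.  On success the
 * page is marked PageChecked; on failure an error is logged and
 * PageError is set.
 */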
static void ufs_check_page(struct page *page)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	char *kaddr = page_address(page);
	unsigned offs, rec_len;
	unsigned limit = PAGE_CACHE_SIZE;
	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
	struct ufs_dir_entry *p;
	char *error;

	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_CACHE_MASK;
		if (limit & chunk_mask)
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct ufs_dir_entry *)(kaddr + offs);
		rec_len = fs16_to_cpu(sb, p->d_reclen);

		if (rec_len < UFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
			goto Espan;
		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
						  UFS_SB(sb)->s_uspi->s_ncg))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return;

	/* Too bad, we had an error */

Ebadsize:
	ufs_error(sb, "ufs_check_page",
		  "size of directory #%lu is not a multiple of chunk size",
		  dir->i_ino
	);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
		   "offset=%lu, rec_len=%d, name_len=%d",
		   dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
		   rec_len, ufs_get_de_namlen(sb, p));
	goto fail;
Eend:
	p = (struct ufs_dir_entry *)(kaddr + offs);
	ufs_error (sb, "ufs_check_page",
		   "entry in directory #%lu spans the page boundary, "
		   "offset=%lu",
		   dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
fail:
	SetPageChecked(page);
	SetPageError(page);
}

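/*
 * Read directory page 'n' through the page cache, kmap() it, and
 * validate it with ufs_check_page() on first use.  Returns the mapped
 * page or an ERR_PTR on failure; release the result with ufs_put_page().
 */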
static struct page *ufs_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (!PageChecked(page))
			ufs_check_page(page);
		if (PageError(page))
			goto fail;
	}
	return page;

fail:
	ufs_put_page(page);
	return ERR_PTR(-EIO);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_CACHE_SHIFT;
	if (last_byte > PAGE_CACHE_SIZE)
		last_byte = PAGE_CACHE_SIZE;
	return last_byte;
}

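/* Step to the following entry using the on-disk record length. */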
static inline struct ufs_dir_entry *
ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
{
	return (struct ufs_dir_entry *)((char *)p +
					fs16_to_cpu(sb, p->d_reclen));
}

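/*
 * Return the entry following "." on the first directory page, i.e. the
 * ".." entry.  On success *p holds the page, which the caller must
 * release with ufs_put_page().
 */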
struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
{
	struct page *page = ufs_get_page(dir, 0);
	struct ufs_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = ufs_next_entry(dir->i_sb,
				    (struct ufs_dir_entry *)page_address(page));
		*p = page;
	}
	return de;
}

/*
 *	ufs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as the res_page parameter). The page is returned mapped and unlocked.
 * The entry is guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry,
				     struct page **res_page)
{
	struct super_block *sb = dir->i_sb;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = ufs_dir_pages(dir);
	struct page *page = NULL;
	struct ufs_inode_info *ui = UFS_I(dir);
	struct ufs_dir_entry *de;

	UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);

	if (npages == 0 || namelen > UFS_MAXNAMLEN)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	start = ui->i_dir_start_lookup;

	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ufs_get_page(dir, n);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (struct ufs_dir_entry *) kaddr;
			kaddr += ufs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (de->d_reclen == 0) {
					ufs_error(dir->i_sb, __FUNCTION__,
						  "zero-length directory entry");
					ufs_put_page(page);
					goto out;
				}
				if (ufs_match(sb, namelen, name, de))
					goto found;
				de = ufs_next_entry(sb, de);
			}
			ufs_put_page(page);
		}
		if (++n >= npages)
			n = 0;
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	ui->i_dir_start_lookup = n;
	return de;
}

/*
 *	Parent is locked.
 */
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block *sb = dir->i_sb;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct ufs_dir_entry *de;
	unsigned long npages = ufs_dir_pages(dir);
	unsigned long n;
	char *kaddr;
	unsigned from, to;
	int err;

	UFSD("ENTER, name %s, namelen %u\n", name, namelen);

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = ufs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + ufs_last_byte(dir, n);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += PAGE_CACHE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->d_reclen = cpu_to_fs16(sb, chunk_size);
				de->d_ino = 0;
				goto got_it;
			}
			if (de->d_reclen == 0) {
				ufs_error(dir->i_sb, __FUNCTION__,
					  "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ufs_match(sb, namelen, name, de))
				goto out_unlock;
			/*
			 * name_len is the record length actually needed by
			 * the existing entry's name; any slack beyond it may
			 * hold the new entry.
			 */
			name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
			rec_len = fs16_to_cpu(sb, de->d_reclen);
			if (!de->d_ino && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct ufs_dir_entry *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ufs_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	from = (char*)de - (char*)page_address(page);
	to = from + rec_len;
	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
	if (err)
		goto out_unlock;
	if (de->d_ino) {
		/* Split the live entry: trim it to name_len, use the slack. */
		struct ufs_dir_entry *de1 =
			(struct ufs_dir_entry *) ((char *) de + name_len);
		de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
		de->d_reclen = cpu_to_fs16(sb, name_len);

		de = de1;
	}

	ufs_set_de_namlen(sb, de, namelen);
	memcpy(de->d_name, name, namelen + 1);
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);

	err = ufs_commit_chunk(page, from, to);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;

	mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	ufs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

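/*
 * Given an arbitrary offset into a directory page, back up to the start
 * of its chunk and walk forward to the first entry boundary at or after
 * that offset, so a stale f_pos can be re-aligned after the directory
 * has changed.
 */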
static inline unsigned
ufs_validate_entry(struct super_block *sb, char *base,
		   unsigned offset, unsigned mask)
{
	struct ufs_dir_entry *de = (struct ufs_dir_entry*)(base + offset);
	struct ufs_dir_entry *p = (struct ufs_dir_entry*)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		if (p->d_reclen == 0)
			break;
		p = ufs_next_entry(sb, p);
	}
	return (char *)p - base;
}



/*
 * This is blatantly stolen from ext2fs
 */
static int
ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	loff_t pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = ufs_dir_pages(inode);
	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	int need_revalidate = filp->f_version != inode->i_version;
	unsigned flags = UFS_SB(sb)->s_flags;

	UFSD("BEGIN\n");

	if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct ufs_dir_entry *de;

		struct page *page = ufs_get_page(inode, n);

		if (IS_ERR(page)) {
			ufs_error(sb, __FUNCTION__,
				  "bad page in #%lu",
				  inode->i_ino);
			filp->f_pos += PAGE_CACHE_SIZE - offset;
			return -EIO;
		}
		kaddr = page_address(page);
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
				filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
			}
			filp->f_version = inode->i_version;
			need_revalidate = 0;
		}
		de = (struct ufs_dir_entry *)(kaddr+offset);
		limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
			if (de->d_reclen == 0) {
				ufs_error(sb, __FUNCTION__,
					"zero-length directory entry");
				ufs_put_page(page);
				return -EIO;
			}
			if (de->d_ino) {
				int over;
				unsigned char d_type = DT_UNKNOWN;

				offset = (char *)de - kaddr;

				UFSD("filldir(%s,%u)\n", de->d_name,
				      fs32_to_cpu(sb, de->d_ino));
				UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));

				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
					d_type = de->d_u.d_44.d_type;

				over = filldir(dirent, de->d_name,
					       ufs_get_de_namlen(sb, de),
						(n<<PAGE_CACHE_SHIFT) | offset,
					       fs32_to_cpu(sb, de->d_ino), d_type);
				if (over) {
					ufs_put_page(page);
					return 0;
				}
			}
			filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
		}
		ufs_put_page(page);
	}
	return 0;
}


/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry.
 */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		     struct page * page)
{
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = page->mapping;
	char *kaddr = page_address(page);
	unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
	struct ufs_dir_entry *pde = NULL;
	struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
	int err;

	UFSD("ENTER\n");

	UFSD("ino %u, reclen %u, namlen %u, name %s\n",
	      fs32_to_cpu(sb, de->d_ino),
	      fs16_to_cpu(sb, de->d_reclen),
	      ufs_get_de_namlen(sb, de), de->d_name);

	while ((char*)de < (char*)dir) {
		if (de->d_reclen == 0) {
			ufs_error(inode->i_sb, __FUNCTION__,
				  "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ufs_next_entry(sb, de);
	}
	if (pde)
		from = (char*)pde - (char*)page_address(page);
	lock_page(page);
	err = mapping->a_ops->prepare_write(NULL, page, from, to);
	BUG_ON(err);
	if (pde)
		pde->d_reclen = cpu_to_fs16(sb, to-from);
	dir->d_ino = 0;
	err = ufs_commit_chunk(page, from, to);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	ufs_put_page(page);
	UFSD("EXIT\n");
	return err;
}

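/*
 * Write the initial "." and ".." entries into the first chunk of a
 * freshly created directory inode.
 */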
int ufs_make_empty(struct inode * inode, struct inode *dir)
{
	struct super_block * sb = dir->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	struct ufs_dir_entry * de;
	char *base;
	int err;

	if (!page)
		return -ENOMEM;
	kmap(page);
	err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	base = (char*)page_address(page);
	memset(base, 0, PAGE_CACHE_SIZE);

	de = (struct ufs_dir_entry *) base;

	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);
	ufs_set_de_namlen(sb, de, 1);
	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
	strcpy (de->d_name, ".");
	de = (struct ufs_dir_entry *)
		((char *)de + fs16_to_cpu(sb, de->d_reclen));
	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
	ufs_set_de_type(sb, de, dir->i_mode);
	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
	ufs_set_de_namlen(sb, de, 2);
	strcpy (de->d_name, "..");

	err = ufs_commit_chunk(page, 0, chunk_size);
fail:
	kunmap(page);
	page_cache_release(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ufs_empty_dir(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct page *page = NULL;
	unsigned long i, npages = ufs_dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct ufs_dir_entry *de;
		page = ufs_get_page(inode, i);

		if (IS_ERR(page))
			continue;

		kaddr = page_address(page);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->d_reclen == 0) {
				ufs_error(inode->i_sb, __FUNCTION__,
					"zero-length directory entry: "
					"kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->d_ino) {
				u16 namelen = ufs_get_de_namlen(sb, de);
				/* check for . and .. */
				if (de->d_name[0] != '.')
					goto not_empty;
				if (namelen > 2)
					goto not_empty;
				if (namelen < 2) {
					if (inode->i_ino !=
					    fs32_to_cpu(sb, de->d_ino))
						goto not_empty;
				} else if (de->d_name[1] != '.')
					goto not_empty;
			}
			de = ufs_next_entry(sb, de);
		}
		ufs_put_page(page);
	}
	return 1;

not_empty:
	ufs_put_page(page);
	return 0;
}

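/*
 * Directory file operations: readdir() via the page cache walker above,
 * fsync() via file_fsync(), and read() rejected by generic_read_dir().
 */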
const struct file_operations ufs_dir_operations = {
	.read		= generic_read_dir,
	.readdir	= ufs_readdir,
	.fsync		= file_fsync,
};