// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/dir.c
 *
 * Copyright (C) 1996
 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
 * Laboratory for Computer Science Research Computing Facility
 * Rutgers, The State University of New Jersey
 *
 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
 *
 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
 *
 * Migration to usage of "page cache" in May 2006 by
 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
 *
 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
 */
static inline int ufs_match(struct super_block *sb, int len,
		const unsigned char *name, struct ufs_dir_entry *de)
{
	if (len != ufs_get_de_namlen(sb, de))
		return 0;
	if (!de->d_ino)
		return 0;
	return !memcmp(name, de->d_name, len);
}

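/*
 * Commit a directory chunk previously set up with ufs_prepare_chunk():
 * bump the directory's i_version (so ufs_readdir() revalidates stale
 * f_pos values), let block_write_end() commit the written range,
 * extend i_size if we wrote past the current end, and unlock the page.
 */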
static void ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;

	inode_inc_iversion(dir);
	block_write_end(NULL, mapping, pos, len, len, page, NULL);
	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	unlock_page(page);
}

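/*
 * Write back the directory's data pages and, if that succeeded, its
 * inode metadata.  Called after each directory update in this file
 * (ufs_set_link, ufs_add_link, ufs_delete_entry, ufs_make_empty).
 */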
static int ufs_handle_dirsync(struct inode *dir)
{
	int err;

	err = filemap_write_and_wait(dir->i_mapping);
	if (!err)
		err = sync_inode_metadata(dir, 1);
	return err;
}

static inline void ufs_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}

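/*
 * Look up @qstr in directory @dir and return the inode number it maps
 * to, or 0 if no matching entry exists.  A thin wrapper around
 * ufs_find_entry() that also releases the page it returns.
 */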
ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
	ino_t res = 0;
	struct ufs_dir_entry *de;
	struct page *page;

	de = ufs_find_entry(dir, qstr, &page);
	if (de) {
		res = fs32_to_cpu(dir->i_sb, de->d_ino);
		ufs_put_page(page);
	}
	return res;
}


/* Releases the page */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
		  struct page *page, struct inode *inode,
		  bool update_times)
{
	loff_t pos = page_offset(page) +
			(char *) de - (char *) page_address(page);
	unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
	int err;

	lock_page(page);
	err = ufs_prepare_chunk(page, pos, len);
	BUG_ON(err);

	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
	ufs_set_de_type(dir->i_sb, de, inode->i_mode);

	ufs_commit_chunk(page, pos, len);
	ufs_put_page(page);
	if (update_times)
		dir->i_mtime = inode_set_ctime_current(dir);
	mark_inode_dirty(dir);
	ufs_handle_dirsync(dir);
}


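/*
 * Validate a freshly read directory page before anyone walks it:
 * every d_reclen must be at least UFS_DIR_REC_LEN(1), 4-byte aligned,
 * large enough for its own name, and confined to one s_dirblksize
 * chunk, and every d_ino must fit within the filesystem's inode range
 * (inodes per group times number of groups).  On success the page is
 * marked PageChecked so the work is done only once; on failure it is
 * marked PageError and ufs_get_page() returns -EIO.
 */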
static bool ufs_check_page(struct page *page)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	char *kaddr = page_address(page);
	unsigned offs, rec_len;
	unsigned limit = PAGE_SIZE;
	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
	struct ufs_dir_entry *p;
	char *error;

	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_MASK;
		if (limit & chunk_mask)
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct ufs_dir_entry *)(kaddr + offs);
		rec_len = fs16_to_cpu(sb, p->d_reclen);

		if (rec_len < UFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
			goto Espan;
		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
						  UFS_SB(sb)->s_uspi->s_ncg))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	ufs_error(sb, "ufs_check_page",
		  "size of directory #%lu is not a multiple of chunk size",
		  dir->i_ino
	);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
		   "offset=%lu, rec_len=%d, name_len=%d",
		   dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
		   rec_len, ufs_get_de_namlen(sb, p));
	goto fail;
Eend:
	p = (struct ufs_dir_entry *)(kaddr + offs);
	ufs_error(sb, __func__,
		   "entry in directory #%lu spans the page boundary, "
		   "offset=%lu",
		   dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
fail:
	SetPageError(page);
	return false;
}

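/*
 * Read directory page @n through the page cache and return it kmapped.
 * The first time a page is seen it is run through ufs_check_page();
 * corrupt pages are released and ERR_PTR(-EIO) is returned.  Callers
 * must drop the page with ufs_put_page() when they are done with it.
 */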
static struct page *ufs_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (unlikely(!PageChecked(page))) {
			if (!ufs_check_page(page))
				goto fail;
		}
	}
	return page;

fail:
	ufs_put_page(page);
	return ERR_PTR(-EIO);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_SHIFT;
	if (last_byte > PAGE_SIZE)
		last_byte = PAGE_SIZE;
	return last_byte;
}

static inline struct ufs_dir_entry *
ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
{
	return (struct ufs_dir_entry *)((char *)p +
					fs16_to_cpu(sb, p->d_reclen));
}

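/*
 * Return the ".." entry of @dir along with the page it lives in.
 * In a well-formed UFS directory "." is the first entry of block 0 and
 * ".." immediately follows it, so this simply skips one entry; beyond
 * ufs_check_page() no further validation is done here.
 */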
struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
{
	struct page *page = ufs_get_page(dir, 0);
	struct ufs_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = ufs_next_entry(dir->i_sb,
				    (struct ufs_dir_entry *)page_address(page));
		*p = page;
	}
	return de;
}

/*
 *	ufs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_page). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
				     struct page **res_page)
{
	struct super_block *sb = dir->i_sb;
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct ufs_inode_info *ui = UFS_I(dir);
	struct ufs_dir_entry *de;

	UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);

	if (npages == 0 || namelen > UFS_MAXNAMLEN)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	start = ui->i_dir_start_lookup;

	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ufs_get_page(dir, n);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (struct ufs_dir_entry *) kaddr;
			kaddr += ufs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (ufs_match(sb, namelen, name, de))
					goto found;
				de = ufs_next_entry(sb, de);
			}
			ufs_put_page(page);
		}
		if (++n >= npages)
			n = 0;
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	ui->i_dir_start_lookup = n;
	return de;
}

/*
 *	Parent is locked.
 */
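/*
 * Each chunk is scanned for either a free slot (d_ino == 0) whose
 * d_reclen is large enough, or a live entry with enough slack after
 * its own name to be split in two.  Roughly, splitting
 *
 *	[ "foo" d_reclen=500 ]
 *
 * to make room for a new entry yields
 *
 *	[ "foo" d_reclen=12 ][ "bar" d_reclen=488 ]
 *
 * (sizes here are only illustrative).  If no chunk has room, the loop
 * runs one page past i_size and creates a fresh chunk-sized free
 * entry, growing the directory.
 */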
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block *sb = dir->i_sb;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct ufs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	loff_t pos;
	int err;

	UFSD("ENTER, name %s, namelen %u\n", name, namelen);

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = ufs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + ufs_last_byte(dir, n);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += PAGE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->d_reclen = cpu_to_fs16(sb, chunk_size);
				de->d_ino = 0;
				goto got_it;
			}
			if (de->d_reclen == 0) {
				ufs_error(dir->i_sb, __func__,
					  "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ufs_match(sb, namelen, name, de))
				goto out_unlock;
			name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
			rec_len = fs16_to_cpu(sb, de->d_reclen);
			if (!de->d_ino && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct ufs_dir_entry *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ufs_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) +
			(char*)de - (char*)page_address(page);
	err = ufs_prepare_chunk(page, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->d_ino) {
		struct ufs_dir_entry *de1 =
			(struct ufs_dir_entry *) ((char *) de + name_len);
		de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
		de->d_reclen = cpu_to_fs16(sb, name_len);

		de = de1;
	}

	ufs_set_de_namlen(sb, de, namelen);
	memcpy(de->d_name, name, namelen + 1);
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);

	ufs_commit_chunk(page, pos, rec_len);
	dir->i_mtime = inode_set_ctime_current(dir);

	mark_inode_dirty(dir);
	err = ufs_handle_dirsync(dir);
	/* OFFSET_CACHE */
out_put:
	ufs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

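/*
 * Given a byte offset into a directory page, walk forward from the
 * start of its chunk until we land on a real entry boundary at or
 * beyond that offset.  Used by ufs_readdir() when the directory has
 * changed under us and the saved position may point into the middle
 * of an entry.
 */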
static inline unsigned
ufs_validate_entry(struct super_block *sb, char *base,
		   unsigned offset, unsigned mask)
{
	struct ufs_dir_entry *de = (struct ufs_dir_entry*)(base + offset);
	struct ufs_dir_entry *p = (struct ufs_dir_entry*)(base + (offset&mask));
	while ((char*)p < (char*)de)
		p = ufs_next_entry(sb, p);
	return (char *)p - base;
}


/*
 * This is blatantly stolen from ext2fs
 */
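/*
 * f_pos for a UFS directory is simply the byte offset of the next
 * entry.  If the directory was modified since the last call (detected
 * via i_version/f_version), the offset is first re-aligned to an entry
 * boundary with ufs_validate_entry().  On 4.4BSD-style filesystems the
 * on-disk entry carries a file type which is passed to dir_emit();
 * otherwise DT_UNKNOWN is reported.
 */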
static int
ufs_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	bool need_revalidate = !inode_eq_iversion(inode, file->f_version);
	unsigned flags = UFS_SB(sb)->s_flags;

	UFSD("BEGIN\n");

	if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct ufs_dir_entry *de;

		struct page *page = ufs_get_page(inode, n);

		if (IS_ERR(page)) {
			ufs_error(sb, __func__,
				  "bad page in #%lu",
				  inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return -EIO;
		}
		kaddr = page_address(page);
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
				ctx->pos = (n<<PAGE_SHIFT) + offset;
			}
			file->f_version = inode_query_iversion(inode);
			need_revalidate = false;
		}
		de = (struct ufs_dir_entry *)(kaddr+offset);
		limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
			if (de->d_ino) {
				unsigned char d_type = DT_UNKNOWN;

				UFSD("filldir(%s,%u)\n", de->d_name,
				      fs32_to_cpu(sb, de->d_ino));
				UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));

				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
					d_type = de->d_u.d_44.d_type;

				if (!dir_emit(ctx, de->d_name,
					       ufs_get_de_namlen(sb, de),
					       fs32_to_cpu(sb, de->d_ino),
					       d_type)) {
					ufs_put_page(page);
					return 0;
				}
			}
			ctx->pos += fs16_to_cpu(sb, de->d_reclen);
		}
		ufs_put_page(page);
	}
	return 0;
}


/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry.
 */
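/*
 * Sketch of the merge, with purely illustrative sizes: if a chunk holds
 *
 *	[ A d_reclen=12 ][ B d_reclen=20 ][ C d_reclen=480 ]
 *
 * and B is removed, A's d_reclen is widened to 32 so B's space is
 * reabsorbed, and B's d_ino is cleared.  When the victim is the first
 * entry of its chunk there is no previous entry to widen, so only
 * d_ino is cleared.
 */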
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		     struct page * page)
{
	struct super_block *sb = inode->i_sb;
	char *kaddr = page_address(page);
	unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
	loff_t pos;
	struct ufs_dir_entry *pde = NULL;
	struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
	int err;

	UFSD("ENTER\n");

	UFSD("ino %u, reclen %u, namlen %u, name %s\n",
	      fs32_to_cpu(sb, de->d_ino),
	      fs16_to_cpu(sb, de->d_reclen),
	      ufs_get_de_namlen(sb, de), de->d_name);

	while ((char*)de < (char*)dir) {
		if (de->d_reclen == 0) {
			ufs_error(inode->i_sb, __func__,
				  "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ufs_next_entry(sb, de);
	}
	if (pde)
		from = (char*)pde - (char*)page_address(page);

	pos = page_offset(page) + from;
	lock_page(page);
	err = ufs_prepare_chunk(page, pos, to - from);
	BUG_ON(err);
	if (pde)
		pde->d_reclen = cpu_to_fs16(sb, to - from);
	dir->d_ino = 0;
	ufs_commit_chunk(page, pos, to - from);
	inode->i_mtime = inode_set_ctime_current(inode);
	mark_inode_dirty(inode);
	err = ufs_handle_dirsync(inode);
out:
	ufs_put_page(page);
	UFSD("EXIT\n");
	return err;
}

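/*
 * Fill in block 0 of a brand new directory: a "." entry pointing at
 * @inode followed by a ".." entry pointing at the parent @dir, with
 * ".." owning the rest of the first chunk.
 */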
int ufs_make_empty(struct inode * inode, struct inode *dir)
{
	struct super_block * sb = dir->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	struct ufs_dir_entry * de;
	char *base;
	int err;

	if (!page)
		return -ENOMEM;

	err = ufs_prepare_chunk(page, 0, chunk_size);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kmap(page);
	base = (char*)page_address(page);
	memset(base, 0, PAGE_SIZE);

	de = (struct ufs_dir_entry *) base;

	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);
	ufs_set_de_namlen(sb, de, 1);
	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
	strcpy (de->d_name, ".");
	de = (struct ufs_dir_entry *)
		((char *)de + fs16_to_cpu(sb, de->d_reclen));
	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
	ufs_set_de_type(sb, de, dir->i_mode);
	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
	ufs_set_de_namlen(sb, de, 2);
	strcpy (de->d_name, "..");
	kunmap(page);

	ufs_commit_chunk(page, 0, chunk_size);
	err = ufs_handle_dirsync(inode);
fail:
	put_page(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ufs_empty_dir(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct ufs_dir_entry *de;
		page = ufs_get_page(inode, i);

		if (IS_ERR(page))
			continue;

		kaddr = page_address(page);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->d_reclen == 0) {
				ufs_error(inode->i_sb, __func__,
					"zero-length directory entry: "
					"kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->d_ino) {
				u16 namelen=ufs_get_de_namlen(sb, de);
				/* check for . and .. */
				if (de->d_name[0] != '.')
					goto not_empty;
				if (namelen > 2)
					goto not_empty;
				if (namelen < 2) {
					if (inode->i_ino !=
					    fs32_to_cpu(sb, de->d_ino))
						goto not_empty;
				} else if (de->d_name[1] != '.')
					goto not_empty;
			}
			de = ufs_next_entry(sb, de);
		}
		ufs_put_page(page);
	}
	return 1;

not_empty:
	ufs_put_page(page);
	return 0;
}

const struct file_operations ufs_dir_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= ufs_readdir,
	.fsync		= generic_file_fsync,
	.llseek		= generic_file_llseek,
};
656