// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext2 directory handling functions
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */

#include "ext2.h"
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/iversion.h>

typedef struct ext2_dir_entry_2 ext2_dirent;

/*
 * Tests against MAX_REC_LEN etc were put in place for 64k block
 * sizes; if that is not possible on this arch, we can skip
 * those tests and speed things up.
 */
static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
{
	unsigned len = le16_to_cpu(dlen);

#if (PAGE_SIZE >= 65536)
	if (len == EXT2_MAX_REC_LEN)
		return 1 << 16;
#endif
	return len;
}

static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
#if (PAGE_SIZE >= 65536)
	if (len == (1 << 16))
		return cpu_to_le16(EXT2_MAX_REC_LEN);
	else
		BUG_ON(len > (1 << 16));
#endif
	return cpu_to_le16(len);
}

/*
 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */
static inline unsigned ext2_chunk_size(struct inode *inode)
{
	return inode->i_sb->s_blocksize;
}
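
/*
 * Illustrative note on the helpers above: with 64KiB blocks a single empty
 * directory chunk needs rec_len == 65536, which does not fit in the on-disk
 * __le16 field.  On configurations with PAGE_SIZE >= 65536 that one value is
 * therefore stored as EXT2_MAX_REC_LEN and decoded back, e.g.
 *
 *	ext2_rec_len_to_disk(1 << 16) == cpu_to_le16(EXT2_MAX_REC_LEN)
 *	ext2_rec_len_from_disk(cpu_to_le16(EXT2_MAX_REC_LEN)) == 1 << 16
 *
 * On architectures where PAGE_SIZE < 65536 the special case is compiled out,
 * since the block size can never reach 64KiB there.
 */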

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ext2_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_SHIFT;
	if (last_byte > PAGE_SIZE)
		last_byte = PAGE_SIZE;
	return last_byte;
}

static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;

	inode_inc_iversion(dir);
	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}

	if (IS_DIRSYNC(dir)) {
		err = write_one_page(page);
		if (!err)
			err = sync_inode_metadata(dir, 1);
	} else {
		unlock_page(page);
	}

	return err;
}

static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned chunk_size = ext2_chunk_size(dir);
	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
	unsigned offs, rec_len;
	unsigned limit = PAGE_SIZE;
	ext2_dirent *p;
	char *error;

	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_MASK;
		if (limit & (chunk_size - 1))
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
		p = (ext2_dirent *)(kaddr + offs);
		rec_len = ext2_rec_len_from_disk(p->rec_len);

		if (unlikely(rec_len < EXT2_DIR_REC_LEN(1)))
			goto Eshort;
		if (unlikely(rec_len & 3))
			goto Ealign;
		if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len)))
			goto Enamelen;
		if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)))
			goto Espan;
		if (unlikely(le32_to_cpu(p->inode) > max_inumber))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	if (!quiet)
		ext2_error(sb, __func__,
			"size of directory #%lu is not a multiple "
			"of chunk size", dir->i_ino);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	if (!quiet)
		ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
			dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
			(unsigned long) le32_to_cpu(p->inode),
			rec_len, p->name_len);
	goto fail;
Eend:
	if (!quiet) {
		p = (ext2_dirent *)(kaddr + offs);
		ext2_error(sb, "ext2_check_page",
			"entry in directory #%lu spans the page boundary"
			"offset=%lu, inode=%lu",
			dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
			(unsigned long) le32_to_cpu(p->inode));
	}
fail:
	SetPageError(page);
	return false;
}
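
/*
 * A page that passes ext2_check_page() is marked PageChecked and is not
 * re-validated on later reads.  Every record in such a page then satisfies
 * the invariants the readers below rely on: rec_len is a multiple of 4, at
 * least EXT2_DIR_REC_LEN(1), large enough for name_len, does not cross a
 * block-sized chunk boundary, and the inode number is no larger than
 * s_inodes_count.
 */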

/*
 * Calls to ext2_get_page()/ext2_put_page() must be nested according to the
 * rules documented in kmap_local_page()/kunmap_local().
 *
 * NOTE: ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page()
 * and should be treated as a call to ext2_get_page() for nesting purposes.
 */
static struct page * ext2_get_page(struct inode *dir, unsigned long n,
				   int quiet, void **page_addr)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		*page_addr = kmap_local_page(page);
		if (unlikely(!PageChecked(page))) {
			if (PageError(page) || !ext2_check_page(page, quiet,
								*page_addr))
				goto fail;
		}
	}
	return page;

fail:
	ext2_put_page(page, *page_addr);
	return ERR_PTR(-EIO);
}

/*
 * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
 *
 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
 */
static inline int ext2_match (int len, const char * const name,
					struct ext2_dir_entry_2 * de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * p is at least 6 bytes before the end of page
 */
static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
{
	return (ext2_dirent *)((char *)p +
			ext2_rec_len_from_disk(p->rec_len));
}

static inline unsigned
ext2_validate_entry(char *base, unsigned offset, unsigned mask)
{
	ext2_dirent *de = (ext2_dirent*)(base + offset);
	ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		if (p->rec_len == 0)
			break;
		p = ext2_next_entry(p);
	}
	return (char *)p - base;
}

static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
{
	if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
		de->file_type = fs_umode_to_ftype(inode->i_mode);
	else
		de->file_type = 0;
}

static int
ext2_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
	bool need_revalidate = !inode_eq_iversion(inode, file->f_version);
	bool has_filetype;

	if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
		return 0;

	has_filetype =
		EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE);

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		ext2_dirent *de;
		struct page *page = ext2_get_page(inode, n, 0, (void **)&kaddr);

		if (IS_ERR(page)) {
			ext2_error(sb, __func__,
				   "bad page in #%lu",
				   inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return PTR_ERR(page);
		}
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
				ctx->pos = (n<<PAGE_SHIFT) + offset;
			}
			file->f_version = inode_query_iversion(inode);
			need_revalidate = false;
		}
		de = (ext2_dirent *)(kaddr+offset);
		limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
			if (de->rec_len == 0) {
				ext2_error(sb, __func__,
					"zero-length directory entry");
				ext2_put_page(page, kaddr);
				return -EIO;
			}
			if (de->inode) {
				unsigned char d_type = DT_UNKNOWN;

				if (has_filetype)
					d_type = fs_ftype_to_dtype(de->file_type);

				if (!dir_emit(ctx, de->name, de->name_len,
						le32_to_cpu(de->inode),
						d_type)) {
					ext2_put_page(page, kaddr);
					return 0;
				}
			}
			ctx->pos += ext2_rec_len_from_disk(de->rec_len);
		}
		ext2_put_page(page, kaddr);
	}
	return 0;
}

/*
 *	ext2_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found (as a parameter - res_page),
 * and the entry itself. Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 *
 * On Success ext2_put_page() should be called on *res_page.
 *
 * NOTE: Calls to ext2_get_page()/ext2_put_page() must be nested according to
 * the rules documented in kmap_local_page()/kunmap_local().
 *
 * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page() and
 * should be treated as a call to ext2_get_page() for nesting purposes.
 */
struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir,
			const struct qstr *child, struct page **res_page,
			void **res_page_addr)
{
	const char *name = child->name;
	int namelen = child->len;
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct ext2_inode_info *ei = EXT2_I(dir);
	ext2_dirent * de;
	void *page_addr;

	if (npages == 0)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;
	*res_page_addr = NULL;

	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ext2_get_page(dir, n, 0, &page_addr);
		if (IS_ERR(page))
			return ERR_CAST(page);

		kaddr = page_addr;
		de = (ext2_dirent *) kaddr;
		kaddr += ext2_last_byte(dir, n) - reclen;
		while ((char *) de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				ext2_put_page(page, page_addr);
				goto out;
			}
			if (ext2_match(namelen, name, de))
				goto found;
			de = ext2_next_entry(de);
		}
		ext2_put_page(page, page_addr);

		if (++n >= npages)
			n = 0;
		/* next page is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
			ext2_error(dir->i_sb, __func__,
				"dir %lu size %lld exceeds block count %llu",
				dir->i_ino, dir->i_size,
				(unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return ERR_PTR(-ENOENT);

found:
	*res_page = page;
	*res_page_addr = page_addr;
	ei->i_dir_start_lookup = n;
	return de;
}
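
/*
 * Note: the search above starts at ei->i_dir_start_lookup, the page on which
 * the previous successful lookup ended, and wraps around the directory, so
 * repeated lookups in a large directory tend to hit on the first page
 * examined.  ext2_inode_by_name() below shows the usual calling pattern:
 * find the entry, copy out the field that is needed, then ext2_put_page().
 */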

/**
 * Return the '..' directory entry and the page in which the entry was found
 * (as a parameter - p).
 *
 * On Success ext2_put_page() should be called on *p.
 *
 * NOTE: Calls to ext2_get_page()/ext2_put_page() must be nested according to
 * the rules documented in kmap_local_page()/kunmap_local().
 *
 * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page() and
 * should be treated as a call to ext2_get_page() for nesting purposes.
 */
struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p,
				     void **pa)
{
	void *page_addr;
	struct page *page = ext2_get_page(dir, 0, 0, &page_addr);
	ext2_dirent *de = NULL;

	if (!IS_ERR(page)) {
		de = ext2_next_entry((ext2_dirent *) page_addr);
		*p = page;
		*pa = page_addr;
	}
	return de;
}

int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)
{
	struct ext2_dir_entry_2 *de;
	struct page *page;
	void *page_addr;

	de = ext2_find_entry(dir, child, &page, &page_addr);
	if (IS_ERR(de))
		return PTR_ERR(de);

	*ino = le32_to_cpu(de->inode);
	ext2_put_page(page, page_addr);
	return 0;
}

static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ext2_get_block);
}

void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
		   struct page *page, void *page_addr, struct inode *inode,
		   int update_times)
{
	loff_t pos = page_offset(page) +
			(char *) de - (char *) page_addr;
	unsigned len = ext2_rec_len_from_disk(de->rec_len);
	int err;

	lock_page(page);
	err = ext2_prepare_chunk(page, pos, len);
	BUG_ON(err);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type(de, inode);
	err = ext2_commit_chunk(page, pos, len);
	if (update_times)
		dir->i_mtime = dir->i_ctime = current_time(dir);
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
}
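
/*
 * All directory modifications below (ext2_set_link(), ext2_add_link(),
 * ext2_delete_entry(), ext2_make_empty()) follow the same pattern: with the
 * page locked, map the affected range with ext2_prepare_chunk(), rewrite the
 * entries in place, and let ext2_commit_chunk() mark the buffers dirty,
 * extend i_size if needed, bump the directory's i_version (so ext2_readdir()
 * revalidates its position) and, for DIRSYNC directories, write the page and
 * the inode back synchronously.
 */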

/*
 * Parent is locked.
 */
int ext2_add_link (struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned chunk_size = ext2_chunk_size(dir);
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	void *page_addr = NULL;
	ext2_dirent * de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	loff_t pos;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *kaddr;
		char *dir_end;

		page = ext2_get_page(dir, n, 0, &page_addr);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_addr;
		dir_end = kaddr + ext2_last_byte(dir, n);
		de = (ext2_dirent *)kaddr;
		kaddr += PAGE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = ext2_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ext2_match (namelen, name, de))
				goto out_unlock;
			name_len = EXT2_DIR_REC_LEN(de->name_len);
			rec_len = ext2_rec_len_from_disk(de->rec_len);
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (ext2_dirent *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ext2_put_page(page, page_addr);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) +
		(char *)de - (char *)page_addr;
	err = ext2_prepare_chunk(page, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->inode) {
		ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
		de1->rec_len = ext2_rec_len_to_disk(rec_len - name_len);
		de->rec_len = ext2_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);
	err = ext2_commit_chunk(page, pos, rec_len);
	dir->i_mtime = dir->i_ctime = current_time(dir);
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	ext2_put_page(page, page_addr);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

/*
 * ext2_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date.
 */
int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page,
			char *kaddr)
{
	struct inode *inode = page->mapping->host;
	unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
	unsigned to = ((char *)dir - kaddr) +
				ext2_rec_len_from_disk(dir->rec_len);
	loff_t pos;
	ext2_dirent * pde = NULL;
	ext2_dirent * de = (ext2_dirent *) (kaddr + from);
	int err;

	while ((char*)de < (char*)dir) {
		if (de->rec_len == 0) {
			ext2_error(inode->i_sb, __func__,
				"zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ext2_next_entry(de);
	}
	if (pde)
		from = (char *)pde - kaddr;
	pos = page_offset(page) + from;
	lock_page(page);
	err = ext2_prepare_chunk(page, pos, to - from);
	BUG_ON(err);
	if (pde)
		pde->rec_len = ext2_rec_len_to_disk(to - from);
	dir->inode = 0;
	err = ext2_commit_chunk(page, pos, to - from);
	inode->i_ctime = inode->i_mtime = current_time(inode);
	EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(inode);
out:
	return err;
}
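
/*
 * Example of the merge performed above (record sizes illustrative only):
 * deleting "b" from a chunk laid out as
 *
 *	| "a" rec_len=12 | "b" rec_len=16 | "c" rec_len=996 |
 *
 * clears b's inode field and grows a's rec_len to 28, so "a" now covers the
 * space formerly used by "b".  If the victim is the first entry in its
 * chunk (pde == NULL), only its inode field is cleared.
 */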

/*
 * Set the first fragment of directory.
 */
int ext2_make_empty(struct inode *inode, struct inode *parent)
{
	struct page *page = grab_cache_page(inode->i_mapping, 0);
	unsigned chunk_size = ext2_chunk_size(inode);
	struct ext2_dir_entry_2 * de;
	int err;
	void *kaddr;

	if (!page)
		return -ENOMEM;

	err = ext2_prepare_chunk(page, 0, chunk_size);
	if (err) {
		unlock_page(page);
		goto fail;
	}
	kaddr = kmap_atomic(page);
	memset(kaddr, 0, chunk_size);
	de = (struct ext2_dir_entry_2 *)kaddr;
	de->name_len = 1;
	de->rec_len = ext2_rec_len_to_disk(EXT2_DIR_REC_LEN(1));
	memcpy (de->name, ".\0\0", 4);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);

	de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = ext2_rec_len_to_disk(chunk_size - EXT2_DIR_REC_LEN(1));
	de->inode = cpu_to_le32(parent->i_ino);
	memcpy (de->name, "..\0", 4);
	ext2_set_de_type (de, inode);
	kunmap_atomic(kaddr);
	err = ext2_commit_chunk(page, 0, chunk_size);
fail:
	put_page(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ext2_empty_dir (struct inode * inode)
{
	void *page_addr = NULL;
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		ext2_dirent * de;
		page = ext2_get_page(inode, i, 0, &page_addr);

		if (IS_ERR(page))
			goto not_empty;

		kaddr = page_addr;
		de = (ext2_dirent *)kaddr;
		kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(inode->i_sb, __func__,
					"zero-length directory entry");
				printk("kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					if (de->inode !=
					    cpu_to_le32(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = ext2_next_entry(de);
		}
		ext2_put_page(page, page_addr);
	}
	return 1;

not_empty:
	ext2_put_page(page, page_addr);
	return 0;
}

const struct file_operations ext2_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= ext2_readdir,
	.unlocked_ioctl	= ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.fsync		= ext2_fsync,
};