// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS directory entry operations
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Modified for NILFS by Amagai Yoshiji.
 */
/*
 *  linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext2 directory handling functions
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */

#include <linux/pagemap.h>
#include "nilfs.h"
#include "page.h"

static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen)
{
	unsigned int len = le16_to_cpu(dlen);

#if (PAGE_SIZE >= 65536)
	if (len == NILFS_MAX_REC_LEN)
		return 1 << 16;
#endif
	return len;
}

static inline __le16 nilfs_rec_len_to_disk(unsigned int len)
{
#if (PAGE_SIZE >= 65536)
	if (len == (1 << 16))
		return cpu_to_le16(NILFS_MAX_REC_LEN);

	BUG_ON(len > (1 << 16));
#endif
	return cpu_to_le16(len);
}

/*
 * nilfs uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */
static inline unsigned int nilfs_chunk_size(struct inode *inode)
{
	return inode->i_sb->s_blocksize;
}

static inline void nilfs_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}
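/*
 * Editor's summary of the directory layout, inferred from the code below:
 * each page of a directory is divided into block-sized chunks, and each
 * chunk holds a chain of struct nilfs_dir_entry records linked by their
 * on-disk rec_len field.  The last record of a chunk is padded so that its
 * rec_len reaches exactly to the chunk boundary; entries never span chunk
 * boundaries (nilfs_check_page() rejects such corruption).  Roughly:
 *
 *   |<--------------- chunk (s_blocksize bytes) --------------->|
 *   | entry | entry | ... | last entry (rec_len runs to the end)|
 */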
/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned int last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_SHIFT;
	if (last_byte > PAGE_SIZE)
		last_byte = PAGE_SIZE;
	return last_byte;
}

static int nilfs_prepare_chunk(struct page *page, unsigned int from,
			       unsigned int to)
{
	loff_t pos = page_offset(page) + from;

	return __block_write_begin(page, pos, to - from, nilfs_get_block);
}

static void nilfs_commit_chunk(struct page *page,
			       struct address_space *mapping,
			       unsigned int from, unsigned int to)
{
	struct inode *dir = mapping->host;
	loff_t pos = page_offset(page) + from;
	unsigned int len = to - from;
	unsigned int nr_dirty, copied;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
	copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
	if (pos + copied > dir->i_size)
		i_size_write(dir, pos + copied);
	if (IS_DIRSYNC(dir))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	err = nilfs_set_file_dirty(dir, nr_dirty);
	WARN_ON(err); /* never happens */
	unlock_page(page);
}

static bool nilfs_check_page(struct page *page)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned int chunk_size = nilfs_chunk_size(dir);
	char *kaddr = page_address(page);
	unsigned int offs, rec_len;
	unsigned int limit = PAGE_SIZE;
	struct nilfs_dir_entry *p;
	char *error;

	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_MASK;
		if (limit & (chunk_size - 1))
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct nilfs_dir_entry *)(kaddr + offs);
		rec_len = nilfs_rec_len_from_disk(p->rec_len);

		if (rec_len < NILFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < NILFS_DIR_REC_LEN(p->name_len))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size - 1))
			goto Espan;
		if (unlikely(p->inode &&
			     NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	nilfs_error(sb,
		    "size of directory #%lu is not a multiple of chunk size",
		    dir->i_ino);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "disallowed inode number";
bad_entry:
	nilfs_error(sb,
		    "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
		    dir->i_ino, error, (page->index << PAGE_SHIFT) + offs,
		    (unsigned long)le64_to_cpu(p->inode),
		    rec_len, p->name_len);
	goto fail;
Eend:
	p = (struct nilfs_dir_entry *)(kaddr + offs);
	nilfs_error(sb,
		    "entry in directory #%lu spans the page boundary offset=%lu, inode=%lu",
		    dir->i_ino, (page->index << PAGE_SHIFT) + offs,
		    (unsigned long)le64_to_cpu(p->inode));
fail:
	SetPageError(page);
	return false;
}
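/*
 * Editor's note on the helper below: nilfs_get_page() reads page @n of
 * directory @dir, returns the kmap()'ed address (or an ERR_PTR on failure)
 * and stores the page in *@pagep on success.  The page content is validated
 * by nilfs_check_page() once per page-cache load (tracked via PageChecked).
 * Every successful call must be paired with nilfs_put_page().  Typical usage
 * in this file (illustrative sketch only):
 *
 *	kaddr = nilfs_get_page(dir, n, &page);
 *	if (IS_ERR(kaddr))
 *		return ERR_CAST(kaddr);
 *	...walk entries between kaddr and kaddr + nilfs_last_byte(dir, n)...
 *	nilfs_put_page(page);
 */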
static void *nilfs_get_page(struct inode *dir, unsigned long n,
			    struct page **pagep)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	void *kaddr;

	if (IS_ERR(page))
		return page;

	kaddr = kmap(page);
	if (unlikely(!PageChecked(page))) {
		if (!nilfs_check_page(page))
			goto fail;
	}

	*pagep = page;
	return kaddr;

fail:
	nilfs_put_page(page);
	return ERR_PTR(-EIO);
}

/*
 * NOTE! unlike strncmp, nilfs_match returns 1 for success, 0 for failure.
 *
 * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
 */
static int
nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * p is at least 6 bytes before the end of page
 */
static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
{
	return (struct nilfs_dir_entry *)((char *)p +
					  nilfs_rec_len_from_disk(p->rec_len));
}

static unsigned char
nilfs_filetype_table[NILFS_FT_MAX] = {
	[NILFS_FT_UNKNOWN]	= DT_UNKNOWN,
	[NILFS_FT_REG_FILE]	= DT_REG,
	[NILFS_FT_DIR]		= DT_DIR,
	[NILFS_FT_CHRDEV]	= DT_CHR,
	[NILFS_FT_BLKDEV]	= DT_BLK,
	[NILFS_FT_FIFO]		= DT_FIFO,
	[NILFS_FT_SOCK]		= DT_SOCK,
	[NILFS_FT_SYMLINK]	= DT_LNK,
};

#define S_SHIFT 12
static unsigned char
nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
	[S_IFREG >> S_SHIFT]	= NILFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= NILFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= NILFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= NILFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= NILFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= NILFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= NILFS_FT_SYMLINK,
};

static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
{
	umode_t mode = inode->i_mode;

	de->file_type = nilfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}

static int nilfs_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);

	if (pos > inode->i_size - NILFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct nilfs_dir_entry *de;
		struct page *page;

		kaddr = nilfs_get_page(inode, n, &page);
		if (IS_ERR(kaddr)) {
			nilfs_error(sb, "bad page in #%lu", inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return -EIO;
		}
		de = (struct nilfs_dir_entry *)(kaddr + offset);
		limit = kaddr + nilfs_last_byte(inode, n) -
			NILFS_DIR_REC_LEN(1);
		for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) {
			if (de->rec_len == 0) {
				nilfs_error(sb, "zero-length directory entry");
				nilfs_put_page(page);
				return -EIO;
			}
			if (de->inode) {
				unsigned char t;

				if (de->file_type < NILFS_FT_MAX)
					t = nilfs_filetype_table[de->file_type];
				else
					t = DT_UNKNOWN;

				if (!dir_emit(ctx, de->name, de->name_len,
					      le64_to_cpu(de->inode), t)) {
					nilfs_put_page(page);
					return 0;
				}
			}
			ctx->pos += nilfs_rec_len_from_disk(de->rec_len);
		}
		nilfs_put_page(page);
	}
	return 0;
}
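/*
 * Editor's note on the f_pos encoding used by nilfs_readdir() above:
 * ctx->pos is a byte offset into the directory file, so the page index is
 * pos >> PAGE_SHIFT and the offset within that page is pos & ~PAGE_MASK.
 * With 4 KiB pages, for example, pos == 12312 resumes the scan at byte 24
 * of page 3 (12312 = 3 * 4096 + 24).  Positions only ever advance by whole
 * rec_len amounts, so a resumed scan always lands on an entry boundary.
 */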
/*
 * nilfs_find_entry()
 *
 * Finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_page). The page is returned mapped and unlocked.
 * The entry is guaranteed to be valid.
 *
 * On failure, returns an error pointer and the caller should ignore res_page.
 */
struct nilfs_dir_entry *
nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
		 struct page **res_page)
{
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct nilfs_inode_info *ei = NILFS_I(dir);
	struct nilfs_dir_entry *de;

	if (npages == 0)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr = nilfs_get_page(dir, n, &page);

		if (IS_ERR(kaddr))
			return ERR_CAST(kaddr);

		de = (struct nilfs_dir_entry *)kaddr;
		kaddr += nilfs_last_byte(dir, n) - reclen;
		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				nilfs_error(dir->i_sb,
					    "zero-length directory entry");
				nilfs_put_page(page);
				goto out;
			}
			if (nilfs_match(namelen, name, de))
				goto found;
			de = nilfs_next_entry(de);
		}
		nilfs_put_page(page);

		if (++n >= npages)
			n = 0;
		/* next page is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
			nilfs_error(dir->i_sb,
				    "dir %lu size %lld exceeds block count %llu",
				    dir->i_ino, dir->i_size,
				    (unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return ERR_PTR(-ENOENT);

found:
	*res_page = page;
	ei->i_dir_start_lookup = n;
	return de;
}
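/*
 * Editor's note on the OFFSET_CACHE hint used above: i_dir_start_lookup
 * remembers the page index at which the previous lookup succeeded, so the
 * do/while loop starts there and wraps around through all npages before
 * giving up.  It is only a performance hint; a stale value (>= npages) is
 * reset to 0, so correctness never depends on it.
 */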
struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
{
	struct page *page;
	struct nilfs_dir_entry *de, *next_de;
	size_t limit;
	char *msg;

	de = nilfs_get_page(dir, 0, &page);
	if (IS_ERR(de))
		return NULL;

	limit = nilfs_last_byte(dir, 0);  /* is a multiple of chunk size */
	if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino ||
		     !nilfs_match(1, ".", de))) {
		msg = "missing '.'";
		goto fail;
	}

	next_de = nilfs_next_entry(de);
	/*
	 * If "next_de" has not reached the end of the chunk, there is
	 * at least one more record.  Check whether it matches "..".
	 */
	if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) ||
		     !nilfs_match(2, "..", next_de))) {
		msg = "missing '..'";
		goto fail;
	}
	*p = page;
	return next_de;

fail:
	nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg);
	nilfs_put_page(page);
	return NULL;
}

int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
{
	struct nilfs_dir_entry *de;
	struct page *page;

	de = nilfs_find_entry(dir, qstr, &page);
	if (IS_ERR(de))
		return PTR_ERR(de);

	*ino = le64_to_cpu(de->inode);
	kunmap(page);
	put_page(page);
	return 0;
}

/* Releases the page */
void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
		    struct page *page, struct inode *inode)
{
	unsigned int from = (char *)de - (char *)page_address(page);
	unsigned int to = from + nilfs_rec_len_from_disk(de->rec_len);
	struct address_space *mapping = page->mapping;
	int err;

	lock_page(page);
	err = nilfs_prepare_chunk(page, from, to);
	BUG_ON(err);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	nilfs_commit_chunk(page, mapping, from, to);
	nilfs_put_page(page);
	dir->i_mtime = inode_set_ctime_current(dir);
}
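/*
 * Editor's note: every in-place directory update in this file follows the
 * pattern seen in nilfs_set_link() above: lock the page, prepare the byte
 * range [from, to) being rewritten, modify the mapped entry, then commit,
 * which marks the buffers dirty for the log writer and unlocks the page.
 * Sketch (no additional functionality implied):
 *
 *	lock_page(page);
 *	err = nilfs_prepare_chunk(page, from, to);
 *	...modify entries between from and to...
 *	nilfs_commit_chunk(page, page->mapping, from, to);
 */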
/*
 * Parent is locked.
 */
int nilfs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned int chunk_size = nilfs_chunk_size(dir);
	unsigned int reclen = NILFS_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct nilfs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	unsigned int from, to;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		kaddr = nilfs_get_page(dir, n, &page);
		err = PTR_ERR(kaddr);
		if (IS_ERR(kaddr))
			goto out;
		lock_page(page);
		dir_end = kaddr + nilfs_last_byte(dir, n);
		de = (struct nilfs_dir_entry *)kaddr;
		kaddr += PAGE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = nilfs_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				nilfs_error(dir->i_sb,
					    "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (nilfs_match(namelen, name, de))
				goto out_unlock;
			name_len = NILFS_DIR_REC_LEN(de->name_len);
			rec_len = nilfs_rec_len_from_disk(de->rec_len);
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct nilfs_dir_entry *)((char *)de + rec_len);
		}
		unlock_page(page);
		nilfs_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	from = (char *)de - (char *)page_address(page);
	to = from + rec_len;
	err = nilfs_prepare_chunk(page, from, to);
	if (err)
		goto out_unlock;
	if (de->inode) {
		struct nilfs_dir_entry *de1;

		de1 = (struct nilfs_dir_entry *)((char *)de + name_len);
		de1->rec_len = nilfs_rec_len_to_disk(rec_len - name_len);
		de->rec_len = nilfs_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	nilfs_commit_chunk(page, page->mapping, from, to);
	dir->i_mtime = inode_set_ctime_current(dir);
	nilfs_mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	nilfs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

/*
 * nilfs_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date. Releases the page.
 */
int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	char *kaddr = page_address(page);
	unsigned int from, to;
	struct nilfs_dir_entry *de, *pde = NULL;
	int err;

	from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
	to = ((char *)dir - kaddr) + nilfs_rec_len_from_disk(dir->rec_len);
	de = (struct nilfs_dir_entry *)(kaddr + from);

	while ((char *)de < (char *)dir) {
		if (de->rec_len == 0) {
			nilfs_error(inode->i_sb,
				    "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = nilfs_next_entry(de);
	}
	if (pde)
		from = (char *)pde - (char *)page_address(page);
	lock_page(page);
	err = nilfs_prepare_chunk(page, from, to);
	BUG_ON(err);
	if (pde)
		pde->rec_len = nilfs_rec_len_to_disk(to - from);
	dir->inode = 0;
	nilfs_commit_chunk(page, mapping, from, to);
	inode->i_mtime = inode_set_ctime_current(inode);
out:
	nilfs_put_page(page);
	return err;
}
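/*
 * Editor's illustration of the merge performed by nilfs_delete_entry()
 * above: the victim entry is not physically erased; instead the preceding
 * entry in the same chunk absorbs its space by growing rec_len, and the
 * victim's inode field is cleared so nilfs_match() skips it.
 *
 *   before:  | pde (rec_len=A) | victim (rec_len=B) | next ...
 *   after:   | pde (rec_len=A+B) .................. | next ...
 *
 * If the victim is the first entry of its chunk, there is no pde; only the
 * inode field is zeroed and the record stays in place as free space.
 */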
/*
 * Set up the first fragment ("." and ".." entries) of a directory.
 */
int nilfs_make_empty(struct inode *inode, struct inode *parent)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	unsigned int chunk_size = nilfs_chunk_size(inode);
	struct nilfs_dir_entry *de;
	int err;
	void *kaddr;

	if (!page)
		return -ENOMEM;

	err = nilfs_prepare_chunk(page, 0, chunk_size);
	if (unlikely(err)) {
		unlock_page(page);
		goto fail;
	}
	kaddr = kmap_atomic(page);
	memset(kaddr, 0, chunk_size);
	de = (struct nilfs_dir_entry *)kaddr;
	de->name_len = 1;
	de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1));
	memcpy(de->name, ".\0\0", 4);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);

	de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1));
	de->inode = cpu_to_le64(parent->i_ino);
	memcpy(de->name, "..\0", 4);
	nilfs_set_de_type(de, inode);
	kunmap_atomic(kaddr);
	nilfs_commit_chunk(page, mapping, 0, chunk_size);
fail:
	put_page(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int nilfs_empty_dir(struct inode *inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct nilfs_dir_entry *de;

		kaddr = nilfs_get_page(inode, i, &page);
		if (IS_ERR(kaddr))
			return 0;

		de = (struct nilfs_dir_entry *)kaddr;
		kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				nilfs_error(inode->i_sb,
					    "zero-length directory entry (kaddr=%p, de=%p)",
					    kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					if (de->inode !=
					    cpu_to_le64(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = nilfs_next_entry(de);
		}
		nilfs_put_page(page);
	}
	return 1;

not_empty:
	nilfs_put_page(page);
	return 0;
}

const struct file_operations nilfs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= nilfs_readdir,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif	/* CONFIG_COMPAT */
	.fsync		= nilfs_sync_file,
};