// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ufs/ufs_dir.c
 *
 * Copyright (C) 1996
 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
 * Laboratory for Computer Science Research Computing Facility
 * Rutgers, The State University of New Jersey
 *
 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
 *
 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
 *
 * Migration to usage of "page cache" on May 2006 by
 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
 *
 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
 */
static inline int ufs_match(struct super_block *sb, int len,
		const unsigned char *name, struct ufs_dir_entry *de)
{
	if (len != ufs_get_de_namlen(sb, de))
		return 0;
	if (!de->d_ino)
		return 0;
	return !memcmp(name, de->d_name, len);
}

static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;

	inode_inc_iversion(dir);
	block_write_end(NULL, mapping, pos, len, len, page, NULL);
	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		err = write_one_page(page);
	else
		unlock_page(page);
	return err;
}

static inline void ufs_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}

ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
	ino_t res = 0;
	struct ufs_dir_entry *de;
	struct page *page;

	de = ufs_find_entry(dir, qstr, &page);
	if (de) {
		res = fs32_to_cpu(dir->i_sb, de->d_ino);
		ufs_put_page(page);
	}
	return res;
}

/* Releases the page */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
		  struct page *page, struct inode *inode,
		  bool update_times)
{
	loff_t pos = page_offset(page) +
			(char *) de - (char *) page_address(page);
	unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
	int err;

	lock_page(page);
	err = ufs_prepare_chunk(page, pos, len);
	BUG_ON(err);

	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
	ufs_set_de_type(dir->i_sb, de, inode->i_mode);

	err = ufs_commit_chunk(page, pos, len);
	ufs_put_page(page);
	if (update_times)
		dir->i_mtime = dir->i_ctime = current_time(dir);
	mark_inode_dirty(dir);
}

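/*
 * Sanity-check every entry in a freshly read directory page: each record
 * must be at least UFS_DIR_REC_LEN(1) bytes, 4-byte aligned, large enough
 * for its own name, confined to a single directory chunk, and carry an
 * inode number within the filesystem's range.  On success the page is
 * marked PageChecked so the work is done only once per page.
 */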
static bool ufs_check_page(struct page *page)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	char *kaddr = page_address(page);
	unsigned offs, rec_len;
	unsigned limit = PAGE_SIZE;
	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
	struct ufs_dir_entry *p;
	char *error;

	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_MASK;
		if (limit & chunk_mask)
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct ufs_dir_entry *)(kaddr + offs);
		rec_len = fs16_to_cpu(sb, p->d_reclen);

		if (rec_len < UFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
			goto Espan;
		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
						 UFS_SB(sb)->s_uspi->s_ncg))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	ufs_error(sb, "ufs_check_page",
		  "size of directory #%lu is not a multiple of chunk size",
		  dir->i_ino);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	ufs_error(sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
		  "offset=%lu, rec_len=%d, name_len=%d",
		  dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
		  rec_len, ufs_get_de_namlen(sb, p));
	goto fail;
Eend:
	p = (struct ufs_dir_entry *)(kaddr + offs);
	ufs_error(sb, __func__,
		  "entry in directory #%lu spans the page boundary, "
		  "offset=%lu",
		  dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
fail:
	SetPageError(page);
	return false;
}

static struct page *ufs_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (unlikely(!PageChecked(page))) {
			if (PageError(page) || !ufs_check_page(page))
				goto fail;
		}
	}
	return page;

fail:
	ufs_put_page(page);
	return ERR_PTR(-EIO);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_SHIFT;
	if (last_byte > PAGE_SIZE)
		last_byte = PAGE_SIZE;
	return last_byte;
}

static inline struct ufs_dir_entry *
ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
{
	return (struct ufs_dir_entry *)((char *)p +
					fs16_to_cpu(sb, p->d_reclen));
}

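/*
 * Return the ".." entry of @dir, i.e. the second entry in the first
 * directory chunk.  On success the page it lives in is returned mapped
 * in *p and must be released with ufs_put_page().
 */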
struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
{
	struct page *page = ufs_get_page(dir, 0);
	struct ufs_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = ufs_next_entry(dir->i_sb,
				    (struct ufs_dir_entry *)page_address(page));
		*p = page;
	}
	return de;
}

/*
 * ufs_find_entry()
 *
 * Finds an entry in the specified directory with the wanted name. It
 * returns the entry, and sets *res_page to the page in which the entry
 * was found. The page is returned mapped and unlocked.
 * The entry is guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
				     struct page **res_page)
{
	struct super_block *sb = dir->i_sb;
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct ufs_inode_info *ui = UFS_I(dir);
	struct ufs_dir_entry *de;

	UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);

	if (npages == 0 || namelen > UFS_MAXNAMLEN)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	start = ui->i_dir_start_lookup;

	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ufs_get_page(dir, n);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (struct ufs_dir_entry *) kaddr;
			kaddr += ufs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (ufs_match(sb, namelen, name, de))
					goto found;
				de = ufs_next_entry(sb, de);
			}
			ufs_put_page(page);
		}
		if (++n >= npages)
			n = 0;
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	ui->i_dir_start_lookup = n;
	return de;
}

/*
 * Parent is locked.
 */
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block *sb = dir->i_sb;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct ufs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	loff_t pos;
	int err;

	UFSD("ENTER, name %s, namelen %u\n", name, namelen);

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
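	/*
	 * Scan every entry in every page: an unused slot (d_ino == 0)
	 * whose record is big enough, or a live entry with enough slack
	 * after its own name, can take the new entry.  Hitting i_size
	 * means no slot was found, so the directory grows by one chunk.
	 */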
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = ufs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + ufs_last_byte(dir, n);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += PAGE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->d_reclen = cpu_to_fs16(sb, chunk_size);
				de->d_ino = 0;
				goto got_it;
			}
			if (de->d_reclen == 0) {
				ufs_error(dir->i_sb, __func__,
					  "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ufs_match(sb, namelen, name, de))
				goto out_unlock;
			name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
			rec_len = fs16_to_cpu(sb, de->d_reclen);
			if (!de->d_ino && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct ufs_dir_entry *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ufs_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) +
			(char *)de - (char *)page_address(page);
	err = ufs_prepare_chunk(page, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->d_ino) {
		struct ufs_dir_entry *de1 =
			(struct ufs_dir_entry *) ((char *) de + name_len);
		de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
		de->d_reclen = cpu_to_fs16(sb, name_len);

		de = de1;
	}

	ufs_set_de_namlen(sb, de, namelen);
	memcpy(de->d_name, name, namelen + 1);
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);

	err = ufs_commit_chunk(page, pos, rec_len);
	dir->i_mtime = dir->i_ctime = current_time(dir);

	mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	ufs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

static inline unsigned
ufs_validate_entry(struct super_block *sb, char *base,
		   unsigned offset, unsigned mask)
{
	struct ufs_dir_entry *de = (struct ufs_dir_entry *)(base + offset);
	struct ufs_dir_entry *p = (struct ufs_dir_entry *)(base + (offset & mask));
	while ((char *)p < (char *)de)
		p = ufs_next_entry(sb, p);
	return (char *)p - base;
}

/*
 * This is blatantly stolen from ext2fs
 */
static int
ufs_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	bool need_revalidate = !inode_eq_iversion(inode, file->f_version);
	unsigned flags = UFS_SB(sb)->s_flags;

	UFSD("BEGIN\n");

	if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct ufs_dir_entry *de;

		struct page *page = ufs_get_page(inode, n);

		if (IS_ERR(page)) {
			ufs_error(sb, __func__,
				  "bad page in #%lu",
				  inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return -EIO;
		}
		kaddr = page_address(page);
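		/*
		 * If the directory changed since this file last iterated
		 * (the cached f_version no longer matches i_version), a
		 * resumed offset may point into the middle of an entry;
		 * walk forward from the start of the chunk to the next
		 * valid entry boundary before continuing.
		 */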
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
				ctx->pos = (n<<PAGE_SHIFT) + offset;
			}
			file->f_version = inode_query_iversion(inode);
			need_revalidate = false;
		}
		de = (struct ufs_dir_entry *)(kaddr + offset);
		limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
		for ( ; (char *)de <= limit; de = ufs_next_entry(sb, de)) {
			if (de->d_ino) {
				unsigned char d_type = DT_UNKNOWN;

				UFSD("filldir(%s,%u)\n", de->d_name,
				      fs32_to_cpu(sb, de->d_ino));
				UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));

				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
					d_type = de->d_u.d_44.d_type;

				if (!dir_emit(ctx, de->d_name,
					      ufs_get_de_namlen(sb, de),
					      fs32_to_cpu(sb, de->d_ino),
					      d_type)) {
					ufs_put_page(page);
					return 0;
				}
			}
			ctx->pos += fs16_to_cpu(sb, de->d_reclen);
		}
		ufs_put_page(page);
	}
	return 0;
}

/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry.
 */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		     struct page *page)
{
	struct super_block *sb = inode->i_sb;
	char *kaddr = page_address(page);
	unsigned from = ((char *)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	unsigned to = ((char *)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
	loff_t pos;
	struct ufs_dir_entry *pde = NULL;
	struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
	int err;

	UFSD("ENTER\n");

	UFSD("ino %u, reclen %u, namlen %u, name %s\n",
	     fs32_to_cpu(sb, de->d_ino),
	     fs16_to_cpu(sb, de->d_reclen),
	     ufs_get_de_namlen(sb, de), de->d_name);

	while ((char *)de < (char *)dir) {
		if (de->d_reclen == 0) {
			ufs_error(inode->i_sb, __func__,
				  "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ufs_next_entry(sb, de);
	}
	if (pde)
		from = (char *)pde - (char *)page_address(page);

	pos = page_offset(page) + from;
	lock_page(page);
	err = ufs_prepare_chunk(page, pos, to - from);
	BUG_ON(err);
	if (pde)
		pde->d_reclen = cpu_to_fs16(sb, to - from);
	dir->d_ino = 0;
	err = ufs_commit_chunk(page, pos, to - from);
	inode->i_ctime = inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);
out:
	ufs_put_page(page);
	UFSD("EXIT\n");
	return err;
}

int ufs_make_empty(struct inode *inode, struct inode *dir)
{
	struct super_block *sb = dir->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	struct ufs_dir_entry *de;
	char *base;
	int err;

	if (!page)
		return -ENOMEM;

	err = ufs_prepare_chunk(page, 0, chunk_size);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kmap(page);
	base = (char *)page_address(page);
	memset(base, 0, PAGE_SIZE);

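	/*
	 * Lay out "." and ".." in the first chunk: "." takes the minimal
	 * record, ".." is given the remainder of the chunk so the whole
	 * directory block is accounted for.
	 */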
	de = (struct ufs_dir_entry *) base;

	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);
	ufs_set_de_namlen(sb, de, 1);
	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
	strcpy(de->d_name, ".");
	de = (struct ufs_dir_entry *)
		((char *)de + fs16_to_cpu(sb, de->d_reclen));
	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
	ufs_set_de_type(sb, de, dir->i_mode);
	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
	ufs_set_de_namlen(sb, de, 2);
	strcpy(de->d_name, "..");
	kunmap(page);

	err = ufs_commit_chunk(page, 0, chunk_size);
fail:
	put_page(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ufs_empty_dir(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct ufs_dir_entry *de;
		page = ufs_get_page(inode, i);

		if (IS_ERR(page))
			continue;

		kaddr = page_address(page);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->d_reclen == 0) {
				ufs_error(inode->i_sb, __func__,
					  "zero-length directory entry: "
					  "kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->d_ino) {
				u16 namelen = ufs_get_de_namlen(sb, de);
				/* check for . and .. */
				if (de->d_name[0] != '.')
					goto not_empty;
				if (namelen > 2)
					goto not_empty;
				if (namelen < 2) {
					if (inode->i_ino !=
					    fs32_to_cpu(sb, de->d_ino))
						goto not_empty;
				} else if (de->d_name[1] != '.')
					goto not_empty;
			}
			de = ufs_next_entry(sb, de);
		}
		ufs_put_page(page);
	}
	return 1;

not_empty:
	ufs_put_page(page);
	return 0;
}

const struct file_operations ufs_dir_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= ufs_readdir,
	.fsync		= generic_file_fsync,
	.llseek		= generic_file_llseek,
};