/*
 *	fs/libfs.c
 *	Library for filesystem writers.
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* sync_mapping_buffers */

#include <linux/uaccess.h>

#include "internal.h"

int simple_getattr(const struct path *path, struct kstat *stat,
		   u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	generic_fillattr(inode, stat);
	stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
	return 0;
}
EXPORT_SYMBOL(simple_getattr);

int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	buf->f_type = dentry->d_sb->s_magic;
	buf->f_bsize = PAGE_SIZE;
	buf->f_namelen = NAME_MAX;
	return 0;
}
EXPORT_SYMBOL(simple_statfs);

/*
 * Retaining negative dentries for an in-memory filesystem just wastes
 * memory and lookup time: arrange for them to be deleted immediately.
 */
int always_delete_dentry(const struct dentry *dentry)
{
	return 1;
}
EXPORT_SYMBOL(always_delete_dentry);

const struct dentry_operations simple_dentry_operations = {
	.d_delete = always_delete_dentry,
};
EXPORT_SYMBOL(simple_dentry_operations);

/*
 * Lookup the data. This is trivial - if the dentry didn't already
 * exist, we know it is negative.  Set d_op to delete negative dentries.
 */
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	if (!dentry->d_sb->s_d_op)
		d_set_d_op(dentry, &simple_dentry_operations);
	d_add(dentry, NULL);
	return NULL;
}
EXPORT_SYMBOL(simple_lookup);

int dcache_dir_open(struct inode *inode, struct file *file)
{
	file->private_data = d_alloc_cursor(file->f_path.dentry);

	return file->private_data ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(dcache_dir_open);
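
/*
 * Illustrative sketch (not part of libfs): an in-memory filesystem typically
 * points its super_operations at simple_statfs() and installs
 * simple_dentry_operations on the whole superblock so negative dentries are
 * dropped right away.  The examplefs_* names are hypothetical:
 *
 *	static const struct super_operations examplefs_super_ops = {
 *		.statfs		= simple_statfs,
 *		.drop_inode	= generic_delete_inode,
 *	};
 *
 *	static int examplefs_fill_super(struct super_block *sb, void *data,
 *					int silent)
 *	{
 *		sb->s_op = &examplefs_super_ops;
 *		sb->s_d_op = &simple_dentry_operations;
 *		...
 *		return 0;
 *	}
 *
 * With sb->s_d_op set, simple_lookup() leaves the per-dentry d_op alone and
 * the superblock-wide dentry operations apply.
 */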

int dcache_dir_close(struct inode *inode, struct file *file)
{
	dput(file->private_data);
	return 0;
}
EXPORT_SYMBOL(dcache_dir_close);

/* parent is locked at least shared */
static struct dentry *next_positive(struct dentry *parent,
				    struct list_head *from,
				    int count)
{
	unsigned *seq = &parent->d_inode->i_dir_seq, n;
	struct dentry *res;
	struct list_head *p;
	bool skipped;
	int i;

retry:
	i = count;
	skipped = false;
	n = smp_load_acquire(seq) & ~1;
	res = NULL;
	rcu_read_lock();
	for (p = from->next; p != &parent->d_subdirs; p = p->next) {
		struct dentry *d = list_entry(p, struct dentry, d_child);
		if (!simple_positive(d)) {
			skipped = true;
		} else if (!--i) {
			res = d;
			break;
		}
	}
	rcu_read_unlock();
	if (skipped) {
		smp_rmb();
		if (unlikely(*seq != n))
			goto retry;
	}
	return res;
}

static void move_cursor(struct dentry *cursor, struct list_head *after)
{
	struct dentry *parent = cursor->d_parent;
	unsigned n, *seq = &parent->d_inode->i_dir_seq;
	spin_lock(&parent->d_lock);
	for (;;) {
		n = *seq;
		if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
			break;
		cpu_relax();
	}
	__list_del(cursor->d_child.prev, cursor->d_child.next);
	if (after)
		list_add(&cursor->d_child, after);
	else
		list_add_tail(&cursor->d_child, &parent->d_subdirs);
	smp_store_release(seq, n + 2);
	spin_unlock(&parent->d_lock);
}

loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
	struct dentry *dentry = file->f_path.dentry;
	switch (whence) {
		case 1:
			offset += file->f_pos;
			/* fall through */
		case 0:
			if (offset >= 0)
				break;
			/* fall through */
		default:
			return -EINVAL;
	}
	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (file->f_pos >= 2) {
			struct dentry *cursor = file->private_data;
			struct dentry *to;
			loff_t n = file->f_pos - 2;

			inode_lock_shared(dentry->d_inode);
			to = next_positive(dentry, &dentry->d_subdirs, n);
			move_cursor(cursor, to ? &to->d_child : NULL);
			inode_unlock_shared(dentry->d_inode);
		}
	}
	return offset;
}
EXPORT_SYMBOL(dcache_dir_lseek);

/* Relationship between i_mode and the DT_xxx types */
static inline unsigned char dt_type(struct inode *inode)
{
	return (inode->i_mode >> 12) & 15;
}

/*
 * Directory is locked and all positive dentries in it are safe, since
 * for ramfs-type trees they can't go away without unlink() or rmdir(),
 * both impossible due to the lock on directory.
 */

int dcache_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct dentry *cursor = file->private_data;
	struct list_head *p = &cursor->d_child;
	struct dentry *next;
	bool moved = false;

	if (!dir_emit_dots(file, ctx))
		return 0;

	if (ctx->pos == 2)
		p = &dentry->d_subdirs;
	while ((next = next_positive(dentry, p, 1)) != NULL) {
		if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
			      d_inode(next)->i_ino, dt_type(d_inode(next))))
			break;
		moved = true;
		p = &next->d_child;
		ctx->pos++;
	}
	if (moved)
		move_cursor(cursor, p);
	return 0;
}
EXPORT_SYMBOL(dcache_readdir);

ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
	return -EISDIR;
}
EXPORT_SYMBOL(generic_read_dir);

const struct file_operations simple_dir_operations = {
	.open		= dcache_dir_open,
	.release	= dcache_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.iterate_shared	= dcache_readdir,
	.fsync		= noop_fsync,
};
EXPORT_SYMBOL(simple_dir_operations);

const struct inode_operations simple_dir_inode_operations = {
	.lookup		= simple_lookup,
};
EXPORT_SYMBOL(simple_dir_inode_operations);

static const struct super_operations simple_super_operations = {
	.statfs		= simple_statfs,
};

/*
 * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
 * will never be mountable)
 */
struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
	const struct super_operations *ops, const struct xattr_handler **xattr,
	const struct dentry_operations *dops, unsigned long magic)
{
	struct super_block *s;
	struct dentry *dentry;
	struct inode *root;
	struct qstr d_name = QSTR_INIT(name, strlen(name));

	s = sget_userns(fs_type, NULL, set_anon_super, SB_KERNMOUNT|SB_NOUSER,
			&init_user_ns, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);

	s->s_maxbytes = MAX_LFS_FILESIZE;
	s->s_blocksize = PAGE_SIZE;
	s->s_blocksize_bits = PAGE_SHIFT;
	s->s_magic = magic;
	s->s_op = ops ? ops : &simple_super_operations;
	s->s_xattr = xattr;
	s->s_time_gran = 1;
	root = new_inode(s);
	if (!root)
		goto Enomem;
	/*
	 * since this is the first inode, make it number 1. New inodes created
	 * after this must take care not to collide with it (by passing
	 * max_reserved of 1 to iunique).
	 */
	root->i_ino = 1;
	root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
	root->i_atime = root->i_mtime = root->i_ctime = current_time(root);
	dentry = __d_alloc(s, &d_name);
	if (!dentry) {
		iput(root);
		goto Enomem;
	}
	d_instantiate(dentry, root);
	s->s_root = dentry;
	s->s_d_op = dops;
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);

Enomem:
	deactivate_locked_super(s);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(mount_pseudo_xattr);
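
/*
 * Illustrative sketch (not part of libfs): a pseudo-filesystem such as a
 * hypothetical "examplefs" would typically call this from its .mount
 * callback, usually via the mount_pseudo() convenience wrapper that passes
 * NULL for the xattr handlers.  EXAMPLEFS_MAGIC is a made-up magic number:
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_pseudo(fs_type, "examplefs:", NULL,
 *				    &simple_dentry_operations, EXAMPLEFS_MAGIC);
 *	}
 */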

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}
EXPORT_SYMBOL(simple_open);

int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);

	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	inc_nlink(inode);
	ihold(inode);
	dget(dentry);
	d_instantiate(dentry, inode);
	return 0;
}
EXPORT_SYMBOL(simple_link);

int simple_empty(struct dentry *dentry)
{
	struct dentry *child;
	int ret = 0;

	spin_lock(&dentry->d_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
		spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
		if (simple_positive(child)) {
			spin_unlock(&child->d_lock);
			goto out;
		}
		spin_unlock(&child->d_lock);
	}
	ret = 1;
out:
	spin_unlock(&dentry->d_lock);
	return ret;
}
EXPORT_SYMBOL(simple_empty);

int simple_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
EXPORT_SYMBOL(simple_unlink);

int simple_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(d_inode(dentry));
	simple_unlink(dir, dentry);
	drop_nlink(dir);
	return 0;
}
EXPORT_SYMBOL(simple_rmdir);

int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
		  struct inode *new_dir, struct dentry *new_dentry,
		  unsigned int flags)
{
	struct inode *inode = d_inode(old_dentry);
	int they_are_dirs = d_is_dir(old_dentry);

	if (flags & ~RENAME_NOREPLACE)
		return -EINVAL;

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (d_really_is_positive(new_dentry)) {
		simple_unlink(new_dir, new_dentry);
		if (they_are_dirs) {
			drop_nlink(d_inode(new_dentry));
			drop_nlink(old_dir);
		}
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
		new_dir->i_mtime = inode->i_ctime = current_time(old_dir);

	return 0;
}
EXPORT_SYMBOL(simple_rename);
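
/*
 * Illustrative sketch (not part of libfs): an in-memory filesystem that keeps
 * its entire tree in the dcache can reuse the helpers above almost verbatim
 * for its directory inodes, supplying only its own create/mkdir/mknod.  The
 * examplefs_* names are hypothetical:
 *
 *	static const struct inode_operations examplefs_dir_inode_operations = {
 *		.create		= examplefs_create,
 *		.lookup		= simple_lookup,
 *		.link		= simple_link,
 *		.unlink		= simple_unlink,
 *		.mkdir		= examplefs_mkdir,
 *		.rmdir		= simple_rmdir,
 *		.mknod		= examplefs_mknod,
 *		.rename		= simple_rename,
 *	};
 */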

/**
 * simple_setattr - setattr for simple filesystem
 * @dentry: dentry
 * @iattr: iattr structure
 *
 * Returns 0 on success, -error on failure.
 *
 * simple_setattr is a simple ->setattr implementation without a proper
 * implementation of size changes.
 *
 * It can either be used for in-memory filesystems or special files
 * on simple regular filesystems.  Anything that needs to change on-disk
 * or wire state on size changes needs its own setattr method.
 */
int simple_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, iattr);
	if (error)
		return error;

	if (iattr->ia_valid & ATTR_SIZE)
		truncate_setsize(inode, iattr->ia_size);
	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}
EXPORT_SYMBOL(simple_setattr);

int simple_readpage(struct file *file, struct page *page)
{
	clear_highpage(page);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
EXPORT_SYMBOL(simple_readpage);

int simple_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct page *page;
	pgoff_t index;

	index = pos >> PAGE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;

	if (!PageUptodate(page) && (len != PAGE_SIZE)) {
		unsigned from = pos & (PAGE_SIZE - 1);

		zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
	}
	return 0;
}
EXPORT_SYMBOL(simple_write_begin);

/**
 * simple_write_end - .write_end helper for non-block-device FSes
 * @file: See .write_end of address_space_operations
 * @mapping: "
 * @pos: "
 * @len: "
 * @copied: "
 * @page: "
 * @fsdata: "
 *
 * simple_write_end does the minimum needed for updating a page after writing is
 * done. It has the same API signature as the .write_end of
 * address_space_operations vector. So it can just be set onto .write_end for
 * FSes that don't need any other processing. i_mutex is assumed to be held.
 * Block based filesystems should use generic_write_end().
 * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
 * is not called, so a filesystem that actually does store data in .write_inode
 * should extend on what's done here with a call to mark_inode_dirty() in the
 * case that i_size has changed.
 *
 * Use *ONLY* with simple_readpage()
 */
int simple_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			unsigned from = pos & (PAGE_SIZE - 1);

			zero_user(page, from + copied, len - copied);
		}
		SetPageUptodate(page);
	}
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	return copied;
}
EXPORT_SYMBOL(simple_write_end);
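
/*
 * Illustrative sketch (not part of libfs): together, the three helpers above
 * form a complete set of address_space_operations for a page-cache-backed,
 * in-memory file.  The examplefs_ name is hypothetical; .set_page_dirty is
 * optional and shown only because in-memory filesystems usually have no
 * writeback to do:
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.readpage	= simple_readpage,
 *		.write_begin	= simple_write_begin,
 *		.write_end	= simple_write_end,
 *		.set_page_dirty	= __set_page_dirty_no_writeback,
 *	};
 */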

/*
 * the inodes created here are not hashed. If you use iunique to generate
 * unique inode values later for this filesystem, then you must take care
 * to pass it an appropriate max_reserved value to avoid collisions.
 */
int simple_fill_super(struct super_block *s, unsigned long magic,
		      const struct tree_descr *files)
{
	struct inode *inode;
	struct dentry *root;
	struct dentry *dentry;
	int i;

	s->s_blocksize = PAGE_SIZE;
	s->s_blocksize_bits = PAGE_SHIFT;
	s->s_magic = magic;
	s->s_op = &simple_super_operations;
	s->s_time_gran = 1;

	inode = new_inode(s);
	if (!inode)
		return -ENOMEM;
	/*
	 * because the root inode is 1, the files array must not contain an
	 * entry at index 1
	 */
	inode->i_ino = 1;
	inode->i_mode = S_IFDIR | 0755;
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	set_nlink(inode, 2);
	root = d_make_root(inode);
	if (!root)
		return -ENOMEM;
	for (i = 0; !files->name || files->name[0]; i++, files++) {
		if (!files->name)
			continue;

		/* warn if it tries to conflict with the root inode */
		if (unlikely(i == 1))
			printk(KERN_WARNING "%s: %s passed in a files array "
				"with an index of 1!\n", __func__,
				s->s_type->name);

		dentry = d_alloc_name(root, files->name);
		if (!dentry)
			goto out;
		inode = new_inode(s);
		if (!inode) {
			dput(dentry);
			goto out;
		}
		inode->i_mode = S_IFREG | files->mode;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_fop = files->ops;
		inode->i_ino = i;
		d_add(dentry, inode);
	}
	s->s_root = root;
	return 0;
out:
	d_genocide(root);
	shrink_dcache_parent(root);
	dput(root);
	return -ENOMEM;
}
EXPORT_SYMBOL(simple_fill_super);

static DEFINE_SPINLOCK(pin_fs_lock);

int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt = NULL;
	spin_lock(&pin_fs_lock);
	if (unlikely(!*mount)) {
		spin_unlock(&pin_fs_lock);
		mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
		if (IS_ERR(mnt))
			return PTR_ERR(mnt);
		spin_lock(&pin_fs_lock);
		if (!*mount)
			*mount = mnt;
	}
	mntget(*mount);
	++*count;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
	return 0;
}
EXPORT_SYMBOL(simple_pin_fs);

void simple_release_fs(struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt;
	spin_lock(&pin_fs_lock);
	mnt = *mount;
	if (!--*count)
		*mount = NULL;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
}
EXPORT_SYMBOL(simple_release_fs);
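
/*
 * Illustrative sketch (not part of libfs): a small control filesystem can
 * describe its static file tree with a tree_descr array and hand it to
 * simple_fill_super() from its fill_super callback.  Entries with a NULL
 * name are skipped (handy with designated initializers, which also keeps
 * index 1 free for the root inode); an entry with an empty name terminates
 * the array.  The examplefs_* names and EXAMPLEFS_MAGIC are hypothetical:
 *
 *	static const struct tree_descr examplefs_files[] = {
 *		[2] = { "status", &examplefs_status_fops, S_IRUGO },
 *		[3] = { "ctl",    &examplefs_ctl_fops,    S_IWUSR },
 *		{ "" }
 *	};
 *
 *	static int examplefs_fill_super(struct super_block *sb, void *data,
 *					int silent)
 *	{
 *		return simple_fill_super(sb, EXAMPLEFS_MAGIC, examplefs_files);
 *	}
 */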

/**
 * simple_read_from_buffer - copy data from the buffer to user space
 * @to: the user space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The simple_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the user space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or a negative value is returned on error.
 **/
ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	ret = copy_to_user(to, from + pos, count);
	if (ret == count)
		return -EFAULT;
	count -= ret;
	*ppos = pos + count;
	return count;
}
EXPORT_SYMBOL(simple_read_from_buffer);

/**
 * simple_write_to_buffer - copy data from user space to the buffer
 * @to: the buffer to write to
 * @available: the size of the buffer
 * @ppos: the current position in the buffer
 * @from: the user space buffer to read from
 * @count: the maximum number of bytes to read
 *
 * The simple_write_to_buffer() function reads up to @count bytes from the user
 * space address starting at @from into the buffer @to at offset @ppos.
 *
 * On success, the number of bytes written is returned and the offset @ppos is
 * advanced by this number, or a negative value is returned on error.
 **/
ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
		const void __user *from, size_t count)
{
	loff_t pos = *ppos;
	size_t res;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	res = copy_from_user(to + pos, from, count);
	if (res == count)
		return -EFAULT;
	count -= res;
	*ppos = pos + count;
	return count;
}
EXPORT_SYMBOL(simple_write_to_buffer);

/**
 * memory_read_from_buffer - copy data from the buffer
 * @to: the kernel space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The memory_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the kernel space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or a negative value is returned on error.
 **/
ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;
	if (count > available - pos)
		count = available - pos;
	memcpy(to, from + pos, count);
	*ppos = pos + count;

	return count;
}
EXPORT_SYMBOL(memory_read_from_buffer);
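
/*
 * Illustrative sketch (not part of libfs): simple_read_from_buffer() does all
 * of the bounds checking and *ppos bookkeeping needed for a trivial ->read of
 * a fixed in-kernel buffer, e.g. in a hypothetical debugfs-style file:
 *
 *	static ssize_t examplefs_version_read(struct file *file,
 *			char __user *buf, size_t count, loff_t *ppos)
 *	{
 *		static const char version[] = "examplefs 1.0\n";
 *
 *		return simple_read_from_buffer(buf, count, ppos,
 *					       version, sizeof(version) - 1);
 *	}
 */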

/*
 * Transaction based IO.
 * The file expects a single write which triggers the transaction, and then
 * possibly a read which collects the result - which is stored in a
 * file-local buffer.
 */

void simple_transaction_set(struct file *file, size_t n)
{
	struct simple_transaction_argresp *ar = file->private_data;

	BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);

	/*
	 * The barrier ensures that ar->size will really remain zero until
	 * ar->data is ready for reading.
	 */
	smp_mb();
	ar->size = n;
}
EXPORT_SYMBOL(simple_transaction_set);

char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
{
	struct simple_transaction_argresp *ar;
	static DEFINE_SPINLOCK(simple_transaction_lock);

	if (size > SIMPLE_TRANSACTION_LIMIT - 1)
		return ERR_PTR(-EFBIG);

	ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
	if (!ar)
		return ERR_PTR(-ENOMEM);

	spin_lock(&simple_transaction_lock);

	/* only one write allowed per open */
	if (file->private_data) {
		spin_unlock(&simple_transaction_lock);
		free_page((unsigned long)ar);
		return ERR_PTR(-EBUSY);
	}

	file->private_data = ar;

	spin_unlock(&simple_transaction_lock);

	if (copy_from_user(ar->data, buf, size))
		return ERR_PTR(-EFAULT);

	return ar->data;
}
EXPORT_SYMBOL(simple_transaction_get);

ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
	struct simple_transaction_argresp *ar = file->private_data;

	if (!ar)
		return 0;
	return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
}
EXPORT_SYMBOL(simple_transaction_read);

int simple_transaction_release(struct inode *inode, struct file *file)
{
	free_page((unsigned long)file->private_data);
	return 0;
}
EXPORT_SYMBOL(simple_transaction_release);

/* Simple attribute files */

struct simple_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

/* simple_attr_open is called by an actual attribute open file operation
 * to set the attribute specific access operations. */
int simple_attr_open(struct inode *inode, struct file *file,
		     int (*get)(void *, u64 *), int (*set)(void *, u64),
		     const char *fmt)
{
	struct simple_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);

	file->private_data = attr;

	return nonseekable_open(inode, file);
}
EXPORT_SYMBOL_GPL(simple_attr_open);

int simple_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
EXPORT_SYMBOL_GPL(simple_attr_release);	/* GPL-only?  This?  Really? */
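
/*
 * Illustrative sketch (not part of libfs): the simple_attr_* helpers are
 * normally consumed through the DEFINE_SIMPLE_ATTRIBUTE() macro from
 * <linux/fs.h>, which generates an open handler and a file_operations around
 * a pair of get/set callbacks.  The example_* names are hypothetical, e.g.
 * for a counter exposed through debugfs:
 *
 *	static u64 example_counter;
 *
 *	static int example_counter_get(void *data, u64 *val)
 *	{
 *		*val = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	static int example_counter_set(void *data, u64 val)
 *	{
 *		*(u64 *)data = val;
 *		return 0;
 *	}
 *
 *	DEFINE_SIMPLE_ATTRIBUTE(example_counter_fops, example_counter_get,
 *				example_counter_set, "%llu\n");
 *
 * The data pointer handed to the callbacks is whatever was stored in
 * inode->i_private, e.g. by debugfs_create_file("counter", 0644, parent,
 * &example_counter, &example_counter_fops).
 */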

/* read from the buffer that is filled with the get function */
ssize_t simple_attr_read(struct file *file, char __user *buf,
			 size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;

	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(simple_attr_read);

/* interpret the buffer as a number to call the set function with */
ssize_t simple_attr_write(struct file *file, const char __user *buf,
			  size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	attr->set_buf[size] = '\0';
	val = simple_strtoll(attr->set_buf, NULL, 0);
	ret = attr->set(attr->data, val);
	if (ret == 0)
		ret = len;	/* on success, claim we got the whole input */
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(simple_attr_write);

/**
 * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
 * @sb: filesystem to do the file handle conversion on
 * @fid: file handle to convert
 * @fh_len: length of the file handle in bytes
 * @fh_type: type of file handle
 * @get_inode: filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the object specified in the file handle.
 */
struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len < 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_dentry);

/**
 * generic_fh_to_parent - generic helper for the fh_to_parent export operation
 * @sb: filesystem to do the file handle conversion on
 * @fid: file handle to convert
 * @fh_len: length of the file handle in bytes
 * @fh_type: type of file handle
 * @get_inode: filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the _parent_ object specified in the file handle if it
 * is specified in the file handle, or NULL otherwise.
 */
struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	if (fh_len <= 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.parent_ino,
				  (fh_len > 3 ? fid->i32.parent_gen : 0));
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_parent);
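
/*
 * Illustrative sketch (not part of libfs): a filesystem wires these two
 * helpers into its export_operations by supplying a callback that turns an
 * (inode number, generation) pair back into an inode.  The examplefs_* names
 * are hypothetical:
 *
 *	static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
 *					    examplefs_nfs_get_inode);
 *	}
 *
 *	static struct dentry *examplefs_fh_to_parent(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return generic_fh_to_parent(sb, fid, fh_len, fh_type,
 *					    examplefs_nfs_get_inode);
 *	}
 *
 *	static const struct export_operations examplefs_export_ops = {
 *		.fh_to_dentry	= examplefs_fh_to_dentry,
 *		.fh_to_parent	= examplefs_fh_to_parent,
 *	};
 */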

/**
 * __generic_file_fsync - generic fsync implementation for simple filesystems
 *
 * @file: file to synchronize
 * @start: start offset in bytes
 * @end: end offset in bytes (inclusive)
 * @datasync: only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	inode_lock(inode);
	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	inode_unlock(inode);
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(__generic_file_fsync);

/**
 * generic_file_fsync - generic fsync implementation for simple filesystems
 *			with flush
 * @file: file to synchronize
 * @start: start offset in bytes
 * @end: end offset in bytes (inclusive)
 * @datasync: only synchronize essential metadata if true
 *
 */
int generic_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	err = __generic_file_fsync(file, start, end, datasync);
	if (err)
		return err;
	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
}
EXPORT_SYMBOL(generic_file_fsync);

/**
 * generic_check_addressable - Check addressability of file system
 * @blocksize_bits: log of file system block size
 * @num_blocks: number of blocks in file system
 *
 * Determine whether a file system with @num_blocks blocks (and a
 * block size of 2**@blocksize_bits) is addressable by the sector_t
 * and page cache of the system.  Return 0 if so and -EFBIG otherwise.
 */
int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
{
	u64 last_fs_block = num_blocks - 1;
	u64 last_fs_page =
		last_fs_block >> (PAGE_SHIFT - blocksize_bits);

	if (unlikely(num_blocks == 0))
		return 0;

	if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
		return -EINVAL;

	if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
	    (last_fs_page > (pgoff_t)(~0ULL))) {
		return -EFBIG;
	}
	return 0;
}
EXPORT_SYMBOL(generic_check_addressable);

/*
 * No-op implementation of ->fsync for in-memory filesystems.
 */
int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	return 0;
}
EXPORT_SYMBOL(noop_fsync);

int noop_set_page_dirty(struct page *page)
{
	/*
	 * Unlike __set_page_dirty_no_writeback that handles dirty page
	 * tracking in the page object, dax does all dirty tracking in
	 * the inode address_space in response to mkwrite faults. In the
	 * dax case we only need to worry about potentially dirty CPU
	 * caches, not dirty page cache pages to write back.
	 *
	 * This callback is defined to prevent fallback to
	 * __set_page_dirty_buffers() in set_page_dirty().
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(noop_set_page_dirty);

void noop_invalidatepage(struct page *page, unsigned int offset,
			 unsigned int length)
{
	/*
	 * There is no page cache to invalidate in the dax case, however
	 * we need this callback defined to prevent falling back to
	 * block_invalidatepage() in do_invalidatepage().
	 */
}
EXPORT_SYMBOL_GPL(noop_invalidatepage);

ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * iomap based filesystems support direct I/O without need for
	 * this callback. However, it still needs to be set in
	 * inode->a_ops so that open/fcntl know that direct I/O is
	 * generally supported.
	 */
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(noop_direct_IO);
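
/*
 * Illustrative sketch (not part of libfs): the three noop_* helpers above are
 * meant to be used together in the address_space_operations of a DAX-only
 * file, where there are no page cache pages to dirty, invalidate or write
 * back.  Both examplefs_ names are hypothetical:
 *
 *	static const struct address_space_operations examplefs_dax_aops = {
 *		.writepages	= examplefs_dax_writepages,
 *		.direct_IO	= noop_direct_IO,
 *		.set_page_dirty	= noop_set_page_dirty,
 *		.invalidatepage	= noop_invalidatepage,
 *	};
 */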

/* Because kfree isn't assignment-compatible with void(void*) ;-/ */
void kfree_link(void *p)
{
	kfree(p);
}
EXPORT_SYMBOL(kfree_link);

/*
 * nop .set_page_dirty method so that people can use .page_mkwrite on
 * anon inodes.
 */
static int anon_set_page_dirty(struct page *page)
{
	return 0;
}

/*
 * A single inode exists for all anon_inode files. Contrary to pipes,
 * anon_inode inodes have no associated per-instance data, so we need
 * only allocate one of them.
 */
struct inode *alloc_anon_inode(struct super_block *s)
{
	static const struct address_space_operations anon_aops = {
		.set_page_dirty = anon_set_page_dirty,
	};
	struct inode *inode = new_inode_pseudo(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->i_ino = get_next_ino();
	inode->i_mapping->a_ops = &anon_aops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because mark_inode_dirty() will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_flags |= S_PRIVATE;
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	return inode;
}
EXPORT_SYMBOL(alloc_anon_inode);

/**
 * simple_nosetlease - generic helper for prohibiting leases
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: new lease supplied for insertion
 * @priv: private data for lm_setup operation
 *
 * Generic helper for filesystems that do not wish to allow leases to be set.
 * All arguments are ignored and it just returns -EINVAL.
 */
int simple_nosetlease(struct file *filp, long arg, struct file_lock **flp,
		      void **priv)
{
	return -EINVAL;
}
EXPORT_SYMBOL(simple_nosetlease);

const char *simple_get_link(struct dentry *dentry, struct inode *inode,
			    struct delayed_call *done)
{
	return inode->i_link;
}
EXPORT_SYMBOL(simple_get_link);

const struct inode_operations simple_symlink_inode_operations = {
	.get_link = simple_get_link,
};
EXPORT_SYMBOL(simple_symlink_inode_operations);

/*
 * Operations for a permanently empty directory.
 */
static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	return ERR_PTR(-ENOENT);
}

static int empty_dir_getattr(const struct path *path, struct kstat *stat,
			     u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	generic_fillattr(inode, stat);
	return 0;
}

static int empty_dir_setattr(struct dentry *dentry, struct iattr *attr)
{
	return -EPERM;
}

static ssize_t empty_dir_listxattr(struct dentry *dentry, char *list, size_t size)
{
	return -EOPNOTSUPP;
}

static const struct inode_operations empty_dir_inode_operations = {
	.lookup		= empty_dir_lookup,
	.permission	= generic_permission,
	.setattr	= empty_dir_setattr,
	.getattr	= empty_dir_getattr,
	.listxattr	= empty_dir_listxattr,
};

static loff_t empty_dir_llseek(struct file *file, loff_t offset, int whence)
{
	/* An empty directory has two entries . and .. at offsets 0 and 1 */
	return generic_file_llseek_size(file, offset, whence, 2, 2);
}

static int empty_dir_readdir(struct file *file, struct dir_context *ctx)
{
	dir_emit_dots(file, ctx);
	return 0;
}

static const struct file_operations empty_dir_operations = {
	.llseek		= empty_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= empty_dir_readdir,
	.fsync		= noop_fsync,
};

void make_empty_dir_inode(struct inode *inode)
{
	set_nlink(inode, 2);
	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
	inode->i_uid = GLOBAL_ROOT_UID;
	inode->i_gid = GLOBAL_ROOT_GID;
	inode->i_rdev = 0;
	inode->i_size = 0;
	inode->i_blkbits = PAGE_SHIFT;
	inode->i_blocks = 0;

	inode->i_op = &empty_dir_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &empty_dir_operations;
}

bool is_empty_dir_inode(struct inode *inode)
{
	return (inode->i_fop == &empty_dir_operations) &&
		(inode->i_op == &empty_dir_inode_operations);
}
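
/*
 * Illustrative sketch (not part of libfs): simple_get_link() above serves
 * symlinks whose target string is kept directly in inode->i_link, so an
 * in-memory filesystem can create one roughly like this.  The examplefs_*
 * name is hypothetical, and ownership/time initialisation as well as freeing
 * i_link on inode eviction are left out of the sketch:
 *
 *	static int examplefs_symlink(struct inode *dir, struct dentry *dentry,
 *				     const char *symname)
 *	{
 *		struct inode *inode = new_inode(dir->i_sb);
 *
 *		if (!inode)
 *			return -ENOMEM;
 *		inode->i_mode = S_IFLNK | 0777;
 *		inode->i_op = &simple_symlink_inode_operations;
 *		inode->i_link = kstrdup(symname, GFP_KERNEL);
 *		if (!inode->i_link) {
 *			iput(inode);
 *			return -ENOMEM;
 *		}
 *		d_instantiate(dentry, inode);
 *		dget(dentry);
 *		return 0;
 *	}
 */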