inode.c (6bc6e63fcd7dac9e633ea29f1fddd9580ab28f3f) inode.c (79f0be8d2e6ebde27dfb3beff18eb689d5c4e36c)
1/*
2 * linux/fs/ext4/inode.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *

--- 2444 unchanged lines hidden (view full) ---

2453 }
2454
2455out_writepages:
2456 wbc->nr_to_write = to_write - nr_to_writebump;
2457 wbc->range_start = range_start;
2458 return ret;
2459}
2460
1/*
2 * linux/fs/ext4/inode.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *

--- 2444 unchanged lines hidden (view full) ---

2453 }
2454
2455out_writepages:
2456 wbc->nr_to_write = to_write - nr_to_writebump;
2457 wbc->range_start = range_start;
2458 return ret;
2459}
2460
2461#define FALL_BACK_TO_NONDELALLOC 1
2462static int ext4_nonda_switch(struct super_block *sb)
2463{
2464 s64 free_blocks, dirty_blocks;
2465 struct ext4_sb_info *sbi = EXT4_SB(sb);
2466
2467 /*
2468 * switch to non delalloc mode if we are running low
2469 * on free block. The free block accounting via percpu
2470 * counters can get slightly wrong with FBC_BATCH getting
2471 * accumulated on each CPU without updating global counters
2472 * Delalloc need an accurate free block accounting. So switch
2473 * to non delalloc when we are near to error range.
2474 */
2475 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
2476 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
2477 if (2 * free_blocks < 3 * dirty_blocks ||
2478 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
2479 /*
2480 * free block count is less that 150% of dirty blocks
2481 * or free blocks is less that watermark
2482 */
2483 return 1;
2484 }
2485 return 0;
2486}
2487
2461static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2462 loff_t pos, unsigned len, unsigned flags,
2463 struct page **pagep, void **fsdata)
2464{
2465 int ret, retries = 0;
2466 struct page *page;
2467 pgoff_t index;
2468 unsigned from, to;
2469 struct inode *inode = mapping->host;
2470 handle_t *handle;
2471
2472 index = pos >> PAGE_CACHE_SHIFT;
2473 from = pos & (PAGE_CACHE_SIZE - 1);
2474 to = from + len;
2488static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2489 loff_t pos, unsigned len, unsigned flags,
2490 struct page **pagep, void **fsdata)
2491{
2492 int ret, retries = 0;
2493 struct page *page;
2494 pgoff_t index;
2495 unsigned from, to;
2496 struct inode *inode = mapping->host;
2497 handle_t *handle;
2498
2499 index = pos >> PAGE_CACHE_SHIFT;
2500 from = pos & (PAGE_CACHE_SIZE - 1);
2501 to = from + len;
2502
2503 if (ext4_nonda_switch(inode->i_sb)) {
2504 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2505 return ext4_write_begin(file, mapping, pos,
2506 len, flags, pagep, fsdata);
2507 }
2508 *fsdata = (void *)0;
2475retry:
2476 /*
2477 * With delayed allocation, we don't log the i_disksize update
2478 * if there is delayed block allocation. But we still need
2479	 * to journal the i_disksize update if a write extends the end
2480	 * of a file which has an already mapped buffer.
2481 */
2482 handle = ext4_journal_start(inode, 1);

--- 52 unchanged lines hidden (view full) ---

2535 loff_t pos, unsigned len, unsigned copied,
2536 struct page *page, void *fsdata)
2537{
2538 struct inode *inode = mapping->host;
2539 int ret = 0, ret2;
2540 handle_t *handle = ext4_journal_current_handle();
2541 loff_t new_i_size;
2542 unsigned long start, end;
2509retry:
2510 /*
2511 * With delayed allocation, we don't log the i_disksize update
2512 * if there is delayed block allocation. But we still need
2513	 * to journal the i_disksize update if a write extends the end
2514	 * of a file which has an already mapped buffer.
2515 */
2516 handle = ext4_journal_start(inode, 1);

--- 52 unchanged lines hidden (view full) ---

2569 loff_t pos, unsigned len, unsigned copied,
2570 struct page *page, void *fsdata)
2571{
2572 struct inode *inode = mapping->host;
2573 int ret = 0, ret2;
2574 handle_t *handle = ext4_journal_current_handle();
2575 loff_t new_i_size;
2576 unsigned long start, end;
2577 int write_mode = (int)(unsigned long)fsdata;
2543
2578
2579 if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2580 if (ext4_should_order_data(inode)) {
2581 return ext4_ordered_write_end(file, mapping, pos,
2582 len, copied, page, fsdata);
2583 } else if (ext4_should_writeback_data(inode)) {
2584 return ext4_writeback_write_end(file, mapping, pos,
2585 len, copied, page, fsdata);
2586 } else {
2587 BUG();
2588 }
2589 }
2590
2544 start = pos & (PAGE_CACHE_SIZE - 1);
2545 end = start + copied - 1;
2546
2547 /*
2548 * generic_write_end() will run mark_inode_dirty() if i_size
2549 * changes. So let's piggyback the i_disksize mark_inode_dirty
2550 * into that.
2551 */

--- 2320 unchanged lines hidden (view full) ---

4872 return !buffer_mapped(bh);
4873}
4874
4875int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
4876{
4877 loff_t size;
4878 unsigned long len;
4879 int ret = -EINVAL;
2591 start = pos & (PAGE_CACHE_SIZE - 1);
2592 end = start + copied - 1;
2593
2594 /*
2595 * generic_write_end() will run mark_inode_dirty() if i_size
2596 * changes. So let's piggyback the i_disksize mark_inode_dirty
2597 * into that.
2598 */

--- 2320 unchanged lines hidden (view full) ---

4919 return !buffer_mapped(bh);
4920}
4921
4922int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
4923{
4924 loff_t size;
4925 unsigned long len;
4926 int ret = -EINVAL;
4927 void *fsdata;
4880 struct file *file = vma->vm_file;
4881 struct inode *inode = file->f_path.dentry->d_inode;
4882 struct address_space *mapping = inode->i_mapping;
4883
4884 /*
4885 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
4886 * get i_mutex because we are already holding mmap_sem.
4887 */

--- 22 unchanged lines hidden (view full) ---

4910 /*
4911 * OK, we need to fill the hole... Do write_begin write_end
4912 * to do block allocation/reservation.We are not holding
4913 * inode.i__mutex here. That allow * parallel write_begin,
4914 * write_end call. lock_page prevent this from happening
4915 * on the same page though
4916 */
4917 ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
4928 struct file *file = vma->vm_file;
4929 struct inode *inode = file->f_path.dentry->d_inode;
4930 struct address_space *mapping = inode->i_mapping;
4931
4932 /*
4933 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
4934 * get i_mutex because we are already holding mmap_sem.
4935 */

--- 22 unchanged lines hidden (view full) ---

4958 /*
4959 * OK, we need to fill the hole... Do write_begin write_end
4960 * to do block allocation/reservation.We are not holding
4961 * inode.i__mutex here. That allow * parallel write_begin,
4962 * write_end call. lock_page prevent this from happening
4963 * on the same page though
4964 */
4965 ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
4918 len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
4966 len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
4919 if (ret < 0)
4920 goto out_unlock;
4921 ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
4967 if (ret < 0)
4968 goto out_unlock;
4969 ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
4922 len, len, page, NULL);
4970 len, len, page, fsdata);
4923 if (ret < 0)
4924 goto out_unlock;
4925 ret = 0;
4926out_unlock:
4927 up_read(&inode->i_alloc_sem);
4928 return ret;
4929}
4971 if (ret < 0)
4972 goto out_unlock;
4973 ret = 0;
4974out_unlock:
4975 up_read(&inode->i_alloc_sem);
4976 return ret;
4977}