buffer.c (before: 8c57a5e7b2820f349c95b8c8393fec1e0f4070d2) | buffer.c (after: 70246286e94c335b5bea0cbc68a17a96dd620281) |
---|---|
1/* 2 * linux/fs/buffer.c 3 * 4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds 5 */ 6 7/* 8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95 --- 31 unchanged lines hidden (view full) --- 40#include <linux/notifier.h> 41#include <linux/cpu.h> 42#include <linux/bitops.h> 43#include <linux/mpage.h> 44#include <linux/bit_spinlock.h> 45#include <trace/events/block.h> 46 47static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); | 1/* 2 * linux/fs/buffer.c 3 * 4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds 5 */ 6 7/* 8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95 --- 31 unchanged lines hidden (view full) --- 40#include <linux/notifier.h> 41#include <linux/cpu.h> 42#include <linux/bitops.h> 43#include <linux/mpage.h> 44#include <linux/bit_spinlock.h> 45#include <trace/events/block.h> 46 47static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); |
48static int submit_bh_wbc(int rw, struct buffer_head *bh, | 48static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, |
49 unsigned long bio_flags, 50 struct writeback_control *wbc); 51 52#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers) 53 54void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) 55{ 56 bh->b_end_io = handler; --- 91 unchanged lines hidden (view full) --- 148 * hashing after unlocking the buffer, so it doesn't actually touch the bh 149 * itself. 150 */ 151static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) 152{ 153 if (uptodate) { 154 set_buffer_uptodate(bh); 155 } else { | 49 unsigned long bio_flags, 50 struct writeback_control *wbc); 51 52#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers) 53 54void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) 55{ 56 bh->b_end_io = handler; --- 91 unchanged lines hidden (view full) --- 148 * hashing after unlocking the buffer, so it doesn't actually touch the bh 149 * itself. 150 */ 151static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) 152{ 153 if (uptodate) { 154 set_buffer_uptodate(bh); 155 } else { |
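The change to the submit_bh_wbc() prototype above is the heart of this diff: the single `rw` argument, which packed the data direction and modifier flags into one integer, is split into `op` (the operation proper, e.g. REQ_OP_READ or REQ_OP_WRITE) and `op_flags` (modifiers such as WRITE_SYNC or REQ_RAHEAD). A minimal userspace model of the two calling conventions; the `demo_*` names and bit values are invented for illustration and are not the kernel's definitions:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's opcodes and request flags. */
enum demo_op { DEMO_OP_READ = 0, DEMO_OP_WRITE = 1 };
#define DEMO_SYNC (1 << 3)

/* Old convention: one 'rw' integer mixes direction and flag bits. */
static void submit_old(int rw)
{
	printf("old: %s%s\n", (rw & 1) ? "write" : "read",
	       (rw & DEMO_SYNC) ? " (sync)" : "");
}

/* New convention: the operation and its modifiers travel separately. */
static void submit_new(enum demo_op op, int op_flags)
{
	printf("new: %s%s\n", op == DEMO_OP_WRITE ? "write" : "read",
	       (op_flags & DEMO_SYNC) ? " (sync)" : "");
}

int main(void)
{
	submit_old(1 | DEMO_SYNC);            /* direction and flags entangled */
	submit_new(DEMO_OP_WRITE, DEMO_SYNC); /* direction and flags separate  */
	return 0;
}
```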
156 /* This happens, due to failed READA attempts. */ | 156 /* This happens, due to failed read-ahead attempts. */ |
157 clear_buffer_uptodate(bh); 158 } 159 unlock_buffer(bh); 160} 161 162/* 163 * Default synchronous end-of-IO handler.. Just mark it up-to-date and 164 * unlock the buffer. This is what ll_rw_block uses too. --- 85 unchanged lines hidden (view full) --- 250 return ret; 251} 252 253/* 254 * Kick the writeback threads then try to free up some ZONE_NORMAL memory. 255 */ 256static void free_more_memory(void) 257{ | 157 clear_buffer_uptodate(bh); 158 } 159 unlock_buffer(bh); 160} 161 162/* 163 * Default synchronous end-of-IO handler.. Just mark it up-to-date and 164 * unlock the buffer. This is what ll_rw_block uses too. --- 85 unchanged lines hidden (view full) --- 250 return ret; 251} 252 253/* 254 * Kick the writeback threads then try to free up some ZONE_NORMAL memory. 255 */ 256static void free_more_memory(void) 257{ |
258 struct zone *zone; | 258 struct zoneref *z; |
259 int nid; 260 261 wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM); 262 yield(); 263 264 for_each_online_node(nid) { | 259 int nid; 260 261 wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM); 262 yield(); 263 264 for_each_online_node(nid) { |
265 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS), 266 gfp_zone(GFP_NOFS), NULL, 267 &zone); 268 if (zone) | 265 266 z = first_zones_zonelist(node_zonelist(nid, GFP_NOFS), 267 gfp_zone(GFP_NOFS), NULL); 268 if (z->zone) |
269 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0, 270 GFP_NOFS, NULL); 271 } 272} 273 274/* 275 * I/O completion handler for block_read_full_page() - pages 276 * which come unlocked at the end of I/O. --- 306 unchanged lines hidden (view full) --- 583 * dirty, schedule it for IO. So that indirects merge nicely with their data. 584 */ 585void write_boundary_block(struct block_device *bdev, 586 sector_t bblock, unsigned blocksize) 587{ 588 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); 589 if (bh) { 590 if (buffer_dirty(bh)) | 269 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0, 270 GFP_NOFS, NULL); 271 } 272} 273 274/* 275 * I/O completion handler for block_read_full_page() - pages 276 * which come unlocked at the end of I/O. --- 306 unchanged lines hidden (view full) --- 583 * dirty, schedule it for IO. So that indirects merge nicely with their data. 584 */ 585void write_boundary_block(struct block_device *bdev, 586 sector_t bblock, unsigned blocksize) 587{ 588 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); 589 if (bh) { 590 if (buffer_dirty(bh)) |
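This free_more_memory() hunk is independent of the op/op_flags work: it tracks an mm API change under which first_zones_zonelist() returns the struct zoneref * directly rather than storing the zone through an output parameter, so the caller now tests z->zone. A generic sketch of that out-parameter-to-return-value refactoring, using hypothetical types rather than the kernel's:

```c
struct zone { int id; };
struct zoneref { struct zone *zone; };

/* Old pattern: the result is delivered through an out-parameter. */
static void first_zone_old(struct zoneref *table, struct zone **out)
{
	*out = table[0].zone;	/* may be NULL */
}

/* New pattern: return the iterator itself; the caller dereferences it. */
static struct zoneref *first_zone_new(struct zoneref *table)
{
	return &table[0];	/* ->zone may still be NULL */
}

int main(void)
{
	struct zone z0 = { 0 };
	struct zoneref refs[1] = { { &z0 } };
	struct zone *zp;

	first_zone_old(refs, &zp);
	if (zp) { /* old-style check on the zone pointer */ }

	struct zoneref *zr = first_zone_new(refs);
	if (zr->zone) { /* new-style check, mirroring 'if (z->zone)' above */ }
	return 0;
}
```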
591 ll_rw_block(WRITE, 1, &bh); | 591 ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); |
592 put_bh(bh); 593 } 594} 595 596void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) 597{ 598 struct address_space *mapping = inode->i_mapping; 599 struct address_space *buffer_mapping = bh->b_page->mapping; --- 620 unchanged lines hidden (view full) --- 1220{ 1221 lock_buffer(bh); 1222 if (buffer_uptodate(bh)) { 1223 unlock_buffer(bh); 1224 return bh; 1225 } else { 1226 get_bh(bh); 1227 bh->b_end_io = end_buffer_read_sync; | 592 put_bh(bh); 593 } 594} 595 596void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) 597{ 598 struct address_space *mapping = inode->i_mapping; 599 struct address_space *buffer_mapping = bh->b_page->mapping; --- 620 unchanged lines hidden (view full) --- 1220{ 1221 lock_buffer(bh); 1222 if (buffer_uptodate(bh)) { 1223 unlock_buffer(bh); 1224 return bh; 1225 } else { 1226 get_bh(bh); 1227 bh->b_end_io = end_buffer_read_sync; |
1228 submit_bh(READ, bh); | 1228 submit_bh(REQ_OP_READ, 0, bh); |
1229 wait_on_buffer(bh); 1230 if (buffer_uptodate(bh)) 1231 return bh; 1232 } 1233 brelse(bh); 1234 return NULL; 1235} 1236 --- 153 unchanged lines hidden (view full) --- 1390 1391/* 1392 * Do async read-ahead on a buffer.. 1393 */ 1394void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 1395{ 1396 struct buffer_head *bh = __getblk(bdev, block, size); 1397 if (likely(bh)) { | 1229 wait_on_buffer(bh); 1230 if (buffer_uptodate(bh)) 1231 return bh; 1232 } 1233 brelse(bh); 1234 return NULL; 1235} 1236 --- 153 unchanged lines hidden (view full) --- 1390 1391/* 1392 * Do async read-ahead on a buffer.. 1393 */ 1394void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 1395{ 1396 struct buffer_head *bh = __getblk(bdev, block, size); 1397 if (likely(bh)) { |
1398 ll_rw_block(READA, 1, &bh); | 1398 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); |
1399 brelse(bh); 1400 } 1401} 1402EXPORT_SYMBOL(__breadahead); 1403 1404/** 1405 * __bread_gfp() - reads a specified block and returns the bh 1406 * @bdev: the block_device to read from --- 285 unchanged lines hidden (view full) --- 1692 bh_end_io_t *handler) 1693{ 1694 int err; 1695 sector_t block; 1696 sector_t last_block; 1697 struct buffer_head *bh, *head; 1698 unsigned int blocksize, bbits; 1699 int nr_underway = 0; | 1399 brelse(bh); 1400 } 1401} 1402EXPORT_SYMBOL(__breadahead); 1403 1404/** 1405 * __bread_gfp() - reads a specified block and returns the bh 1406 * @bdev: the block_device to read from --- 285 unchanged lines hidden (view full) --- 1692 bh_end_io_t *handler) 1693{ 1694 int err; 1695 sector_t block; 1696 sector_t last_block; 1697 struct buffer_head *bh, *head; 1698 unsigned int blocksize, bbits; 1699 int nr_underway = 0; |
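In __breadahead() the old READA pseudo-direction disappears: read-ahead is now an ordinary REQ_OP_READ whose REQ_RAHEAD flag marks it as a best-effort hint, which is also why the comment at line 156 changed from "READA" to "read-ahead". A small model of that distinction; the names and printed behavior are illustrative only:

```c
#include <stdio.h>

enum demo_op { DEMO_OP_READ, DEMO_OP_WRITE };
#define DEMO_RAHEAD (1 << 0)	/* stand-in for REQ_RAHEAD */

/* Read-ahead is no longer its own pseudo-operation (the old READA);
 * it is a plain read whose flags mark it as droppable under load. */
static void issue(enum demo_op op, int op_flags)
{
	if (op == DEMO_OP_READ && (op_flags & DEMO_RAHEAD))
		printf("read-ahead: a hint, may fail silently\n");
	else if (op == DEMO_OP_READ)
		printf("read: must complete or report an error\n");
}

int main(void)
{
	issue(DEMO_OP_READ, DEMO_RAHEAD);	/* old: ll_rw_block(READA, ...) */
	issue(DEMO_OP_READ, 0);			/* plain read */
	return 0;
}
```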
1700 int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); | 1700 int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0); |
1701 1702 head = create_page_buffers(page, inode, 1703 (1 << BH_Dirty)|(1 << BH_Uptodate)); 1704 1705 /* 1706 * Be very careful. We have no exclusion from __set_page_dirty_buffers 1707 * here, and the (potentially unmapped) buffers may become dirty at 1708 * any time. If a buffer becomes dirty here after we've inspected it --- 72 unchanged lines hidden (view full) --- 1781 * drop the bh refcounts early. 1782 */ 1783 BUG_ON(PageWriteback(page)); 1784 set_page_writeback(page); 1785 1786 do { 1787 struct buffer_head *next = bh->b_this_page; 1788 if (buffer_async_write(bh)) { | 1701 1702 head = create_page_buffers(page, inode, 1703 (1 << BH_Dirty)|(1 << BH_Uptodate)); 1704 1705 /* 1706 * Be very careful. We have no exclusion from __set_page_dirty_buffers 1707 * here, and the (potentially unmapped) buffers may become dirty at 1708 * any time. If a buffer becomes dirty here after we've inspected it --- 72 unchanged lines hidden (view full) --- 1781 * drop the bh refcounts early. 1782 */ 1783 BUG_ON(PageWriteback(page)); 1784 set_page_writeback(page); 1785 1786 do { 1787 struct buffer_head *next = bh->b_this_page; 1788 if (buffer_async_write(bh)) { |
1789 submit_bh_wbc(write_op, bh, 0, wbc); | 1789 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc); |
1790 nr_underway++; 1791 } 1792 bh = next; 1793 } while (bh != head); 1794 unlock_page(page); 1795 1796 err = 0; 1797done: --- 37 unchanged lines hidden (view full) --- 1835 SetPageError(page); 1836 BUG_ON(PageWriteback(page)); 1837 mapping_set_error(page->mapping, err); 1838 set_page_writeback(page); 1839 do { 1840 struct buffer_head *next = bh->b_this_page; 1841 if (buffer_async_write(bh)) { 1842 clear_buffer_dirty(bh); | 1790 nr_underway++; 1791 } 1792 bh = next; 1793 } while (bh != head); 1794 unlock_page(page); 1795 1796 err = 0; 1797done: --- 37 unchanged lines hidden (view full) --- 1835 SetPageError(page); 1836 BUG_ON(PageWriteback(page)); 1837 mapping_set_error(page->mapping, err); 1838 set_page_writeback(page); 1839 do { 1840 struct buffer_head *next = bh->b_this_page; 1841 if (buffer_async_write(bh)) { 1842 clear_buffer_dirty(bh); |
1843 submit_bh_wbc(write_op, bh, 0, wbc); | 1843 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc); |
1844 nr_underway++; 1845 } 1846 bh = next; 1847 } while (bh != head); 1848 unlock_page(page); 1849 goto done; 1850} 1851 --- 98 unchanged lines hidden (view full) --- 1950 if (PageUptodate(page)) { 1951 if (!buffer_uptodate(bh)) 1952 set_buffer_uptodate(bh); 1953 continue; 1954 } 1955 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 1956 !buffer_unwritten(bh) && 1957 (block_start < from || block_end > to)) { | 1844 nr_underway++; 1845 } 1846 bh = next; 1847 } while (bh != head); 1848 unlock_page(page); 1849 goto done; 1850} 1851 --- 98 unchanged lines hidden (view full) --- 1950 if (PageUptodate(page)) { 1951 if (!buffer_uptodate(bh)) 1952 set_buffer_uptodate(bh); 1953 continue; 1954 } 1955 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 1956 !buffer_unwritten(bh) && 1957 (block_start < from || block_end > to)) { |
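In __block_write_full_page() the rename from write_op to write_flags makes the new division of labor visible: the sync hint derived from wbc->sync_mode is computed once as a flag word, while the opcode is spelled literally at each submit_bh_wbc() call site. A compact model of that split, with invented names:

```c
#include <assert.h>

enum demo_op { DEMO_OP_READ, DEMO_OP_WRITE };
enum demo_sync_mode { DEMO_WB_SYNC_NONE, DEMO_WB_SYNC_ALL };
#define DEMO_WRITE_SYNC (1 << 1)	/* stand-in for WRITE_SYNC */

static int last_op, last_flags;

static void demo_submit(enum demo_op op, int op_flags)
{
	last_op = op;
	last_flags = op_flags;
}

int main(void)
{
	/* Mirrors the hunk: the flags depend on writeback mode, the op does not. */
	enum demo_sync_mode mode = DEMO_WB_SYNC_ALL;
	int write_flags = (mode == DEMO_WB_SYNC_ALL) ? DEMO_WRITE_SYNC : 0;

	demo_submit(DEMO_OP_WRITE, write_flags);
	assert(last_op == DEMO_OP_WRITE);
	assert(last_flags == DEMO_WRITE_SYNC);
	return 0;
}
```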
1958 ll_rw_block(READ, 1, &bh); | 1958 ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
1959 *wait_bh++=bh; 1960 } 1961 } 1962 /* 1963 * If we issued read requests - let them complete. 1964 */ 1965 while(wait_bh > wait) { 1966 wait_on_buffer(*--wait_bh); --- 276 unchanged lines hidden (view full) --- 2243 * inside the buffer lock in case another process reading 2244 * the underlying blockdev brought it uptodate (the sct fix). 2245 */ 2246 for (i = 0; i < nr; i++) { 2247 bh = arr[i]; 2248 if (buffer_uptodate(bh)) 2249 end_buffer_async_read(bh, 1); 2250 else | 1959 *wait_bh++=bh; 1960 } 1961 } 1962 /* 1963 * If we issued read requests - let them complete. 1964 */ 1965 while(wait_bh > wait) { 1966 wait_on_buffer(*--wait_bh); --- 276 unchanged lines hidden (view full) --- 2243 * inside the buffer lock in case another process reading 2244 * the underlying blockdev brought it uptodate (the sct fix). 2245 */ 2246 for (i = 0; i < nr; i++) { 2247 bh = arr[i]; 2248 if (buffer_uptodate(bh)) 2249 end_buffer_async_read(bh, 1); 2250 else |
2251 submit_bh(READ, bh); | 2251 submit_bh(REQ_OP_READ, 0, bh); |
2252 } 2253 return 0; 2254} 2255EXPORT_SYMBOL(block_read_full_page); 2256 2257/* utility function for filesystems that need to do work on expanding 2258 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2259 * deal with the hole. --- 317 unchanged lines hidden (view full) --- 2577 to, block_end); 2578 continue; 2579 } 2580 if (buffer_uptodate(bh)) 2581 continue; /* reiserfs does this */ 2582 if (block_start < from || block_end > to) { 2583 lock_buffer(bh); 2584 bh->b_end_io = end_buffer_read_nobh; | 2252 } 2253 return 0; 2254} 2255EXPORT_SYMBOL(block_read_full_page); 2256 2257/* utility function for filesystems that need to do work on expanding 2258 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2259 * deal with the hole. --- 317 unchanged lines hidden (view full) --- 2577 to, block_end); 2578 continue; 2579 } 2580 if (buffer_uptodate(bh)) 2581 continue; /* reiserfs does this */ 2582 if (block_start < from || block_end > to) { 2583 lock_buffer(bh); 2584 bh->b_end_io = end_buffer_read_nobh; |
2585 submit_bh(READ, bh); | 2585 submit_bh(REQ_OP_READ, 0, bh); |
2586 nr_reads++; 2587 } 2588 } 2589 2590 if (nr_reads) { 2591 /* 2592 * The page is locked, so these buffers are protected from 2593 * any VM or truncate activity. Hence we don't need to care --- 253 unchanged lines hidden (view full) --- 2847 } 2848 2849 /* Ok, it's mapped. Make sure it's up-to-date */ 2850 if (PageUptodate(page)) 2851 set_buffer_uptodate(bh); 2852 2853 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2854 err = -EIO; | 2586 nr_reads++; 2587 } 2588 } 2589 2590 if (nr_reads) { 2591 /* 2592 * The page is locked, so these buffers are protected from 2593 * any VM or truncate activity. Hence we don't need to care --- 253 unchanged lines hidden (view full) --- 2847 } 2848 2849 /* Ok, it's mapped. Make sure it's up-to-date */ 2850 if (PageUptodate(page)) 2851 set_buffer_uptodate(bh); 2852 2853 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2854 err = -EIO; |
2855 ll_rw_block(READ, 1, &bh); | 2855 ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
2856 wait_on_buffer(bh); 2857 /* Uhhuh. Read error. Complain and punt. */ 2858 if (!buffer_uptodate(bh)) 2859 goto unlock; 2860 } 2861 2862 zero_user(page, offset, length); 2863 mark_buffer_dirty(bh); --- 80 unchanged lines hidden (view full) --- 2944 * 2945 * We'll just truncate the bio to the size of the device, 2946 * and clear the end of the buffer head manually. 2947 * 2948 * Truly out-of-range accesses will turn into actual IO 2949 * errors, this only handles the "we need to be able to 2950 * do IO at the final sector" case. 2951 */ | 2856 wait_on_buffer(bh); 2857 /* Uhhuh. Read error. Complain and punt. */ 2858 if (!buffer_uptodate(bh)) 2859 goto unlock; 2860 } 2861 2862 zero_user(page, offset, length); 2863 mark_buffer_dirty(bh); --- 80 unchanged lines hidden (view full) --- 2944 * 2945 * We'll just truncate the bio to the size of the device, 2946 * and clear the end of the buffer head manually. 2947 * 2948 * Truly out-of-range accesses will turn into actual IO 2949 * errors, this only handles the "we need to be able to 2950 * do IO at the final sector" case. 2951 */ |
2952void guard_bio_eod(int rw, struct bio *bio) | 2952void guard_bio_eod(int op, struct bio *bio) |
2953{ 2954 sector_t maxsector; 2955 struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; 2956 unsigned truncated_bytes; 2957 2958 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 2959 if (!maxsector) 2960 return; --- 13 unchanged lines hidden (view full) --- 2974 /* Uhhuh. We've got a bio that straddles the device size! */ 2975 truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); 2976 2977 /* Truncate the bio.. */ 2978 bio->bi_iter.bi_size -= truncated_bytes; 2979 bvec->bv_len -= truncated_bytes; 2980 2981 /* ..and clear the end of the buffer for reads */ | 2953{ 2954 sector_t maxsector; 2955 struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; 2956 unsigned truncated_bytes; 2957 2958 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 2959 if (!maxsector) 2960 return; --- 13 unchanged lines hidden (view full) --- 2974 /* Uhhuh. We've got a bio that straddles the device size! */ 2975 truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); 2976 2977 /* Truncate the bio.. */ 2978 bio->bi_iter.bi_size -= truncated_bytes; 2979 bvec->bv_len -= truncated_bytes; 2980 2981 /* ..and clear the end of the buffer for reads */ |
2982 if ((rw & RW_MASK) == READ) { | 2982 if (op == REQ_OP_READ) { |
2983 zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, 2984 truncated_bytes); 2985 } 2986} 2987 | 2983 zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, 2984 truncated_bytes); 2985 } 2986} 2987 |
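guard_bio_eod() shows why the split simplifies direction tests: when flags shared the old rw integer, the direction had to be masked out ((rw & RW_MASK) == READ); with a dedicated op, plain equality suffices. A short demonstration with made-up bit values:

```c
#include <assert.h>

enum demo_op { DEMO_OP_READ = 0, DEMO_OP_WRITE = 1 };
#define DEMO_RW_MASK 1
#define DEMO_SYNC (1 << 3)

int main(void)
{
	/* Old: flag bits shared the integer, so masking was mandatory. */
	int rw = 0 /* READ */ | DEMO_SYNC;
	assert((rw & DEMO_RW_MASK) == 0);	/* still a READ despite the flag */

	/* New: op carries only the operation; flags live in op_flags. */
	enum demo_op op = DEMO_OP_READ;
	assert(op == DEMO_OP_READ);		/* no masking needed */
	return 0;
}
```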
2988static int submit_bh_wbc(int rw, struct buffer_head *bh, | 2988static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, |
2989 unsigned long bio_flags, struct writeback_control *wbc) 2990{ 2991 struct bio *bio; 2992 2993 BUG_ON(!buffer_locked(bh)); 2994 BUG_ON(!buffer_mapped(bh)); 2995 BUG_ON(!bh->b_end_io); 2996 BUG_ON(buffer_delay(bh)); 2997 BUG_ON(buffer_unwritten(bh)); 2998 2999 /* 3000 * Only clear out a write error when rewriting 3001 */ | 2989 unsigned long bio_flags, struct writeback_control *wbc) 2990{ 2991 struct bio *bio; 2992 2993 BUG_ON(!buffer_locked(bh)); 2994 BUG_ON(!buffer_mapped(bh)); 2995 BUG_ON(!bh->b_end_io); 2996 BUG_ON(buffer_delay(bh)); 2997 BUG_ON(buffer_unwritten(bh)); 2998 2999 /* 3000 * Only clear out a write error when rewriting 3001 */ |
3002 if (test_set_buffer_req(bh) && (rw & WRITE)) | 3002 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) |
3003 clear_buffer_write_io_error(bh); 3004 3005 /* 3006 * from here on down, it's all bio -- do the initial mapping, 3007 * submit_bio -> generic_make_request may further map this bio around 3008 */ 3009 bio = bio_alloc(GFP_NOIO, 1); 3010 --- 8 unchanged lines hidden (view full) --- 3019 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 3020 BUG_ON(bio->bi_iter.bi_size != bh->b_size); 3021 3022 bio->bi_end_io = end_bio_bh_io_sync; 3023 bio->bi_private = bh; 3024 bio->bi_flags |= bio_flags; 3025 3026 /* Take care of bh's that straddle the end of the device */ | 3003 clear_buffer_write_io_error(bh); 3004 3005 /* 3006 * from here on down, it's all bio -- do the initial mapping, 3007 * submit_bio -> generic_make_request may further map this bio around 3008 */ 3009 bio = bio_alloc(GFP_NOIO, 1); 3010 --- 8 unchanged lines hidden (view full) --- 3019 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 3020 BUG_ON(bio->bi_iter.bi_size != bh->b_size); 3021 3022 bio->bi_end_io = end_bio_bh_io_sync; 3023 bio->bi_private = bh; 3024 bio->bi_flags |= bio_flags; 3025 3026 /* Take care of bh's that straddle the end of the device */ |
3027 guard_bio_eod(rw, bio); | 3027 guard_bio_eod(op, bio); |
3028 3029 if (buffer_meta(bh)) | 3028 3029 if (buffer_meta(bh)) |
3030 rw |= REQ_META; | 3030 op_flags |= REQ_META; |
3031 if (buffer_prio(bh)) | 3031 if (buffer_prio(bh)) |
3032 rw |= REQ_PRIO; | 3032 op_flags |= REQ_PRIO; 3033 bio_set_op_attrs(bio, op, op_flags); |
3033 | 3034 |
3034 submit_bio(rw, bio); | 3035 submit_bio(bio); |
3035 return 0; 3036} 3037 | 3036 return 0; 3037} 3038 |
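Inside submit_bh_wbc() the per-buffer modifiers (REQ_META, REQ_PRIO) now accumulate in op_flags, and bio_set_op_attrs() combines the op and the flags into the bio in one place, which is why submit_bio() no longer takes a separate rw argument: the bio itself carries the information. An illustrative model of that packing; the field name, bit positions, and mask are invented, not the kernel's actual encoding:

```c
#include <assert.h>

enum demo_op { DEMO_OP_READ = 0, DEMO_OP_WRITE = 1 };
#define DEMO_META (1 << 8)
#define DEMO_PRIO (1 << 9)
#define DEMO_OP_MASK 0xff

struct demo_bio { unsigned int opf; };

/* Pack the operation and its modifier flags into the bio's one field. */
static void demo_set_op_attrs(struct demo_bio *bio, enum demo_op op, int flags)
{
	bio->opf = (unsigned int)op | (unsigned int)flags;
}

int main(void)
{
	struct demo_bio bio = { 0 };
	int op_flags = 0;

	op_flags |= DEMO_META;	/* the buffer_meta(bh) case */
	op_flags |= DEMO_PRIO;	/* the buffer_prio(bh) case */
	demo_set_op_attrs(&bio, DEMO_OP_WRITE, op_flags);

	assert((bio.opf & DEMO_OP_MASK) == DEMO_OP_WRITE);
	assert(bio.opf & DEMO_META);
	/* demo_submit_bio(&bio) would need no separate rw argument now. */
	return 0;
}
```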
3038int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags) | 3039int _submit_bh(int op, int op_flags, struct buffer_head *bh, 3040 unsigned long bio_flags) |
3039{ | 3041{ |
3040 return submit_bh_wbc(rw, bh, bio_flags, NULL); | 3042 return submit_bh_wbc(op, op_flags, bh, bio_flags, NULL); |
3041} 3042EXPORT_SYMBOL_GPL(_submit_bh); 3043 | 3043} 3044EXPORT_SYMBOL_GPL(_submit_bh); 3045 |
3044int submit_bh(int rw, struct buffer_head *bh) | 3046int submit_bh(int op, int op_flags, struct buffer_head *bh) |
3045{ | 3047{ |
3046 return submit_bh_wbc(rw, bh, 0, NULL); | 3048 return submit_bh_wbc(op, op_flags, bh, 0, NULL); |
3047} 3048EXPORT_SYMBOL(submit_bh); 3049 3050/** 3051 * ll_rw_block: low-level access to block devices (DEPRECATED) | 3049} 3050EXPORT_SYMBOL(submit_bh); 3051 3052/** 3053 * ll_rw_block: low-level access to block devices (DEPRECATED) |
3052 * @rw: whether to %READ or %WRITE or maybe %READA (readahead) | 3054 * @op: whether to %READ or %WRITE 3055 * @op_flags: rq_flag_bits |
3053 * @nr: number of &struct buffer_heads in the array 3054 * @bhs: array of pointers to &struct buffer_head 3055 * 3056 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and | 3056 * @nr: number of &struct buffer_heads in the array 3057 * @bhs: array of pointers to &struct buffer_head 3058 * 3059 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and |
3057 * requests an I/O operation on them, either a %READ or a %WRITE. The third 3058 * %READA option is described in the documentation for generic_make_request() 3059 * which ll_rw_block() calls. | 3060 * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE. 3061 * @op_flags contains flags modifying the detailed I/O behavior, most notably 3062 * %REQ_RAHEAD. |
3060 * 3061 * This function drops any buffer that it cannot get a lock on (with the 3062 * BH_Lock state bit), any buffer that appears to be clean when doing a write 3063 * request, and any buffer that appears to be up-to-date when doing read 3064 * request. Further it marks as clean buffers that are processed for 3065 * writing (the buffer cache won't assume that they are actually clean 3066 * until the buffer gets unlocked). 3067 * 3068 * ll_rw_block sets b_end_io to simple completion handler that marks 3069 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes 3070 * any waiters. 3071 * 3072 * All of the buffers must be for the same device, and must also be a 3073 * multiple of the current approved size for the device. 3074 */ | 3063 * 3064 * This function drops any buffer that it cannot get a lock on (with the 3065 * BH_Lock state bit), any buffer that appears to be clean when doing a write 3066 * request, and any buffer that appears to be up-to-date when doing read 3067 * request. Further it marks as clean buffers that are processed for 3068 * writing (the buffer cache won't assume that they are actually clean 3069 * until the buffer gets unlocked). 3070 * 3071 * ll_rw_block sets b_end_io to simple completion handler that marks 3072 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes 3073 * any waiters. 3074 * 3075 * All of the buffers must be for the same device, and must also be a 3076 * multiple of the current approved size for the device. 3077 */ |
3075void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) | 3078void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[]) |
3076{ 3077 int i; 3078 3079 for (i = 0; i < nr; i++) { 3080 struct buffer_head *bh = bhs[i]; 3081 3082 if (!trylock_buffer(bh)) 3083 continue; | 3079{ 3080 int i; 3081 3082 for (i = 0; i < nr; i++) { 3083 struct buffer_head *bh = bhs[i]; 3084 3085 if (!trylock_buffer(bh)) 3086 continue; |
3084 if (rw == WRITE) { | 3087 if (op == WRITE) { |
3085 if (test_clear_buffer_dirty(bh)) { 3086 bh->b_end_io = end_buffer_write_sync; 3087 get_bh(bh); | 3088 if (test_clear_buffer_dirty(bh)) { 3089 bh->b_end_io = end_buffer_write_sync; 3090 get_bh(bh); |
3088 submit_bh(WRITE, bh); | 3091 submit_bh(op, op_flags, bh); |
3089 continue; 3090 } 3091 } else { 3092 if (!buffer_uptodate(bh)) { 3093 bh->b_end_io = end_buffer_read_sync; 3094 get_bh(bh); | 3092 continue; 3093 } 3094 } else { 3095 if (!buffer_uptodate(bh)) { 3096 bh->b_end_io = end_buffer_read_sync; 3097 get_bh(bh); |
3095 submit_bh(rw, bh); | 3098 submit_bh(op, op_flags, bh); |
3096 continue; 3097 } 3098 } 3099 unlock_buffer(bh); 3100 } 3101} 3102EXPORT_SYMBOL(ll_rw_block); 3103 | 3099 continue; 3100 } 3101 } 3102 unlock_buffer(bh); 3103 } 3104} 3105EXPORT_SYMBOL(ll_rw_block); 3106 |
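The updated kernel-doc above warns that ll_rw_block() silently drops buffers it cannot lock, clean buffers on a write, and up-to-date buffers on a read; the loop implements exactly those rules. A self-contained userspace model of the skip logic (all demo_* names are illustrative):

```c
#include <assert.h>
#include <stdbool.h>

enum demo_op { DEMO_OP_READ, DEMO_OP_WRITE };

struct demo_bh {
	bool locked;	/* trylock_buffer() would fail     */
	bool dirty;	/* test_clear_buffer_dirty() state */
	bool uptodate;	/* buffer_uptodate() state         */
	int  submitted;
};

static void demo_ll_rw_block(enum demo_op op, int nr, struct demo_bh *bhs[])
{
	for (int i = 0; i < nr; i++) {
		struct demo_bh *bh = bhs[i];

		if (bh->locked)
			continue;	/* cannot get the lock: dropped */
		if (op == DEMO_OP_WRITE && bh->dirty) {
			bh->dirty = false;
			bh->submitted++;
		} else if (op == DEMO_OP_READ && !bh->uptodate) {
			bh->submitted++;
		}
		/* otherwise silently dropped, as the kernel-doc warns */
	}
}

int main(void)
{
	struct demo_bh clean = { .dirty = false };
	struct demo_bh dirty = { .dirty = true };
	struct demo_bh *batch[] = { &clean, &dirty };

	demo_ll_rw_block(DEMO_OP_WRITE, 2, batch);
	assert(clean.submitted == 0);	/* clean buffer skipped on write */
	assert(dirty.submitted == 1);	/* dirty buffer submitted        */
	return 0;
}
```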
3104void write_dirty_buffer(struct buffer_head *bh, int rw) | 3107void write_dirty_buffer(struct buffer_head *bh, int op_flags) |
3105{ 3106 lock_buffer(bh); 3107 if (!test_clear_buffer_dirty(bh)) { 3108 unlock_buffer(bh); 3109 return; 3110 } 3111 bh->b_end_io = end_buffer_write_sync; 3112 get_bh(bh); | 3108{ 3109 lock_buffer(bh); 3110 if (!test_clear_buffer_dirty(bh)) { 3111 unlock_buffer(bh); 3112 return; 3113 } 3114 bh->b_end_io = end_buffer_write_sync; 3115 get_bh(bh); |
3113 submit_bh(rw, bh); | 3116 submit_bh(REQ_OP_WRITE, op_flags, bh); |
3114} 3115EXPORT_SYMBOL(write_dirty_buffer); 3116 3117/* 3118 * For a data-integrity writeout, we need to wait upon any in-progress I/O 3119 * and then start new I/O and then wait upon it. The caller must have a ref on 3120 * the buffer_head. 3121 */ | 3117} 3118EXPORT_SYMBOL(write_dirty_buffer); 3119 3120/* 3121 * For a data-integrity writeout, we need to wait upon any in-progress I/O 3122 * and then start new I/O and then wait upon it. The caller must have a ref on 3123 * the buffer_head. 3124 */ |
3122int __sync_dirty_buffer(struct buffer_head *bh, int rw) | 3125int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) |
3123{ 3124 int ret = 0; 3125 3126 WARN_ON(atomic_read(&bh->b_count) < 1); 3127 lock_buffer(bh); 3128 if (test_clear_buffer_dirty(bh)) { 3129 get_bh(bh); 3130 bh->b_end_io = end_buffer_write_sync; | 3126{ 3127 int ret = 0; 3128 3129 WARN_ON(atomic_read(&bh->b_count) < 1); 3130 lock_buffer(bh); 3131 if (test_clear_buffer_dirty(bh)) { 3132 get_bh(bh); 3133 bh->b_end_io = end_buffer_write_sync; |
3131 ret = submit_bh(rw, bh); | 3134 ret = submit_bh(REQ_OP_WRITE, op_flags, bh); |
3132 wait_on_buffer(bh); 3133 if (!ret && !buffer_uptodate(bh)) 3134 ret = -EIO; 3135 } else { 3136 unlock_buffer(bh); 3137 } 3138 return ret; 3139} --- 246 unchanged lines hidden (view full) --- 3386 3387 if (buffer_uptodate(bh)) { 3388 unlock_buffer(bh); 3389 return 0; 3390 } 3391 3392 get_bh(bh); 3393 bh->b_end_io = end_buffer_read_sync; | 3135 wait_on_buffer(bh); 3136 if (!ret && !buffer_uptodate(bh)) 3137 ret = -EIO; 3138 } else { 3139 unlock_buffer(bh); 3140 } 3141 return ret; 3142} --- 246 unchanged lines hidden (view full) --- 3389 3390 if (buffer_uptodate(bh)) { 3391 unlock_buffer(bh); 3392 return 0; 3393 } 3394 3395 get_bh(bh); 3396 bh->b_end_io = end_buffer_read_sync; |
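write_dirty_buffer() and __sync_dirty_buffer() illustrate the other caller pattern in this diff: helpers that can only ever write now hard-code REQ_OP_WRITE internally, so their parameter narrows from rw to op_flags and callers choose only the urgency. A sketch of that narrowing, again with invented names:

```c
#include <assert.h>

enum demo_op { DEMO_OP_READ, DEMO_OP_WRITE };
#define DEMO_WRITE_SYNC (1 << 1)

static int last_op, last_flags;

static void demo_submit_bh(enum demo_op op, int op_flags)
{
	last_op = op;
	last_flags = op_flags;
}

/* The helper knows it writes; callers pick only the modifier flags. */
static void demo_write_dirty_buffer(int op_flags)
{
	demo_submit_bh(DEMO_OP_WRITE, op_flags);
}

int main(void)
{
	demo_write_dirty_buffer(DEMO_WRITE_SYNC);	/* a sync writeout */
	assert(last_op == DEMO_OP_WRITE);
	assert(last_flags == DEMO_WRITE_SYNC);
	return 0;
}
```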
3394 submit_bh(READ, bh); | 3397 submit_bh(REQ_OP_READ, 0, bh); |
3395 wait_on_buffer(bh); 3396 if (buffer_uptodate(bh)) 3397 return 0; 3398 return -EIO; 3399} 3400EXPORT_SYMBOL(bh_submit_read); 3401 3402void __init buffer_init(void) --- 16 unchanged lines hidden --- | 3398 wait_on_buffer(bh); 3399 if (buffer_uptodate(bh)) 3400 return 0; 3401 return -EIO; 3402} 3403EXPORT_SYMBOL(bh_submit_read); 3404 3405void __init buffer_init(void) --- 16 unchanged lines hidden --- |