buffer.c: 6e1db88d536adcbbfe562b2d4b7d6425784fff12 (old, "-" lines) vs. 155130a4f7848b1aac439cab6bda1a175507c71c (new, "+" lines)
 /*
  * linux/fs/buffer.c
  *
  * Copyright (C) 1991, 1992, 2002 Linus Torvalds
  */

 /*
  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95

--- 1948 unchanged lines hidden ---

 {
         unsigned start = pos & (PAGE_CACHE_SIZE - 1);

         return block_prepare_write(page, start, start + len, get_block);
 }
 EXPORT_SYMBOL(__block_write_begin);

 /*
- * Filesystems implementing the new truncate sequence should use the
- * _newtrunc postfix variant which won't incorrectly call vmtruncate.
+ * block_write_begin takes care of the basic task of block allocation and
+ * bringing partial write blocks uptodate first.
+ *
  * The filesystem needs to handle block truncation upon failure.
  */
-int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
-                       loff_t pos, unsigned len, unsigned flags,
-                       struct page **pagep, void **fsdata,
-                       get_block_t *get_block)
+int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
+                       unsigned flags, struct page **pagep, get_block_t *get_block)
 {
         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
         struct page *page;
         int status;

         page = grab_cache_page_write_begin(mapping, index, flags);
         if (!page)
                 return -ENOMEM;

         status = __block_write_begin(page, pos, len, get_block);
         if (unlikely(status)) {
                 unlock_page(page);
                 page_cache_release(page);
                 page = NULL;
         }

         *pagep = page;
         return status;
 }
-EXPORT_SYMBOL(block_write_begin_newtrunc);
-
-/*
- * block_write_begin takes care of the basic task of block allocation and
- * bringing partial write blocks uptodate first.
- *
- * If *pagep is not NULL, then block_write_begin uses the locked page
- * at *pagep rather than allocating its own. In this case, the page will
- * not be unlocked or deallocated on failure.
- */
-int block_write_begin(struct file *file, struct address_space *mapping,
-                       loff_t pos, unsigned len, unsigned flags,
-                       struct page **pagep, void **fsdata,
-                       get_block_t *get_block)
-{
-        int ret;
-
-        ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
-                                        pagep, fsdata, get_block);
-
-        /*
-         * prepare_write() may have instantiated a few blocks
-         * outside i_size. Trim these off again. Don't need
-         * i_size_read because we hold i_mutex.
-         *
-         * Filesystems which pass down their own page also cannot
-         * call into vmtruncate here because it would lead to lock
-         * inversion problems (*pagep is locked). This is a further
-         * example of where the old truncate sequence is inadequate.
-         */
-        if (unlikely(ret) && *pagep == NULL) {
-                loff_t isize = mapping->host->i_size;
-                if (pos + len > isize)
-                        vmtruncate(mapping->host, isize);
-        }
-
-        return ret;
-}
 EXPORT_SYMBOL(block_write_begin);
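Every caller of block_write_begin() now has to do the block trimming that the removed wrapper above used to do via vmtruncate(). A minimal sketch of a filesystem ->write_begin against the new prototype, assuming hypothetical myfs_get_block() and myfs_write_failed() helpers (the latter standing in for whatever filesystem-specific truncation of blocks past i_size is required):

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical helpers; not part of buffer.c or any real filesystem. */
extern int myfs_get_block(struct inode *inode, sector_t iblock,
                          struct buffer_head *bh_result, int create);
extern void myfs_write_failed(struct address_space *mapping, loff_t to);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, flags, pagep,
                                myfs_get_block);
        if (unlikely(ret)) {
                /*
                 * The failed write may have instantiated blocks beyond
                 * i_size; trim them here, since buffer.c no longer calls
                 * vmtruncate() on our behalf.
                 */
                myfs_write_failed(mapping, pos + len);
        }

        return ret;
}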

 int block_write_end(struct file *file, struct address_space *mapping,
                         loff_t pos, unsigned len, unsigned copied,
                         struct page *page, void *fsdata)
 {
         struct inode *inode = mapping->host;
         unsigned start;

--- 314 unchanged lines hidden ---

 {
         struct inode *inode = mapping->host;
         unsigned blocksize = 1 << inode->i_blkbits;
         unsigned zerofrom;
         int err;

         err = cont_expand_zero(file, mapping, pos, bytes);
         if (err)
-                goto out;
+                return err;

         zerofrom = *bytes & ~PAGE_CACHE_MASK;
         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
                 *bytes |= (blocksize-1);
                 (*bytes)++;
         }

-        *pagep = NULL;
-        err = block_write_begin_newtrunc(file, mapping, pos, len,
-                                flags, pagep, fsdata, get_block);
-out:
-        return err;
+        return block_write_begin(mapping, pos, len, flags, pagep, get_block);
 }
 EXPORT_SYMBOL(cont_write_begin);
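cont_write_begin() is the write_begin helper for filesystems that cannot leave holes: cont_expand_zero() first zero-fills from the current end of initialized data (*bytes) up to pos, then, when the write extends past *bytes and the tail is not block aligned, the *bytes |= (blocksize-1); (*bytes)++ step rounds it up to the next block boundary (with a 1024-byte block size, a tail of 5000 becomes 5120), and finally everything funnels into block_write_begin(). A minimal caller sketch, assuming the usual cont_write_begin() prototype, a hypothetical MYFS_I()/i_zeroed_tail field in the style of FAT's mmu_private, and the hypothetical helpers from the previous sketch:

static int myfs_cont_write_begin(struct file *file,
                                 struct address_space *mapping,
                                 loff_t pos, unsigned len, unsigned flags,
                                 struct page **pagep, void **fsdata)
{
        int err;

        *pagep = NULL;
        /* i_zeroed_tail (hypothetical, loff_t) tracks the initialized tail. */
        err = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                               myfs_get_block,
                               &MYFS_I(mapping->host)->i_zeroed_tail);
        if (err < 0)
                myfs_write_failed(mapping, pos + len);

        return err;
}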

 int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
         struct inode *inode = page->mapping->host;
         __block_commit_write(inode,page,from,to);
         return 0;

--- 125 unchanged lines hidden ---

                 return -ENOMEM;
         *pagep = page;
         *fsdata = NULL;

         if (page_has_buffers(page)) {
                 unlock_page(page);
                 page_cache_release(page);
                 *pagep = NULL;
-                return block_write_begin_newtrunc(NULL, mapping, pos, len,
-                                        flags, pagep, fsdata, get_block);
+                return block_write_begin(mapping, pos, len, flags, pagep,
+                                         get_block);
         }

         if (PageMappedToDisk(page))
                 return 0;

         /*
          * Allocate buffers so that we can keep track of state, and potentially
          * attach them to the page if an error occurs. In the common case of

--- 848 unchanged lines hidden ---
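The sketches above, like __block_write_begin(), cont_write_begin() and nobh_write_begin() themselves, all revolve around a get_block_t callback. A minimal, hypothetical implementation (myfs_block_lookup() and myfs_block_alloc() are made up) showing the contract the buffer.c helpers rely on: map bh_result to a disk block, and flag freshly allocated blocks with set_buffer_new() so the generic code zeroes the parts of the block that the write does not cover:

/* Hypothetical block-mapping helpers; a return of 0 means "no block". */
extern sector_t myfs_block_lookup(struct inode *inode, sector_t iblock);
extern sector_t myfs_block_alloc(struct inode *inode, sector_t iblock);

int myfs_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create)
{
        sector_t phys;

        phys = myfs_block_lookup(inode, iblock);
        if (phys) {
                map_bh(bh_result, inode->i_sb, phys);
                return 0;
        }
        if (!create)
                return 0;        /* hole: leave bh_result unmapped */

        phys = myfs_block_alloc(inode, iblock);
        if (!phys)
                return -ENOSPC;
        map_bh(bh_result, inode->i_sb, phys);
        set_buffer_new(bh_result);        /* newly allocated: zero around the write */
        return 0;
}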