11a59d1b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * Copyright (C) International Business Machines Corp., 2000-2004
41da177e4SLinus Torvalds * Portions Copyright (C) Christoph Hellwig, 2001-2002
51da177e4SLinus Torvalds */
61da177e4SLinus Torvalds
71da177e4SLinus Torvalds #include <linux/fs.h>
81da177e4SLinus Torvalds #include <linux/mpage.h>
91da177e4SLinus Torvalds #include <linux/buffer_head.h>
101da177e4SLinus Torvalds #include <linux/pagemap.h>
111da177e4SLinus Torvalds #include <linux/quotaops.h>
12e2e40f2cSChristoph Hellwig #include <linux/uio.h>
13a9185b41SChristoph Hellwig #include <linux/writeback.h>
141da177e4SLinus Torvalds #include "jfs_incore.h"
151868f4aaSDave Kleikamp #include "jfs_inode.h"
161da177e4SLinus Torvalds #include "jfs_filsys.h"
171da177e4SLinus Torvalds #include "jfs_imap.h"
181da177e4SLinus Torvalds #include "jfs_extent.h"
191da177e4SLinus Torvalds #include "jfs_unicode.h"
201da177e4SLinus Torvalds #include "jfs_debug.h"
21b3b4a6e3SAl Viro #include "jfs_dmap.h"
221da177e4SLinus Torvalds
231da177e4SLinus Torvalds
jfs_iget(struct super_block * sb,unsigned long ino)24eab1df71SDavid Howells struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
251da177e4SLinus Torvalds {
26eab1df71SDavid Howells struct inode *inode;
27eab1df71SDavid Howells int ret;
28eab1df71SDavid Howells
29eab1df71SDavid Howells inode = iget_locked(sb, ino);
30eab1df71SDavid Howells if (!inode)
31eab1df71SDavid Howells return ERR_PTR(-ENOMEM);
32eab1df71SDavid Howells if (!(inode->i_state & I_NEW))
33eab1df71SDavid Howells return inode;
34eab1df71SDavid Howells
35eab1df71SDavid Howells ret = diRead(inode);
36eab1df71SDavid Howells if (ret < 0) {
37eab1df71SDavid Howells iget_failed(inode);
38eab1df71SDavid Howells return ERR_PTR(ret);
391da177e4SLinus Torvalds }
401da177e4SLinus Torvalds
411da177e4SLinus Torvalds if (S_ISREG(inode->i_mode)) {
421da177e4SLinus Torvalds inode->i_op = &jfs_file_inode_operations;
431da177e4SLinus Torvalds inode->i_fop = &jfs_file_operations;
441da177e4SLinus Torvalds inode->i_mapping->a_ops = &jfs_aops;
451da177e4SLinus Torvalds } else if (S_ISDIR(inode->i_mode)) {
461da177e4SLinus Torvalds inode->i_op = &jfs_dir_inode_operations;
471da177e4SLinus Torvalds inode->i_fop = &jfs_dir_operations;
481da177e4SLinus Torvalds } else if (S_ISLNK(inode->i_mode)) {
491da177e4SLinus Torvalds if (inode->i_size >= IDATASIZE) {
501da177e4SLinus Torvalds inode->i_op = &page_symlink_inode_operations;
5121fc61c7SAl Viro inode_nohighmem(inode);
521da177e4SLinus Torvalds inode->i_mapping->a_ops = &jfs_aops;
53d69e83d9SDave Kleikamp } else {
54c7f2e1f0SDmitry Monakhov inode->i_op = &jfs_fast_symlink_inode_operations;
55ad476fedSAl Viro inode->i_link = JFS_IP(inode)->i_inline;
56d69e83d9SDave Kleikamp /*
57d69e83d9SDave Kleikamp * The inline data should be null-terminated, but
58d69e83d9SDave Kleikamp * don't let on-disk corruption crash the kernel
59d69e83d9SDave Kleikamp */
60ad476fedSAl Viro inode->i_link[inode->i_size] = '\0';
61d69e83d9SDave Kleikamp }
621da177e4SLinus Torvalds } else {
631da177e4SLinus Torvalds inode->i_op = &jfs_file_inode_operations;
641da177e4SLinus Torvalds init_special_inode(inode, inode->i_mode, inode->i_rdev);
651da177e4SLinus Torvalds }
66eab1df71SDavid Howells unlock_new_inode(inode);
67eab1df71SDavid Howells return inode;
681da177e4SLinus Torvalds }
691da177e4SLinus Torvalds
701da177e4SLinus Torvalds /*
711da177e4SLinus Torvalds * Workhorse of both fsync & write_inode
721da177e4SLinus Torvalds */
jfs_commit_inode(struct inode * inode,int wait)731da177e4SLinus Torvalds int jfs_commit_inode(struct inode *inode, int wait)
741da177e4SLinus Torvalds {
751da177e4SLinus Torvalds int rc = 0;
761da177e4SLinus Torvalds tid_t tid;
771da177e4SLinus Torvalds static int noisy = 5;
781da177e4SLinus Torvalds
791da177e4SLinus Torvalds jfs_info("In jfs_commit_inode, inode = 0x%p", inode);
801da177e4SLinus Torvalds
811da177e4SLinus Torvalds /*
821da177e4SLinus Torvalds * Don't commit if inode has been committed since last being
831da177e4SLinus Torvalds * marked dirty, or if it has been deleted.
841da177e4SLinus Torvalds */
851da177e4SLinus Torvalds if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
861da177e4SLinus Torvalds return 0;
871da177e4SLinus Torvalds
881da177e4SLinus Torvalds if (isReadOnly(inode)) {
891da177e4SLinus Torvalds /* kernel allows writes to devices on read-only
901da177e4SLinus Torvalds * partitions and may think inode is dirty
911da177e4SLinus Torvalds */
921da177e4SLinus Torvalds if (!special_file(inode->i_mode) && noisy) {
936ed71e98SJoe Perches jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
946ed71e98SJoe Perches inode);
951da177e4SLinus Torvalds jfs_err("Is remount racy?");
961da177e4SLinus Torvalds noisy--;
971da177e4SLinus Torvalds }
981da177e4SLinus Torvalds return 0;
991da177e4SLinus Torvalds }
1001da177e4SLinus Torvalds
1011da177e4SLinus Torvalds tid = txBegin(inode->i_sb, COMMIT_INODE);
1021de87444SIngo Molnar mutex_lock(&JFS_IP(inode)->commit_mutex);
1031da177e4SLinus Torvalds
1041da177e4SLinus Torvalds /*
1051de87444SIngo Molnar * Retest inode state after taking commit_mutex
1061da177e4SLinus Torvalds */
1071da177e4SLinus Torvalds if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
1081da177e4SLinus Torvalds rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);
1091da177e4SLinus Torvalds
1101da177e4SLinus Torvalds txEnd(tid);
1111de87444SIngo Molnar mutex_unlock(&JFS_IP(inode)->commit_mutex);
1121da177e4SLinus Torvalds return rc;
1131da177e4SLinus Torvalds }
1141da177e4SLinus Torvalds
jfs_write_inode(struct inode * inode,struct writeback_control * wbc)115a9185b41SChristoph Hellwig int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1161da177e4SLinus Torvalds {
117a9185b41SChristoph Hellwig int wait = wbc->sync_mode == WB_SYNC_ALL;
118a9185b41SChristoph Hellwig
11973aaa22dSDave Kleikamp if (inode->i_nlink == 0)
1201da177e4SLinus Torvalds return 0;
1211da177e4SLinus Torvalds /*
1221da177e4SLinus Torvalds * If COMMIT_DIRTY is not set, the inode isn't really dirty.
1231da177e4SLinus Torvalds * It has been committed since the last change, but was still
1241da177e4SLinus Torvalds * on the dirty inode list.
1251da177e4SLinus Torvalds */
1261da177e4SLinus Torvalds if (!test_cflag(COMMIT_Dirty, inode)) {
1271da177e4SLinus Torvalds /* Make sure committed changes hit the disk */
1281da177e4SLinus Torvalds jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
1291da177e4SLinus Torvalds return 0;
1301da177e4SLinus Torvalds }
1311da177e4SLinus Torvalds
1321da177e4SLinus Torvalds if (jfs_commit_inode(inode, wait)) {
1331da177e4SLinus Torvalds jfs_err("jfs_write_inode: jfs_commit_inode failed!");
1341da177e4SLinus Torvalds return -EIO;
1351da177e4SLinus Torvalds } else
1361da177e4SLinus Torvalds return 0;
1371da177e4SLinus Torvalds }
1381da177e4SLinus Torvalds
jfs_evict_inode(struct inode * inode)13962aff86fSAl Viro void jfs_evict_inode(struct inode *inode)
1401da177e4SLinus Torvalds {
141b3b4a6e3SAl Viro struct jfs_inode_info *ji = JFS_IP(inode);
142b3b4a6e3SAl Viro
14362aff86fSAl Viro jfs_info("In jfs_evict_inode, inode = 0x%p", inode);
1441da177e4SLinus Torvalds
14562aff86fSAl Viro if (!inode->i_nlink && !is_bad_inode(inode)) {
146871a2931SChristoph Hellwig dquot_initialize(inode);
147907f4554SChristoph Hellwig
14862aff86fSAl Viro if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
149a5304629SHaimin Zhang struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
15091b0abe3SJohannes Weiner truncate_inode_pages_final(&inode->i_data);
151fef26658SMark Fasheh
1521da177e4SLinus Torvalds if (test_cflag(COMMIT_Freewmap, inode))
1531868f4aaSDave Kleikamp jfs_free_zero_link(inode);
1541da177e4SLinus Torvalds
155a5304629SHaimin Zhang if (ipimap && JFS_IP(ipimap)->i_imap)
1561da177e4SLinus Torvalds diFree(inode);
1571da177e4SLinus Torvalds
1581da177e4SLinus Torvalds /*
1591da177e4SLinus Torvalds * Free the inode from the quota allocation.
1601da177e4SLinus Torvalds */
16163936ddaSChristoph Hellwig dquot_free_inode(inode);
162b1b5d7f9SDave Kleikamp }
16362aff86fSAl Viro } else {
16491b0abe3SJohannes Weiner truncate_inode_pages_final(&inode->i_data);
16562aff86fSAl Viro }
166dbd5768fSJan Kara clear_inode(inode);
16762aff86fSAl Viro dquot_drop(inode);
168b3b4a6e3SAl Viro
169b3b4a6e3SAl Viro BUG_ON(!list_empty(&ji->anon_inode_list));
170b3b4a6e3SAl Viro
171b3b4a6e3SAl Viro spin_lock_irq(&ji->ag_lock);
172b3b4a6e3SAl Viro if (ji->active_ag != -1) {
173b3b4a6e3SAl Viro struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
174b3b4a6e3SAl Viro atomic_dec(&bmap->db_active[ji->active_ag]);
175b3b4a6e3SAl Viro ji->active_ag = -1;
176b3b4a6e3SAl Viro }
177b3b4a6e3SAl Viro spin_unlock_irq(&ji->ag_lock);
1781da177e4SLinus Torvalds }
1791da177e4SLinus Torvalds
jfs_dirty_inode(struct inode * inode,int flags)180aa385729SChristoph Hellwig void jfs_dirty_inode(struct inode *inode, int flags)
1811da177e4SLinus Torvalds {
1821da177e4SLinus Torvalds static int noisy = 5;
1831da177e4SLinus Torvalds
1841da177e4SLinus Torvalds if (isReadOnly(inode)) {
1851da177e4SLinus Torvalds if (!special_file(inode->i_mode) && noisy) {
1861da177e4SLinus Torvalds /* kernel allows writes to devices on read-only
1871da177e4SLinus Torvalds * partitions and may try to mark inode dirty
1881da177e4SLinus Torvalds */
1891da177e4SLinus Torvalds jfs_err("jfs_dirty_inode called on read-only volume");
1901da177e4SLinus Torvalds jfs_err("Is remount racy?");
1911da177e4SLinus Torvalds noisy--;
1921da177e4SLinus Torvalds }
1931da177e4SLinus Torvalds return;
1941da177e4SLinus Torvalds }
1951da177e4SLinus Torvalds
1961da177e4SLinus Torvalds set_cflag(COMMIT_Dirty, inode);
1971da177e4SLinus Torvalds }
1981da177e4SLinus Torvalds
/*
 * jfs_get_block - map a logical block of @ip to a disk address
 *
 * Standard get_block_t: fills @bh_result with the on-disk location of
 * @lblock, allocating new extents when @create is set.  Takes the
 * inode read/write lock for the duration of the lookup/allocation.
 */
int jfs_get_block(struct inode *ip, sector_t lblock,
		  struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	s64 xaddr;
	int xflag;
	int rc = 0;
	xad_t xad;
	s32 xlen = bh_result->b_size >> ip->i_blkbits;

	/* Writers need the inode write lock; readers the read lock. */
	if (create)
		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	else
		IREAD_LOCK(ip, RDWRLOCK_NORMAL);

	/* Is the block already covered by an allocated extent? */
	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
	    xaddr) {
		if (xflag & XAD_NOTRECORDED) {
			/*
			 * Allocated but not recorded: a read treats this
			 * as a hole; a write records the extent now.
			 */
			if (!create)
				goto unlock;
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}

	/* Not mapped: reads see a hole. */
	if (!create)
		goto unlock;

	/* Allocate a new extent, seeded by an allocation hint. */
	rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad);
	if (rc)
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;

unlock:
	/* Release whichever lock we took above. */
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);
	return rc;
}
2661da177e4SLinus Torvalds
jfs_writepages(struct address_space * mapping,struct writeback_control * wbc)2671da177e4SLinus Torvalds static int jfs_writepages(struct address_space *mapping,
2681da177e4SLinus Torvalds struct writeback_control *wbc)
2691da177e4SLinus Torvalds {
2701da177e4SLinus Torvalds return mpage_writepages(mapping, wbc, jfs_get_block);
2711da177e4SLinus Torvalds }
2721da177e4SLinus Torvalds
jfs_read_folio(struct file * file,struct folio * folio)273f132ab7dSMatthew Wilcox (Oracle) static int jfs_read_folio(struct file *file, struct folio *folio)
2741da177e4SLinus Torvalds {
275f132ab7dSMatthew Wilcox (Oracle) return mpage_read_folio(folio, jfs_get_block);
2761da177e4SLinus Torvalds }
2771da177e4SLinus Torvalds
jfs_readahead(struct readahead_control * rac)278d4388340SMatthew Wilcox (Oracle) static void jfs_readahead(struct readahead_control *rac)
2791da177e4SLinus Torvalds {
280d4388340SMatthew Wilcox (Oracle) mpage_readahead(rac, jfs_get_block);
2811da177e4SLinus Torvalds }
2821da177e4SLinus Torvalds
/*
 * jfs_write_failed - undo blocks instantiated past i_size by a failed
 * or short write: trim the page cache and truncate the extents.
 */
static void jfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to <= inode->i_size)
		return;

	truncate_pagecache(inode, inode->i_size);
	jfs_truncate(inode);
}
29286dd07d6SMarco Stornelli
/*
 * ->write_begin: prepare a page for a buffered write, cleaning up any
 * blocks instantiated beyond i_size if the preparation fails.
 */
static int jfs_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct page **pagep, void **fsdata)
{
	int ret = block_write_begin(mapping, pos, len, pagep, jfs_get_block);

	if (unlikely(ret))
		jfs_write_failed(mapping, pos + len);
	return ret;
}
3051da177e4SLinus Torvalds
/*
 * ->write_end: finish a buffered write; on a short copy, trim blocks
 * that were instantiated past i_size.
 */
static int jfs_write_end(struct file *file, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct page *page, void *fsdata)
{
	int ret = generic_write_end(file, mapping, pos, len, copied, page,
				    fsdata);

	if (ret < len)
		jfs_write_failed(mapping, pos + len);
	return ret;
}
317002cbb13SChristoph Hellwig
/* ->bmap: map a file block to a device block for legacy users. */
static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, jfs_get_block);
}
3221da177e4SLinus Torvalds
jfs_direct_IO(struct kiocb * iocb,struct iov_iter * iter)323c8b8e32dSChristoph Hellwig static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3241da177e4SLinus Torvalds {
3251da177e4SLinus Torvalds struct file *file = iocb->ki_filp;
32686dd07d6SMarco Stornelli struct address_space *mapping = file->f_mapping;
3271da177e4SLinus Torvalds struct inode *inode = file->f_mapping->host;
328a6cbcd4aSAl Viro size_t count = iov_iter_count(iter);
329eafdc7d1SChristoph Hellwig ssize_t ret;
3301da177e4SLinus Torvalds
331c8b8e32dSChristoph Hellwig ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);
332eafdc7d1SChristoph Hellwig
333eafdc7d1SChristoph Hellwig /*
334eafdc7d1SChristoph Hellwig * In case of error extending write may have instantiated a few
335eafdc7d1SChristoph Hellwig * blocks outside i_size. Trim these off again.
336eafdc7d1SChristoph Hellwig */
3376f673763SOmar Sandoval if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
338eafdc7d1SChristoph Hellwig loff_t isize = i_size_read(inode);
339c8b8e32dSChristoph Hellwig loff_t end = iocb->ki_pos + count;
340eafdc7d1SChristoph Hellwig
341eafdc7d1SChristoph Hellwig if (end > isize)
34286dd07d6SMarco Stornelli jfs_write_failed(mapping, end);
343eafdc7d1SChristoph Hellwig }
344eafdc7d1SChristoph Hellwig
345eafdc7d1SChristoph Hellwig return ret;
3461da177e4SLinus Torvalds }
3471da177e4SLinus Torvalds
348f5e54d6eSChristoph Hellwig const struct address_space_operations jfs_aops = {
349e621900aSMatthew Wilcox (Oracle) .dirty_folio = block_dirty_folio,
3507ba13abbSMatthew Wilcox (Oracle) .invalidate_folio = block_invalidate_folio,
351f132ab7dSMatthew Wilcox (Oracle) .read_folio = jfs_read_folio,
352d4388340SMatthew Wilcox (Oracle) .readahead = jfs_readahead,
3531da177e4SLinus Torvalds .writepages = jfs_writepages,
354d5c5f84bSNick Piggin .write_begin = jfs_write_begin,
355002cbb13SChristoph Hellwig .write_end = jfs_write_end,
3561da177e4SLinus Torvalds .bmap = jfs_bmap,
3571da177e4SLinus Torvalds .direct_IO = jfs_direct_IO,
3582274c3b2SChristoph Hellwig .migrate_folio = buffer_migrate_folio,
3591da177e4SLinus Torvalds };
3601da177e4SLinus Torvalds
3611da177e4SLinus Torvalds /*
3621da177e4SLinus Torvalds * Guts of jfs_truncate. Called with locks already held. Can be called
3631da177e4SLinus Torvalds * with directory for truncating directory index table.
3641da177e4SLinus Torvalds */
/*
 * Guts of jfs_truncate.  Called with locks already held.  Can be called
 * with directory for truncating directory index table.
 */
void jfs_truncate_nolock(struct inode *ip, loff_t length)
{
	loff_t new_size;
	tid_t tid;

	ASSERT(length >= 0);

	/* Unlinked inode: no transaction needed, just free the extents. */
	if (test_cflag(COMMIT_Nolink, ip)) {
		xtTruncate(0, ip, length, COMMIT_WMAP);
		return;
	}

	do {
		tid = txBegin(ip->i_sb, 0);

		/*
		 * The commit_mutex cannot be taken before txBegin:
		 * txBegin may block, and there is a chance the inode
		 * could be marked dirty and need to be committed
		 * before txBegin unblocks.
		 */
		mutex_lock(&JFS_IP(ip)->commit_mutex);

		new_size = xtTruncate(tid, ip, length,
				      COMMIT_TRUNCATE | COMMIT_PWMAP);
		if (new_size < 0) {
			txEnd(tid);
			mutex_unlock(&JFS_IP(ip)->commit_mutex);
			break;
		}

		ip->i_mtime = inode_set_ctime_current(ip);
		mark_inode_dirty(ip);

		txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		mutex_unlock(&JFS_IP(ip)->commit_mutex);
	} while (new_size > length);	/* Truncate isn't always atomic */
}
4041da177e4SLinus Torvalds
jfs_truncate(struct inode * ip)4051da177e4SLinus Torvalds void jfs_truncate(struct inode *ip)
4061da177e4SLinus Torvalds {
4071da177e4SLinus Torvalds jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);
4081da177e4SLinus Torvalds
409002cbb13SChristoph Hellwig block_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);
4101da177e4SLinus Torvalds
41182d5b9a7SDave Kleikamp IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
4121da177e4SLinus Torvalds jfs_truncate_nolock(ip, ip->i_size);
4131da177e4SLinus Torvalds IWRITE_UNLOCK(ip);
4141da177e4SLinus Torvalds }
415