xref: /openbmc/linux/fs/reiserfs/file.c (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
1*1da177e4SLinus Torvalds /*
2*1da177e4SLinus Torvalds  * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
3*1da177e4SLinus Torvalds  */
4*1da177e4SLinus Torvalds 
5*1da177e4SLinus Torvalds 
6*1da177e4SLinus Torvalds #include <linux/time.h>
7*1da177e4SLinus Torvalds #include <linux/reiserfs_fs.h>
8*1da177e4SLinus Torvalds #include <linux/reiserfs_acl.h>
9*1da177e4SLinus Torvalds #include <linux/reiserfs_xattr.h>
10*1da177e4SLinus Torvalds #include <linux/smp_lock.h>
11*1da177e4SLinus Torvalds #include <asm/uaccess.h>
12*1da177e4SLinus Torvalds #include <linux/pagemap.h>
13*1da177e4SLinus Torvalds #include <linux/swap.h>
14*1da177e4SLinus Torvalds #include <linux/writeback.h>
15*1da177e4SLinus Torvalds #include <linux/blkdev.h>
16*1da177e4SLinus Torvalds #include <linux/buffer_head.h>
17*1da177e4SLinus Torvalds #include <linux/quotaops.h>
18*1da177e4SLinus Torvalds 
19*1da177e4SLinus Torvalds /*
20*1da177e4SLinus Torvalds ** We pack the tails of files on file close, not at the time they are written.
21*1da177e4SLinus Torvalds ** This implies an unnecessary copy of the tail and an unnecessary indirect item
22*1da177e4SLinus Torvalds ** insertion/balancing, for files that are written in one write.
23*1da177e4SLinus Torvalds ** It avoids unnecessary tail packings (balances) for files that are written in
24*1da177e4SLinus Torvalds ** multiple writes and are small enough to have tails.
25*1da177e4SLinus Torvalds **
26*1da177e4SLinus Torvalds ** file_release is called by the VFS layer when the file is closed.  If
** this is the last open file descriptor, and the file is
** small enough to have a tail, and the tail is currently in an
29*1da177e4SLinus Torvalds ** unformatted node, the tail is converted back into a direct item.
30*1da177e4SLinus Torvalds **
31*1da177e4SLinus Torvalds ** We use reiserfs_truncate_file to pack the tail, since it already has
32*1da177e4SLinus Torvalds ** all the conditions coded.
33*1da177e4SLinus Torvalds */
34*1da177e4SLinus Torvalds static int reiserfs_file_release (struct inode * inode, struct file * filp)
35*1da177e4SLinus Torvalds {
36*1da177e4SLinus Torvalds 
37*1da177e4SLinus Torvalds     struct reiserfs_transaction_handle th ;
38*1da177e4SLinus Torvalds     int err;
39*1da177e4SLinus Torvalds     int jbegin_failure = 0;
40*1da177e4SLinus Torvalds 
41*1da177e4SLinus Torvalds     if (!S_ISREG (inode->i_mode))
42*1da177e4SLinus Torvalds 	BUG ();
43*1da177e4SLinus Torvalds 
44*1da177e4SLinus Torvalds     /* fast out for when nothing needs to be done */
45*1da177e4SLinus Torvalds     if ((atomic_read(&inode->i_count) > 1 ||
46*1da177e4SLinus Torvalds 	!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
47*1da177e4SLinus Torvalds          !tail_has_to_be_packed(inode))       &&
48*1da177e4SLinus Torvalds 	REISERFS_I(inode)->i_prealloc_count <= 0) {
49*1da177e4SLinus Torvalds 	return 0;
50*1da177e4SLinus Torvalds     }
51*1da177e4SLinus Torvalds 
52*1da177e4SLinus Torvalds     reiserfs_write_lock(inode->i_sb);
53*1da177e4SLinus Torvalds     down (&inode->i_sem);
54*1da177e4SLinus Torvalds     /* freeing preallocation only involves relogging blocks that
55*1da177e4SLinus Torvalds      * are already in the current transaction.  preallocation gets
56*1da177e4SLinus Torvalds      * freed at the end of each transaction, so it is impossible for
57*1da177e4SLinus Torvalds      * us to log any additional blocks (including quota blocks)
58*1da177e4SLinus Torvalds      */
59*1da177e4SLinus Torvalds     err = journal_begin(&th, inode->i_sb, 1);
60*1da177e4SLinus Torvalds     if (err) {
61*1da177e4SLinus Torvalds 	/* uh oh, we can't allow the inode to go away while there
62*1da177e4SLinus Torvalds 	 * is still preallocation blocks pending.  Try to join the
63*1da177e4SLinus Torvalds 	 * aborted transaction
64*1da177e4SLinus Torvalds 	 */
65*1da177e4SLinus Torvalds 	jbegin_failure = err;
66*1da177e4SLinus Torvalds 	err = journal_join_abort(&th, inode->i_sb, 1);
67*1da177e4SLinus Torvalds 
68*1da177e4SLinus Torvalds 	if (err) {
69*1da177e4SLinus Torvalds 	    /* hmpf, our choices here aren't good.  We can pin the inode
70*1da177e4SLinus Torvalds 	     * which will disallow unmount from every happening, we can
71*1da177e4SLinus Torvalds 	     * do nothing, which will corrupt random memory on unmount,
72*1da177e4SLinus Torvalds 	     * or we can forcibly remove the file from the preallocation
73*1da177e4SLinus Torvalds 	     * list, which will leak blocks on disk.  Lets pin the inode
74*1da177e4SLinus Torvalds 	     * and let the admin know what is going on.
75*1da177e4SLinus Torvalds 	     */
76*1da177e4SLinus Torvalds 	    igrab(inode);
77*1da177e4SLinus Torvalds 	    reiserfs_warning(inode->i_sb, "pinning inode %lu because the "
78*1da177e4SLinus Torvalds 	                     "preallocation can't be freed");
79*1da177e4SLinus Torvalds 	    goto out;
80*1da177e4SLinus Torvalds 	}
81*1da177e4SLinus Torvalds     }
82*1da177e4SLinus Torvalds     reiserfs_update_inode_transaction(inode) ;
83*1da177e4SLinus Torvalds 
84*1da177e4SLinus Torvalds #ifdef REISERFS_PREALLOCATE
85*1da177e4SLinus Torvalds     reiserfs_discard_prealloc (&th, inode);
86*1da177e4SLinus Torvalds #endif
87*1da177e4SLinus Torvalds     err = journal_end(&th, inode->i_sb, 1);
88*1da177e4SLinus Torvalds 
89*1da177e4SLinus Torvalds     /* copy back the error code from journal_begin */
90*1da177e4SLinus Torvalds     if (!err)
91*1da177e4SLinus Torvalds         err = jbegin_failure;
92*1da177e4SLinus Torvalds 
93*1da177e4SLinus Torvalds     if (!err && atomic_read(&inode->i_count) <= 1 &&
94*1da177e4SLinus Torvalds 	(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
95*1da177e4SLinus Torvalds         tail_has_to_be_packed (inode)) {
96*1da177e4SLinus Torvalds 	/* if regular file is released by last holder and it has been
97*1da177e4SLinus Torvalds 	   appended (we append by unformatted node only) or its direct
98*1da177e4SLinus Torvalds 	   item(s) had to be converted, then it may have to be
99*1da177e4SLinus Torvalds 	   indirect2direct converted */
100*1da177e4SLinus Torvalds 	err = reiserfs_truncate_file(inode, 0) ;
101*1da177e4SLinus Torvalds     }
102*1da177e4SLinus Torvalds out:
103*1da177e4SLinus Torvalds     up (&inode->i_sem);
104*1da177e4SLinus Torvalds     reiserfs_write_unlock(inode->i_sb);
105*1da177e4SLinus Torvalds     return err;
106*1da177e4SLinus Torvalds }
107*1da177e4SLinus Torvalds 
/*
 * VFS ->truncate callback: delegate to reiserfs_truncate_file with
 * timestamp updating enabled (second argument == 1).
 */
static void reiserfs_vfs_truncate_file(struct inode *inode)
{
	reiserfs_truncate_file(inode, 1);
}
111*1da177e4SLinus Torvalds 
112*1da177e4SLinus Torvalds /* Sync a reiserfs file. */
113*1da177e4SLinus Torvalds 
114*1da177e4SLinus Torvalds /*
115*1da177e4SLinus Torvalds  * FIXME: sync_mapping_buffers() never has anything to sync.  Can
116*1da177e4SLinus Torvalds  * be removed...
117*1da177e4SLinus Torvalds  */
118*1da177e4SLinus Torvalds 
119*1da177e4SLinus Torvalds static int reiserfs_sync_file(
120*1da177e4SLinus Torvalds 			      struct file   * p_s_filp,
121*1da177e4SLinus Torvalds 			      struct dentry * p_s_dentry,
122*1da177e4SLinus Torvalds 			      int datasync
123*1da177e4SLinus Torvalds 			      ) {
124*1da177e4SLinus Torvalds   struct inode * p_s_inode = p_s_dentry->d_inode;
125*1da177e4SLinus Torvalds   int n_err;
126*1da177e4SLinus Torvalds   int barrier_done;
127*1da177e4SLinus Torvalds 
128*1da177e4SLinus Torvalds   if (!S_ISREG(p_s_inode->i_mode))
129*1da177e4SLinus Torvalds       BUG ();
130*1da177e4SLinus Torvalds   n_err = sync_mapping_buffers(p_s_inode->i_mapping) ;
131*1da177e4SLinus Torvalds   reiserfs_write_lock(p_s_inode->i_sb);
132*1da177e4SLinus Torvalds   barrier_done = reiserfs_commit_for_inode(p_s_inode);
133*1da177e4SLinus Torvalds   reiserfs_write_unlock(p_s_inode->i_sb);
134*1da177e4SLinus Torvalds   if (barrier_done != 1)
135*1da177e4SLinus Torvalds       blkdev_issue_flush(p_s_inode->i_sb->s_bdev, NULL);
136*1da177e4SLinus Torvalds   if (barrier_done < 0)
137*1da177e4SLinus Torvalds     return barrier_done;
138*1da177e4SLinus Torvalds   return ( n_err < 0 ) ? -EIO : 0;
139*1da177e4SLinus Torvalds }
140*1da177e4SLinus Torvalds 
141*1da177e4SLinus Torvalds /* I really do not want to play with memory shortage right now, so
142*1da177e4SLinus Torvalds    to simplify the code, we are not going to write more than this much pages at
143*1da177e4SLinus Torvalds    a time. This still should considerably improve performance compared to 4k
144*1da177e4SLinus Torvalds    at a time case. This is 32 pages of 4k size. */
/* Fully parenthesized so uses like `x % REISERFS_WRITE_PAGES_AT_A_TIME`
 * cannot be reparsed as `(x % (128 * 1024)) / PAGE_CACHE_SIZE`. */
#define REISERFS_WRITE_PAGES_AT_A_TIME ((128 * 1024) / PAGE_CACHE_SIZE)
146*1da177e4SLinus Torvalds 
147*1da177e4SLinus Torvalds /* Allocates blocks for a file to fulfil write request.
148*1da177e4SLinus Torvalds    Maps all unmapped but prepared pages from the list.
149*1da177e4SLinus Torvalds    Updates metadata with newly allocated blocknumbers as needed */
150*1da177e4SLinus Torvalds static int reiserfs_allocate_blocks_for_region(
151*1da177e4SLinus Torvalds 				struct reiserfs_transaction_handle *th,
152*1da177e4SLinus Torvalds 				struct inode *inode, /* Inode we work with */
153*1da177e4SLinus Torvalds 				loff_t pos, /* Writing position */
154*1da177e4SLinus Torvalds 				int num_pages, /* number of pages write going
155*1da177e4SLinus Torvalds 						  to touch */
156*1da177e4SLinus Torvalds 				int write_bytes, /* amount of bytes to write */
157*1da177e4SLinus Torvalds 				struct page **prepared_pages, /* array of
158*1da177e4SLinus Torvalds 							         prepared pages
159*1da177e4SLinus Torvalds 							       */
160*1da177e4SLinus Torvalds 				int blocks_to_allocate /* Amount of blocks we
161*1da177e4SLinus Torvalds 							  need to allocate to
162*1da177e4SLinus Torvalds 							  fit the data into file
163*1da177e4SLinus Torvalds 							 */
164*1da177e4SLinus Torvalds 				)
165*1da177e4SLinus Torvalds {
166*1da177e4SLinus Torvalds     struct cpu_key key; // cpu key of item that we are going to deal with
167*1da177e4SLinus Torvalds     struct item_head *ih; // pointer to item head that we are going to deal with
168*1da177e4SLinus Torvalds     struct buffer_head *bh; // Buffer head that contains items that we are going to deal with
169*1da177e4SLinus Torvalds     __u32 * item; // pointer to item we are going to deal with
170*1da177e4SLinus Torvalds     INITIALIZE_PATH(path); // path to item, that we are going to deal with.
171*1da177e4SLinus Torvalds     b_blocknr_t *allocated_blocks; // Pointer to a place where allocated blocknumbers would be stored.
172*1da177e4SLinus Torvalds     reiserfs_blocknr_hint_t hint; // hint structure for block allocator.
173*1da177e4SLinus Torvalds     size_t res; // return value of various functions that we call.
174*1da177e4SLinus Torvalds     int curr_block; // current block used to keep track of unmapped blocks.
175*1da177e4SLinus Torvalds     int i; // loop counter
176*1da177e4SLinus Torvalds     int itempos; // position in item
177*1da177e4SLinus Torvalds     unsigned int from = (pos & (PAGE_CACHE_SIZE - 1)); // writing position in
178*1da177e4SLinus Torvalds 						       // first page
179*1da177e4SLinus Torvalds     unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1; /* last modified byte offset in last page */
180*1da177e4SLinus Torvalds     __u64 hole_size ; // amount of blocks for a file hole, if it needed to be created.
181*1da177e4SLinus Torvalds     int modifying_this_item = 0; // Flag for items traversal code to keep track
182*1da177e4SLinus Torvalds 				 // of the fact that we already prepared
183*1da177e4SLinus Torvalds 				 // current block for journal
184*1da177e4SLinus Torvalds     int will_prealloc = 0;
185*1da177e4SLinus Torvalds     RFALSE(!blocks_to_allocate, "green-9004: tried to allocate zero blocks?");
186*1da177e4SLinus Torvalds 
187*1da177e4SLinus Torvalds     /* only preallocate if this is a small write */
188*1da177e4SLinus Torvalds     if (REISERFS_I(inode)->i_prealloc_count ||
189*1da177e4SLinus Torvalds        (!(write_bytes & (inode->i_sb->s_blocksize -1)) &&
190*1da177e4SLinus Torvalds         blocks_to_allocate <
191*1da177e4SLinus Torvalds         REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize))
192*1da177e4SLinus Torvalds         will_prealloc = REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize;
193*1da177e4SLinus Torvalds 
194*1da177e4SLinus Torvalds     allocated_blocks = kmalloc((blocks_to_allocate + will_prealloc) *
195*1da177e4SLinus Torvalds     					sizeof(b_blocknr_t), GFP_NOFS);
196*1da177e4SLinus Torvalds 
197*1da177e4SLinus Torvalds     /* First we compose a key to point at the writing position, we want to do
198*1da177e4SLinus Torvalds        that outside of any locking region. */
199*1da177e4SLinus Torvalds     make_cpu_key (&key, inode, pos+1, TYPE_ANY, 3/*key length*/);
200*1da177e4SLinus Torvalds 
201*1da177e4SLinus Torvalds     /* If we came here, it means we absolutely need to open a transaction,
202*1da177e4SLinus Torvalds        since we need to allocate some blocks */
203*1da177e4SLinus Torvalds     reiserfs_write_lock(inode->i_sb); // Journaling stuff and we need that.
204*1da177e4SLinus Torvalds     res = journal_begin(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1 + 2 * REISERFS_QUOTA_TRANS_BLOCKS); // Wish I know if this number enough
205*1da177e4SLinus Torvalds     if (res)
206*1da177e4SLinus Torvalds         goto error_exit;
207*1da177e4SLinus Torvalds     reiserfs_update_inode_transaction(inode) ;
208*1da177e4SLinus Torvalds 
209*1da177e4SLinus Torvalds     /* Look for the in-tree position of our write, need path for block allocator */
210*1da177e4SLinus Torvalds     res = search_for_position_by_key(inode->i_sb, &key, &path);
211*1da177e4SLinus Torvalds     if ( res == IO_ERROR ) {
212*1da177e4SLinus Torvalds 	res = -EIO;
213*1da177e4SLinus Torvalds 	goto error_exit;
214*1da177e4SLinus Torvalds     }
215*1da177e4SLinus Torvalds 
216*1da177e4SLinus Torvalds     /* Allocate blocks */
217*1da177e4SLinus Torvalds     /* First fill in "hint" structure for block allocator */
218*1da177e4SLinus Torvalds     hint.th = th; // transaction handle.
219*1da177e4SLinus Torvalds     hint.path = &path; // Path, so that block allocator can determine packing locality or whatever it needs to determine.
220*1da177e4SLinus Torvalds     hint.inode = inode; // Inode is needed by block allocator too.
221*1da177e4SLinus Torvalds     hint.search_start = 0; // We have no hint on where to search free blocks for block allocator.
222*1da177e4SLinus Torvalds     hint.key = key.on_disk_key; // on disk key of file.
223*1da177e4SLinus Torvalds     hint.block = inode->i_blocks>>(inode->i_sb->s_blocksize_bits-9); // Number of disk blocks this file occupies already.
224*1da177e4SLinus Torvalds     hint.formatted_node = 0; // We are allocating blocks for unformatted node.
225*1da177e4SLinus Torvalds     hint.preallocate = will_prealloc;
226*1da177e4SLinus Torvalds 
227*1da177e4SLinus Torvalds     /* Call block allocator to allocate blocks */
228*1da177e4SLinus Torvalds     res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
229*1da177e4SLinus Torvalds     if ( res != CARRY_ON ) {
230*1da177e4SLinus Torvalds 	if ( res == NO_DISK_SPACE ) {
231*1da177e4SLinus Torvalds 	    /* We flush the transaction in case of no space. This way some
232*1da177e4SLinus Torvalds 	       blocks might become free */
233*1da177e4SLinus Torvalds 	    SB_JOURNAL(inode->i_sb)->j_must_wait = 1;
234*1da177e4SLinus Torvalds 	    res = restart_transaction(th, inode, &path);
235*1da177e4SLinus Torvalds             if (res)
236*1da177e4SLinus Torvalds                 goto error_exit;
237*1da177e4SLinus Torvalds 
238*1da177e4SLinus Torvalds 	    /* We might have scheduled, so search again */
239*1da177e4SLinus Torvalds 	    res = search_for_position_by_key(inode->i_sb, &key, &path);
240*1da177e4SLinus Torvalds 	    if ( res == IO_ERROR ) {
241*1da177e4SLinus Torvalds 		res = -EIO;
242*1da177e4SLinus Torvalds 		goto error_exit;
243*1da177e4SLinus Torvalds 	    }
244*1da177e4SLinus Torvalds 
245*1da177e4SLinus Torvalds 	    /* update changed info for hint structure. */
246*1da177e4SLinus Torvalds 	    res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
247*1da177e4SLinus Torvalds 	    if ( res != CARRY_ON ) {
248*1da177e4SLinus Torvalds 		res = -ENOSPC;
249*1da177e4SLinus Torvalds 		pathrelse(&path);
250*1da177e4SLinus Torvalds 		goto error_exit;
251*1da177e4SLinus Torvalds 	    }
252*1da177e4SLinus Torvalds 	} else {
253*1da177e4SLinus Torvalds 	    res = -ENOSPC;
254*1da177e4SLinus Torvalds 	    pathrelse(&path);
255*1da177e4SLinus Torvalds 	    goto error_exit;
256*1da177e4SLinus Torvalds 	}
257*1da177e4SLinus Torvalds     }
258*1da177e4SLinus Torvalds 
259*1da177e4SLinus Torvalds #ifdef __BIG_ENDIAN
260*1da177e4SLinus Torvalds         // Too bad, I have not found any way to convert a given region from
261*1da177e4SLinus Torvalds         // cpu format to little endian format
262*1da177e4SLinus Torvalds     {
263*1da177e4SLinus Torvalds         int i;
264*1da177e4SLinus Torvalds         for ( i = 0; i < blocks_to_allocate ; i++)
265*1da177e4SLinus Torvalds             allocated_blocks[i]=cpu_to_le32(allocated_blocks[i]);
266*1da177e4SLinus Torvalds     }
267*1da177e4SLinus Torvalds #endif
268*1da177e4SLinus Torvalds 
269*1da177e4SLinus Torvalds     /* Blocks allocating well might have scheduled and tree might have changed,
270*1da177e4SLinus Torvalds        let's search the tree again */
271*1da177e4SLinus Torvalds     /* find where in the tree our write should go */
272*1da177e4SLinus Torvalds     res = search_for_position_by_key(inode->i_sb, &key, &path);
273*1da177e4SLinus Torvalds     if ( res == IO_ERROR ) {
274*1da177e4SLinus Torvalds 	res = -EIO;
275*1da177e4SLinus Torvalds 	goto error_exit_free_blocks;
276*1da177e4SLinus Torvalds     }
277*1da177e4SLinus Torvalds 
278*1da177e4SLinus Torvalds     bh = get_last_bh( &path ); // Get a bufferhead for last element in path.
279*1da177e4SLinus Torvalds     ih = get_ih( &path );      // Get a pointer to last item head in path.
280*1da177e4SLinus Torvalds     item = get_item( &path );  // Get a pointer to last item in path
281*1da177e4SLinus Torvalds 
282*1da177e4SLinus Torvalds     /* Let's see what we have found */
283*1da177e4SLinus Torvalds     if ( res != POSITION_FOUND ) { /* position not found, this means that we
284*1da177e4SLinus Torvalds 				      might need to append file with holes
285*1da177e4SLinus Torvalds 				      first */
286*1da177e4SLinus Torvalds 	// Since we are writing past the file's end, we need to find out if
287*1da177e4SLinus Torvalds 	// there is a hole that needs to be inserted before our writing
288*1da177e4SLinus Torvalds 	// position, and how many blocks it is going to cover (we need to
289*1da177e4SLinus Torvalds 	//  populate pointers to file blocks representing the hole with zeros)
290*1da177e4SLinus Torvalds 
291*1da177e4SLinus Torvalds 	{
292*1da177e4SLinus Torvalds 	    int item_offset = 1;
293*1da177e4SLinus Torvalds 	    /*
294*1da177e4SLinus Torvalds 	     * if ih is stat data, its offset is 0 and we don't want to
295*1da177e4SLinus Torvalds 	     * add 1 to pos in the hole_size calculation
296*1da177e4SLinus Torvalds 	     */
297*1da177e4SLinus Torvalds 	    if (is_statdata_le_ih(ih))
298*1da177e4SLinus Torvalds 	        item_offset = 0;
299*1da177e4SLinus Torvalds 	    hole_size = (pos + item_offset -
300*1da177e4SLinus Torvalds 	            (le_key_k_offset( get_inode_item_key_version(inode),
301*1da177e4SLinus Torvalds 		    &(ih->ih_key)) +
302*1da177e4SLinus Torvalds 		    op_bytes_number(ih, inode->i_sb->s_blocksize))) >>
303*1da177e4SLinus Torvalds 		    inode->i_sb->s_blocksize_bits;
304*1da177e4SLinus Torvalds 	}
305*1da177e4SLinus Torvalds 
306*1da177e4SLinus Torvalds 	if ( hole_size > 0 ) {
307*1da177e4SLinus Torvalds 	    int to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE ); // How much data to insert first time.
308*1da177e4SLinus Torvalds 	    /* area filled with zeroes, to supply as list of zero blocknumbers
309*1da177e4SLinus Torvalds 	       We allocate it outside of loop just in case loop would spin for
310*1da177e4SLinus Torvalds 	       several iterations. */
311*1da177e4SLinus Torvalds 	    char *zeros = kmalloc(to_paste*UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway.
312*1da177e4SLinus Torvalds 	    if ( !zeros ) {
313*1da177e4SLinus Torvalds 		res = -ENOMEM;
314*1da177e4SLinus Torvalds 		goto error_exit_free_blocks;
315*1da177e4SLinus Torvalds 	    }
316*1da177e4SLinus Torvalds 	    memset ( zeros, 0, to_paste*UNFM_P_SIZE);
317*1da177e4SLinus Torvalds 	    do {
318*1da177e4SLinus Torvalds 		to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE );
319*1da177e4SLinus Torvalds 		if ( is_indirect_le_ih(ih) ) {
320*1da177e4SLinus Torvalds 		    /* Ok, there is existing indirect item already. Need to append it */
321*1da177e4SLinus Torvalds 		    /* Calculate position past inserted item */
322*1da177e4SLinus Torvalds 		    make_cpu_key( &key, inode, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize), TYPE_INDIRECT, 3);
323*1da177e4SLinus Torvalds 		    res = reiserfs_paste_into_item( th, &path, &key, inode, (char *)zeros, UNFM_P_SIZE*to_paste);
324*1da177e4SLinus Torvalds 		    if ( res ) {
325*1da177e4SLinus Torvalds 			kfree(zeros);
326*1da177e4SLinus Torvalds 			goto error_exit_free_blocks;
327*1da177e4SLinus Torvalds 		    }
328*1da177e4SLinus Torvalds 		} else if ( is_statdata_le_ih(ih) ) {
329*1da177e4SLinus Torvalds 		    /* No existing item, create it */
330*1da177e4SLinus Torvalds 		    /* item head for new item */
331*1da177e4SLinus Torvalds 		    struct item_head ins_ih;
332*1da177e4SLinus Torvalds 
333*1da177e4SLinus Torvalds 		    /* create a key for our new item */
334*1da177e4SLinus Torvalds 		    make_cpu_key( &key, inode, 1, TYPE_INDIRECT, 3);
335*1da177e4SLinus Torvalds 
336*1da177e4SLinus Torvalds 		    /* Create new item head for our new item */
337*1da177e4SLinus Torvalds 		    make_le_item_head (&ins_ih, &key, key.version, 1,
338*1da177e4SLinus Torvalds 				       TYPE_INDIRECT, to_paste*UNFM_P_SIZE,
339*1da177e4SLinus Torvalds 				       0 /* free space */);
340*1da177e4SLinus Torvalds 
341*1da177e4SLinus Torvalds 		    /* Find where such item should live in the tree */
342*1da177e4SLinus Torvalds 		    res = search_item (inode->i_sb, &key, &path);
343*1da177e4SLinus Torvalds 		    if ( res != ITEM_NOT_FOUND ) {
344*1da177e4SLinus Torvalds 			/* item should not exist, otherwise we have error */
345*1da177e4SLinus Torvalds 			if ( res != -ENOSPC ) {
346*1da177e4SLinus Torvalds 			    reiserfs_warning (inode->i_sb,
347*1da177e4SLinus Torvalds 				"green-9008: search_by_key (%K) returned %d",
348*1da177e4SLinus Torvalds 					      &key, res);
349*1da177e4SLinus Torvalds 			}
350*1da177e4SLinus Torvalds 			res = -EIO;
351*1da177e4SLinus Torvalds 		        kfree(zeros);
352*1da177e4SLinus Torvalds 			goto error_exit_free_blocks;
353*1da177e4SLinus Torvalds 		    }
354*1da177e4SLinus Torvalds 		    res = reiserfs_insert_item( th, &path, &key, &ins_ih, inode, (char *)zeros);
355*1da177e4SLinus Torvalds 		} else {
356*1da177e4SLinus Torvalds 		    reiserfs_panic(inode->i_sb, "green-9011: Unexpected key type %K\n", &key);
357*1da177e4SLinus Torvalds 		}
358*1da177e4SLinus Torvalds 		if ( res ) {
359*1da177e4SLinus Torvalds 		    kfree(zeros);
360*1da177e4SLinus Torvalds 		    goto error_exit_free_blocks;
361*1da177e4SLinus Torvalds 		}
362*1da177e4SLinus Torvalds 		/* Now we want to check if transaction is too full, and if it is
363*1da177e4SLinus Torvalds 		   we restart it. This will also free the path. */
364*1da177e4SLinus Torvalds 		if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
365*1da177e4SLinus Torvalds 		    res = restart_transaction(th, inode, &path);
366*1da177e4SLinus Torvalds                     if (res) {
367*1da177e4SLinus Torvalds                         pathrelse (&path);
368*1da177e4SLinus Torvalds                         kfree(zeros);
369*1da177e4SLinus Torvalds                         goto error_exit;
370*1da177e4SLinus Torvalds                     }
371*1da177e4SLinus Torvalds                 }
372*1da177e4SLinus Torvalds 
373*1da177e4SLinus Torvalds 		/* Well, need to recalculate path and stuff */
374*1da177e4SLinus Torvalds 		set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + (to_paste << inode->i_blkbits));
375*1da177e4SLinus Torvalds 		res = search_for_position_by_key(inode->i_sb, &key, &path);
376*1da177e4SLinus Torvalds 		if ( res == IO_ERROR ) {
377*1da177e4SLinus Torvalds 		    res = -EIO;
378*1da177e4SLinus Torvalds 		    kfree(zeros);
379*1da177e4SLinus Torvalds 		    goto error_exit_free_blocks;
380*1da177e4SLinus Torvalds 		}
381*1da177e4SLinus Torvalds 		bh=get_last_bh(&path);
382*1da177e4SLinus Torvalds 		ih=get_ih(&path);
383*1da177e4SLinus Torvalds 		item = get_item(&path);
384*1da177e4SLinus Torvalds 		hole_size -= to_paste;
385*1da177e4SLinus Torvalds 	    } while ( hole_size );
386*1da177e4SLinus Torvalds 	    kfree(zeros);
387*1da177e4SLinus Torvalds 	}
388*1da177e4SLinus Torvalds     }
389*1da177e4SLinus Torvalds 
390*1da177e4SLinus Torvalds     // Go through existing indirect items first
391*1da177e4SLinus Torvalds     // replace all zeroes with blocknumbers from list
392*1da177e4SLinus Torvalds     // Note that if no corresponding item was found, by previous search,
393*1da177e4SLinus Torvalds     // it means there are no existing in-tree representation for file area
394*1da177e4SLinus Torvalds     // we are going to overwrite, so there is nothing to scan through for holes.
395*1da177e4SLinus Torvalds     for ( curr_block = 0, itempos = path.pos_in_item ; curr_block < blocks_to_allocate && res == POSITION_FOUND ; ) {
396*1da177e4SLinus Torvalds retry:
397*1da177e4SLinus Torvalds 
398*1da177e4SLinus Torvalds 	if ( itempos >= ih_item_len(ih)/UNFM_P_SIZE ) {
399*1da177e4SLinus Torvalds 	    /* We run out of data in this indirect item, let's look for another
400*1da177e4SLinus Torvalds 	       one. */
401*1da177e4SLinus Torvalds 	    /* First if we are already modifying current item, log it */
402*1da177e4SLinus Torvalds 	    if ( modifying_this_item ) {
403*1da177e4SLinus Torvalds 		journal_mark_dirty (th, inode->i_sb, bh);
404*1da177e4SLinus Torvalds 		modifying_this_item = 0;
405*1da177e4SLinus Torvalds 	    }
406*1da177e4SLinus Torvalds 	    /* Then set the key to look for a new indirect item (offset of old
407*1da177e4SLinus Torvalds 	       item is added to old item length */
408*1da177e4SLinus Torvalds 	    set_cpu_key_k_offset( &key, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize));
409*1da177e4SLinus Torvalds 	    /* Search ofor position of new key in the tree. */
410*1da177e4SLinus Torvalds 	    res = search_for_position_by_key(inode->i_sb, &key, &path);
411*1da177e4SLinus Torvalds 	    if ( res == IO_ERROR) {
412*1da177e4SLinus Torvalds 		res = -EIO;
413*1da177e4SLinus Torvalds 		goto error_exit_free_blocks;
414*1da177e4SLinus Torvalds 	    }
415*1da177e4SLinus Torvalds 	    bh=get_last_bh(&path);
416*1da177e4SLinus Torvalds 	    ih=get_ih(&path);
417*1da177e4SLinus Torvalds 	    item = get_item(&path);
418*1da177e4SLinus Torvalds 	    itempos = path.pos_in_item;
419*1da177e4SLinus Torvalds 	    continue; // loop to check all kinds of conditions and so on.
420*1da177e4SLinus Torvalds 	}
421*1da177e4SLinus Torvalds 	/* Ok, we have correct position in item now, so let's see if it is
422*1da177e4SLinus Torvalds 	   representing file hole (blocknumber is zero) and fill it if needed */
423*1da177e4SLinus Torvalds 	if ( !item[itempos] ) {
424*1da177e4SLinus Torvalds 	    /* Ok, a hole. Now we need to check if we already prepared this
425*1da177e4SLinus Torvalds 	       block to be journaled */
426*1da177e4SLinus Torvalds 	    while ( !modifying_this_item ) { // loop until succeed
427*1da177e4SLinus Torvalds 		/* Well, this item is not journaled yet, so we must prepare
428*1da177e4SLinus Torvalds 		   it for journal first, before we can change it */
429*1da177e4SLinus Torvalds 		struct item_head tmp_ih; // We copy item head of found item,
430*1da177e4SLinus Torvalds 					 // here to detect if fs changed under
431*1da177e4SLinus Torvalds 					 // us while we were preparing for
432*1da177e4SLinus Torvalds 					 // journal.
433*1da177e4SLinus Torvalds 		int fs_gen; // We store fs generation here to find if someone
434*1da177e4SLinus Torvalds 			    // changes fs under our feet
435*1da177e4SLinus Torvalds 
436*1da177e4SLinus Torvalds 		copy_item_head (&tmp_ih, ih); // Remember itemhead
437*1da177e4SLinus Torvalds 		fs_gen = get_generation (inode->i_sb); // remember fs generation
438*1da177e4SLinus Torvalds 		reiserfs_prepare_for_journal(inode->i_sb, bh, 1); // Prepare a buffer within which indirect item is stored for changing.
439*1da177e4SLinus Torvalds 		if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
440*1da177e4SLinus Torvalds 		    // Sigh, fs was changed under us, we need to look for new
441*1da177e4SLinus Torvalds 		    // location of item we are working with
442*1da177e4SLinus Torvalds 
443*1da177e4SLinus Torvalds 		    /* unmark prepaerd area as journaled and search for it's
444*1da177e4SLinus Torvalds 		       new position */
445*1da177e4SLinus Torvalds 		    reiserfs_restore_prepared_buffer(inode->i_sb, bh);
446*1da177e4SLinus Torvalds 		    res = search_for_position_by_key(inode->i_sb, &key, &path);
447*1da177e4SLinus Torvalds 		    if ( res == IO_ERROR) {
448*1da177e4SLinus Torvalds 			res = -EIO;
449*1da177e4SLinus Torvalds 			goto error_exit_free_blocks;
450*1da177e4SLinus Torvalds 		    }
451*1da177e4SLinus Torvalds 		    bh=get_last_bh(&path);
452*1da177e4SLinus Torvalds 		    ih=get_ih(&path);
453*1da177e4SLinus Torvalds 		    item = get_item(&path);
454*1da177e4SLinus Torvalds 		    itempos = path.pos_in_item;
455*1da177e4SLinus Torvalds 		    goto retry;
456*1da177e4SLinus Torvalds 		}
457*1da177e4SLinus Torvalds 		modifying_this_item = 1;
458*1da177e4SLinus Torvalds 	    }
459*1da177e4SLinus Torvalds 	    item[itempos] = allocated_blocks[curr_block]; // Assign new block
460*1da177e4SLinus Torvalds 	    curr_block++;
461*1da177e4SLinus Torvalds 	}
462*1da177e4SLinus Torvalds 	itempos++;
463*1da177e4SLinus Torvalds     }
464*1da177e4SLinus Torvalds 
465*1da177e4SLinus Torvalds     if ( modifying_this_item ) { // We need to log last-accessed block, if it
466*1da177e4SLinus Torvalds 				 // was modified, but not logged yet.
467*1da177e4SLinus Torvalds 	journal_mark_dirty (th, inode->i_sb, bh);
468*1da177e4SLinus Torvalds     }
469*1da177e4SLinus Torvalds 
470*1da177e4SLinus Torvalds     if ( curr_block < blocks_to_allocate ) {
471*1da177e4SLinus Torvalds 	// Oh, well need to append to indirect item, or to create indirect item
472*1da177e4SLinus Torvalds 	// if there weren't any
473*1da177e4SLinus Torvalds 	if ( is_indirect_le_ih(ih) ) {
474*1da177e4SLinus Torvalds 	    // Existing indirect item - append. First calculate key for append
475*1da177e4SLinus Torvalds 	    // position. We do not need to recalculate path as it should
476*1da177e4SLinus Torvalds 	    // already point to correct place.
477*1da177e4SLinus Torvalds 	    make_cpu_key( &key, inode, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize), TYPE_INDIRECT, 3);
478*1da177e4SLinus Torvalds 	    res = reiserfs_paste_into_item( th, &path, &key, inode, (char *)(allocated_blocks+curr_block), UNFM_P_SIZE*(blocks_to_allocate-curr_block));
479*1da177e4SLinus Torvalds 	    if ( res ) {
480*1da177e4SLinus Torvalds 		goto error_exit_free_blocks;
481*1da177e4SLinus Torvalds 	    }
482*1da177e4SLinus Torvalds 	} else if (is_statdata_le_ih(ih) ) {
483*1da177e4SLinus Torvalds 	    // Last found item was statdata. That means we need to create indirect item.
484*1da177e4SLinus Torvalds 	    struct item_head ins_ih; /* itemhead for new item */
485*1da177e4SLinus Torvalds 
486*1da177e4SLinus Torvalds 	    /* create a key for our new item */
487*1da177e4SLinus Torvalds 	    make_cpu_key( &key, inode, 1, TYPE_INDIRECT, 3); // Position one,
488*1da177e4SLinus Torvalds 							    // because that's
489*1da177e4SLinus Torvalds 							    // where first
490*1da177e4SLinus Torvalds 							    // indirect item
491*1da177e4SLinus Torvalds 							    // begins
492*1da177e4SLinus Torvalds 	    /* Create new item head for our new item */
493*1da177e4SLinus Torvalds 	    make_le_item_head (&ins_ih, &key, key.version, 1, TYPE_INDIRECT,
494*1da177e4SLinus Torvalds 			       (blocks_to_allocate-curr_block)*UNFM_P_SIZE,
495*1da177e4SLinus Torvalds 			       0 /* free space */);
496*1da177e4SLinus Torvalds 	    /* Find where such item should live in the tree */
497*1da177e4SLinus Torvalds 	    res = search_item (inode->i_sb, &key, &path);
498*1da177e4SLinus Torvalds 	    if ( res != ITEM_NOT_FOUND ) {
		/* Well, if we have found such item already, or some error
		   occurred, we need to warn user and return error */
501*1da177e4SLinus Torvalds 		if ( res != -ENOSPC ) {
502*1da177e4SLinus Torvalds 		    reiserfs_warning (inode->i_sb,
503*1da177e4SLinus Torvalds 				      "green-9009: search_by_key (%K) "
504*1da177e4SLinus Torvalds 				      "returned %d", &key, res);
505*1da177e4SLinus Torvalds 		}
506*1da177e4SLinus Torvalds 		res = -EIO;
507*1da177e4SLinus Torvalds 		goto error_exit_free_blocks;
508*1da177e4SLinus Torvalds 	    }
509*1da177e4SLinus Torvalds 	    /* Insert item into the tree with the data as its body */
510*1da177e4SLinus Torvalds 	    res = reiserfs_insert_item( th, &path, &key, &ins_ih, inode, (char *)(allocated_blocks+curr_block));
511*1da177e4SLinus Torvalds 	} else {
512*1da177e4SLinus Torvalds 	    reiserfs_panic(inode->i_sb, "green-9010: unexpected item type for key %K\n",&key);
513*1da177e4SLinus Torvalds 	}
514*1da177e4SLinus Torvalds     }
515*1da177e4SLinus Torvalds 
516*1da177e4SLinus Torvalds     // the caller is responsible for closing the transaction
517*1da177e4SLinus Torvalds     // unless we return an error, they are also responsible for logging
518*1da177e4SLinus Torvalds     // the inode.
519*1da177e4SLinus Torvalds     //
520*1da177e4SLinus Torvalds     pathrelse(&path);
    /*
     * cleanup preallocation from previous writes
     * if this is a partial block write
     */
525*1da177e4SLinus Torvalds     if (write_bytes & (inode->i_sb->s_blocksize -1))
526*1da177e4SLinus Torvalds         reiserfs_discard_prealloc(th, inode);
527*1da177e4SLinus Torvalds     reiserfs_write_unlock(inode->i_sb);
528*1da177e4SLinus Torvalds 
529*1da177e4SLinus Torvalds     // go through all the pages/buffers and map the buffers to newly allocated
530*1da177e4SLinus Torvalds     // blocks (so that system knows where to write these pages later).
531*1da177e4SLinus Torvalds     curr_block = 0;
532*1da177e4SLinus Torvalds     for ( i = 0; i < num_pages ; i++ ) {
533*1da177e4SLinus Torvalds 	struct page *page=prepared_pages[i]; //current page
534*1da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);// first buffer for a page
535*1da177e4SLinus Torvalds 	int block_start, block_end; // in-page offsets for buffers.
536*1da177e4SLinus Torvalds 
537*1da177e4SLinus Torvalds 	if (!page_buffers(page))
538*1da177e4SLinus Torvalds 	    reiserfs_panic(inode->i_sb, "green-9005: No buffers for prepared page???");
539*1da177e4SLinus Torvalds 
540*1da177e4SLinus Torvalds 	/* For each buffer in page */
541*1da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
542*1da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
543*1da177e4SLinus Torvalds 	    if (!bh)
544*1da177e4SLinus Torvalds 		reiserfs_panic(inode->i_sb, "green-9006: Allocated but absent buffer for a page?");
545*1da177e4SLinus Torvalds 	    block_end = block_start+inode->i_sb->s_blocksize;
546*1da177e4SLinus Torvalds 	    if (i == 0 && block_end <= from )
547*1da177e4SLinus Torvalds 		/* if this buffer is before requested data to map, skip it */
548*1da177e4SLinus Torvalds 		continue;
549*1da177e4SLinus Torvalds 	    if (i == num_pages - 1 && block_start >= to)
550*1da177e4SLinus Torvalds 		/* If this buffer is after requested data to map, abort
551*1da177e4SLinus Torvalds 		   processing of current page */
552*1da177e4SLinus Torvalds 		break;
553*1da177e4SLinus Torvalds 
554*1da177e4SLinus Torvalds 	    if ( !buffer_mapped(bh) ) { // Ok, unmapped buffer, need to map it
555*1da177e4SLinus Torvalds 		map_bh( bh, inode->i_sb, le32_to_cpu(allocated_blocks[curr_block]));
556*1da177e4SLinus Torvalds 		curr_block++;
557*1da177e4SLinus Torvalds 		set_buffer_new(bh);
558*1da177e4SLinus Torvalds 	    }
559*1da177e4SLinus Torvalds 	}
560*1da177e4SLinus Torvalds     }
561*1da177e4SLinus Torvalds 
562*1da177e4SLinus Torvalds     RFALSE( curr_block > blocks_to_allocate, "green-9007: Used too many blocks? weird");
563*1da177e4SLinus Torvalds 
564*1da177e4SLinus Torvalds     kfree(allocated_blocks);
565*1da177e4SLinus Torvalds     return 0;
566*1da177e4SLinus Torvalds 
567*1da177e4SLinus Torvalds // Need to deal with transaction here.
568*1da177e4SLinus Torvalds error_exit_free_blocks:
569*1da177e4SLinus Torvalds     pathrelse(&path);
570*1da177e4SLinus Torvalds     // free blocks
571*1da177e4SLinus Torvalds     for( i = 0; i < blocks_to_allocate; i++ )
572*1da177e4SLinus Torvalds 	reiserfs_free_block(th, inode, le32_to_cpu(allocated_blocks[i]), 1);
573*1da177e4SLinus Torvalds 
574*1da177e4SLinus Torvalds error_exit:
575*1da177e4SLinus Torvalds     if (th->t_trans_id) {
576*1da177e4SLinus Torvalds         int err;
577*1da177e4SLinus Torvalds         // update any changes we made to blk count
578*1da177e4SLinus Torvalds         reiserfs_update_sd(th, inode);
579*1da177e4SLinus Torvalds         err = journal_end(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1 + 2 * REISERFS_QUOTA_TRANS_BLOCKS);
580*1da177e4SLinus Torvalds         if (err)
581*1da177e4SLinus Torvalds             res = err;
582*1da177e4SLinus Torvalds     }
583*1da177e4SLinus Torvalds     reiserfs_write_unlock(inode->i_sb);
584*1da177e4SLinus Torvalds     kfree(allocated_blocks);
585*1da177e4SLinus Torvalds 
586*1da177e4SLinus Torvalds     return res;
587*1da177e4SLinus Torvalds }
588*1da177e4SLinus Torvalds 
589*1da177e4SLinus Torvalds /* Unlock pages prepared by reiserfs_prepare_file_region_for_write */
590*1da177e4SLinus Torvalds static void reiserfs_unprepare_pages(struct page **prepared_pages, /* list of locked pages */
591*1da177e4SLinus Torvalds 			      size_t num_pages /* amount of pages */) {
592*1da177e4SLinus Torvalds     int i; // loop counter
593*1da177e4SLinus Torvalds 
594*1da177e4SLinus Torvalds     for (i=0; i < num_pages ; i++) {
595*1da177e4SLinus Torvalds 	struct page *page = prepared_pages[i];
596*1da177e4SLinus Torvalds 
597*1da177e4SLinus Torvalds 	try_to_free_buffers(page);
598*1da177e4SLinus Torvalds 	unlock_page(page);
599*1da177e4SLinus Torvalds 	page_cache_release(page);
600*1da177e4SLinus Torvalds     }
601*1da177e4SLinus Torvalds }
602*1da177e4SLinus Torvalds 
603*1da177e4SLinus Torvalds /* This function will copy data from userspace to specified pages within
604*1da177e4SLinus Torvalds    supplied byte range */
605*1da177e4SLinus Torvalds static int reiserfs_copy_from_user_to_file_region(
606*1da177e4SLinus Torvalds 				loff_t pos, /* In-file position */
607*1da177e4SLinus Torvalds 				int num_pages, /* Number of pages affected */
608*1da177e4SLinus Torvalds 				int write_bytes, /* Amount of bytes to write */
609*1da177e4SLinus Torvalds 				struct page **prepared_pages, /* pointer to
610*1da177e4SLinus Torvalds 								 array to
611*1da177e4SLinus Torvalds 								 prepared pages
612*1da177e4SLinus Torvalds 								*/
613*1da177e4SLinus Torvalds 				const char __user *buf /* Pointer to user-supplied
614*1da177e4SLinus Torvalds 						   data*/
615*1da177e4SLinus Torvalds 				)
616*1da177e4SLinus Torvalds {
617*1da177e4SLinus Torvalds     long page_fault=0; // status of copy_from_user.
618*1da177e4SLinus Torvalds     int i; // loop counter.
619*1da177e4SLinus Torvalds     int offset; // offset in page
620*1da177e4SLinus Torvalds 
621*1da177e4SLinus Torvalds     for ( i = 0, offset = (pos & (PAGE_CACHE_SIZE-1)); i < num_pages ; i++,offset=0) {
622*1da177e4SLinus Torvalds 	size_t count = min_t(size_t,PAGE_CACHE_SIZE-offset,write_bytes); // How much of bytes to write to this page
623*1da177e4SLinus Torvalds 	struct page *page=prepared_pages[i]; // Current page we process.
624*1da177e4SLinus Torvalds 
625*1da177e4SLinus Torvalds 	fault_in_pages_readable( buf, count);
626*1da177e4SLinus Torvalds 
627*1da177e4SLinus Torvalds 	/* Copy data from userspace to the current page */
628*1da177e4SLinus Torvalds 	kmap(page);
629*1da177e4SLinus Torvalds 	page_fault = __copy_from_user(page_address(page)+offset, buf, count); // Copy the data.
630*1da177e4SLinus Torvalds 	/* Flush processor's dcache for this page */
631*1da177e4SLinus Torvalds 	flush_dcache_page(page);
632*1da177e4SLinus Torvalds 	kunmap(page);
633*1da177e4SLinus Torvalds 	buf+=count;
634*1da177e4SLinus Torvalds 	write_bytes-=count;
635*1da177e4SLinus Torvalds 
636*1da177e4SLinus Torvalds 	if (page_fault)
637*1da177e4SLinus Torvalds 	    break; // Was there a fault? abort.
638*1da177e4SLinus Torvalds     }
639*1da177e4SLinus Torvalds 
640*1da177e4SLinus Torvalds     return page_fault?-EFAULT:0;
641*1da177e4SLinus Torvalds }
642*1da177e4SLinus Torvalds 
/* taken fs/buffer.c:__block_commit_write */
/*
 * Commit the byte range [from, to) of @page after data has been copied in:
 * buffers covered by the range are marked uptodate and either journalled
 * (when the file is mounted with data journalling), put on the ordered
 * list (data=ordered, for new buffers or pages at/past EOF) or simply
 * dirtied.  Buffers outside the range are only inspected so we can tell
 * whether the whole page became uptodate.
 * Returns 0 on success or the error from journal_begin()/journal_end().
 */
int reiserfs_commit_page(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
    unsigned block_start, block_end;
    int partial = 0;	/* set if a buffer outside [from, to) is not uptodate */
    unsigned blocksize;
    struct buffer_head *bh, *head;
    unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
    int new;		/* buffer had BH_New set before we cleared it */
    int logit = reiserfs_file_data_log(inode);	/* data journalling enabled? */
    struct super_block *s = inode->i_sb;
    int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
    struct reiserfs_transaction_handle th;
    int ret = 0;

    th.t_trans_id = 0;
    blocksize = 1 << inode->i_blkbits;

    /* With data journalling every buffer of this page may be logged, so
       open a transaction sized for all of them (+1 for safety). */
    if (logit) {
	reiserfs_write_lock(s);
	ret = journal_begin(&th, s, bh_per_page + 1);
	if (ret)
	    goto drop_write_lock;
	reiserfs_update_inode_transaction(inode);
    }
    /* Walk the circular list of the page's buffers exactly once. */
    for(bh = head = page_buffers(page), block_start = 0;
        bh != head || !block_start;
	block_start=block_end, bh = bh->b_this_page)
    {

	new = buffer_new(bh);
	clear_buffer_new(bh);
	block_end = block_start + blocksize;
	if (block_end <= from || block_start >= to) {
	    /* Buffer lies fully outside the written range; it only
	       matters for deciding page uptodateness below. */
	    if (!buffer_uptodate(bh))
		    partial = 1;
	} else {
	    set_buffer_uptodate(bh);
	    if (logit) {
		reiserfs_prepare_for_journal(s, bh, 1);
		journal_mark_dirty(&th, s, bh);
	    } else if (!buffer_dirty(bh)) {
		mark_buffer_dirty(bh);
		/* do data=ordered on any page past the end
		 * of file and any buffer marked BH_New.
		 */
		if (reiserfs_data_ordered(inode->i_sb) &&
		    (new || page->index >= i_size_index)) {
		    reiserfs_add_ordered_list(inode, bh);
	        }
	    }
	}
    }
    if (logit) {
	ret = journal_end(&th, s, bh_per_page + 1);
	/* a failed journal_begin() jumps here with the lock still held */
drop_write_lock:
	reiserfs_write_unlock(s);
    }
    /*
     * If this is a partial write which happened to make all buffers
     * uptodate then we can optimize away a bogus readpage() for
     * the next read(). Here we 'discover' whether the page went
     * uptodate as a result of this (potentially partial) write.
     */
    if (!partial)
	SetPageUptodate(page);
    return ret;
}
712*1da177e4SLinus Torvalds 
713*1da177e4SLinus Torvalds 
/* Submit pages for write. This was separated from actual file copying
   because we might want to allocate block numbers in-between.
   This function assumes that caller will adjust file size to correct value.
   Commits every prepared page, grows i_size if the write extended the
   file (updating the stat data / tail-packing flag accordingly), closes
   the caller's transaction if one is running, and finally unlocks and
   releases the pages.  Returns 0 or the first error encountered. */
static int reiserfs_submit_file_region_for_write(
				struct reiserfs_transaction_handle *th,
				struct inode *inode,
				loff_t pos, /* Writing position offset */
				size_t num_pages, /* Number of pages to write */
				size_t write_bytes, /* number of bytes to write */
				struct page **prepared_pages /* list of pages */
				)
{
    int status; // return status of block_commit_write.
    int retval = 0; // Return value we are going to return.
    int i; // loop counter
    int offset; // Writing offset in page.
    int orig_write_bytes = write_bytes;
    int sd_update = 0; // set once stat data has been updated in this call

    /* Commit each page: mark the written buffers uptodate and dirty
       (or journal/order them - see reiserfs_commit_page()). */
    for ( i = 0, offset = (pos & (PAGE_CACHE_SIZE-1)); i < num_pages ; i++,offset=0) {
	int count = min_t(int,PAGE_CACHE_SIZE-offset,write_bytes); // How much of bytes to write to this page
	struct page *page=prepared_pages[i]; // Current page we process.

	status = reiserfs_commit_page(inode, page, offset, offset+count);
	if ( status )
	    retval = status; // To not overcomplicate matters We are going to
			     // submit all the pages even if there was error.
			     // we only remember error status to report it on
			     // exit.
	write_bytes-=count;
    }
    /* now that we've gotten all the ordered buffers marked dirty,
     * we can safely update i_size and close any running transaction
     */
    if ( pos + orig_write_bytes > inode->i_size) {
	inode->i_size = pos + orig_write_bytes; // Set new size
	/* If the file have grown so much that tail packing is no
	 * longer possible, reset "need to pack" flag */
	if ( (have_large_tails (inode->i_sb) &&
	      inode->i_size > i_block_size (inode)*4) ||
	     (have_small_tails (inode->i_sb) &&
	     inode->i_size > i_block_size(inode)) )
	    REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask ;
        else if ( (have_large_tails (inode->i_sb) &&
	          inode->i_size < i_block_size (inode)*4) ||
	          (have_small_tails (inode->i_sb) &&
		  inode->i_size < i_block_size(inode)) )
	    REISERFS_I(inode)->i_flags |= i_pack_on_close_mask ;

	/* Persist the new size: through the journal when a transaction
	   is running, otherwise just mark the inode dirty. */
	if (th->t_trans_id) {
	    reiserfs_write_lock(inode->i_sb);
	    reiserfs_update_sd(th, inode); // And update on-disk metadata
	    reiserfs_write_unlock(inode->i_sb);
	} else
	    inode->i_sb->s_op->dirty_inode(inode);

        sd_update = 1;
    }
    /* Close the caller's transaction; update the stat data first unless
       the i_size branch above already did so. */
    if (th->t_trans_id) {
	reiserfs_write_lock(inode->i_sb);
	if (!sd_update)
	    reiserfs_update_sd(th, inode);
	status = journal_end(th, th->t_super, th->t_blocks_allocated);
        if (status)
            retval = status;
	reiserfs_write_unlock(inode->i_sb);
    }
    th->t_trans_id = 0;

    /*
     * we have to unlock the pages after updating i_size, otherwise
     * we race with writepage
     */
    for ( i = 0; i < num_pages ; i++) {
	struct page *page=prepared_pages[i];
	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);
    }
    return retval;
}
795*1da177e4SLinus Torvalds 
/* Look if passed writing region is going to touch file's tail
   (if it is present). And if it is, convert the tail to unformatted node.
   Returns 0 on success (including "no tail / nothing to do"), -EIO on a
   tree-search I/O error, or the result of generic_cont_expand(). */
static int reiserfs_check_for_tail_and_convert( struct inode *inode, /* inode to deal with */
					 loff_t pos, /* Writing position */
					 int write_bytes /* amount of bytes to write */
				        )
{
    INITIALIZE_PATH(path); // needed for search_for_position
    struct cpu_key key; // Key that would represent last touched writing byte.
    struct item_head *ih; // item header of found block;
    int res; // Return value of various functions we call.
    int cont_expand_offset; // We will put offset for generic_cont_expand here
			    // This can be int just because tails are created
			    // only for small files.

/* this embodies a dependency on a particular tail policy */
    if ( inode->i_size >= inode->i_sb->s_blocksize*4 ) {
	/* files this big do not have tails, so we won't bother ourselves
	   to look for tails, simply return */
	return 0;
    }

    reiserfs_write_lock(inode->i_sb);
    /* find the item containing the last byte to be written, or if
     * writing past the end of the file then the last item of the
     * file (and then we check its type). */
    /* NOTE(review): the +1 presumably accounts for reiserfs keys being
       1-based byte offsets - confirm against make_cpu_key() callers. */
    make_cpu_key (&key, inode, pos+write_bytes+1, TYPE_ANY, 3/*key length*/);
    res = search_for_position_by_key(inode->i_sb, &key, &path);
    if ( res == IO_ERROR ) {
        reiserfs_write_unlock(inode->i_sb);
	return -EIO;
    }
    ih = get_ih(&path);
    res = 0;
    if ( is_direct_le_ih(ih) ) {
	/* Ok, closest item is file tail (tails are stored in "direct"
	 * items), so we need to unpack it. */
	/* To not overcomplicate matters, we just call generic_cont_expand
	   which will in turn call other stuff and finally will boil down to
	    reiserfs_get_block() that would do necessary conversion. */
	cont_expand_offset = le_key_k_offset(get_inode_item_key_version(inode), &(ih->ih_key));
	/* release the path before re-entering the tree via get_block */
	pathrelse(&path);
	res = generic_cont_expand( inode, cont_expand_offset);
    } else
	pathrelse(&path);

    reiserfs_write_unlock(inode->i_sb);
    return res;
}
845*1da177e4SLinus Torvalds 
846*1da177e4SLinus Torvalds /* This function locks pages starting from @pos for @inode.
847*1da177e4SLinus Torvalds    @num_pages pages are locked and stored in
848*1da177e4SLinus Torvalds    @prepared_pages array. Also buffers are allocated for these pages.
849*1da177e4SLinus Torvalds    First and last page of the region is read if it is overwritten only
850*1da177e4SLinus Torvalds    partially. If last page did not exist before write (file hole or file
851*1da177e4SLinus Torvalds    append), it is zeroed, then.
852*1da177e4SLinus Torvalds    Returns number of unallocated blocks that should be allocated to cover
853*1da177e4SLinus Torvalds    new file data.*/
854*1da177e4SLinus Torvalds static int reiserfs_prepare_file_region_for_write(
855*1da177e4SLinus Torvalds 				struct inode *inode /* Inode of the file */,
856*1da177e4SLinus Torvalds 				loff_t pos, /* position in the file */
857*1da177e4SLinus Torvalds 				size_t num_pages, /* number of pages to
858*1da177e4SLinus Torvalds 					          prepare */
859*1da177e4SLinus Torvalds 				size_t write_bytes, /* Amount of bytes to be
860*1da177e4SLinus Torvalds 						    overwritten from
861*1da177e4SLinus Torvalds 						    @pos */
862*1da177e4SLinus Torvalds 				struct page **prepared_pages /* pointer to array
863*1da177e4SLinus Torvalds 							       where to store
864*1da177e4SLinus Torvalds 							       prepared pages */
865*1da177e4SLinus Torvalds 					   )
866*1da177e4SLinus Torvalds {
867*1da177e4SLinus Torvalds     int res=0; // Return values of different functions we call.
868*1da177e4SLinus Torvalds     unsigned long index = pos >> PAGE_CACHE_SHIFT; // Offset in file in pages.
869*1da177e4SLinus Torvalds     int from = (pos & (PAGE_CACHE_SIZE - 1)); // Writing offset in first page
870*1da177e4SLinus Torvalds     int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1;
871*1da177e4SLinus Torvalds 					 /* offset of last modified byte in last
872*1da177e4SLinus Torvalds 				            page */
873*1da177e4SLinus Torvalds     struct address_space *mapping = inode->i_mapping; // Pages are mapped here.
874*1da177e4SLinus Torvalds     int i; // Simple counter
875*1da177e4SLinus Torvalds     int blocks = 0; /* Return value (blocks that should be allocated) */
876*1da177e4SLinus Torvalds     struct buffer_head *bh, *head; // Current bufferhead and first bufferhead
877*1da177e4SLinus Torvalds 				   // of a page.
878*1da177e4SLinus Torvalds     unsigned block_start, block_end; // Starting and ending offsets of current
879*1da177e4SLinus Torvalds 				     // buffer in the page.
880*1da177e4SLinus Torvalds     struct buffer_head *wait[2], **wait_bh=wait; // Buffers for page, if
881*1da177e4SLinus Torvalds 						 // Page appeared to be not up
882*1da177e4SLinus Torvalds 						 // to date. Note how we have
883*1da177e4SLinus Torvalds 						 // at most 2 buffers, this is
884*1da177e4SLinus Torvalds 						 // because we at most may
885*1da177e4SLinus Torvalds 						 // partially overwrite two
886*1da177e4SLinus Torvalds 						 // buffers for one page. One at                                                 // the beginning of write area
887*1da177e4SLinus Torvalds 						 // and one at the end.
888*1da177e4SLinus Torvalds 						 // Everything inthe middle gets                                                 // overwritten totally.
889*1da177e4SLinus Torvalds 
890*1da177e4SLinus Torvalds     struct cpu_key key; // cpu key of item that we are going to deal with
891*1da177e4SLinus Torvalds     struct item_head *ih = NULL; // pointer to item head that we are going to deal with
892*1da177e4SLinus Torvalds     struct buffer_head *itembuf=NULL; // Buffer head that contains items that we are going to deal with
893*1da177e4SLinus Torvalds     INITIALIZE_PATH(path); // path to item, that we are going to deal with.
894*1da177e4SLinus Torvalds     __u32 * item=NULL; // pointer to item we are going to deal with
895*1da177e4SLinus Torvalds     int item_pos=-1; /* Position in indirect item */
896*1da177e4SLinus Torvalds 
897*1da177e4SLinus Torvalds 
898*1da177e4SLinus Torvalds     if ( num_pages < 1 ) {
899*1da177e4SLinus Torvalds 	reiserfs_warning (inode->i_sb,
900*1da177e4SLinus Torvalds 			  "green-9001: reiserfs_prepare_file_region_for_write "
901*1da177e4SLinus Torvalds 			  "called with zero number of pages to process");
902*1da177e4SLinus Torvalds 	return -EFAULT;
903*1da177e4SLinus Torvalds     }
904*1da177e4SLinus Torvalds 
905*1da177e4SLinus Torvalds     /* We have 2 loops for pages. In first loop we grab and lock the pages, so
906*1da177e4SLinus Torvalds        that nobody would touch these until we release the pages. Then
907*1da177e4SLinus Torvalds        we'd start to deal with mapping buffers to blocks. */
908*1da177e4SLinus Torvalds     for ( i = 0; i < num_pages; i++) {
909*1da177e4SLinus Torvalds 	prepared_pages[i] = grab_cache_page(mapping, index + i); // locks the page
910*1da177e4SLinus Torvalds 	if ( !prepared_pages[i]) {
911*1da177e4SLinus Torvalds 	    res = -ENOMEM;
912*1da177e4SLinus Torvalds 	    goto failed_page_grabbing;
913*1da177e4SLinus Torvalds 	}
914*1da177e4SLinus Torvalds 	if (!page_has_buffers(prepared_pages[i]))
915*1da177e4SLinus Torvalds 	    create_empty_buffers(prepared_pages[i], inode->i_sb->s_blocksize, 0);
916*1da177e4SLinus Torvalds     }
917*1da177e4SLinus Torvalds 
    /* Let's count amount of blocks for a case where all the blocks
       overwritten are new (we will subtract already allocated blocks later)*/
920*1da177e4SLinus Torvalds     if ( num_pages > 2 )
921*1da177e4SLinus Torvalds 	/* These are full-overwritten pages so we count all the blocks in
922*1da177e4SLinus Torvalds 	   these pages are counted as needed to be allocated */
923*1da177e4SLinus Torvalds 	blocks = (num_pages - 2) << (PAGE_CACHE_SHIFT - inode->i_blkbits);
924*1da177e4SLinus Torvalds 
925*1da177e4SLinus Torvalds     /* count blocks needed for first page (possibly partially written) */
926*1da177e4SLinus Torvalds     blocks += ((PAGE_CACHE_SIZE - from) >> inode->i_blkbits) +
927*1da177e4SLinus Torvalds 	   !!(from & (inode->i_sb->s_blocksize-1)); /* roundup */
928*1da177e4SLinus Torvalds 
929*1da177e4SLinus Torvalds     /* Now we account for last page. If last page == first page (we
930*1da177e4SLinus Torvalds        overwrite only one page), we substract all the blocks past the
931*1da177e4SLinus Torvalds        last writing position in a page out of already calculated number
932*1da177e4SLinus Torvalds        of blocks */
933*1da177e4SLinus Torvalds     blocks += ((num_pages > 1) << (PAGE_CACHE_SHIFT-inode->i_blkbits)) -
934*1da177e4SLinus Torvalds 	   ((PAGE_CACHE_SIZE - to) >> inode->i_blkbits);
935*1da177e4SLinus Torvalds 	   /* Note how we do not roundup here since partial blocks still
936*1da177e4SLinus Torvalds 		   should be allocated */
937*1da177e4SLinus Torvalds 
938*1da177e4SLinus Torvalds     /* Now if all the write area lies past the file end, no point in
939*1da177e4SLinus Torvalds        maping blocks, since there is none, so we just zero out remaining
940*1da177e4SLinus Torvalds        parts of first and last pages in write area (if needed) */
941*1da177e4SLinus Torvalds     if ( (pos & ~((loff_t)PAGE_CACHE_SIZE - 1)) > inode->i_size ) {
942*1da177e4SLinus Torvalds 	if ( from != 0 ) {/* First page needs to be partially zeroed */
943*1da177e4SLinus Torvalds 	    char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
944*1da177e4SLinus Torvalds 	    memset(kaddr, 0, from);
945*1da177e4SLinus Torvalds 	    kunmap_atomic( kaddr, KM_USER0);
946*1da177e4SLinus Torvalds 	}
947*1da177e4SLinus Torvalds 	if ( to != PAGE_CACHE_SIZE ) { /* Last page needs to be partially zeroed */
948*1da177e4SLinus Torvalds 	    char *kaddr = kmap_atomic(prepared_pages[num_pages-1], KM_USER0);
949*1da177e4SLinus Torvalds 	    memset(kaddr+to, 0, PAGE_CACHE_SIZE - to);
950*1da177e4SLinus Torvalds 	    kunmap_atomic( kaddr, KM_USER0);
951*1da177e4SLinus Torvalds 	}
952*1da177e4SLinus Torvalds 
953*1da177e4SLinus Torvalds 	/* Since all blocks are new - use already calculated value */
954*1da177e4SLinus Torvalds 	return blocks;
955*1da177e4SLinus Torvalds     }
956*1da177e4SLinus Torvalds 
957*1da177e4SLinus Torvalds     /* Well, since we write somewhere into the middle of a file, there is
958*1da177e4SLinus Torvalds        possibility we are writing over some already allocated blocks, so
959*1da177e4SLinus Torvalds        let's map these blocks and substract number of such blocks out of blocks
960*1da177e4SLinus Torvalds        we need to allocate (calculated above) */
961*1da177e4SLinus Torvalds     /* Mask write position to start on blocksize, we do it out of the
962*1da177e4SLinus Torvalds        loop for performance reasons */
963*1da177e4SLinus Torvalds     pos &= ~((loff_t) inode->i_sb->s_blocksize - 1);
964*1da177e4SLinus Torvalds     /* Set cpu key to the starting position in a file (on left block boundary)*/
965*1da177e4SLinus Torvalds     make_cpu_key (&key, inode, 1 + ((pos) & ~((loff_t) inode->i_sb->s_blocksize - 1)), TYPE_ANY, 3/*key length*/);
966*1da177e4SLinus Torvalds 
967*1da177e4SLinus Torvalds     reiserfs_write_lock(inode->i_sb); // We need that for at least search_by_key()
968*1da177e4SLinus Torvalds     for ( i = 0; i < num_pages ; i++ ) {
969*1da177e4SLinus Torvalds 
970*1da177e4SLinus Torvalds 	head = page_buffers(prepared_pages[i]);
971*1da177e4SLinus Torvalds 	/* For each buffer in the page */
972*1da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
973*1da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
974*1da177e4SLinus Torvalds 		if (!bh)
975*1da177e4SLinus Torvalds 		    reiserfs_panic(inode->i_sb, "green-9002: Allocated but absent buffer for a page?");
976*1da177e4SLinus Torvalds 		/* Find where this buffer ends */
977*1da177e4SLinus Torvalds 		block_end = block_start+inode->i_sb->s_blocksize;
978*1da177e4SLinus Torvalds 		if (i == 0 && block_end <= from )
979*1da177e4SLinus Torvalds 		    /* if this buffer is before requested data to map, skip it*/
980*1da177e4SLinus Torvalds 		    continue;
981*1da177e4SLinus Torvalds 
982*1da177e4SLinus Torvalds 		if (i == num_pages - 1 && block_start >= to) {
983*1da177e4SLinus Torvalds 		    /* If this buffer is after requested data to map, abort
984*1da177e4SLinus Torvalds 		       processing of current page */
985*1da177e4SLinus Torvalds 		    break;
986*1da177e4SLinus Torvalds 		}
987*1da177e4SLinus Torvalds 
988*1da177e4SLinus Torvalds 		if ( buffer_mapped(bh) && bh->b_blocknr !=0 ) {
989*1da177e4SLinus Torvalds 		    /* This is optimisation for a case where buffer is mapped
990*1da177e4SLinus Torvalds 		       and have blocknumber assigned. In case significant amount
991*1da177e4SLinus Torvalds 		       of such buffers are present, we may avoid some amount
992*1da177e4SLinus Torvalds 		       of search_by_key calls.
993*1da177e4SLinus Torvalds 		       Probably it would be possible to move parts of this code
994*1da177e4SLinus Torvalds 		       out of BKL, but I afraid that would overcomplicate code
995*1da177e4SLinus Torvalds 		       without any noticeable benefit.
996*1da177e4SLinus Torvalds 		    */
997*1da177e4SLinus Torvalds 		    item_pos++;
998*1da177e4SLinus Torvalds 		    /* Update the key */
999*1da177e4SLinus Torvalds 		    set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + inode->i_sb->s_blocksize);
1000*1da177e4SLinus Torvalds 		    blocks--; // Decrease the amount of blocks that need to be
1001*1da177e4SLinus Torvalds 			      // allocated
1002*1da177e4SLinus Torvalds 		    continue; // Go to the next buffer
1003*1da177e4SLinus Torvalds 		}
1004*1da177e4SLinus Torvalds 
1005*1da177e4SLinus Torvalds 		if ( !itembuf || /* if first iteration */
1006*1da177e4SLinus Torvalds 		     item_pos >= ih_item_len(ih)/UNFM_P_SIZE)
1007*1da177e4SLinus Torvalds 					     { /* or if we progressed past the
1008*1da177e4SLinus Torvalds 						  current unformatted_item */
1009*1da177e4SLinus Torvalds 			/* Try to find next item */
1010*1da177e4SLinus Torvalds 			res = search_for_position_by_key(inode->i_sb, &key, &path);
1011*1da177e4SLinus Torvalds 			/* Abort if no more items */
1012*1da177e4SLinus Torvalds 			if ( res != POSITION_FOUND ) {
1013*1da177e4SLinus Torvalds 			    /* make sure later loops don't use this item */
1014*1da177e4SLinus Torvalds 			    itembuf = NULL;
1015*1da177e4SLinus Torvalds 			    item = NULL;
1016*1da177e4SLinus Torvalds 			    break;
1017*1da177e4SLinus Torvalds 			}
1018*1da177e4SLinus Torvalds 
1019*1da177e4SLinus Torvalds 			/* Update information about current indirect item */
1020*1da177e4SLinus Torvalds 			itembuf = get_last_bh( &path );
1021*1da177e4SLinus Torvalds 			ih = get_ih( &path );
1022*1da177e4SLinus Torvalds 			item = get_item( &path );
1023*1da177e4SLinus Torvalds 			item_pos = path.pos_in_item;
1024*1da177e4SLinus Torvalds 
1025*1da177e4SLinus Torvalds 			RFALSE( !is_indirect_le_ih (ih), "green-9003: indirect item expected");
1026*1da177e4SLinus Torvalds 		}
1027*1da177e4SLinus Torvalds 
1028*1da177e4SLinus Torvalds 		/* See if there is some block associated with the file
1029*1da177e4SLinus Torvalds 		   at that position, map the buffer to this block */
1030*1da177e4SLinus Torvalds 		if ( get_block_num(item,item_pos) ) {
1031*1da177e4SLinus Torvalds 		    map_bh(bh, inode->i_sb, get_block_num(item,item_pos));
1032*1da177e4SLinus Torvalds 		    blocks--; // Decrease the amount of blocks that need to be
1033*1da177e4SLinus Torvalds 			      // allocated
1034*1da177e4SLinus Torvalds 		}
1035*1da177e4SLinus Torvalds 		item_pos++;
1036*1da177e4SLinus Torvalds 		/* Update the key */
1037*1da177e4SLinus Torvalds 		set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + inode->i_sb->s_blocksize);
1038*1da177e4SLinus Torvalds 	}
1039*1da177e4SLinus Torvalds     }
1040*1da177e4SLinus Torvalds     pathrelse(&path); // Free the path
1041*1da177e4SLinus Torvalds     reiserfs_write_unlock(inode->i_sb);
1042*1da177e4SLinus Torvalds 
1043*1da177e4SLinus Torvalds 	/* Now zero out unmappend buffers for the first and last pages of
1044*1da177e4SLinus Torvalds 	   write area or issue read requests if page is mapped. */
1045*1da177e4SLinus Torvalds 	/* First page, see if it is not uptodate */
1046*1da177e4SLinus Torvalds 	if ( !PageUptodate(prepared_pages[0]) ) {
1047*1da177e4SLinus Torvalds 	    head = page_buffers(prepared_pages[0]);
1048*1da177e4SLinus Torvalds 
1049*1da177e4SLinus Torvalds 	    /* For each buffer in page */
1050*1da177e4SLinus Torvalds 	    for(bh = head, block_start = 0; bh != head || !block_start;
1051*1da177e4SLinus Torvalds 		block_start=block_end, bh = bh->b_this_page) {
1052*1da177e4SLinus Torvalds 
1053*1da177e4SLinus Torvalds 		if (!bh)
1054*1da177e4SLinus Torvalds 		    reiserfs_panic(inode->i_sb, "green-9002: Allocated but absent buffer for a page?");
1055*1da177e4SLinus Torvalds 		/* Find where this buffer ends */
1056*1da177e4SLinus Torvalds 		block_end = block_start+inode->i_sb->s_blocksize;
1057*1da177e4SLinus Torvalds 		if ( block_end <= from )
1058*1da177e4SLinus Torvalds 		    /* if this buffer is before requested data to map, skip it*/
1059*1da177e4SLinus Torvalds 		    continue;
1060*1da177e4SLinus Torvalds 		if ( block_start < from ) { /* Aha, our partial buffer */
1061*1da177e4SLinus Torvalds 		    if ( buffer_mapped(bh) ) { /* If it is mapped, we need to
1062*1da177e4SLinus Torvalds 						  issue READ request for it to
1063*1da177e4SLinus Torvalds 						  not loose data */
1064*1da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
1065*1da177e4SLinus Torvalds 			*wait_bh++=bh;
1066*1da177e4SLinus Torvalds 		    } else { /* Not mapped, zero it */
1067*1da177e4SLinus Torvalds 			char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
1068*1da177e4SLinus Torvalds 			memset(kaddr+block_start, 0, from-block_start);
1069*1da177e4SLinus Torvalds 			kunmap_atomic( kaddr, KM_USER0);
1070*1da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
1071*1da177e4SLinus Torvalds 		    }
1072*1da177e4SLinus Torvalds 		}
1073*1da177e4SLinus Torvalds 	    }
1074*1da177e4SLinus Torvalds 	}
1075*1da177e4SLinus Torvalds 
1076*1da177e4SLinus Torvalds 	/* Last page, see if it is not uptodate, or if the last page is past the end of the file. */
1077*1da177e4SLinus Torvalds 	if ( !PageUptodate(prepared_pages[num_pages-1]) ||
1078*1da177e4SLinus Torvalds 	    ((pos+write_bytes)>>PAGE_CACHE_SHIFT) > (inode->i_size>>PAGE_CACHE_SHIFT) ) {
1079*1da177e4SLinus Torvalds 	    head = page_buffers(prepared_pages[num_pages-1]);
1080*1da177e4SLinus Torvalds 
1081*1da177e4SLinus Torvalds 	    /* for each buffer in page */
1082*1da177e4SLinus Torvalds 	    for(bh = head, block_start = 0; bh != head || !block_start;
1083*1da177e4SLinus Torvalds 		block_start=block_end, bh = bh->b_this_page) {
1084*1da177e4SLinus Torvalds 
1085*1da177e4SLinus Torvalds 		if (!bh)
1086*1da177e4SLinus Torvalds 		    reiserfs_panic(inode->i_sb, "green-9002: Allocated but absent buffer for a page?");
1087*1da177e4SLinus Torvalds 		/* Find where this buffer ends */
1088*1da177e4SLinus Torvalds 		block_end = block_start+inode->i_sb->s_blocksize;
1089*1da177e4SLinus Torvalds 		if ( block_start >= to )
1090*1da177e4SLinus Torvalds 		    /* if this buffer is after requested data to map, skip it*/
1091*1da177e4SLinus Torvalds 		    break;
1092*1da177e4SLinus Torvalds 		if ( block_end > to ) { /* Aha, our partial buffer */
1093*1da177e4SLinus Torvalds 		    if ( buffer_mapped(bh) ) { /* If it is mapped, we need to
1094*1da177e4SLinus Torvalds 						  issue READ request for it to
1095*1da177e4SLinus Torvalds 						  not loose data */
1096*1da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
1097*1da177e4SLinus Torvalds 			*wait_bh++=bh;
1098*1da177e4SLinus Torvalds 		    } else { /* Not mapped, zero it */
1099*1da177e4SLinus Torvalds 			char *kaddr = kmap_atomic(prepared_pages[num_pages-1], KM_USER0);
1100*1da177e4SLinus Torvalds 			memset(kaddr+to, 0, block_end-to);
1101*1da177e4SLinus Torvalds 			kunmap_atomic( kaddr, KM_USER0);
1102*1da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
1103*1da177e4SLinus Torvalds 		    }
1104*1da177e4SLinus Torvalds 		}
1105*1da177e4SLinus Torvalds 	    }
1106*1da177e4SLinus Torvalds 	}
1107*1da177e4SLinus Torvalds 
1108*1da177e4SLinus Torvalds     /* Wait for read requests we made to happen, if necessary */
1109*1da177e4SLinus Torvalds     while(wait_bh > wait) {
1110*1da177e4SLinus Torvalds 	wait_on_buffer(*--wait_bh);
1111*1da177e4SLinus Torvalds 	if (!buffer_uptodate(*wait_bh)) {
1112*1da177e4SLinus Torvalds 	    res = -EIO;
1113*1da177e4SLinus Torvalds 	    goto failed_read;
1114*1da177e4SLinus Torvalds 	}
1115*1da177e4SLinus Torvalds     }
1116*1da177e4SLinus Torvalds 
1117*1da177e4SLinus Torvalds     return blocks;
1118*1da177e4SLinus Torvalds failed_page_grabbing:
1119*1da177e4SLinus Torvalds     num_pages = i;
1120*1da177e4SLinus Torvalds failed_read:
1121*1da177e4SLinus Torvalds     reiserfs_unprepare_pages(prepared_pages, num_pages);
1122*1da177e4SLinus Torvalds     return res;
1123*1da177e4SLinus Torvalds }
1124*1da177e4SLinus Torvalds 
1125*1da177e4SLinus Torvalds /* Write @count bytes at position @ppos in a file indicated by @file
1126*1da177e4SLinus Torvalds    from the buffer @buf.
1127*1da177e4SLinus Torvalds 
1128*1da177e4SLinus Torvalds    generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
1129*1da177e4SLinus Torvalds    something simple that works.  It is not for serious use by general purpose filesystems, excepting the one that it was
1130*1da177e4SLinus Torvalds    written for (ext2/3).  This is for several reasons:
1131*1da177e4SLinus Torvalds 
1132*1da177e4SLinus Torvalds    * It has no understanding of any filesystem specific optimizations.
1133*1da177e4SLinus Torvalds 
1134*1da177e4SLinus Torvalds    * It enters the filesystem repeatedly for each page that is written.
1135*1da177e4SLinus Torvalds 
1136*1da177e4SLinus Torvalds    * It depends on reiserfs_get_block() function which if implemented by reiserfs performs costly search_by_key
1137*1da177e4SLinus Torvalds    * operation for each page it is supplied with. By contrast reiserfs_file_write() feeds as much as possible at a time
1138*1da177e4SLinus Torvalds    * to reiserfs which allows for fewer tree traversals.
1139*1da177e4SLinus Torvalds 
1140*1da177e4SLinus Torvalds    * Each indirect pointer insertion takes a lot of cpu, because it involves memory moves inside of blocks.
1141*1da177e4SLinus Torvalds 
1142*1da177e4SLinus Torvalds    * Asking the block allocation code for blocks one at a time is slightly less efficient.
1143*1da177e4SLinus Torvalds 
1144*1da177e4SLinus Torvalds    All of these reasons for not using only generic file write were understood back when reiserfs was first miscoded to
1145*1da177e4SLinus Torvalds    use it, but we were in a hurry to make code freeze, and so it couldn't be revised then.  This new code should make
1146*1da177e4SLinus Torvalds    things right finally.
1147*1da177e4SLinus Torvalds 
1148*1da177e4SLinus Torvalds    Future Features: providing search_by_key with hints.
1149*1da177e4SLinus Torvalds 
1150*1da177e4SLinus Torvalds */
1151*1da177e4SLinus Torvalds static ssize_t reiserfs_file_write( struct file *file, /* the file we are going to write into */
1152*1da177e4SLinus Torvalds                              const char __user *buf, /*  pointer to user supplied data
1153*1da177e4SLinus Torvalds (in userspace) */
1154*1da177e4SLinus Torvalds                              size_t count, /* amount of bytes to write */
1155*1da177e4SLinus Torvalds                              loff_t *ppos /* pointer to position in file that we start writing at. Should be updated to
1156*1da177e4SLinus Torvalds                                            * new current position before returning. */ )
1157*1da177e4SLinus Torvalds {
1158*1da177e4SLinus Torvalds     size_t already_written = 0; // Number of bytes already written to the file.
1159*1da177e4SLinus Torvalds     loff_t pos; // Current position in the file.
1160*1da177e4SLinus Torvalds     ssize_t res; // return value of various functions that we call.
1161*1da177e4SLinus Torvalds     int err = 0;
1162*1da177e4SLinus Torvalds     struct inode *inode = file->f_dentry->d_inode; // Inode of the file that we are writing to.
1163*1da177e4SLinus Torvalds 				/* To simplify coding at this time, we store
1164*1da177e4SLinus Torvalds 				   locked pages in array for now */
1165*1da177e4SLinus Torvalds     struct page * prepared_pages[REISERFS_WRITE_PAGES_AT_A_TIME];
1166*1da177e4SLinus Torvalds     struct reiserfs_transaction_handle th;
1167*1da177e4SLinus Torvalds     th.t_trans_id = 0;
1168*1da177e4SLinus Torvalds 
1169*1da177e4SLinus Torvalds     if ( file->f_flags & O_DIRECT) { // Direct IO needs treatment
1170*1da177e4SLinus Torvalds 	ssize_t result, after_file_end = 0;
1171*1da177e4SLinus Torvalds 	if ( (*ppos + count >= inode->i_size) || (file->f_flags & O_APPEND) ) {
1172*1da177e4SLinus Torvalds 	    /* If we are appending a file, we need to put this savelink in here.
1173*1da177e4SLinus Torvalds 	       If we will crash while doing direct io, finish_unfinished will
1174*1da177e4SLinus Torvalds 	       cut the garbage from the file end. */
1175*1da177e4SLinus Torvalds 	    reiserfs_write_lock(inode->i_sb);
1176*1da177e4SLinus Torvalds 	    err = journal_begin(&th, inode->i_sb,  JOURNAL_PER_BALANCE_CNT );
1177*1da177e4SLinus Torvalds             if (err) {
1178*1da177e4SLinus Torvalds 		reiserfs_write_unlock (inode->i_sb);
1179*1da177e4SLinus Torvalds 		return err;
1180*1da177e4SLinus Torvalds 	    }
1181*1da177e4SLinus Torvalds 	    reiserfs_update_inode_transaction(inode);
1182*1da177e4SLinus Torvalds 	    add_save_link (&th, inode, 1 /* Truncate */);
1183*1da177e4SLinus Torvalds 	    after_file_end = 1;
1184*1da177e4SLinus Torvalds 	    err = journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT );
1185*1da177e4SLinus Torvalds             reiserfs_write_unlock(inode->i_sb);
1186*1da177e4SLinus Torvalds 	    if (err)
1187*1da177e4SLinus Torvalds 		return err;
1188*1da177e4SLinus Torvalds 	}
1189*1da177e4SLinus Torvalds 	result = generic_file_write(file, buf, count, ppos);
1190*1da177e4SLinus Torvalds 
1191*1da177e4SLinus Torvalds 	if ( after_file_end ) { /* Now update i_size and remove the savelink */
1192*1da177e4SLinus Torvalds 	    struct reiserfs_transaction_handle th;
1193*1da177e4SLinus Torvalds 	    reiserfs_write_lock(inode->i_sb);
1194*1da177e4SLinus Torvalds 	    err = journal_begin(&th, inode->i_sb, 1);
1195*1da177e4SLinus Torvalds             if (err) {
1196*1da177e4SLinus Torvalds                 reiserfs_write_unlock (inode->i_sb);
1197*1da177e4SLinus Torvalds                 return err;
1198*1da177e4SLinus Torvalds             }
1199*1da177e4SLinus Torvalds 	    reiserfs_update_inode_transaction(inode);
1200*1da177e4SLinus Torvalds 	    reiserfs_update_sd(&th, inode);
1201*1da177e4SLinus Torvalds 	    err = journal_end(&th, inode->i_sb, 1);
1202*1da177e4SLinus Torvalds             if (err) {
1203*1da177e4SLinus Torvalds                 reiserfs_write_unlock (inode->i_sb);
1204*1da177e4SLinus Torvalds                 return err;
1205*1da177e4SLinus Torvalds             }
1206*1da177e4SLinus Torvalds 	    err = remove_save_link (inode, 1/* truncate */);
1207*1da177e4SLinus Torvalds 	    reiserfs_write_unlock(inode->i_sb);
1208*1da177e4SLinus Torvalds             if (err)
1209*1da177e4SLinus Torvalds                 return err;
1210*1da177e4SLinus Torvalds 	}
1211*1da177e4SLinus Torvalds 
1212*1da177e4SLinus Torvalds 	return result;
1213*1da177e4SLinus Torvalds     }
1214*1da177e4SLinus Torvalds 
1215*1da177e4SLinus Torvalds     if ( unlikely((ssize_t) count < 0 ))
1216*1da177e4SLinus Torvalds         return -EINVAL;
1217*1da177e4SLinus Torvalds 
1218*1da177e4SLinus Torvalds     if (unlikely(!access_ok(VERIFY_READ, buf, count)))
1219*1da177e4SLinus Torvalds         return -EFAULT;
1220*1da177e4SLinus Torvalds 
1221*1da177e4SLinus Torvalds     down(&inode->i_sem); // locks the entire file for just us
1222*1da177e4SLinus Torvalds 
1223*1da177e4SLinus Torvalds     pos = *ppos;
1224*1da177e4SLinus Torvalds 
1225*1da177e4SLinus Torvalds     /* Check if we can write to specified region of file, file
1226*1da177e4SLinus Torvalds        is not overly big and this kind of stuff. Adjust pos and
1227*1da177e4SLinus Torvalds        count, if needed */
1228*1da177e4SLinus Torvalds     res = generic_write_checks(file, &pos, &count, 0);
1229*1da177e4SLinus Torvalds     if (res)
1230*1da177e4SLinus Torvalds 	goto out;
1231*1da177e4SLinus Torvalds 
1232*1da177e4SLinus Torvalds     if ( count == 0 )
1233*1da177e4SLinus Torvalds 	goto out;
1234*1da177e4SLinus Torvalds 
1235*1da177e4SLinus Torvalds     res = remove_suid(file->f_dentry);
1236*1da177e4SLinus Torvalds     if (res)
1237*1da177e4SLinus Torvalds 	goto out;
1238*1da177e4SLinus Torvalds 
1239*1da177e4SLinus Torvalds     inode_update_time(inode, 1); /* Both mtime and ctime */
1240*1da177e4SLinus Torvalds 
1241*1da177e4SLinus Torvalds     // Ok, we are done with all the checks.
1242*1da177e4SLinus Torvalds 
1243*1da177e4SLinus Torvalds     // Now we should start real work
1244*1da177e4SLinus Torvalds 
1245*1da177e4SLinus Torvalds     /* If we are going to write past the file's packed tail or if we are going
1246*1da177e4SLinus Torvalds        to overwrite part of the tail, we need that tail to be converted into
1247*1da177e4SLinus Torvalds        unformatted node */
1248*1da177e4SLinus Torvalds     res = reiserfs_check_for_tail_and_convert( inode, pos, count);
1249*1da177e4SLinus Torvalds     if (res)
1250*1da177e4SLinus Torvalds 	goto out;
1251*1da177e4SLinus Torvalds 
1252*1da177e4SLinus Torvalds     while ( count > 0) {
1253*1da177e4SLinus Torvalds 	/* This is the main loop in which we running until some error occures
1254*1da177e4SLinus Torvalds 	   or until we write all of the data. */
1255*1da177e4SLinus Torvalds 	size_t num_pages;/* amount of pages we are going to write this iteration */
1256*1da177e4SLinus Torvalds 	size_t write_bytes; /* amount of bytes to write during this iteration */
1257*1da177e4SLinus Torvalds 	size_t blocks_to_allocate; /* how much blocks we need to allocate for this iteration */
1258*1da177e4SLinus Torvalds 
1259*1da177e4SLinus Torvalds         /*  (pos & (PAGE_CACHE_SIZE-1)) is an idiom for offset into a page of pos*/
1260*1da177e4SLinus Torvalds 	num_pages = !!((pos+count) & (PAGE_CACHE_SIZE - 1)) + /* round up partial
1261*1da177e4SLinus Torvalds 							  pages */
1262*1da177e4SLinus Torvalds 		    ((count + (pos & (PAGE_CACHE_SIZE-1))) >> PAGE_CACHE_SHIFT);
1263*1da177e4SLinus Torvalds 						/* convert size to amount of
1264*1da177e4SLinus Torvalds 						   pages */
1265*1da177e4SLinus Torvalds 	reiserfs_write_lock(inode->i_sb);
1266*1da177e4SLinus Torvalds 	if ( num_pages > REISERFS_WRITE_PAGES_AT_A_TIME
1267*1da177e4SLinus Torvalds 		|| num_pages > reiserfs_can_fit_pages(inode->i_sb) ) {
1268*1da177e4SLinus Torvalds 	    /* If we were asked to write more data than we want to or if there
1269*1da177e4SLinus Torvalds 	       is not that much space, then we shorten amount of data to write
1270*1da177e4SLinus Torvalds 	       for this iteration. */
1271*1da177e4SLinus Torvalds 	    num_pages = min_t(size_t, REISERFS_WRITE_PAGES_AT_A_TIME, reiserfs_can_fit_pages(inode->i_sb));
1272*1da177e4SLinus Torvalds 	    /* Also we should not forget to set size in bytes accordingly */
1273*1da177e4SLinus Torvalds 	    write_bytes = (num_pages << PAGE_CACHE_SHIFT) -
1274*1da177e4SLinus Torvalds 			    (pos & (PAGE_CACHE_SIZE-1));
1275*1da177e4SLinus Torvalds 					 /* If position is not on the
1276*1da177e4SLinus Torvalds 					    start of the page, we need
1277*1da177e4SLinus Torvalds 					    to substract the offset
1278*1da177e4SLinus Torvalds 					    within page */
1279*1da177e4SLinus Torvalds 	} else
1280*1da177e4SLinus Torvalds 	    write_bytes = count;
1281*1da177e4SLinus Torvalds 
1282*1da177e4SLinus Torvalds 	/* reserve the blocks to be allocated later, so that later on
1283*1da177e4SLinus Torvalds 	   we still have the space to write the blocks to */
1284*1da177e4SLinus Torvalds 	reiserfs_claim_blocks_to_be_allocated(inode->i_sb, num_pages << (PAGE_CACHE_SHIFT - inode->i_blkbits));
1285*1da177e4SLinus Torvalds 	reiserfs_write_unlock(inode->i_sb);
1286*1da177e4SLinus Torvalds 
1287*1da177e4SLinus Torvalds 	if ( !num_pages ) { /* If we do not have enough space even for */
1288*1da177e4SLinus Torvalds 	    res = -ENOSPC;  /* single page, return -ENOSPC */
1289*1da177e4SLinus Torvalds 	    if ( pos > (inode->i_size & (inode->i_sb->s_blocksize-1)))
1290*1da177e4SLinus Torvalds 		break; // In case we are writing past the file end, break.
1291*1da177e4SLinus Torvalds 	    // Otherwise we are possibly overwriting the file, so
1292*1da177e4SLinus Torvalds 	    // let's set write size to be equal or less than blocksize.
1293*1da177e4SLinus Torvalds 	    // This way we get it correctly for file holes.
1294*1da177e4SLinus Torvalds 	    // But overwriting files on absolutelly full volumes would not
1295*1da177e4SLinus Torvalds 	    // be very efficient. Well, people are not supposed to fill
1296*1da177e4SLinus Torvalds 	    // 100% of disk space anyway.
1297*1da177e4SLinus Torvalds 	    write_bytes = min_t(size_t, count, inode->i_sb->s_blocksize - (pos & (inode->i_sb->s_blocksize - 1)));
1298*1da177e4SLinus Torvalds 	    num_pages = 1;
1299*1da177e4SLinus Torvalds 	    // No blocks were claimed before, so do it now.
1300*1da177e4SLinus Torvalds 	    reiserfs_claim_blocks_to_be_allocated(inode->i_sb, 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits));
1301*1da177e4SLinus Torvalds 	}
1302*1da177e4SLinus Torvalds 
1303*1da177e4SLinus Torvalds 	/* Prepare for writing into the region, read in all the
1304*1da177e4SLinus Torvalds 	   partially overwritten pages, if needed. And lock the pages,
1305*1da177e4SLinus Torvalds 	   so that nobody else can access these until we are done.
1306*1da177e4SLinus Torvalds 	   We get number of actual blocks needed as a result.*/
1307*1da177e4SLinus Torvalds 	blocks_to_allocate = reiserfs_prepare_file_region_for_write(inode, pos, num_pages, write_bytes, prepared_pages);
1308*1da177e4SLinus Torvalds 	if ( blocks_to_allocate < 0 ) {
1309*1da177e4SLinus Torvalds 	    res = blocks_to_allocate;
1310*1da177e4SLinus Torvalds 	    reiserfs_release_claimed_blocks(inode->i_sb, num_pages << (PAGE_CACHE_SHIFT - inode->i_blkbits));
1311*1da177e4SLinus Torvalds 	    break;
1312*1da177e4SLinus Torvalds 	}
1313*1da177e4SLinus Torvalds 
1314*1da177e4SLinus Torvalds 	/* First we correct our estimate of how many blocks we need */
1315*1da177e4SLinus Torvalds 	reiserfs_release_claimed_blocks(inode->i_sb, (num_pages << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits)) - blocks_to_allocate );
1316*1da177e4SLinus Torvalds 
1317*1da177e4SLinus Torvalds 	if ( blocks_to_allocate > 0) {/*We only allocate blocks if we need to*/
1318*1da177e4SLinus Torvalds 	    /* Fill in all the possible holes and append the file if needed */
1319*1da177e4SLinus Torvalds 	    res = reiserfs_allocate_blocks_for_region(&th, inode, pos, num_pages, write_bytes, prepared_pages, blocks_to_allocate);
1320*1da177e4SLinus Torvalds 	}
1321*1da177e4SLinus Torvalds 
1322*1da177e4SLinus Torvalds 	/* well, we have allocated the blocks, so it is time to free
1323*1da177e4SLinus Torvalds 	   the reservation we made earlier. */
1324*1da177e4SLinus Torvalds 	reiserfs_release_claimed_blocks(inode->i_sb, blocks_to_allocate);
1325*1da177e4SLinus Torvalds 	if ( res ) {
1326*1da177e4SLinus Torvalds 	    reiserfs_unprepare_pages(prepared_pages, num_pages);
1327*1da177e4SLinus Torvalds 	    break;
1328*1da177e4SLinus Torvalds 	}
1329*1da177e4SLinus Torvalds 
1330*1da177e4SLinus Torvalds /* NOTE that allocating blocks and filling blocks can be done in reverse order
1331*1da177e4SLinus Torvalds    and probably we would do that just to get rid of garbage in files after a
1332*1da177e4SLinus Torvalds    crash */
1333*1da177e4SLinus Torvalds 
1334*1da177e4SLinus Torvalds 	/* Copy data from user-supplied buffer to file's pages */
1335*1da177e4SLinus Torvalds 	res = reiserfs_copy_from_user_to_file_region(pos, num_pages, write_bytes, prepared_pages, buf);
1336*1da177e4SLinus Torvalds 	if ( res ) {
1337*1da177e4SLinus Torvalds 	    reiserfs_unprepare_pages(prepared_pages, num_pages);
1338*1da177e4SLinus Torvalds 	    break;
1339*1da177e4SLinus Torvalds 	}
1340*1da177e4SLinus Torvalds 
1341*1da177e4SLinus Torvalds 	/* Send the pages to disk and unlock them. */
1342*1da177e4SLinus Torvalds 	res = reiserfs_submit_file_region_for_write(&th, inode, pos, num_pages,
1343*1da177e4SLinus Torvalds 	                                            write_bytes,prepared_pages);
1344*1da177e4SLinus Torvalds 	if ( res )
1345*1da177e4SLinus Torvalds 	    break;
1346*1da177e4SLinus Torvalds 
1347*1da177e4SLinus Torvalds 	already_written += write_bytes;
1348*1da177e4SLinus Torvalds 	buf += write_bytes;
1349*1da177e4SLinus Torvalds 	*ppos = pos += write_bytes;
1350*1da177e4SLinus Torvalds 	count -= write_bytes;
1351*1da177e4SLinus Torvalds 	balance_dirty_pages_ratelimited(inode->i_mapping);
1352*1da177e4SLinus Torvalds     }
1353*1da177e4SLinus Torvalds 
1354*1da177e4SLinus Torvalds     /* this is only true on error */
1355*1da177e4SLinus Torvalds     if (th.t_trans_id) {
1356*1da177e4SLinus Torvalds         reiserfs_write_lock(inode->i_sb);
1357*1da177e4SLinus Torvalds         err = journal_end(&th, th.t_super, th.t_blocks_allocated);
1358*1da177e4SLinus Torvalds         reiserfs_write_unlock(inode->i_sb);
1359*1da177e4SLinus Torvalds         if (err) {
1360*1da177e4SLinus Torvalds             res = err;
1361*1da177e4SLinus Torvalds             goto out;
1362*1da177e4SLinus Torvalds         }
1363*1da177e4SLinus Torvalds     }
1364*1da177e4SLinus Torvalds 
1365*1da177e4SLinus Torvalds     if ((file->f_flags & O_SYNC) || IS_SYNC(inode))
1366*1da177e4SLinus Torvalds 	res = generic_osync_inode(inode, file->f_mapping, OSYNC_METADATA|OSYNC_DATA);
1367*1da177e4SLinus Torvalds 
1368*1da177e4SLinus Torvalds     up(&inode->i_sem);
1369*1da177e4SLinus Torvalds     reiserfs_async_progress_wait(inode->i_sb);
1370*1da177e4SLinus Torvalds     return (already_written != 0)?already_written:res;
1371*1da177e4SLinus Torvalds 
1372*1da177e4SLinus Torvalds out:
1373*1da177e4SLinus Torvalds     up(&inode->i_sem); // unlock the file on exit.
1374*1da177e4SLinus Torvalds     return res;
1375*1da177e4SLinus Torvalds }
1376*1da177e4SLinus Torvalds 
1377*1da177e4SLinus Torvalds static ssize_t reiserfs_aio_write(struct kiocb *iocb, const char __user *buf,
1378*1da177e4SLinus Torvalds 			       size_t count, loff_t pos)
1379*1da177e4SLinus Torvalds {
1380*1da177e4SLinus Torvalds     return generic_file_aio_write(iocb, buf, count, pos);
1381*1da177e4SLinus Torvalds }
1382*1da177e4SLinus Torvalds 
1383*1da177e4SLinus Torvalds 
1384*1da177e4SLinus Torvalds 
1385*1da177e4SLinus Torvalds struct file_operations reiserfs_file_operations = {
1386*1da177e4SLinus Torvalds     .read	= generic_file_read,
1387*1da177e4SLinus Torvalds     .write	= reiserfs_file_write,
1388*1da177e4SLinus Torvalds     .ioctl	= reiserfs_ioctl,
1389*1da177e4SLinus Torvalds     .mmap	= generic_file_mmap,
1390*1da177e4SLinus Torvalds     .release	= reiserfs_file_release,
1391*1da177e4SLinus Torvalds     .fsync	= reiserfs_sync_file,
1392*1da177e4SLinus Torvalds     .sendfile	= generic_file_sendfile,
1393*1da177e4SLinus Torvalds     .aio_read   = generic_file_aio_read,
1394*1da177e4SLinus Torvalds     .aio_write  = reiserfs_aio_write,
1395*1da177e4SLinus Torvalds };
1396*1da177e4SLinus Torvalds 
1397*1da177e4SLinus Torvalds 
1398*1da177e4SLinus Torvalds struct  inode_operations reiserfs_file_inode_operations = {
1399*1da177e4SLinus Torvalds     .truncate	= reiserfs_vfs_truncate_file,
1400*1da177e4SLinus Torvalds     .setattr    = reiserfs_setattr,
1401*1da177e4SLinus Torvalds     .setxattr   = reiserfs_setxattr,
1402*1da177e4SLinus Torvalds     .getxattr   = reiserfs_getxattr,
1403*1da177e4SLinus Torvalds     .listxattr  = reiserfs_listxattr,
1404*1da177e4SLinus Torvalds     .removexattr = reiserfs_removexattr,
1405*1da177e4SLinus Torvalds     .permission = reiserfs_permission,
1406*1da177e4SLinus Torvalds };
1407*1da177e4SLinus Torvalds 
1408*1da177e4SLinus Torvalds 
1409