xref: /openbmc/linux/fs/xfs/xfs_iomap.c (revision f0702555)
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"

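/* Round a byte offset down to a multiple of the write I/O size (1 << m_writeio_log). */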
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

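/*
 * For an allocation that extends the file, round *last_fsb up to stripe
 * and/or extent size alignment, but only if the aligned block still lies
 * beyond the last allocated block in the data fork.
 */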
STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

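/* Report a corrupt mapping that points at block zero and fail with -EFSCORRUPTED. */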
STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

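/*
 * Allocate real extents to back a direct I/O (or DAX) write.  On entry the
 * caller holds the ilock shared and passes its current mapping in @imap; on
 * success @imap is overwritten with the newly allocated mapping.
 */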
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t free_list;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (ISUNWRITTEN(imap)) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize.  We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t   start_fsb;
	xfs_filblks_t   count_fsb;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If the file is smaller than the minimum prealloc and we are using
	 * dynamic preallocation, don't do any preallocation at all as it is
	 * likely this is the only write to the file that is going to be done.
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}

/*
 * Determine the initial size of the preallocation. We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write or
 * an extending write when determining the preallocation size.  Hence we need to
 * look up the extent that ends at the current write offset and use the result
 * to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for the
 * preallocation size. If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis. This ensures
 * that for large files the preallocation size always extends to MAXEXTLEN
 * rather than falling short due to things like stripe unit/width alignment of
 * real extents.
 */
STATIC xfs_fsblock_t
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t   start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/* If the file is small, then use the minimum prealloc */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF. i.e. if the size is
	 * 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
	 * will return FSB 1. Hence if there are blocks in the file, we want to
	 * point to the block prior to the EOF block and not the hole that maps
	 * directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount << 1;
	return XFS_B_TO_FSB(mp, offset);
}

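/* Should speculative preallocation be throttled against this quota type? */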
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

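/*
 * Scale the preallocation back according to how close this quota is to its
 * high watermark, and clamp the caller's free space estimate accordingly.
 */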
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows. Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}

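/*
 * Reserve delayed allocation blocks for a buffered write, speculatively
 * preallocating beyond EOF where appropriate.  If the reservation fails with
 * ENOSPC or EDQUOT, retry once without the EOF preallocation.
 */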
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return error;

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case -ENOSPC:
	case -EDQUOT:
		break;
	default:
		return error;
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return error ? error : -ENOSPC;
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);

			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we get for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, 0, &first_block,
						nres, imap, &nimaps,
						&free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request.
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data; try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

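/*
 * Convert the unwritten extents covering the given byte range to written
 * extents, logging updates to the on-disk file size as the conversion
 * proceeds.
 */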
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here, as we might be asked to write out the same inode that
		 * we are completing here and could deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}