/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
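
/*
 * Illustrative only: XFS_WRITEIO_ALIGN() rounds an offset down to the
 * write iosize boundary by shifting the low bits out and back in.
 * Assuming a hypothetical m_writeio_log of 16 (a 64k write iosize),
 * XFS_WRITEIO_ALIGN(mp, 0x12345) evaluates to 0x10000.
 */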

STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}
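
/*
 * Worked example for the helper above, with hypothetical geometry: a
 * stripe unit (m_dalign) of 16 fsb on a file already larger than that,
 * combined with an extsize hint of 12 fsb, gives
 * align = roundup_64(16, 12) = 24, so a *last_fsb of 100 becomes
 * roundup_64(100, 24) = 120 -- but only if block 120 is still beyond
 * the last allocated block (xfs_bmap_eof() reports eof != 0).
 */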

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}
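
	/*
	 * Worked example of the alignment above, with made-up numbers:
	 * offset_fsb = 5, count_fsb = 10, extsz = 4. do_mod(5, 4) = 1
	 * extends resaligned to 11 (pulling the start back to an extsize
	 * boundary), then do_mod(11, 4) = 3 adds 4 - 3 = 1 more (pushing
	 * the end out), so we reserve space for 12 blocks in total.
	 */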

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}
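
	/*
	 * Hypothetical numbers for the reservation above: on the realtime
	 * device, resaligned = 12 with sb_rextsize = 4 asks for
	 * resrtextents = 3 realtime extents, with only the metadata space
	 * (XFS_DIOSTRAT_SPACE_RES(mp, 0)) reserved on the data device,
	 * while quota is still charged the full 12 blocks. In the
	 * non-realtime case everything comes out of resblks.
	 */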

	/*
	 * Allocate and set up the transaction
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				  resblks, resrtextents);
	/*
	 * Check for running out of space, note: need lock to return
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				XFS_BMAPI_PREALLOC, &firstfsb, 0,
				imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize.  We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t   start_fsb;
	xfs_filblks_t   count_fsb;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If the file is smaller than the minimum prealloc and we are using
	 * dynamic preallocation, don't do any preallocation at all as it is
	 * likely this is the only write to the file that is going to be done.
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}
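
/*
 * To illustrate the logic above: an appending write into a file whose
 * only mapping past EOF is an existing delalloc extent sets
 * found_delalloc and leaves *prealloc at 0 (the speculation has already
 * been done), while a write that finds real blocks past EOF returns
 * early with *prealloc at 0 for the same reason.
 */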

/*
 * Determine the initial size of the preallocation. We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write or
 * an extending write when determining the preallocation size.  Hence we need to
 * look up the extent that ends at the current write offset and use the result
 * to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for the
 * preallocation size. If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis. This ensures
 * that for large files the preallocation size always extends to MAXEXTLEN
 * rather than falling short due to things like stripe unit/width alignment of
 * real extents.
 */
STATIC xfs_fsblock_t
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t   start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/* If the file is small, then use the minimum prealloc */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF. i.e. if the size is
	 * 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
	 * will return FSB 1. Hence if there are blocks in the file, we want to
	 * point to the block prior to the EOF block and not the hole that maps
	 * directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount << 1;
	return XFS_B_TO_FSB(mp, offset);
}
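
/*
 * Sketch of the sizing heuristic above, with hypothetical numbers: if
 * the extent preceding EOF is 8 blocks, the initial prealloc is
 * 8 << 1 = 16 blocks; once the preceding extent grows past
 * MAXEXTLEN / 2, the write offset itself (converted to blocks) becomes
 * the basis instead, so large files keep ramping up towards MAXEXTLEN.
 */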

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}
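
/*
 * Hypothetical example of the quota throttle above: with the remaining
 * quota headroom (freesp) below the 3% low-space threshold but above
 * 1%, shift becomes 2 + 2 = 4, which later divides the preallocation
 * by 16 -- provided freesp >> 4 is smaller than the throttle the
 * caller already recorded (the "more aggressive" check above).
 */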

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows. Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}
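
	/*
	 * Illustration with made-up numbers: if global free space has
	 * fallen below the 2% threshold (but not 1%), shift is
	 * 2 + 1 + 1 + 1 = 5, so the preallocation computed below is
	 * reduced to 1/32nd of its starting size.
	 */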

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}
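
/*
 * End-to-end sketch of the function above, all numbers hypothetical:
 * an initial estimate of 32768 blocks on a filesystem below the 4%
 * low-space threshold (shift = 3) yields 32768 >> 3 = 4096, already a
 * power of two and under MAXEXTLEN, so -- assuming free space still
 * exceeds that -- 4096 blocks are preallocated, and never fewer than
 * m_writeio_blocks.
 */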

int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return error;

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case -ENOSPC:
	case -EDQUOT:
		break;
	default:
		return error;
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return error ? error : -ENOSPC;
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
						  nres, 0);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return error;
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we get for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, 0,
						&first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS--we can't risk recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				  XFS_BMAPI_CONVERT, &firstfsb,
				  1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}