/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 *	Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;
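
/*
 * Illustrative sketch (not part of the API): callers combine these flags
 * when requesting buffers. For example, a speculative, non-blocking cached
 * read lookup might pass:
 *
 *	xfs_buf_flags_t flags = XBF_READ | XBF_ASYNC | XBF_TRYLOCK;
 *
 * The XBF_TRYLOCK and XBF_UNMAPPED bits are argument-only and are never
 * left set on a cached buffer.
 */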

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }
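
/*
 * A minimal sketch of how this flag table is consumed (assuming the
 * standard tracepoint helpers): xfs_trace.h hands XFS_BUF_FLAGS to
 * __print_flags() so trace output shows symbolic names, e.g.
 *
 *	__print_flags(__entry->flags, "|", XFS_BUF_FLAGS)
 *
 * which renders a value like (XBF_READ | XBF_ASYNC) as "READ|ASYNC".
 */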


/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The second is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct backing_dev_info	*bt_bdi;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;
} xfs_buftarg_t;
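
/*
 * A sketch of how the sizes and masks above relate (illustrative; the
 * real initialisation is in xfs_setsize_buftarg()). Sector sizes are
 * powers of two, so each mask is simply size - 1 and a byte offset is
 * aligned when masking it yields zero:
 *
 *	btp->bt_meta_sectormask = btp->bt_meta_sectorsize - 1;
 *
 *	if ((offset | count) & btp->bt_logical_sectormask)
 *		-> reject the direct IO request as misaligned
 */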

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
};
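
/*
 * A hedged sketch of a b_ops verifier pair (names hypothetical; real
 * instances such as xfs_sb_buf_ops live with the individual metadata
 * type implementations). A read verifier checks what just came off disk
 * and records corruption via xfs_buf_ioerror(); a write verifier runs
 * just before the buffer goes to disk:
 *
 *	static void
 *	xfs_foo_read_verify(struct xfs_buf *bp)
 *	{
 *		if (!xfs_foo_verify(bp))	-> magic/CRC checks
 *			xfs_buf_ioerror(bp, EFSCORRUPTED);
 *	}
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *	};
 */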

typedef struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed. The semaphore straddles the cacheline
	 * boundary, but the counter and lock sit on the first cacheline,
	 * which is the only bit that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rb_node		b_rbnode;	/* rbtree node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_state is protected by b_lock
	 * and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_iodone_work;
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_fspriv;
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	unsigned short		b_error;	/* error code on I/O */
	const struct xfs_buf_ops	*b_ops;

#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
			      struct xfs_buf_map *map, int nmaps,
			      xfs_buf_flags_t flags, struct xfs_buf *new_bp);

static inline struct xfs_buf *
xfs_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_find(target, &map, 1, flags, NULL);
}

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}
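
/*
 * Illustrative read path (a sketch, not a canonical call site): read one
 * metadata buffer synchronously, check for IO/verifier errors, and drop
 * the reference. "blkno", "numblks" and the xfs_sb_buf_ops verifier are
 * placeholders for whatever the caller is actually reading:
 *
 *	bp = xfs_buf_read(target, blkno, numblks, 0, &xfs_sb_buf_ops);
 *	if (!bp)
 *		return ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	...use bp->b_addr...
 *	xfs_buf_relse(bp);
 */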

struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				int flags);
struct xfs_buf *xfs_buf_read_uncached(struct xfs_buftarg *target,
				xfs_daddr_t daddr, size_t numblks, int flags,
				const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)
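
/*
 * Locking sketch (illustrative): xfs_buf_trylock() returns non-zero on
 * success, so the usual non-blocking pattern is
 *
 *	if (!xfs_buf_trylock(bp))
 *		return EAGAIN;		-> would block, back off
 *	...modify buffer...
 *	xfs_buf_unlock(bp);
 *
 * xfs_buf_lock() is the sleeping variant of the same acquisition.
 */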

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
				xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	    xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
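
/*
 * Example (a sketch under assumed names): xfs_buf_iomove() copies data
 * into or out of a buffer page by page, so callers never need the buffer
 * to be contiguously mapped. Zeroing the unused tail of a buffer, where
 * "used" is a hypothetical byte count, looks like:
 *
 *	xfs_buf_zero(bp, used, BBTOB(bp->b_length) - used);
 */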

extern int xfs_bioerror_relse(struct xfs_buf *);

static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
	return bp ? bp->b_error : ENOMEM;
}

/* Buffer Utility Routines */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);

/* Delayed Write Buffer Routines */
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
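
/*
 * Typical delwri usage (illustrative sketch): callers batch dirty buffers
 * on a private list and submit them in one pass, e.g.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	while ((bp = next_dirty_buffer()) != NULL)	-> hypothetical source
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * xfs_buf_delwri_submit() waits for the IO to complete;
 * xfs_buf_delwri_submit_nowait() queues the writes and returns
 * immediately.
 */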

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

#define XFS_BUF_ZEROFLAGS(bp) \
	((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
			    XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
			    XBF_WRITE_FAIL))

void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XBF_STALE)
#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XBF_STALE)

#define XFS_BUF_DONE(bp)	((bp)->b_flags |= XBF_DONE)
#define XFS_BUF_UNDONE(bp)	((bp)->b_flags &= ~XBF_DONE)
#define XFS_BUF_ISDONE(bp)	((bp)->b_flags & XBF_DONE)

#define XFS_BUF_ASYNC(bp)	((bp)->b_flags |= XBF_ASYNC)
#define XFS_BUF_UNASYNC(bp)	((bp)->b_flags &= ~XBF_ASYNC)
#define XFS_BUF_ISASYNC(bp)	((bp)->b_flags & XBF_ASYNC)

#define XFS_BUF_READ(bp)	((bp)->b_flags |= XBF_READ)
#define XFS_BUF_UNREAD(bp)	((bp)->b_flags &= ~XBF_READ)
#define XFS_BUF_ISREAD(bp)	((bp)->b_flags & XBF_READ)

#define XFS_BUF_WRITE(bp)	((bp)->b_flags |= XBF_WRITE)
#define XFS_BUF_UNWRITE(bp)	((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp)	((bp)->b_flags & XBF_WRITE)

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the
 * IO map directly. Uncached buffers are not allowed to be discontiguous, so
 * this is safe to do.
 *
 * In the future, uncached buffers will pass the block number directly to the
 * IO request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	atomic_set(&bp->b_lru_ref, lru_ref);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
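
/*
 * Checksum helper usage (a sketch; the CRC offset constant is whatever
 * the on-disk structure defines, e.g. offsetof(struct xfs_agf, agf_crc)
 * for the AGF). A read verifier validates, a write verifier stamps:
 *
 *	if (!xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
 *		...corruption, record it via xfs_buf_ioerror()...
 *
 *	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);	-> before write-out
 */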

/*
 *	Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, int, const char *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#endif	/* __XFS_BUF_H__ */