/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * alloc.h
 *
 * Function prototypes
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#ifndef OCFS2_ALLOC_H
#define OCFS2_ALLOC_H


/*
 * For an xattr tree leaf, the leaf size is limited to 64K bytes.
 */
#define OCFS2_MAX_XATTR_TREE_LEAF_SIZE 65536

/*
 * ocfs2_extent_tree and ocfs2_extent_tree_operations abstract the b-tree
 * operations in ocfs2.  The b-tree code is no longer limited to the
 * ocfs2_dinode: any structure that needs to allocate clusters for its data
 * can use the b-tree, provided it supplies its own ocfs2_extent_tree and
 * operations.
 *
 * ocfs2_extent_tree is the first-class object for extent tree
 * manipulation.  Callers of the alloc.c code need to fill it via one of
 * the ocfs2_init_*_extent_tree() operations below.
 *
 * ocfs2_extent_tree holds the information describing the root of the
 * b-tree: it must have a root ocfs2_extent_list and a root_bh so that they
 * can be used in the b-tree functions.  It also needs the
 * ocfs2_caching_info structure associated with I/O on the tree.  With
 * metadata ecc, we now call different journal_access functions for each
 * type of metadata, so it must carry the root_journal_access function as
 * well.
 *
 * ocfs2_extent_tree_operations abstracts the operations performed on the
 * root of an extent b-tree.
 */
struct ocfs2_extent_tree_operations;
struct ocfs2_extent_tree {
	struct ocfs2_extent_tree_operations	*et_ops;
	struct buffer_head			*et_root_bh;
	struct ocfs2_extent_list		*et_root_el;
	struct ocfs2_caching_info		*et_ci;
	ocfs2_journal_access_func		et_root_journal_access;
	void					*et_object;
	unsigned int				et_max_leaf_clusters;
};

/*
 * ocfs2_init_*_extent_tree() will fill an ocfs2_extent_tree from the
 * specified object buffer.
 */
void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et,
				   struct inode *inode,
				   struct buffer_head *bh);
void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et,
				       struct inode *inode,
				       struct buffer_head *bh);
struct ocfs2_xattr_value_buf;
void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et,
					struct inode *inode,
					struct ocfs2_xattr_value_buf *vb);
void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et,
				    struct inode *inode,
				    struct buffer_head *bh);

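/*
 * Example (illustrative sketch, not part of the original header): a caller
 * that wants to manipulate an inode's allocation tree fills an on-stack
 * ocfs2_extent_tree from the inode's dinode buffer_head and then hands it
 * to the alloc.c entry points.  The journal handle and meta_ac reservation
 * are assumed to have been set up by the caller.
 *
 *	struct ocfs2_extent_tree et;
 *
 *	ocfs2_init_dinode_extent_tree(&et, inode, di_bh);
 *	ret = ocfs2_insert_extent(handle, &et, cpos, start_blk,
 *				  new_clusters, 0, meta_ac);
 */
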
/*
 * Read an extent block into *bh.  If *bh is NULL, a bh will be
 * allocated.  This is a cached read.  The extent block will be validated
 * with ocfs2_validate_extent_block().
 */
int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno,
			    struct buffer_head **bh);

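/*
 * Illustrative sketch (not part of the original header) of the NULL-bh
 * calling convention described above; the caller still owns the final
 * brelse():
 *
 *	struct buffer_head *eb_bh = NULL;
 *
 *	ret = ocfs2_read_extent_block(ci, eb_blkno, &eb_bh);
 *	if (ret)
 *		goto out;
 *	...
 *	brelse(eb_bh);
 */
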
struct ocfs2_alloc_context;
int ocfs2_insert_extent(handle_t *handle,
			struct ocfs2_extent_tree *et,
			u32 cpos,
			u64 start_blk,
			u32 new_clusters,
			u8 flags,
			struct ocfs2_alloc_context *meta_ac);

enum ocfs2_alloc_restarted {
	RESTART_NONE = 0,
	RESTART_TRANS,
	RESTART_META
};
int ocfs2_add_clusters_in_btree(handle_t *handle,
				struct ocfs2_extent_tree *et,
				u32 *logical_offset,
				u32 clusters_to_add,
				int mark_unwritten,
				struct ocfs2_alloc_context *data_ac,
				struct ocfs2_alloc_context *meta_ac,
				enum ocfs2_alloc_restarted *reason_ret);
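/*
 * Illustrative sketch (not part of the original header): callers typically
 * loop on the restart reason returned in *reason_ret, roughly:
 *
 *	enum ocfs2_alloc_restarted why = RESTART_NONE;
 *
 *	status = ocfs2_add_clusters_in_btree(handle, &et, &logical_offset,
 *					     clusters_to_add, mark_unwritten,
 *					     data_ac, meta_ac, &why);
 *	if (why == RESTART_META)
 *		... reserve more metadata blocks, then retry ...
 *	else if (why == RESTART_TRANS)
 *		... extend or restart the transaction, then retry ...
 */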
struct ocfs2_cached_dealloc_ctxt;
int ocfs2_mark_extent_written(struct inode *inode,
			      struct ocfs2_extent_tree *et,
			      handle_t *handle, u32 cpos, u32 len, u32 phys,
			      struct ocfs2_alloc_context *meta_ac,
			      struct ocfs2_cached_dealloc_ctxt *dealloc);
int ocfs2_remove_extent(handle_t *handle, struct ocfs2_extent_tree *et,
			u32 cpos, u32 len,
			struct ocfs2_alloc_context *meta_ac,
			struct ocfs2_cached_dealloc_ctxt *dealloc);
int ocfs2_remove_btree_range(struct inode *inode,
			     struct ocfs2_extent_tree *et,
			     u32 cpos, u32 phys_cpos, u32 len,
			     struct ocfs2_cached_dealloc_ctxt *dealloc);

int ocfs2_num_free_extents(struct ocfs2_super *osb,
			   struct ocfs2_extent_tree *et);

/*
 * How many new metadata blocks would an allocation need, at most?
 *
 * Note that the caller must make sure that root_el is the root of the
 * extent tree.  For an inode that means &fe->id2.i_list; otherwise the
 * result may be wrong.
 */
static inline int ocfs2_extend_meta_needed(struct ocfs2_extent_list *root_el)
{
	/*
	 * Rather than do all the work of determining how much we need
	 * (involves a ton of reads and locks), just ask for the
	 * maximal limit.  That's a tree depth shift.  So, one block for
	 * each level of the tree (current l_tree_depth), one block for
	 * the new tree_depth==0 extent_block, and one block at the new
	 * top of the tree.
	 */
	return le16_to_cpu(root_el->l_tree_depth) + 2;
}

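/*
 * Example (illustrative, not part of the original header): sizing a
 * metadata reservation before extending an inode whose allocation tree is
 * rooted in the dinode.  ocfs2_reserve_new_metadata_blocks() is the
 * suballocator helper assumed here:
 *
 *	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 *
 *	ret = ocfs2_reserve_new_metadata_blocks(osb,
 *				ocfs2_extend_meta_needed(&di->id2.i_list),
 *				&meta_ac);
 */
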
void ocfs2_dinode_new_extent_list(struct inode *inode, struct ocfs2_dinode *di);
void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di);
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
					 struct buffer_head *di_bh);

int ocfs2_truncate_log_init(struct ocfs2_super *osb);
void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb);
void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
				       int cancel);
int ocfs2_flush_truncate_log(struct ocfs2_super *osb);
int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
				      int slot_num,
				      struct ocfs2_dinode **tl_copy);
int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
					 struct ocfs2_dinode *tl_copy);
int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb);
int ocfs2_truncate_log_append(struct ocfs2_super *osb,
			      handle_t *handle,
			      u64 start_blk,
			      unsigned int num_clusters);
int __ocfs2_flush_truncate_log(struct ocfs2_super *osb);

/*
 * Process-local structure which describes the block unlinks done
 * during an operation.  This is populated via
 * ocfs2_cache_block_dealloc().
 *
 * ocfs2_run_deallocs() should be called after the potentially
 * de-allocating routines.  No journal handles should be open, and most
 * locks should have been dropped.
 */
struct ocfs2_cached_dealloc_ctxt {
	struct ocfs2_per_slot_free_list		*c_first_suballocator;
	struct ocfs2_cached_block_free		*c_global_allocator;
};
static inline void ocfs2_init_dealloc_ctxt(struct ocfs2_cached_dealloc_ctxt *c)
{
	c->c_first_suballocator = NULL;
	c->c_global_allocator = NULL;
}
int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
				u64 blkno, unsigned int bit);
static inline int ocfs2_dealloc_has_cluster(struct ocfs2_cached_dealloc_ctxt *c)
{
	return c->c_global_allocator != NULL;
}
int ocfs2_run_deallocs(struct ocfs2_super *osb,
		       struct ocfs2_cached_dealloc_ctxt *ctxt);

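/*
 * Illustrative sketch (not part of the original header) of the usual
 * lifetime of a dealloc context.  Error handling, cluster locking and the
 * journal start/stop calls are elided:
 *
 *	struct ocfs2_cached_dealloc_ctxt dealloc;
 *
 *	ocfs2_init_dealloc_ctxt(&dealloc);
 *
 *	... routines that may free blocks take &dealloc, e.g. ...
 *	ret = ocfs2_remove_btree_range(inode, &et, cpos, phys_cpos, len,
 *				       &dealloc);
 *
 *	... once the handle is committed and the locks are dropped ...
 *	ocfs2_run_deallocs(osb, &dealloc);
 */
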
struct ocfs2_truncate_context {
	struct ocfs2_cached_dealloc_ctxt tc_dealloc;
	int tc_ext_alloc_locked; /* is it cluster locked? */
	/* these get destroyed once it's passed to ocfs2_commit_truncate. */
	struct buffer_head *tc_last_eb_bh;
};

int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
				  u64 range_start, u64 range_end);
int ocfs2_prepare_truncate(struct ocfs2_super *osb,
			   struct inode *inode,
			   struct buffer_head *fe_bh,
			   struct ocfs2_truncate_context **tc);
int ocfs2_commit_truncate(struct ocfs2_super *osb,
			  struct inode *inode,
			  struct buffer_head *fe_bh,
			  struct ocfs2_truncate_context *tc);
int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
			  unsigned int start, unsigned int end, int trunc);

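/*
 * Example (illustrative, not part of the original header): the usual
 * two-step truncate.  As the struct comment above notes, the context is
 * destroyed once it is passed to ocfs2_commit_truncate(), so the caller
 * does not free it on that path:
 *
 *	struct ocfs2_truncate_context *tc = NULL;
 *
 *	ret = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc);
 *	if (ret)
 *		goto out;
 *	ret = ocfs2_commit_truncate(osb, inode, fe_bh, tc);
 */
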
int ocfs2_find_leaf(struct ocfs2_caching_info *ci,
		    struct ocfs2_extent_list *root_el, u32 cpos,
		    struct buffer_head **leaf_bh);
int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster);

/*
 * Helper function to look at the number of clusters in an extent record.
 */
static inline unsigned int ocfs2_rec_clusters(struct ocfs2_extent_list *el,
					      struct ocfs2_extent_rec *rec)
{
	/*
	 * Cluster count in extent records is slightly different
	 * between interior nodes and leaf nodes.  This is to support
	 * unwritten extents, which need a flags field in leaf node
	 * records, thus shrinking the available space for the clusters
	 * field.
	 */
	if (el->l_tree_depth)
		return le32_to_cpu(rec->e_int_clusters);
	else
		return le16_to_cpu(rec->e_leaf_clusters);
}

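/*
 * Illustrative sketch (not part of the original header): counting the
 * clusters covered by an extent list's records with ocfs2_rec_clusters(),
 * which handles interior and leaf nodes alike:
 *
 *	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
 *		clusters += ocfs2_rec_clusters(el, &el->l_recs[i]);
 */
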
/*
 * This is only valid for leaf nodes, which are the only ones that can
 * have empty extents anyway.
 */
static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
{
	return !rec->e_leaf_clusters;
}

#endif /* OCFS2_ALLOC_H */