/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

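/*
 * Refcounting sketch (illustrative, not part of this header): a caller
 * that looks a bdi up by id gets it with an elevated refcount and must
 * drop it with bdi_put() when done:
 *
 *	struct backing_dev_info *bdi = bdi_get_by_id(id);
 *
 *	if (bdi) {
 *		... use bdi ...
 *		bdi_put(bdi);
 *	}
 *
 * bdi_get_by_id() returns NULL when no registered bdi matches @id.
 */
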
__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

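/*
 * Lifecycle sketch (illustrative; the "mydev%d" name, @minor and the
 * error handling are made up for the example):
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	if (bdi_register(bdi, "mydev%d", minor)) {
 *		bdi_put(bdi);
 *		return -ENODEV;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 *
 * bdi_alloc() returns the bdi with one reference held, which bdi_put()
 * drops after unregistration.
 */
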
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

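/*
 * Usage sketch (illustrative): accounting code brackets a state change
 * with a matched inc/dec pair on one of the enum wb_stat_item counters,
 * e.g. WB_RECLAIMABLE:
 *
 *	inc_wb_stat(wb, WB_RECLAIMABLE);
 *	...
 *	dec_wb_stat(wb, WB_RECLAIMABLE);
 *
 * wb_stat() reads the batched percpu counter cheaply but approximately;
 * wb_stat_sum() folds in all percpu deltas and is exact but costlier.
 */
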
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * Maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
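
/*
 * Worked example (assuming WB_STAT_BATCH is 8 * (1 + ilog2(nr_cpu_ids))
 * as defined in backing-dev-defs.h): with 4 possible cpus the batch is
 * 8 * (1 + 2) = 24, so up to 24 events may sit unfolded on each cpu and
 * wb_stat() can deviate from wb_stat_sum() by at most 4 * 24 = 96.
 */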

/*
 * BDI ratios are expressed as parts per 1000000 for finer granularity: a
 * percentage passed to the setters below is scaled by BDI_RATIO_SCALE,
 * so 1% is stored as 10000 and 100% as 1000000.
 */
#define BDI_RATIO_SCALE 10000

u64 bdi_get_min_bytes(struct backing_dev_info *bdi);
u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes);
int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);

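/*
 * Example (illustrative): a filesystem that wants its device to use at
 * most half of the global dirty threshold could call
 *
 *	bdi_set_max_ratio(bdi, 50);
 *
 * The 50 is a percentage, stored internally as 50 * BDI_RATIO_SCALE =
 * 500000 parts per million; the *_no_scale() variants take the already
 * scaled value instead.
 */
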
/*
 * Flags in backing_dev_info::capabilities
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)

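/*
 * Usage sketch (illustrative): bdi_alloc() initializes the mask to
 * BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT.  A filesystem that does its
 * own writeback accounting, or wants the strict dirty limit, adjusts the
 * mask after allocation:
 *
 *	bdi->capabilities &= ~BDI_CAP_WRITEBACK_ACCT;
 *	bdi->capabilities |= BDI_CAP_STRICTLIMIT;
 */
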
extern struct backing_dev_info noop_backing_dev_info;

int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg and
 * iocg have to be on the default hierarchy.  Test whether all conditions are
 * met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock(), which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

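/*
 * Usage sketch (illustrative): the returned wb is only pinned for the
 * duration of the RCU read-side critical section; a caller that needs it
 * beyond that must take a reference first:
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb && !wb_tryget(wb))
 *		wb = NULL;
 *	rcu_read_unlock();
 */
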
/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

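/*
 * Note (usage sketch): the wb returned by wb_get_create_current() comes
 * with a reference held, so the caller drops it with wb_put() when done:
 *
 *	wb = wb_get_create_current(bdi, GFP_NOIO);
 *	if (wb) {
 *		... use wb ...
 *		wb_put(wb);
 *	}
 */
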
/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

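/*
 * Illustrative locking pattern: any one of the three locks named above
 * pins the inode-to-wb association, e.g.:
 *
 *	spin_lock(&inode->i_lock);
 *	wb = inode_to_wb(inode);
 *	... use wb while i_lock is held ...
 *	spin_unlock(&inode->i_lock);
 */
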
static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * If the wbc does not have an inode attached (wbc->wb is NULL),
	 * cgroup writeback was disabled when the wbc started.  Just use
	 * the bdi's default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb()'s lockdep check would trigger here, so
	 * dereference i_wb directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}

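/*
 * Transaction sketch (illustrative): a caller holding none of the
 * wb-pinning locks brackets its access like this:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... access wb without sleeping ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 *
 * This is the pattern the dirty-page accounting paths in
 * mm/page-writeback.c use.
 */
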
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */