/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		 const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

extern void wb_writeout_inc(struct bdi_writeback *wb);
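/*
 * Example (illustrative sketch, not part of the mainline header): reading
 * two of the per-cpu counters above together.  The helper name is
 * hypothetical; WB_RECLAIMABLE and WB_WRITEBACK are wb_stat_item values
 * from <linux/backing-dev-defs.h>.  Because the counters are batched per
 * cpu, the result may deviate from the true value by up to
 * wb_stat_error() below.
 */
static inline s64 wb_example_dirty_load(struct bdi_writeback *wb)
{
	/* cheap, approximate read; use wb_stat_sum() for an exact sum */
	return wb_stat(wb, WB_RECLAIMABLE) + wb_stat(wb, WB_WRITEBACK);
}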
/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Device requires pages to stay unmodified while
 *			   under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

extern struct backing_dev_info noop_backing_dev_info;
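/*
 * Example (hypothetical helper, illustration only): how an in-memory
 * filesystem in the spirit of the ramfs case noted above might mark its
 * bdi so dirty pages neither enter VM accounting nor are written back.
 */
static inline void example_init_ram_bdi(struct backing_dev_info *bdi)
{
	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
}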
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Test whether @inode has both.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
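/*
 * Example (illustrative sketch; the helper name is hypothetical): an
 * async write path backing off while the inode's wb is congested.  The
 * bit tested here is the same one inode_write_congested() at the bottom
 * of this file wraps.
 */
static inline bool example_should_throttle_write(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}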
/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}
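/*
 * Example (hypothetical helper, for illustration): the usual way a
 * writeback path picks the wb to operate against - the per-cgroup wb
 * when cgroup writeback is enabled for @inode, the bdi's root wb
 * otherwise.
 */
static inline struct bdi_writeback *
example_wb_for_inode(struct inode *inode, gfp_t gfp)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	if (!inode_cgwb_enabled(inode))
		return &bdi->wb;
	/* may return NULL if allocation fails under @gfp */
	return wb_get_create_current(bdi, gfp);
}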
/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and can't sleep during the transaction.  IRQs may or may not
 * be disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(*lockedp))
		spin_lock_irq(&inode->i_mapping->tree_lock);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb()'s lockdep check would bark here, so deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}

struct wb_iter {
	int			start_blkcg_id;
	struct radix_tree_iter	tree_iter;
	void			**slot;
};

static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
						   struct backing_dev_info *bdi)
{
	struct radix_tree_iter *titer = &iter->tree_iter;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (iter->start_blkcg_id >= 0) {
		iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
		iter->start_blkcg_id = -1;
	} else {
		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
	}

	if (!iter->slot)
		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
	if (iter->slot)
		return *iter->slot;
	return NULL;
}

static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
						   struct backing_dev_info *bdi,
						   int start_blkcg_id)
{
	iter->start_blkcg_id = start_blkcg_id;

	if (start_blkcg_id)
		return __wb_iter_next(iter, bdi);
	else
		return &bdi->wb;
}
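/*
 * Example (sketch, hypothetical helper): the begin/end pair above wrapped
 * around a wb stat update, similar in spirit to the page dirtying paths
 * in mm/page-writeback.c; the stat item is only illustrative.
 */
static inline void example_unlocked_stat_bump(struct inode *inode)
{
	struct bdi_writeback *wb;
	bool locked;

	wb = unlocked_inode_to_wb_begin(inode, &locked);
	inc_wb_stat(wb, WB_DIRTIED);
	unlocked_inode_to_wb_end(inode, locked);
}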
/**
 * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
 * @wb_cur: cursor struct bdi_writeback pointer
 * @bdi: bdi to walk wb's of
 * @iter: pointer to struct wb_iter to be used as iteration buffer
 * @start_blkcg_id: blkcg ID to start iteration from
 *
 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 * blkcg ID order starting from @start_blkcg_id.  @iter is struct wb_iter
 * to be used as temp storage during iteration.  rcu_read_lock() must be
 * held throughout iteration.
 */
#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id);	\
	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

struct wb_iter {
	int		next_id;
};

#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((iter)->next_id = (start_blkcg_id);			\
	     ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

#endif	/* _LINUX_BACKING_DEV_H */
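/*
 * Usage sketch (illustration only, hence kept in a comment): walking
 * every wb of a bdi with bdi_for_each_wb() under RCU and kicking
 * background writeback on each, in the spirit of the laptop-mode flush
 * in mm/page-writeback.c:
 *
 *	struct bdi_writeback *wb;
 *	struct wb_iter iter;
 *
 *	rcu_read_lock();
 *	bdi_for_each_wb(wb, bdi, &iter, 0)
 *		if (wb_has_dirty_io(wb))
 *			wb_start_background_writeback(wb);
 *	rcu_read_unlock();
 */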