blk-wbt.c: 7f36f5d11cda050b118f76d774151427a18d15ef (old) vs. dc3b17cc8bf21307c7e076e7c778d5db756f7871 (new)
 /*
  * buffered writeback throttling. loosely based on CoDel. We can't drop
  * packets for IO scheduling, so the logic is something like this:
  *
  * - Monitor latencies in a defined window of time.
  * - If the minimum latency in the above window exceeds some target, increment
  *   scaling step and scale down queue depth by a factor of 2x. The monitoring
  *   window is then shrunk to 100 / sqrt(scaling step + 1).

--- 82 unchanged lines hidden ---

 }

 /*
  * If a task was rate throttled in balance_dirty_pages() within the last
  * second or so, use that to indicate a higher cleaning rate.
  */
 static bool wb_recent_wait(struct rq_wb *rwb)
 {
-	struct bdi_writeback *wb = &rwb->queue->backing_dev_info.wb;
+	struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;

 	return time_before(jiffies, wb->dirty_sleep + HZ);
 }

 static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb, bool is_kswapd)
 {
 	return &rwb->rq_wait[is_kswapd];
 }
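Note: the shrink rule in the file header comment above is worth making concrete. The sketch below is a hypothetical, userspace-only illustration of the arithmetic, not the in-tree code (the kernel does the equivalent with integer math when it rearms its monitoring timer); the 100 ms default window is a stand-in value.

#include <math.h>
#include <stdio.h>

/* Window after "scale_step" throttling steps: 100 / sqrt(step + 1) percent
 * of the default window, per the comment at the top of blk-wbt.c. */
static unsigned long long shrunk_window_ns(unsigned long long def_win_ns,
					   unsigned int scale_step)
{
	return (unsigned long long)(def_win_ns / sqrt(scale_step + 1.0));
}

int main(void)
{
	unsigned long long def_win = 100ULL * 1000 * 1000;	/* 100 ms, stand-in */
	unsigned int step;

	/* step 0 -> 100 ms, step 1 -> ~70.7 ms, step 3 -> 50 ms, ... */
	for (step = 0; step <= 4; step++)
		printf("step %u: %llu ns\n", step,
		       shrunk_window_ns(def_win, step));
	return 0;
}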
--- 166 unchanged lines hidden ---

 	LAT_OK = 1,
 	LAT_UNKNOWN,
 	LAT_UNKNOWN_WRITES,
 	LAT_EXCEEDED,
 };

 static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 {
-	struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
 	u64 thislat;

 	/*
 	 * If our stored sync issue exceeds the window size, or it
 	 * exceeds our min target AND we haven't logged any entries,
 	 * flag the latency as exceeded. wbt works off completion latencies,
 	 * but for a flooded device, a single sync IO can take a long time
 	 * to complete after being issued. If this time exceeds our

--- 43 unchanged lines hidden ---
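Note: the comment in __latency_exceeded() is cut off by the hidden region, but its visible part describes a specific escape hatch: on a flooded device a single sync IO may stay outstanding for longer than the whole monitoring window, in which case the latency must be reported as exceeded even though no completion has been logged yet. A hypothetical reduction of that check, with plain scalars standing in for the rq_wb fields:

/* 1 if the stored sync issue time is older than the current monitoring
 * window while no completions have been sampled; stand-in scalars, not
 * the actual rq_wb state. */
static int sync_issue_exceeded(unsigned long long now_ns,
			       unsigned long long sync_issue_ns,
			       unsigned long long cur_win_ns,
			       unsigned int nr_samples)
{
	return sync_issue_ns != 0 && nr_samples == 0 &&
	       now_ns > sync_issue_ns &&
	       now_ns - sync_issue_ns > cur_win_ns;
}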
 	struct blk_rq_stat stat[2];

 	blk_queue_stat_get(rwb->queue, stat);
 	return __latency_exceeded(rwb, stat);
 }

 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
 {
-	struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;

 	trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
 			rwb->wb_background, rwb->wb_normal, rwb->wb_max);
 }

 static void scale_up(struct rq_wb *rwb)
 {
 	/*

--- 67 unchanged lines hidden ---
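Note: all four hunks in this diff are the same mechanical substitution. On the new side the request_queue carries a pointer to its backing_dev_info instead of embedding the struct, so `&rwb->queue->backing_dev_info` becomes `rwb->queue->backing_dev_info`, and the `.wb` member access in wb_recent_wait() becomes `->wb`. A reduced sketch of the two access patterns, with hypothetical stand-in types rather than the kernel headers:

struct bdi_writeback { unsigned long dirty_sleep; };
struct backing_dev_info { struct bdi_writeback wb; };

/* Old layout: the bdi lives inside the queue. */
struct queue_old { struct backing_dev_info backing_dev_info; };
/* New layout: the queue only points at a separately allocated bdi. */
struct queue_new { struct backing_dev_info *backing_dev_info; };

static struct backing_dev_info *bdi_old(struct queue_old *q)
{
	return &q->backing_dev_info;	/* take the embedded member's address */
}

static struct backing_dev_info *bdi_new(struct queue_new *q)
{
	return q->backing_dev_info;	/* already a pointer */
}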
 static void wb_timer_fn(unsigned long data)
 {
 	struct rq_wb *rwb = (struct rq_wb *) data;
 	unsigned int inflight = wbt_inflight(rwb);
 	int status;

 	status = latency_exceeded(rwb);

-	trace_wbt_timer(&rwb->queue->backing_dev_info, status, rwb->scale_step,
+	trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
 			inflight);

 	/*
 	 * If we exceeded the latency target, step down. If we did not,
 	 * step one level up. If we don't know enough to say either exceeded
 	 * or ok, then don't do anything.
 	 */
 	switch (status) {

--- 317 unchanged lines hidden ---
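Note: the switch at the end of wb_timer_fn() implements the three-way decision described in the comment above it. A hypothetical reduction, reusing the LAT_* values visible earlier in this file; the in-tree function treats the two unknown states more finely and tracks consecutive steps, which this sketch omits:

enum { LAT_OK = 1, LAT_UNKNOWN, LAT_UNKNOWN_WRITES, LAT_EXCEEDED };
enum { KEEP_DEPTH, STEP_UP, STEP_DOWN };

/* Map the latency verdict for the last window onto a queue-depth action. */
static int timer_action(int status)
{
	switch (status) {
	case LAT_EXCEEDED:
		return STEP_DOWN;	/* min latency over target: reduce depth */
	case LAT_OK:
		return STEP_UP;		/* under target: restore some depth */
	default:
		return KEEP_DEPTH;	/* not enough data to decide */
	}
}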