#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

/* Identifies which QoS policy a struct rq_qos on a queue's chain belongs to. */
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_CGROUP,
};

/* Inflight counter plus the waitqueue that throttled submitters sleep on. */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

/* Per-policy state, linked into a singly linked list off the request_queue. */
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
};

/* Hooks a QoS policy can implement; unused callbacks may be left NULL. */
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*exit)(struct rq_qos *);
};

struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

/* Walk the queue's rq_qos chain and return the entry matching @id, or NULL. */
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_CGROUP);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

/* Push @rqos onto the head of the queue's rq_qos chain. */
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
}

/* Unlink @rqos from the queue's rq_qos chain. */
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos *cur, *prev = NULL;
	for (cur = q->rq_qos; cur; cur = cur->next) {
		if (cur == rqos) {
			if (prev)
				prev->next = rqos->next;
			else
				q->rq_qos = rqos->next;
			break;
		}
		prev = cur;
	}
}

bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void rq_qos_cleanup(struct request_queue *, struct bio *);
void rq_qos_done(struct request_queue *, struct request *);
void rq_qos_issue(struct request_queue *, struct request *);
void rq_qos_requeue(struct request_queue *, struct request *);
void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
void rq_qos_exit(struct request_queue *);
#endif
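
/*
 * Illustrative sketch only, not part of the header above: a minimal example
 * of how a QoS policy might wire itself into the rq_qos chain, assuming the
 * declarations in this header.  The names "example_qos", "example_ops" and
 * "example_qos_init" are invented for the example; it reuses RQ_QOS_CGROUP
 * as the id purely for illustration, whereas a real policy would add its own
 * entry to enum rq_qos_id.  Only rq_qos_add(), rq_qos_del(), rq_wait_init()
 * and the rq_qos_ops callback signatures come from the header itself.
 */
#include <linux/slab.h>

struct example_qos {
	struct rq_qos rqos;		/* embedded, chained off the queue */
	struct rq_wait rqw;		/* inflight accounting for throttling */
};

static void example_throttle(struct rq_qos *rqos, struct bio *bio,
			     spinlock_t *lock)
{
	/*
	 * A real policy would sleep here on eq->rqw.wait until
	 * rq_wait_inc_below() admits the bio under its depth limit.
	 */
}

static void example_exit(struct rq_qos *rqos)
{
	struct example_qos *eq = container_of(rqos, struct example_qos, rqos);

	/* Unlink from the queue before freeing the policy state. */
	rq_qos_del(rqos->q, rqos);
	kfree(eq);
}

static struct rq_qos_ops example_ops = {
	.throttle	= example_throttle,
	.exit		= example_exit,
};

static int example_qos_init(struct request_queue *q)
{
	struct example_qos *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq)
		return -ENOMEM;

	rq_wait_init(&eq->rqw);
	eq->rqos.id = RQ_QOS_CGROUP;	/* placeholder id for the sketch */
	eq->rqos.ops = &example_ops;
	eq->rqos.q = q;
	rq_qos_add(q, &eq->rqos);
	return 0;
}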