#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

/* Request set-up and tag teardown helpers */
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void __blk_queue_free_tags(struct request_queue *q);

/* Deferred unplug work item and the timer callback that schedules it */
void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);

struct io_context *current_io_context(gfp_t gfp_flags, int node);

/* bio/request merge and segment accounting helpers */
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested. It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define rq_for_each_integrity_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)				\
		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)

#endif /* BLK_DEV_INTEGRITY */

#endif
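
/*
 * Illustrative only, not compiled (hence the #if 0): a minimal sketch of
 * how a caller might apply the BLK_BATCH_TIME/BLK_BATCH_REQ rules defined
 * above. The io_context field names (nr_batch_requests, last_waited) are
 * assumed from this era's struct io_context; treat this as a sketch, not
 * as the blk-core implementation.
 */
#if 0
static int example_ioc_batching(struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * A task keeps its batching privilege while it still has request
	 * budget left and its BLK_BATCH_TIME window has not yet expired.
	 */
	return ioc->nr_batch_requests > 0 &&
	       time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME);
}

static void example_ioc_set_batching(struct io_context *ioc)
{
	/* Grant a fresh batch: BLK_BATCH_REQ requests within BLK_BATCH_TIME. */
	ioc->nr_batch_requests = BLK_BATCH_REQ;
	ioc->last_waited = jiffies;
}
#endif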
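
/*
 * Illustrative only, not compiled: a sketch of the hysteresis the two
 * congestion-threshold inlines above provide. Going up, a queue is marked
 * congested once usage reaches nr_congestion_on; coming down, it is only
 * cleared below the lower nr_congestion_off, so the state cannot flap on
 * every request. blk_set_queue_congested()/blk_clear_queue_congested()
 * are assumed available from <linux/blkdev.h>.
 */
#if 0
static void example_update_congestion(struct request_queue *q, int rw,
				      int used)
{
	if (used + 1 >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, rw);
	else if (used < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, rw);
}
#endif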
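
/*
 * Illustrative only, not compiled: one way to walk a request's integrity
 * segments with rq_for_each_integrity_segment() above. Assumes
 * CONFIG_BLK_DEV_INTEGRITY is enabled and that struct req_iterator (with
 * the .bio and .i fields the macro expands to) comes from <linux/blkdev.h>.
 */
#if 0
static unsigned int example_integrity_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int bytes = 0;

	/* Sum the length of every integrity vector attached to the request. */
	rq_for_each_integrity_segment(bvec, rq, iter)
		bytes += bvec->bv_len;

	return bytes;
}
#endif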