#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic);	/* destructor */
	void (*exit)(struct as_io_context *aic);	/* called on task exit */

	unsigned long state;
	atomic_t nr_queued;		/* queued reads & sync writes */
	atomic_t nr_dispatched;		/* number of requests dispatched to the driver */

	/* I/O history tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	void *key;
	unsigned long dead_key;

	struct cfq_queue *cfqq[2];	/* async and sync queues */

	struct io_context *ioc;		/* owning io_context */

	unsigned long last_end_request;
	sector_t last_request_pos;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	u64 seek_total;
	sector_t seek_mean;

	struct list_head queue_list;
	struct hlist_node cic_list;	/* member of the io_context's cic_list */

	void (*dtor)(struct io_context *);	/* destructor */
	void (*exit)(struct io_context *);	/* called on task exit */

	struct rcu_head rcu_head;
};

/*
 * I/O subsystem state of the associated process. It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
struct io_context {
	atomic_t refcount;
	atomic_t nr_tasks;	/* number of tasks sharing this context */

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;
	unsigned short ioprio_changed;

	/*
	 * For request batching
	 */
	unsigned long last_waited;	/* Time last woken after wait for request */
	int nr_batch_requests;		/* Number of requests left in the batch */

	struct as_io_context *aic;
	struct radix_tree_root radix_root;	/* per-queue cfq_io_contexts */
	struct hlist_head cic_list;
	void *ioc_data;
};

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * If the ref count is already zero, don't allow sharing: the ioc
	 * is going away and we would be racing with its release.
	 */
	if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}

#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
static inline void exit_io_context(void)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}
#endif

#endif
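
/*
 * Illustrative sketch (hypothetical, not part of this header): how a caller
 * such as a fork path might try to share an existing io_context and fall
 * back to allocating a fresh one.  ioc_task_link() only succeeds while the
 * refcount is non-zero, so a context that is already being released is
 * never re-shared.  The helper name and the task_struct fields used here
 * are assumptions for illustration only.
 */
#if 0
static int share_or_alloc_io_context(struct task_struct *parent,
				     struct task_struct *child)
{
	struct io_context *ioc = ioc_task_link(parent->io_context);

	if (ioc) {
		/* Got a reference; both tasks now share the same context. */
		child->io_context = ioc;
		return 0;
	}

	/* Parent's context was being torn down; allocate a private one. */
	child->io_context = alloc_io_context(GFP_KERNEL, -1);
	if (!child->io_context)
		return -ENOMEM;

	return 0;
}
#endif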