/*
 * zfcp device driver
 *
 * Header file for zfcp qdio interface
 *
 * Copyright IBM Corp. 2010
 */

#ifndef ZFCP_QDIO_H
#define ZFCP_QDIO_H

#include <asm/qdio.h>

#define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE

/* Max SBALs for chaining */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36

/**
 * struct zfcp_qdio - basic qdio data structure
 * @res_q: response queue
 * @req_q: request queue
 * @req_q_idx: index of next free buffer
 * @req_q_free: number of free buffers in queue
 * @stat_lock: lock to protect req_q_util and req_q_time
 * @req_q_lock: lock to serialize access to request queue
 * @req_q_time: time of last fill level change
 * @req_q_util: used for accounting
 * @req_q_full: queue full incidents
 * @req_q_wq: used to wait for SBAL availability
 * @adapter: adapter used in conjunction with this qdio structure
 * @max_sbale_per_sbal: maximum number of SBALEs per SBAL
 * @max_sbale_per_req: maximum number of SBALEs per request
 */
struct zfcp_qdio {
	struct qdio_buffer	*res_q[QDIO_MAX_BUFFERS_PER_Q];
	struct qdio_buffer	*req_q[QDIO_MAX_BUFFERS_PER_Q];
	u8			req_q_idx;
	atomic_t		req_q_free;
	spinlock_t		stat_lock;
	spinlock_t		req_q_lock;
	unsigned long long	req_q_time;
	u64			req_q_util;
	atomic_t		req_q_full;
	wait_queue_head_t	req_q_wq;
	struct zfcp_adapter	*adapter;
	u16			max_sbale_per_sbal;
	u16			max_sbale_per_req;
};

/**
 * struct zfcp_qdio_req - qdio queue related values for a request
 * @sbtype: sbal type flags for sbale 0
 * @sbal_number: number of sbals used by this request
 * @sbal_first: first sbal for this request
 * @sbal_last: last sbal for this request
 * @sbal_limit: last possible sbal for this request
 * @sbale_curr: index of the current sbale within the last sbal
 * @qdio_outb_usage: usage of outbound queue
 */
struct zfcp_qdio_req {
	u8	sbtype;
	u8	sbal_number;
	u8	sbal_first;
	u8	sbal_last;
	u8	sbal_limit;
	u8	sbale_curr;
	u16	qdio_outb_usage;
};

/**
 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[0];
}

/**
 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
}
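
/*
 * Illustrative sketch, not part of the original interface: the request
 * queue in struct zfcp_qdio is a ring of QDIO_MAX_BUFFERS_PER_Q SBALs,
 * so an index that runs past the end has to wrap around.  The modulo
 * pattern below is the same one zfcp_qdio_req_init() and
 * zfcp_qdio_sbal_limit() use to compute sbal_limit; the helper name is
 * hypothetical and only spells the arithmetic out.
 */
static inline u8 zfcp_qdio_example_wrap_idx(u8 idx, int offset)
{
	/* advance by offset SBALs and wrap at the end of the ring */
	return (idx + offset) % QDIO_MAX_BUFFERS_PER_Q;
}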

/**
 * zfcp_qdio_req_init - initialize qdio request
 * @qdio: request queue where to start putting the request
 * @q_req: the qdio request to start
 * @req_id: The request id
 * @sbtype: type flags to set for all sbals
 * @data: First data block
 * @len: Length of first data block
 *
 * This is the start of putting the request into the queue, the last
 * step is passing the request to zfcp_qdio_send. The request queue
 * lock must be held during the whole process from init to send.
 */
static inline
void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			unsigned long req_id, u8 sbtype, void *data, u32 len)
{
	struct qdio_buffer_element *sbale;
	int count = min(atomic_read(&qdio->req_q_free),
			ZFCP_QDIO_MAX_SBALS_PER_REQ);

	q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
	q_req->sbal_number = 1;
	q_req->sbtype = sbtype;
	q_req->sbale_curr = 1;
	q_req->sbal_limit = (q_req->sbal_first + count - 1)
					% QDIO_MAX_BUFFERS_PER_Q;

	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->addr = (void *) req_id;
	sbale->eflags = 0;
	sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;

	if (unlikely(!data))
		return;
	sbale++;
	sbale->addr = data;
	sbale->length = len;
}

/**
 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @data: pointer to the data block
 * @len: length of the data block
 *
 * This is only required for single sbal requests, calling it when
 * wrapping around to the next sbal is a bug.
 */
static inline
void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			 void *data, u32 len)
{
	struct qdio_buffer_element *sbale;

	BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
	q_req->sbale_curr++;
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->addr = data;
	sbale->length = len;
}

/**
 * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
			      struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
}
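
/*
 * Illustrative sketch, not part of the original header: a minimal version
 * of the documented sequence for building and submitting a request with
 * two data blocks.  The caller is assumed to hold qdio->req_q_lock from
 * init to send, as required by the zfcp_qdio_req_init() comment above.
 * zfcp_qdio_send() is the driver's submission routine; its prototype is
 * repeated here (assuming the declaration from zfcp_ext.h) only to keep
 * the sketch self-contained.  The helper name zfcp_qdio_example_send()
 * and its parameters are hypothetical.
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req);

static inline
int zfcp_qdio_example_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			   unsigned long req_id, void *hdr, u32 hdr_len,
			   void *payload, u32 payload_len)
{
	/* pick the SBAL, store the request id and the first data block */
	zfcp_qdio_req_init(qdio, q_req, req_id, SBAL_SFLAGS0_TYPE_WRITE,
			   hdr, hdr_len);

	/* append a second data block to the same SBAL */
	zfcp_qdio_fill_next(qdio, q_req, payload, payload_len);

	/* flag the last SBALE before handing the request to the hardware */
	zfcp_qdio_set_sbale_last(qdio, q_req);

	return zfcp_qdio_send(qdio, q_req);
}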

/**
 * zfcp_qdio_sg_one_sbale - check if one sbale is enough for sg data
 * @sg: The scatterlist where to check the data size
 *
 * Returns: 1 when one sbale is enough for the data in the scatterlist,
 *	    0 if not.
 */
static inline
int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
{
	return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
}

/**
 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
				  struct zfcp_qdio_req *q_req)
{
	q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
}

/**
 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @max_sbals: maximum number of SBALs allowed
 */
static inline
void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
			  struct zfcp_qdio_req *q_req, int max_sbals)
{
	int count = min(atomic_read(&qdio->req_q_free), max_sbals);

	q_req->sbal_limit = (q_req->sbal_first + count - 1) %
				QDIO_MAX_BUFFERS_PER_Q;
}

/**
 * zfcp_qdio_set_data_div - set data division count
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @count: The data division count
 */
static inline
void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
			    struct zfcp_qdio_req *q_req, u32 count)
{
	struct qdio_buffer_element *sbale;

	sbale = qdio->req_q[q_req->sbal_first]->element;
	sbale->length = count;
}

/**
 * zfcp_qdio_real_bytes - count bytes used by a scatterlist
 * @sg: pointer to struct scatterlist
 *
 * Returns: the number of bytes used by the scatterlist
 */
static inline
unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
{
	unsigned int real_bytes = 0;

	for (; sg; sg = sg_next(sg))
		real_bytes += sg->length;

	return real_bytes;
}

/**
 * zfcp_qdio_set_scount - set SBAL count value
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = qdio->req_q[q_req->sbal_first]->element;
	sbale->scount = q_req->sbal_number - 1;
}

#endif /* ZFCP_QDIO_H */