1 /* 2 * zfcp device driver 3 * 4 * Setup and helper functions to access QDIO. 5 * 6 * Copyright IBM Corporation 2002, 2008 7 */ 8 9 #include "zfcp_ext.h" 10 11 /* FIXME(tune): free space should be one max. SBAL chain plus what? */ 12 #define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \ 13 - (ZFCP_MAX_SBALS_PER_REQ + 4)) 14 15 static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) 16 { 17 int pos; 18 19 for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) { 20 sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); 21 if (!sbal[pos]) 22 return -ENOMEM; 23 } 24 for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++) 25 if (pos % QBUFF_PER_PAGE) 26 sbal[pos] = sbal[pos - 1] + 1; 27 return 0; 28 } 29 30 static volatile struct qdio_buffer_element * 31 zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) 32 { 33 return &q->sbal[sbal_idx]->element[sbale_idx]; 34 } 35 36 /** 37 * zfcp_qdio_free - free memory used by request- and resposne queue 38 * @adapter: pointer to the zfcp_adapter structure 39 */ 40 void zfcp_qdio_free(struct zfcp_adapter *adapter) 41 { 42 struct qdio_buffer **sbal_req, **sbal_resp; 43 int p; 44 45 if (adapter->ccw_device) 46 qdio_free(adapter->ccw_device); 47 48 sbal_req = adapter->req_q.sbal; 49 sbal_resp = adapter->resp_q.sbal; 50 51 for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { 52 free_page((unsigned long) sbal_req[p]); 53 free_page((unsigned long) sbal_resp[p]); 54 } 55 } 56 57 static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id) 58 { 59 dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n"); 60 61 zfcp_erp_adapter_reopen(adapter, 62 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 63 ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); 64 } 65 66 static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status, 67 unsigned int qdio_err, unsigned int siga_err, 68 unsigned int queue_no, int first, int count, 69 unsigned long parm) 70 { 71 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; 72 struct zfcp_qdio_queue *queue = &adapter->req_q; 73 74 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { 75 zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, 76 first, count); 77 zfcp_qdio_handler_error(adapter, 140); 78 return; 79 } 80 81 /* cleanup all SBALs being program-owned now */ 82 zfcp_qdio_zero_sbals(queue->sbal, first, count); 83 84 atomic_add(count, &queue->count); 85 wake_up(&adapter->request_wq); 86 } 87 88 static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, 89 unsigned long req_id, int sbal_idx) 90 { 91 struct zfcp_fsf_req *fsf_req; 92 unsigned long flags; 93 94 spin_lock_irqsave(&adapter->req_list_lock, flags); 95 fsf_req = zfcp_reqlist_find(adapter, req_id); 96 97 if (!fsf_req) 98 /* 99 * Unknown request means that we have potentially memory 100 * corruption and must stop the machine immediatly. 101 */ 102 panic("error: unknown request id (%lx) on adapter %s.\n", 103 req_id, zfcp_get_busid_by_adapter(adapter)); 104 105 zfcp_reqlist_remove(adapter, fsf_req); 106 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 107 108 fsf_req->sbal_response = sbal_idx; 109 zfcp_fsf_req_complete(fsf_req); 110 } 111 112 static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) 113 { 114 struct zfcp_qdio_queue *queue = &adapter->resp_q; 115 struct ccw_device *cdev = adapter->ccw_device; 116 u8 count, start = queue->first; 117 unsigned int retval; 118 119 count = atomic_read(&queue->count) + processed; 120 121 retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, 122 0, start, count, NULL); 123 124 if (unlikely(retval)) { 125 atomic_set(&queue->count, count); 126 /* FIXME: Recover this with an adapter reopen? */ 127 } else { 128 queue->first += count; 129 queue->first %= QDIO_MAX_BUFFERS_PER_Q; 130 atomic_set(&queue->count, 0); 131 } 132 } 133 134 static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status, 135 unsigned int qdio_err, unsigned int siga_err, 136 unsigned int queue_no, int first, int count, 137 unsigned long parm) 138 { 139 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; 140 struct zfcp_qdio_queue *queue = &adapter->resp_q; 141 volatile struct qdio_buffer_element *sbale; 142 int sbal_idx, sbale_idx, sbal_no; 143 144 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { 145 zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, 146 first, count); 147 zfcp_qdio_handler_error(adapter, 147); 148 return; 149 } 150 151 /* 152 * go through all SBALs from input queue currently 153 * returned by QDIO layer 154 */ 155 for (sbal_no = 0; sbal_no < count; sbal_no++) { 156 sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; 157 158 /* go through all SBALEs of SBAL */ 159 for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER; 160 sbale_idx++) { 161 sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx); 162 zfcp_qdio_reqid_check(adapter, 163 (unsigned long) sbale->addr, 164 sbal_idx); 165 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) 166 break; 167 }; 168 169 if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) 170 dev_warn(&adapter->ccw_device->dev, 171 "Protocol violation by adapter. " 172 "Continuing operations.\n"); 173 } 174 175 /* 176 * put range of SBALs back to response queue 177 * (including SBALs which have already been free before) 178 */ 179 zfcp_qdio_resp_put_back(adapter, count); 180 } 181 182 /** 183 * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req 184 * @fsf_req: pointer to struct fsf_req 185 * Returns: pointer to qdio_buffer_element (SBALE) structure 186 */ 187 volatile struct qdio_buffer_element * 188 zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) 189 { 190 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); 191 } 192 193 /** 194 * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req 195 * @fsf_req: pointer to struct fsf_req 196 * Returns: pointer to qdio_buffer_element (SBALE) structure 197 */ 198 volatile struct qdio_buffer_element * 199 zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req) 200 { 201 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 202 req->sbale_curr); 203 } 204 205 static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) 206 { 207 int count = atomic_read(&fsf_req->adapter->req_q.count); 208 count = min(count, max_sbals); 209 fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1) 210 % QDIO_MAX_BUFFERS_PER_Q; 211 } 212 213 static volatile struct qdio_buffer_element * 214 zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 215 { 216 volatile struct qdio_buffer_element *sbale; 217 218 /* set last entry flag in current SBALE of current SBAL */ 219 sbale = zfcp_qdio_sbale_curr(fsf_req); 220 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 221 222 /* don't exceed last allowed SBAL */ 223 if (fsf_req->sbal_last == fsf_req->sbal_limit) 224 return NULL; 225 226 /* set chaining flag in first SBALE of current SBAL */ 227 sbale = zfcp_qdio_sbale_req(fsf_req); 228 sbale->flags |= SBAL_FLAGS0_MORE_SBALS; 229 230 /* calculate index of next SBAL */ 231 fsf_req->sbal_last++; 232 fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; 233 234 /* keep this requests number of SBALs up-to-date */ 235 fsf_req->sbal_number++; 236 237 /* start at first SBALE of new SBAL */ 238 fsf_req->sbale_curr = 0; 239 240 /* set storage-block type for new SBAL */ 241 sbale = zfcp_qdio_sbale_curr(fsf_req); 242 sbale->flags |= sbtype; 243 244 return sbale; 245 } 246 247 static volatile struct qdio_buffer_element * 248 zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 249 { 250 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 251 return zfcp_qdio_sbal_chain(fsf_req, sbtype); 252 fsf_req->sbale_curr++; 253 return zfcp_qdio_sbale_curr(fsf_req); 254 } 255 256 static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req) 257 { 258 struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal; 259 int first = fsf_req->sbal_first; 260 int last = fsf_req->sbal_last; 261 int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % 262 QDIO_MAX_BUFFERS_PER_Q + 1; 263 zfcp_qdio_zero_sbals(sbal, first, count); 264 } 265 266 static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, 267 unsigned int sbtype, void *start_addr, 268 unsigned int total_length) 269 { 270 volatile struct qdio_buffer_element *sbale; 271 unsigned long remaining, length; 272 void *addr; 273 274 /* split segment up */ 275 for (addr = start_addr, remaining = total_length; remaining > 0; 276 addr += length, remaining -= length) { 277 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); 278 if (!sbale) { 279 zfcp_qdio_undo_sbals(fsf_req); 280 return -EINVAL; 281 } 282 283 /* new piece must not exceed next page boundary */ 284 length = min(remaining, 285 (PAGE_SIZE - ((unsigned long)addr & 286 (PAGE_SIZE - 1)))); 287 sbale->addr = addr; 288 sbale->length = length; 289 } 290 return 0; 291 } 292 293 /** 294 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list 295 * @fsf_req: request to be processed 296 * @sbtype: SBALE flags 297 * @sg: scatter-gather list 298 * @max_sbals: upper bound for number of SBALs to be used 299 * Returns: number of bytes, or error (negativ) 300 */ 301 int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 302 struct scatterlist *sg, int max_sbals) 303 { 304 volatile struct qdio_buffer_element *sbale; 305 int retval, bytes = 0; 306 307 /* figure out last allowed SBAL */ 308 zfcp_qdio_sbal_limit(fsf_req, max_sbals); 309 310 /* set storage-block type for this request */ 311 sbale = zfcp_qdio_sbale_req(fsf_req); 312 sbale->flags |= sbtype; 313 314 for (; sg; sg = sg_next(sg)) { 315 retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg), 316 sg->length); 317 if (retval < 0) 318 return retval; 319 bytes += sg->length; 320 } 321 322 /* assume that no other SBALEs are to follow in the same SBAL */ 323 sbale = zfcp_qdio_sbale_curr(fsf_req); 324 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 325 326 return bytes; 327 } 328 329 /** 330 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO 331 * @fsf_req: pointer to struct zfcp_fsf_req 332 * Returns: 0 on success, error otherwise 333 */ 334 int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) 335 { 336 struct zfcp_adapter *adapter = fsf_req->adapter; 337 struct zfcp_qdio_queue *req_q = &adapter->req_q; 338 int first = fsf_req->sbal_first; 339 int count = fsf_req->sbal_number; 340 int retval, pci, pci_batch; 341 volatile struct qdio_buffer_element *sbale; 342 343 /* acknowledgements for transferred buffers */ 344 pci_batch = req_q->pci_batch + count; 345 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) { 346 pci_batch %= ZFCP_QDIO_PCI_INTERVAL; 347 pci = first + count - (pci_batch + 1); 348 pci %= QDIO_MAX_BUFFERS_PER_Q; 349 sbale = zfcp_qdio_sbale(req_q, pci, 0); 350 sbale->flags |= SBAL_FLAGS0_PCI; 351 } 352 353 retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first, 354 count, NULL); 355 if (unlikely(retval)) { 356 zfcp_qdio_zero_sbals(req_q->sbal, first, count); 357 return retval; 358 } 359 360 /* account for transferred buffers */ 361 atomic_sub(count, &req_q->count); 362 req_q->first += count; 363 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; 364 req_q->pci_batch = pci_batch; 365 return 0; 366 } 367 368 /** 369 * zfcp_qdio_zero_sbals - zero all sbals of the specified area and queue 370 * @buf: pointer to array of SBALS 371 * @first: integer specifying the SBAL number to start 372 * @count: integer specifying the number of SBALS to process 373 */ 374 void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int count) 375 { 376 int i, sbal_idx; 377 378 for (i = first; i < first + count; i++) { 379 sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q; 380 memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer)); 381 } 382 } 383 384 /** 385 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data 386 * @adapter: pointer to struct zfcp_adapter 387 * Returns: -ENOMEM on memory allocation error or return value from 388 * qdio_allocate 389 */ 390 int zfcp_qdio_allocate(struct zfcp_adapter *adapter) 391 { 392 struct qdio_initialize *init_data; 393 394 if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || 395 zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) 396 return -ENOMEM; 397 398 init_data = &adapter->qdio_init_data; 399 400 init_data->cdev = adapter->ccw_device; 401 init_data->q_format = QDIO_ZFCP_QFMT; 402 memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8); 403 ASCEBC(init_data->adapter_name, 8); 404 init_data->qib_param_field_format = 0; 405 init_data->qib_param_field = NULL; 406 init_data->input_slib_elements = NULL; 407 init_data->output_slib_elements = NULL; 408 init_data->min_input_threshold = 1; 409 init_data->max_input_threshold = 5000; 410 init_data->min_output_threshold = 1; 411 init_data->max_output_threshold = 1000; 412 init_data->no_input_qs = 1; 413 init_data->no_output_qs = 1; 414 init_data->input_handler = zfcp_qdio_int_resp; 415 init_data->output_handler = zfcp_qdio_int_req; 416 init_data->int_parm = (unsigned long) adapter; 417 init_data->flags = QDIO_INBOUND_0COPY_SBALS | 418 QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; 419 init_data->input_sbal_addr_array = 420 (void **) (adapter->resp_q.sbal); 421 init_data->output_sbal_addr_array = 422 (void **) (adapter->req_q.sbal); 423 424 return qdio_allocate(init_data); 425 } 426 427 /** 428 * zfcp_close_qdio - close qdio queues for an adapter 429 */ 430 void zfcp_qdio_close(struct zfcp_adapter *adapter) 431 { 432 struct zfcp_qdio_queue *req_q; 433 int first, count; 434 435 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 436 return; 437 438 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 439 req_q = &adapter->req_q; 440 write_lock_irq(&req_q->lock); 441 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 442 write_unlock_irq(&req_q->lock); 443 444 while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) 445 == -EINPROGRESS) 446 ssleep(1); 447 448 /* cleanup used outbound sbals */ 449 count = atomic_read(&req_q->count); 450 if (count < QDIO_MAX_BUFFERS_PER_Q) { 451 first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q; 452 count = QDIO_MAX_BUFFERS_PER_Q - count; 453 zfcp_qdio_zero_sbals(req_q->sbal, first, count); 454 } 455 req_q->first = 0; 456 atomic_set(&req_q->count, 0); 457 req_q->pci_batch = 0; 458 adapter->resp_q.first = 0; 459 atomic_set(&adapter->resp_q.count, 0); 460 } 461 462 /** 463 * zfcp_qdio_open - prepare and initialize response queue 464 * @adapter: pointer to struct zfcp_adapter 465 * Returns: 0 on success, otherwise -EIO 466 */ 467 int zfcp_qdio_open(struct zfcp_adapter *adapter) 468 { 469 volatile struct qdio_buffer_element *sbale; 470 int cc; 471 472 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 473 return -EIO; 474 475 if (qdio_establish(&adapter->qdio_init_data)) { 476 dev_err(&adapter->ccw_device->dev, 477 "Establish of QDIO queues failed.\n"); 478 return -EIO; 479 } 480 481 if (qdio_activate(adapter->ccw_device, 0)) { 482 dev_err(&adapter->ccw_device->dev, 483 "Activate of QDIO queues failed.\n"); 484 goto failed_qdio; 485 } 486 487 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 488 sbale = &(adapter->resp_q.sbal[cc]->element[0]); 489 sbale->length = 0; 490 sbale->flags = SBAL_FLAGS_LAST_ENTRY; 491 sbale->addr = NULL; 492 } 493 494 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, 495 QDIO_MAX_BUFFERS_PER_Q, NULL)) { 496 dev_err(&adapter->ccw_device->dev, 497 "Init of QDIO response queue failed.\n"); 498 goto failed_qdio; 499 } 500 501 /* set index of first avalable SBALS / number of available SBALS */ 502 adapter->req_q.first = 0; 503 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); 504 adapter->req_q.pci_batch = 0; 505 506 return 0; 507 508 failed_qdio: 509 while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) 510 == -EINPROGRESS) 511 ssleep(1); 512 513 return -EIO; 514 } 515