/*
 * linux/drivers/s390/scsi/zfcp_qdio.c
 *
 * FCP adapter driver for IBM eServer zSeries
 *
 * QDIO related routines
 *
 * (C) Copyright IBM Corp. 2002, 2004
 *
 * Authors:
 *      Martin Peschke <mpeschke@de.ibm.com>
 *      Raimund Schroeder <raimund.schroeder@de.ibm.com>
 *      Wolfgang Taphorn
 *      Heiko Carstens <heiko.carstens@de.ibm.com>
 *      Andreas Herrmann <aherrman@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define ZFCP_QDIO_C_REVISION "$Revision: 1.20 $"

#include "zfcp_ext.h"

static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
	(struct zfcp_qdio_queue *, int, int);
static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
	(struct zfcp_fsf_req *, int, int);
static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
	(struct zfcp_fsf_req *, unsigned long);
static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
	(struct zfcp_fsf_req *, unsigned long);
static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
static inline void zfcp_qdio_sbale_fill
	(struct zfcp_fsf_req *, unsigned long, void *, int);
static inline int zfcp_qdio_sbals_from_segment
	(struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
static inline int zfcp_qdio_sbals_from_buffer
	(struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);

static qdio_handler_t zfcp_qdio_request_handler;
static qdio_handler_t zfcp_qdio_response_handler;
static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
					 unsigned int,
					 unsigned int, unsigned int);

#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO

/*
 * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
 * array in the adapter struct.
 * Cur_buf is the pointer array and count can be any number of required
 * buffers; the page-fitting arithmetic is done entirely within this function
 * (e.g. with 4 KB pages and a 256-byte struct qdio_buffer, sixteen buffers
 * share one page, so a fresh zeroed page is allocated on every 16th
 * iteration and the following buffer pointers simply point into that page).
 *
 * returns:	number of buffers allocated
 * locks:	must only be called with zfcp_data.config_sema taken
 */
static int
zfcp_qdio_buffers_enqueue(struct qdio_buffer **cur_buf, int count)
{
	int buf_pos;
	int qdio_buffers_per_page;
	int page_pos = 0;
	struct qdio_buffer *first_in_page = NULL;

	qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
	ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);

	for (buf_pos = 0; buf_pos < count; buf_pos++) {
		if (page_pos == 0) {
			cur_buf[buf_pos] = (struct qdio_buffer *)
			    get_zeroed_page(GFP_KERNEL);
			if (cur_buf[buf_pos] == NULL) {
				ZFCP_LOG_INFO("error: allocation of "
					      "QDIO buffer failed\n");
				goto out;
			}
			first_in_page = cur_buf[buf_pos];
		} else {
			cur_buf[buf_pos] = first_in_page + page_pos;
		}
		/* was initialised to zero */
		page_pos++;
		page_pos %= qdio_buffers_per_page;
	}
 out:
	return buf_pos;
}

/*
 * Frees BUFFER memory for each of the pointers of the struct qdio_buffer
 * array in the adapter struct. Cur_buf is the pointer array, and count can
 * be any number of buffers in the array that should be freed, starting from
 * buffer 0.
 *
 * locks: must only be called with zfcp_data.config_sema taken
 */
static void
zfcp_qdio_buffers_dequeue(struct qdio_buffer **cur_buf, int count)
{
	int buf_pos;
	int qdio_buffers_per_page;

	qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
	ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);

	for (buf_pos = 0; buf_pos < count; buf_pos += qdio_buffers_per_page)
		free_page((unsigned long) cur_buf[buf_pos]);
	return;
}

/* locks: must only be called with zfcp_data.config_sema taken */
int
zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
{
	int buffer_count;
	int retval = 0;

	buffer_count =
	    zfcp_qdio_buffers_enqueue(&(adapter->request_queue.buffer[0]),
				      QDIO_MAX_BUFFERS_PER_Q);
	if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
		ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for request "
			       "queue\n", buffer_count);
		zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
					  buffer_count);
		retval = -ENOMEM;
		goto out;
	}

	buffer_count =
	    zfcp_qdio_buffers_enqueue(&(adapter->response_queue.buffer[0]),
				      QDIO_MAX_BUFFERS_PER_Q);
	if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
		ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for response "
			       "queue\n", buffer_count);
		zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
					  buffer_count);
		ZFCP_LOG_TRACE("freeing request_queue buffers\n");
		zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
					  QDIO_MAX_BUFFERS_PER_Q);
		retval = -ENOMEM;
		goto out;
	}
 out:
	return retval;
}

/* locks: must only be called with zfcp_data.config_sema taken */
void
zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
{
	ZFCP_LOG_TRACE("freeing request_queue buffers\n");
	zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
				  QDIO_MAX_BUFFERS_PER_Q);

	ZFCP_LOG_TRACE("freeing response_queue buffers\n");
	zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
				  QDIO_MAX_BUFFERS_PER_Q);
}
int
zfcp_qdio_allocate(struct zfcp_adapter *adapter)
{
	struct qdio_initialize *init_data;

	init_data = &adapter->qdio_init_data;

	init_data->cdev = adapter->ccw_device;
	init_data->q_format = QDIO_SCSI_QFMT;
	memcpy(init_data->adapter_name, &adapter->name, 8);
	init_data->qib_param_field_format = 0;
	init_data->qib_param_field = NULL;
	init_data->input_slib_elements = NULL;
	init_data->output_slib_elements = NULL;
	init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD;
	init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD;
	init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD;
	init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD;
	init_data->no_input_qs = 1;
	init_data->no_output_qs = 1;
	init_data->input_handler = zfcp_qdio_response_handler;
	init_data->output_handler = zfcp_qdio_request_handler;
	init_data->int_parm = (unsigned long) adapter;
	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
	    QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
	init_data->input_sbal_addr_array =
	    (void **) (adapter->response_queue.buffer);
	init_data->output_sbal_addr_array =
	    (void **) (adapter->request_queue.buffer);

	return qdio_allocate(init_data);
}

/*
 * function:	zfcp_qdio_handler_error_check
 *
 * purpose:	called by the request and response handlers to determine
 *		the error condition
 *
 * returns:	error flag
 */
static inline int
zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
			      unsigned int status,
			      unsigned int qdio_error, unsigned int siga_error)
{
	int retval = 0;

	if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)) {
		if (status & QDIO_STATUS_INBOUND_INT) {
			ZFCP_LOG_TRACE("status is"
				       " QDIO_STATUS_INBOUND_INT\n");
		}
		if (status & QDIO_STATUS_OUTBOUND_INT) {
			ZFCP_LOG_TRACE("status is"
				       " QDIO_STATUS_OUTBOUND_INT\n");
		}
	}
	if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
		retval = -EIO;

		ZFCP_LOG_FLAGS(1, "QDIO_STATUS_LOOK_FOR_ERROR\n");

		ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, "
			      "qdio_error=0x%x, siga_error=0x%x)\n",
			      status, qdio_error, siga_error);

		if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
			ZFCP_LOG_FLAGS(2,
				       "QDIO_STATUS_ACTIVATE_CHECK_CONDITION\n");
		}
		if (status & QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR) {
			ZFCP_LOG_FLAGS(2,
				       "QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR\n");
		}
		if (status & QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR) {
			ZFCP_LOG_FLAGS(2,
				       "QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR\n");
		}

		if (siga_error & QDIO_SIGA_ERROR_ACCESS_EXCEPTION) {
			ZFCP_LOG_FLAGS(2, "QDIO_SIGA_ERROR_ACCESS_EXCEPTION\n");
		}

		if (siga_error & QDIO_SIGA_ERROR_B_BIT_SET) {
			ZFCP_LOG_FLAGS(2, "QDIO_SIGA_ERROR_B_BIT_SET\n");
		}

		switch (qdio_error) {
		case 0:
			ZFCP_LOG_FLAGS(3, "QDIO_OK");
			break;
		case SLSB_P_INPUT_ERROR:
			ZFCP_LOG_FLAGS(1, "SLSB_P_INPUT_ERROR\n");
			break;
		case SLSB_P_OUTPUT_ERROR:
			ZFCP_LOG_FLAGS(1, "SLSB_P_OUTPUT_ERROR\n");
			break;
		default:
			ZFCP_LOG_NORMAL("bug: unknown QDIO error 0x%x\n",
					qdio_error);
			break;
		}
		/* Restarting IO on the failed adapter from scratch */
		debug_text_event(adapter->erp_dbf, 1, "qdio_err");
		/*
		 * Since we have been using this adapter, it is safe to assume
		 * that it is not failed but recoverable. The card seems to
		 * report link-up events by self-initiated queue shutdown.
		 * That is why we need to clear the link-down flag
		 * which is set again in case we have missed by a mile.
		 */
		zfcp_erp_adapter_reopen(
			adapter,
			ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
			ZFCP_STATUS_COMMON_ERP_FAILED);
	}
	return retval;
}

/*
 * function:	zfcp_qdio_request_handler
 *
 * purpose:	is called by QDIO layer for completed SBALs in request queue
 *
 * returns:	(void)
 */
static void
zfcp_qdio_request_handler(struct ccw_device *ccw_device,
			  unsigned int status,
			  unsigned int qdio_error,
			  unsigned int siga_error,
			  unsigned int queue_number,
			  int first_element,
			  int elements_processed,
			  unsigned long int_parm)
{
	struct zfcp_adapter *adapter;
	struct zfcp_qdio_queue *queue;

	adapter = (struct zfcp_adapter *) int_parm;
	queue = &adapter->request_queue;

	ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n",
		       zfcp_get_busid_by_adapter(adapter),
		       first_element, elements_processed);

	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
						   siga_error)))
		goto out;
	/*
	 * we stored address of struct zfcp_adapter data structure
	 * associated with irq in int_parm
	 */

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed);

	/* increase free space in outbound queue */
	atomic_add(elements_processed, &queue->free_count);
	ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count));
	wake_up(&adapter->request_wq);
	ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n",
		       elements_processed, atomic_read(&queue->free_count));
 out:
	return;
}

/*
 * function:	zfcp_qdio_response_handler
 *
 * purpose:	is called by QDIO layer for completed SBALs in response queue
 *
 * returns:	(void)
 */
static void
zfcp_qdio_response_handler(struct ccw_device *ccw_device,
			   unsigned int status,
			   unsigned int qdio_error,
			   unsigned int siga_error,
			   unsigned int queue_number,
			   int first_element,
			   int elements_processed,
			   unsigned long int_parm)
{
	struct zfcp_adapter *adapter;
	struct zfcp_qdio_queue *queue;
	int buffer_index;
	int i;
	struct qdio_buffer *buffer;
	int retval = 0;
	u8 count;
	u8 start;
	volatile struct qdio_buffer_element *buffere = NULL;
	int buffere_index;

	adapter = (struct zfcp_adapter *) int_parm;
	queue = &adapter->response_queue;

	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
						   siga_error)))
		goto out;

	/*
	 * we stored address of struct zfcp_adapter data structure
	 * associated with irq in int_parm
	 */

	buffere = &(queue->buffer[first_element]->element[0]);
	ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags);
	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */

	for (i = 0; i < elements_processed; i++) {

		buffer_index = first_element + i;
		buffer_index %= QDIO_MAX_BUFFERS_PER_Q;
		buffer = queue->buffer[buffer_index];

		/* go through all SBALEs of SBAL */
		for (buffere_index = 0;
		     buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER;
		     buffere_index++) {

			/* look for QDIO request identifiers in SB */
			buffere = &buffer->element[buffere_index];
			retval = zfcp_qdio_reqid_check(adapter,
						       (void *) buffere->addr);

			if (retval) {
				ZFCP_LOG_NORMAL("bug: unexpected inbound "
						"packet on adapter %s "
						"(reqid=0x%lx, "
						"first_element=%d, "
						"elements_processed=%d)\n",
						zfcp_get_busid_by_adapter(adapter),
						(unsigned long) buffere->addr,
						first_element,
						elements_processed);
				ZFCP_LOG_NORMAL("hex dump of inbound buffer "
						"at address %p "
						"(buffer_index=%d, "
						"buffere_index=%d)\n", buffer,
						buffer_index, buffere_index);
				ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
					      (char *) buffer, SBAL_SIZE);
			}
			/*
			 * A single used SBALE per inbound SBAL has been
			 * implemented by QDIO so far. Hope they will
			 * do some optimisation. Will need to change to
			 * unlikely() then.
			 */
			if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY))
				break;
		}

		if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) {
			ZFCP_LOG_NORMAL("bug: end of inbound data "
					"not marked!\n");
		}
	}

	/*
	 * put range of SBALs back to response queue
	 * (including SBALs which have already been freed before)
	 */
	count = atomic_read(&queue->free_count) + elements_processed;
	start = queue->free_index;

	ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
		       "queue_no=%i, index_in_queue=%i, count=%i, "
		       "buffers=0x%lx)\n",
		       zfcp_get_busid_by_adapter(adapter),
		       QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
		       0, start, count, (unsigned long) &queue->buffer[start]);

	retval = do_QDIO(ccw_device,
			 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
			 0, start, count, NULL);

	if (unlikely(retval)) {
		atomic_set(&queue->free_count, count);
		ZFCP_LOG_DEBUG("clearing of inbound data regions failed, "
			       "queues may be down "
			       "(count=%d, start=%d, retval=%d)\n",
			       count, start, retval);
	} else {
		queue->free_index += count;
		queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
		atomic_set(&queue->free_count, 0);
		ZFCP_LOG_TRACE("%i buffers enqueued to response "
			       "queue at position %i\n", count, start);
	}
 out:
	return;
}

/*
 * function:	zfcp_qdio_reqid_check
 *
 * purpose:	checks for valid reqids or unsolicited status
 *
 * returns:	0 - valid request id or unsolicited status
 *		!0 - otherwise
 */
int
zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
{
	struct zfcp_fsf_req *fsf_req;
	int retval = 0;

	/* invalid (per convention used in this driver) */
	if (unlikely(!sbale_addr)) {
		ZFCP_LOG_NORMAL("bug: invalid reqid\n");
		retval = -EINVAL;
		goto out;
	}

	/* valid request id and thus (hopefully :) valid fsf_req address */
	fsf_req = (struct zfcp_fsf_req *) sbale_addr;

	if (unlikely(adapter != fsf_req->adapter)) {
		ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
				"fsf_req->adapter=%p, adapter=%p)\n",
				fsf_req, fsf_req->adapter, adapter);
		retval = -EINVAL;
		goto out;
	}

	ZFCP_LOG_TRACE("fsf_req at %p, QTCB at %p\n", fsf_req, fsf_req->qtcb);
	if (likely(fsf_req->qtcb)) {
		ZFCP_LOG_TRACE("hex dump of QTCB:\n");
		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) fsf_req->qtcb,
			      sizeof(struct fsf_qtcb));
	}

	/* finish the FSF request */
	zfcp_fsf_req_complete(fsf_req);
 out:
	return retval;
}
/**
 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
 * @queue: queue from which SBALE should be returned
 * @sbal: specifies number of SBAL in queue
 * @sbale: specifies number of SBALE in SBAL
 */
static inline volatile struct qdio_buffer_element *
zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
{
	return &queue->buffer[sbal]->element[sbale];
}

/**
 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
 *	a struct zfcp_fsf_req
 */
inline volatile struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
{
	return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
				   sbal, sbale);
}

/**
 * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for
 *	a struct zfcp_fsf_req
 */
static inline volatile struct qdio_buffer_element *
zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
{
	return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue,
				   sbal, sbale);
}

/**
 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
 *	a struct zfcp_fsf_req
 */
inline volatile struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
{
	return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
				   fsf_req->sbale_curr);
}

/**
 * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used
 *	on the request_queue for a struct zfcp_fsf_req
 * @fsf_req: the number of the last SBAL that can be used is stored herein
 * @max_sbals: used to pass an upper limit for the number of SBALs
 *
 * Note: We can assume at least one free SBAL in the request_queue when called.
 */
static inline void
zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
{
	int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
	count = min(count, max_sbals);
	fsf_req->sbal_last = fsf_req->sbal_first;
	fsf_req->sbal_last += (count - 1);
	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
}

/**
 * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a
 *	request
 * @fsf_req: zfcp_fsf_req to be processed
 * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL
 *
 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
 * On success the last used SBALE of the full SBAL is marked with
 * SBAL_FLAGS_LAST_ENTRY, its SBALE 0 gets SBAL_FLAGS0_MORE_SBALS, and
 * filling continues at SBALE 0 of the next SBAL.
 */
static inline volatile struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	volatile struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (fsf_req->sbal_curr == fsf_req->sbal_last)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	fsf_req->sbal_curr++;
	fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	fsf_req->sbal_number++;

	/* start at first SBALE of new SBAL */
	fsf_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= sbtype;

	return sbale;
}

/**
 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
 */
static inline volatile struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
		return zfcp_qdio_sbal_chain(fsf_req, sbtype);

	fsf_req->sbale_curr++;

	return zfcp_qdio_sbale_curr(fsf_req);
}

/**
 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
 *	with zeros
 */
static inline int
zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
{
	struct qdio_buffer **buf = queue->buffer;
	int curr = first;
	int count = 0;

	for (;;) {
		curr %= QDIO_MAX_BUFFERS_PER_Q;
		count++;
		memset(buf[curr], 0, sizeof(struct qdio_buffer));
		if (curr == last)
			break;
		curr++;
	}
	return count;
}

/**
 * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req
 */
static inline int
zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
{
	return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue,
				    fsf_req->sbal_first, fsf_req->sbal_curr);
}

/**
 * zfcp_qdio_sbale_fill - set address and length in current SBALE
 *	on request_queue
 */
static inline void
zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
		     void *addr, int length)
{
	volatile struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->addr = addr;
	sbale->length = length;
}

/**
 * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s)
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @start_addr: address of memory segment
 * @total_length: length of memory segment
 *
 * Alignment and length of the segment determine how many SBALEs are needed
 * for the memory segment, since a single SBALE is never allowed to cross a
 * page boundary (e.g., with 4 KB pages, a 6 KB segment starting 1 KB before
 * a page boundary is split into pieces of 1 KB, 4 KB and 1 KB, occupying
 * three SBALEs).
 */
static inline int
zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
			     void *start_addr, unsigned long total_length)
{
	unsigned long remaining, length;
	void *addr;

	/* split segment up heeding page boundaries */
	for (addr = start_addr, remaining = total_length; remaining > 0;
	     addr += length, remaining -= length) {
		/* get next free SBALE for new piece */
		if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) {
			/* no SBALE left, clean up and leave */
			zfcp_qdio_sbals_wipe(fsf_req);
			return -EINVAL;
		}
		/* calculate length of new piece */
		length = min(remaining,
			     (PAGE_SIZE - ((unsigned long) addr &
					   (PAGE_SIZE - 1))));
		/* fill current SBALE with calculated piece */
		zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length);
	}
	return total_length;
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @sg: scatter-gather list
 * @sg_count: number of elements in scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 */
inline int
zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
			struct scatterlist *sg, int sg_count, int max_sbals)
{
	int sg_index;
	struct scatterlist *sg_segment;
	int retval;
	volatile struct qdio_buffer_element *sbale;
	int bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(fsf_req, max_sbals);

	/* set storage-block type for current SBAL */
	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
	sbale->flags |= sbtype;

	/* process all segments of scatter-gather list */
	for (sg_index = 0, sg_segment = sg, bytes = 0;
	     sg_index < sg_count;
	     sg_index++, sg_segment++) {
		retval = zfcp_qdio_sbals_from_segment(
				fsf_req,
				sbtype,
				zfcp_sg_to_address(sg_segment),
				sg_segment->length);
		if (retval < 0) {
			bytes = retval;
			goto out;
		} else
			bytes += retval;
	}
	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
 out:
	return bytes;
}

/**
 * zfcp_qdio_sbals_from_buffer - fill SBALs from buffer
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @buffer: data buffer
 * @length: length of buffer
 * @max_sbals: upper bound for number of SBALs to be used
 */
static inline int
zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
			    void *buffer, unsigned long length, int max_sbals)
{
	struct scatterlist sg_segment;

	zfcp_address_to_sg(buffer, &sg_segment);
	sg_segment.length = length;

	return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, &sg_segment, 1,
				       max_sbals);
}

/**
 * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
 *	to fill SBALs
 */
inline int
zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
			      unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
{
	if (scsi_cmnd->use_sg) {
		return zfcp_qdio_sbals_from_sg(fsf_req, sbtype,
					       (struct scatterlist *)
					       scsi_cmnd->request_buffer,
					       scsi_cmnd->use_sg,
					       ZFCP_MAX_SBALS_PER_REQ);
	} else {
		return zfcp_qdio_sbals_from_buffer(fsf_req, sbtype,
						   scsi_cmnd->request_buffer,
						   scsi_cmnd->request_bufflen,
						   ZFCP_MAX_SBALS_PER_REQ);
	}
}
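/*
 * Rather than requesting a completion interrupt for every outbound SBAL,
 * the PCI flag is set on roughly every ZFCP_QDIO_PCI_INTERVAL-th SBAL.
 * Worked example (assuming an interval of 32): with distance_from_int at
 * 30, a request of 4 SBALs starting at index 10 crosses the interval at
 * its second SBAL, so SBALE 0 of SBAL 11 gets SBAL_FLAGS0_PCI and the
 * distance counter restarts at 2.
 */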
/**
 * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed
 */
int
zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue,
			struct zfcp_fsf_req *fsf_req)
{
	int new_distance_from_int;
	int pci_pos;
	volatile struct qdio_buffer_element *sbale;

	new_distance_from_int = req_queue->distance_from_int +
		fsf_req->sbal_number;

	if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) {
		new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL;
		pci_pos = fsf_req->sbal_first;
		pci_pos += fsf_req->sbal_number;
		pci_pos -= new_distance_from_int;
		pci_pos -= 1;
		pci_pos %= QDIO_MAX_BUFFERS_PER_Q;
		sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0);
		sbale->flags |= SBAL_FLAGS0_PCI;
	}
	return new_distance_from_int;
}

/*
 * function:	zfcp_qdio_zero_sbals
 *
 * purpose:	zeros specified range of SBALs
 *
 * returns:	(void)
 */
void
zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count)
{
	int cur_pos;
	int index;

	for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) {
		index = cur_pos % QDIO_MAX_BUFFERS_PER_Q;
		memset(buf[index], 0, sizeof (struct qdio_buffer));
		ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n",
			       index, buf[index]);
	}
}

#undef ZFCP_LOG_AREA