/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages by
 * making a hypervisor call and passing in the 16 bytes.  The hypervisor
 * puts the message in the next 16 byte space in round-robin fashion,
 * turns on the high order bit of the message (the valid bit), and
 * generates an interrupt to the receiver (if interrupts are turned on.)
 * The receiver just turns off the valid bit when they have copied out
 * the message.
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers.  When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * Note that some of the underlying infrastructure is different between
 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
 * the older iSeries hypervisor models.  To support both, some low level
 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
 * The Makefile should pick one, not two, not zero, of these.
 *
 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
 * interfaces.  It would be really nice to abstract this above an RDMA
 * layer.
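 *
 * As a rough sketch (based on how this file fills in struct viosrp_crq
 * below), a command entry placed on the CRQ carries a valid byte of 0x80,
 * a format byte (SRP or MAD), a timeout, the length of the SRP IU, and
 * the DMA address (IU_data_ptr) from which the server can fetch that IU.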
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include "ibmvscsi.h"

/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 5;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;

#define IBMVSCSI_VERSION "1.5.8"

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");

/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool:	event_pool to be initialized
 * @size:	Number of events in pool
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
				 int size, struct ibmvscsi_host_data *hostdata)
{
	int i;

	pool->size = size;
	pool->next = 0;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage =
	    dma_alloc_coherent(hostdata->dev,
			       pool->size * sizeof(*pool->iu_storage),
			       &pool->iu_token, 0);
	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i) {
		struct srp_event_struct *evt = &pool->events[i];
		memset(&evt->crq, 0x00, sizeof(evt->crq));
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		evt->crq.IU_length = sizeof(*evt->xfer_iu);
		evt->crq.IU_data_ptr = pool->iu_token +
			sizeof(*evt->xfer_iu) * i;
		evt->xfer_iu = pool->iu_storage + i;
		evt->hostdata = hostdata;
		evt->ext_list = NULL;
		evt->ext_list_token = 0;
	}

	return 0;
}

/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool:	event_pool to be released
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 *
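 * Events still marked in use are only reported with a warning; the pool
 * is freed regardless, along with any externally allocated indirect
 * descriptor tables attached to individual events.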
 */
static void release_event_pool(struct event_pool *pool,
			       struct ibmvscsi_host_data *hostdata)
{
	int i, in_use = 0;
	for (i = 0; i < pool->size; ++i) {
		if (atomic_read(&pool->events[i].free) != 1)
			++in_use;
		if (pool->events[i].ext_list) {
			dma_free_coherent(hostdata->dev,
				  SG_ALL * sizeof(struct srp_direct_buf),
				  pool->events[i].ext_list,
				  pool->events[i].ext_list_token);
		}
	}
	if (in_use)
		printk(KERN_WARNING
		       "ibmvscsi: releasing event pool with %d "
		       "events still in use?\n", in_use);
	kfree(pool->events);
	dma_free_coherent(hostdata->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
}

/**
 * valid_event_struct: - Determines if event is valid.
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	int index = evt - pool->events;
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

/**
 * free_event_struct: - Changes status of event to "free"
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be modified
 *
 */
static void free_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	if (!valid_event_struct(pool, evt)) {
		printk(KERN_ERR
		       "ibmvscsi: Freeing invalid event_struct %p "
		       "(not in pool %p)\n", evt, pool->events);
		return;
	}
	if (atomic_inc_return(&evt->free) != 1) {
		printk(KERN_ERR
		       "ibmvscsi: Freeing event_struct %p "
		       "which is not in use!\n", evt);
		return;
	}
}

/**
 * get_event_struct: - Gets the next free event in pool
 * @pool:	event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here, we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
	int i;
	int poolsize = pool->size;
	int offset = pool->next;

	for (i = 0; i < poolsize; i++) {
		offset = (offset + 1) % poolsize;
		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
			pool->next = offset;
			return &pool->events[offset];
		}
	}

	printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
	return NULL;
}

/**
 * init_event_struct: Initialize fields in an event struct that are always
 *		      required.
 * @evt:	The event
 * @done:	Routine to call when the event is responded to
 * @format:	SRP or MAD format
 * @timeout:	timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
			      void (*done) (struct srp_event_struct *),
			      u8 format,
			      int timeout)
{
	evt_struct->cmnd = NULL;
	evt_struct->cmnd_done = NULL;
	evt_struct->sync_srp = NULL;
	evt_struct->crq.format = format;
	evt_struct->crq.timeout = timeout;
	evt_struct->done = done;
}

/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */

/**
 * set_srp_direction: Set the fields in the srp related to data
 *     direction and number of buffers based on the direction in
 *     the scsi_cmnd and the number of buffers
 */
static void set_srp_direction(struct scsi_cmnd *cmd,
			      struct srp_cmd *srp_cmd,
			      int numbuf)
{
	u8 fmt;

	if (numbuf == 0)
		return;

	if (numbuf == 1)
		fmt = SRP_DATA_DESC_DIRECT;
	else {
		fmt = SRP_DATA_DESC_INDIRECT;
		numbuf = min(numbuf, MAX_INDIRECT_BUFS);

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			srp_cmd->data_out_desc_cnt = numbuf;
		else
			srp_cmd->data_in_desc_cnt = numbuf;
	}

	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		srp_cmd->buf_fmt = fmt << 4;
	else
		srp_cmd->buf_fmt = fmt;
}

static void unmap_sg_list(int num_entries,
			  struct device *dev,
			  struct srp_direct_buf *md)
{
	int i;

	for (i = 0; i < num_entries; ++i)
		dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
}

/**
 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
 * @cmd:	srp_cmd whose additional_data member will be unmapped
 * @dev:	device for which the memory is mapped
 *
 */
static void unmap_cmd_data(struct srp_cmd *cmd,
			   struct srp_event_struct *evt_struct,
			   struct device *dev)
{
	u8 out_fmt, in_fmt;

	out_fmt = cmd->buf_fmt >> 4;
	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
		return;
	else if (out_fmt == SRP_DATA_DESC_DIRECT ||
		 in_fmt == SRP_DATA_DESC_DIRECT) {
		struct srp_direct_buf *data =
			(struct srp_direct_buf *) cmd->add_data;
		dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
	} else {
		struct srp_indirect_buf *indirect =
			(struct srp_indirect_buf *) cmd->add_data;
		int num_mapped = indirect->table_desc.len /
			sizeof(struct srp_direct_buf);

		if (num_mapped <= MAX_INDIRECT_BUFS) {
			unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
			return;
		}

		unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
	}
}

static int map_sg_list(int num_entries,
		       struct scatterlist *sg,
		       struct srp_direct_buf *md)
{
	int i;
	u64 total_length = 0;

	for (i = 0; i < num_entries; ++i) {
		struct srp_direct_buf *descr = md + i;
		struct scatterlist *sg_entry = &sg[i];
		descr->va = sg_dma_address(sg_entry);
		descr->len = sg_dma_len(sg_entry);
		descr->key = 0;
		total_length += sg_dma_len(sg_entry);
	}
	return total_length;
}

/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd:	Scsi_Cmnd with the scatterlist
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
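 * As the code below shows, a single mapped entry is described with one
 * direct descriptor, up to MAX_INDIRECT_BUFS entries fit in the indirect
 * table embedded in the srp_cmd, and larger lists spill into the
 * separately DMA-allocated ext_list table referenced by table_desc.
 *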
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_sg_data(struct scsi_cmnd *cmd,
		       struct srp_event_struct *evt_struct,
		       struct srp_cmd *srp_cmd, struct device *dev)
{

	int sg_mapped;
	u64 total_length = 0;
	struct scatterlist *sg = cmd->request_buffer;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;
	struct srp_indirect_buf *indirect =
		(struct srp_indirect_buf *) data;

	sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);

	if (sg_mapped == 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		data->va = sg_dma_address(&sg[0]);
		data->len = sg_dma_len(&sg[0]);
		data->key = 0;
		return 1;
	}

	indirect->table_desc.va = 0;
	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
	indirect->table_desc.key = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(sg_mapped, sg,
					   &indirect->desc_list[0]);
		indirect->len = total_length;
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = (struct srp_direct_buf *)
			dma_alloc_coherent(dev,
					   SG_ALL * sizeof(struct srp_direct_buf),
					   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			printk(KERN_ERR
			       "ibmvscsi: Can't allocate memory for indirect table\n");
			return 0;
		}
	}

	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);

	indirect->len = total_length;
	indirect->table_desc.va = evt_struct->ext_list_token;
	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));

	return 1;
}

/**
 * map_single_data: - Maps memory and initializes memory descriptor fields
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_single_data(struct scsi_cmnd *cmd,
			   struct srp_cmd *srp_cmd, struct device *dev)
{
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;

	data->va =
		dma_map_single(dev, cmd->request_buffer,
			       cmd->request_bufflen,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(data->va)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer for command!\n");
		return 0;
	}
	data->len = cmd->request_bufflen;
	data->key = 0;

	set_srp_direction(cmd, srp_cmd, 1);

	return 1;
}

/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	dma device for which to map dma memory
 *
 * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds
 * Returns 1 on success.
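 *
 * DMA_NONE commands need no mapping; scatter/gather commands are handed
 * to map_sg_data() and single-buffer commands to map_single_data().
 * DMA_BIDIRECTIONAL is rejected, since the SRP command built here
 * describes a transfer as either data-out or data-in, not both.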
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
				struct srp_event_struct *evt_struct,
				struct srp_cmd *srp_cmd, struct device *dev)
{
	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
	case DMA_TO_DEVICE:
		break;
	case DMA_NONE:
		return 1;
	case DMA_BIDIRECTIONAL:
		printk(KERN_ERR
		       "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
		return 0;
	default:
		printk(KERN_ERR
		       "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
		       cmd->sc_data_direction);
		return 0;
	}

	if (!cmd->request_buffer)
		return 1;
	if (cmd->use_sg)
		return map_sg_data(cmd, evt_struct, srp_cmd, dev);
	return map_single_data(cmd, srp_cmd, dev);
}

/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct:	evt_struct to be sent
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
				   struct ibmvscsi_host_data *hostdata)
{
	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
	int request_status;
	int rc;

	/* If we have exhausted our request limit, just fail this request,
	 * unless it is for a reset or abort.
	 * Note that there are rare cases involving driver generated requests
	 * (such as task management requests) that the mid layer may think we
	 * can handle more requests (can_queue) when we actually can't
	 */
	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
		request_status =
			atomic_dec_if_positive(&hostdata->request_limit);
		/* If request limit was -1 when we started, it is now even
		 * less than that
		 */
		if (request_status < -1)
			goto send_error;
		/* Otherwise, we may have run out of requests. */
		/* Abort and reset calls should make it through.
		 * Nothing except abort and reset should use the last two
		 * slots unless we had two or less to begin with.
		 */
		else if (request_status < 2 &&
			 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
			/* In the case that we have less than two requests
			 * available, check the server limit as a combination
			 * of the request limit and the number of requests
			 * in-flight (the size of the send list).  If the
			 * server limit is greater than 2, return busy so
			 * that the last two are reserved for reset and abort.
			 */
			int server_limit = request_status;
			struct srp_event_struct *tmp_evt;

			list_for_each_entry(tmp_evt, &hostdata->sent, list) {
				server_limit++;
			}

			if (server_limit > 2)
				goto send_busy;
		}
	}

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

	/* Add this to the sent list.
	 * We need to do this before we actually send,
	 * in case it comes back REALLY fast.
	 */
	list_add_tail(&evt_struct->list, &hostdata->sent);

	if ((rc =
	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
		list_del(&evt_struct->list);

		printk(KERN_ERR "ibmvscsi: send error %d\n",
		       rc);
		atomic_inc(&hostdata->request_limit);
		goto send_error;
	}

	return 0;

 send_busy:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	free_event_struct(&hostdata->pool, evt_struct);
	atomic_inc(&hostdata->request_limit);
	return SCSI_MLQUEUE_HOST_BUSY;

 send_error:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	if (evt_struct->cmnd != NULL) {
		evt_struct->cmnd->result = DID_ERROR << 16;
		evt_struct->cmnd_done(evt_struct->cmnd);
	} else if (evt_struct->done)
		evt_struct->done(evt_struct);

	free_event_struct(&hostdata->pool, evt_struct);
	return 0;
}

/**
 * handle_cmd_rsp: - Handle responses from commands
 * @evt_struct:	srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	if (unlikely(rsp->opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: bad SRP RSP type %d\n",
			       rsp->opcode);
	}

	if (cmnd) {
		cmnd->result = rsp->status;
		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
			memcpy(cmnd->sense_buffer,
			       rsp->data,
			       rsp->sense_data_len);
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
			cmnd->resid = rsp->data_out_res_cnt;
		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
			cmnd->resid = rsp->data_in_res_cnt;
	}

	if (evt_struct->cmnd_done)
		evt_struct->cmnd_done(cmnd);
}

/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev:	struct scsi_device
 *
 */
static inline u16 lun_from_dev(struct scsi_device *dev)
{
	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}

/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmd:	struct scsi_cmnd to be executed
 * @done:	Callback function to be called when cmd is completed
 */
static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
				 void (*done) (struct scsi_cmnd *))
{
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct srp_indirect_buf *indirect;
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
	u16 lun = lun_from_dev(cmnd->device);
	u8 out_fmt, in_fmt;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Set up the actual SRP IU */
	srp_cmd = &evt_struct->iu.srp.cmd;
	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
	srp_cmd->opcode = SRP_CMD;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
	srp_cmd->lun = ((u64) lun) << 48;

	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
		printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
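
	/* Wire up the completion path: handle_cmd_rsp() runs when the
	 * response CRQ message arrives, and the CRQ timeout is taken from
	 * the midlayer's per-command timeout (converted from jiffies to
	 * seconds).
	 */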
	init_event_struct(evt_struct,
			  handle_cmd_rsp,
			  VIOSRP_SRP_FORMAT,
			  cmnd->timeout_per_command/HZ);

	evt_struct->cmnd = cmnd;
	evt_struct->cmnd_done = done;

	/* Fix up dma address of the buffer itself */
	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
	out_fmt = srp_cmd->buf_fmt >> 4;
	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
	    indirect->table_desc.va == 0) {
		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
			offsetof(struct srp_cmd, add_data) +
			offsetof(struct srp_indirect_buf, desc_list);
	}

	return ibmvscsi_send_srp_event(evt_struct, hostdata);
}

/* ------------------------------------------------------------
 * Routines for driver initialization
 */
/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info. Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	dma_unmap_single(hostdata->dev,
			 evt_struct->iu.mad.adapter_info.buffer,
			 evt_struct->iu.mad.adapter_info.common.length,
			 DMA_BIDIRECTIONAL);

	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
		printk("ibmvscsi: error %d getting adapter info\n",
		       evt_struct->xfer_iu->mad.adapter_info.common.status);
	} else {
		printk("ibmvscsi: host srp version: %s, "
		       "host partition %s (%d), OS %d, max io %u\n",
		       hostdata->madapter_info.srp_version,
		       hostdata->madapter_info.partition_name,
		       hostdata->madapter_info.partition_number,
		       hostdata->madapter_info.os_type,
		       hostdata->madapter_info.port_max_txu[0]);

		if (hostdata->madapter_info.port_max_txu[0])
			hostdata->host->max_sectors =
				hostdata->madapter_info.port_max_txu[0] >> 9;

		if (hostdata->madapter_info.os_type == 3 &&
		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
			printk("ibmvscsi: host (Ver. %s) doesn't support large "
			       "transfers\n",
			       hostdata->madapter_info.srp_version);
			printk("ibmvscsi: limiting scatterlists to %d\n",
			       MAX_INDIRECT_BUFS);
			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
		}
	}
}

/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 *      and stores the result so it can be retrieved with
 *      sysfs.  We COULD consider causing a failure if the
 *      returned SRP version doesn't match ours.
 * @hostdata:	ibmvscsi_host_data of host
 *
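 * The reply is handled by adapter_info_rsp(), which logs the partner's
 * details and may lower max_sectors and sg_tablesize based on what the
 * hosting partition reports.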
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_adapter_info *req;
	struct srp_event_struct *evt_struct;
	dma_addr_t addr;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
		       "for ADAPTER_INFO_REQ!\n");
		return;
	}

	init_event_struct(evt_struct,
			  adapter_info_rsp,
			  VIOSRP_MAD_FORMAT,
			  init_timeout * HZ);

	req = &evt_struct->iu.mad.adapter_info;
	memset(req, 0x00, sizeof(*req));

	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
	req->common.length = sizeof(hostdata->madapter_info);
	req->buffer = addr = dma_map_single(hostdata->dev,
					    &hostdata->madapter_info,
					    sizeof(hostdata->madapter_info),
					    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(req->buffer)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer "
		       "for adapter_info!\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return;
	}

	if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
		printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
		dma_unmap_single(hostdata->dev,
				 addr,
				 sizeof(hostdata->madapter_info),
				 DMA_BIDIRECTIONAL);
	}
};

/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login. Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
	case SRP_LOGIN_RSP:	/* it worked! */
		break;
	case SRP_LOGIN_REJ:	/* refused! */
		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
		       evt_struct->xfer_iu->srp.login_rej.reason);
		/* Login failed.  */
		atomic_set(&hostdata->request_limit, -1);
		return;
	default:
		printk(KERN_ERR
		       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
		       evt_struct->xfer_iu->srp.login_rsp.opcode);
		/* Login failed.  */
		atomic_set(&hostdata->request_limit, -1);
		return;
	}

	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");

	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
		printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");

	/* Now we know what the real request-limit is.
	 * This value is set rather than added to request_limit because
	 * request_limit could have been set to -1 by this client.
	 */
	atomic_set(&hostdata->request_limit,
		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);

	/* If we had any pending I/Os, kick them */
	scsi_unblock_requests(hostdata->host);

	send_mad_adapter_info(hostdata);
	return;
}

/**
 * send_srp_login: - Sends the srp login
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns zero if successful.
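 *
 * The "done" callback is login_rsp(), which on success installs the
 * negotiated req_lim_delta as the new request_limit, unblocks any
 * pending requests, and then requests the server's adapter info.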
 */
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	unsigned long flags;
	struct srp_login_req *login;
	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR
		       "ibmvscsi: couldn't allocate an event for login req!\n");
		return FAILED;
	}

	init_event_struct(evt_struct,
			  login_rsp,
			  VIOSRP_SRP_FORMAT,
			  init_timeout * HZ);

	login = &evt_struct->iu.srp.login_req;
	memset(login, 0x00, sizeof(struct srp_login_req));
	login->opcode = SRP_LOGIN_REQ;
	login->req_it_iu_len = sizeof(union srp_iu);
	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	/* Start out with a request limit of 1, since this is negotiated in
	 * the login request we are just sending
	 */
	atomic_set(&hostdata->request_limit, 1);

	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	printk("ibmvscsi: sent SRP login\n");
	return rc;
};

/**
 * sync_completion: Signal that a synchronous command has completed
 * Note that after returning from this call, the evt_struct is freed.
 * the caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */
static void sync_completion(struct srp_event_struct *evt_struct)
{
	/* copy the response back */
	if (evt_struct->sync_srp)
		*evt_struct->sync_srp = *evt_struct->xfer_iu;

	complete(&evt_struct->comp);
}

/**
 * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
 * send this over to the server and wait synchronously for the response
 */
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)cmd->device->host->hostdata;
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *found_evt;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);

	/* First, find this command in our sent list so we can figure
	 * out the correct tag
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (!found_evt) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		return FAILED;
	}

	evt = get_event_struct(&hostdata->pool);
	if (evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
		return FAILED;
	}

	init_event_struct(evt,
			  sync_completion,
			  VIOSRP_SRP_FORMAT,
			  init_timeout * HZ);

	tsk_mgmt = &evt->iu.srp.tsk_mgmt;

	/* Set up an abort SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
	tsk_mgmt->task_tag = (u64) found_evt;

	printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
	       tsk_mgmt->lun, tsk_mgmt->task_tag);

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	if (rsp_rc != 0) {
		printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
		return FAILED;
	}

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: abort bad SRP RSP type %d\n",
			       srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: abort code %d for task tag 0x%lx\n",
			       rsp_rc,
			       tsk_mgmt->task_tag);
		return FAILED;
	}

	/* Because we dropped the spinlock above, it's possible
	 * the event is no longer in our list.  Make sure it didn't
	 * complete while we were aborting
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (found_evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_INFO
		       "ibmvscsi: aborted task tag 0x%lx completed\n",
		       tsk_mgmt->task_tag);
		return SUCCESS;
	}

	printk(KERN_INFO
	       "ibmvscsi: successfully aborted task tag 0x%lx\n",
	       tsk_mgmt->task_tag);

	cmd->result = (DID_ABORT << 16);
	list_del(&found_evt->list);
	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
		       found_evt->hostdata->dev);
	free_event_struct(&found_evt->hostdata->pool, found_evt);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	atomic_inc(&hostdata->request_limit);
	return SUCCESS;
}

/**
 * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
 * template send this over to the server and wait synchronously for the
 * response
 */
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)cmd->device->host->hostdata;

	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *pos;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	evt = get_event_struct(&hostdata->pool);
	if (evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
		return FAILED;
	}

	init_event_struct(evt,
			  sync_completion,
			  VIOSRP_SRP_FORMAT,
			  init_timeout * HZ);

	tsk_mgmt = &evt->iu.srp.tsk_mgmt;

	/* Set up a lun reset SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;

	printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
	       tsk_mgmt->lun);

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	if (rsp_rc != 0) {
		printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
		return FAILED;
	}

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: reset bad SRP RSP type %d\n",
			       srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: reset code %d for task tag 0x%lx\n",
			       rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* We need to find all commands for this LUN that have not yet been
	 * responded to, and fail them with DID_RESET
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
			if (tmp_evt->cmnd)
				tmp_evt->cmnd->result = (DID_RESET << 16);
			list_del(&tmp_evt->list);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
				       tmp_evt->hostdata->dev);
			free_event_struct(&tmp_evt->hostdata->pool,
					  tmp_evt);
			atomic_inc(&hostdata->request_limit);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
			else if (tmp_evt->done)
				tmp_evt->done(tmp_evt);
		}
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	return SUCCESS;
}

/**
 * purge_requests: Our virtual adapter just shut down.  Purge any sent requests.
 * @hostdata:	the adapter
 */
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
	struct srp_event_struct *tmp_evt, *pos;
	unsigned long flags;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		list_del(&tmp_evt->list);
		if (tmp_evt->cmnd) {
			tmp_evt->cmnd->result = (error_code << 16);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
				       tmp_evt,
				       tmp_evt->hostdata->dev);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
		} else {
			if (tmp_evt->done) {
				tmp_evt->done(tmp_evt);
			}
		}
		free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @hostdata:	ibmvscsi_host_data of host
 *
 */
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
			 struct ibmvscsi_host_data *hostdata)
{
	unsigned long flags;
	struct srp_event_struct *evt_struct =
		(struct srp_event_struct *)crq->IU_data_ptr;
	switch (crq->valid) {
	case 0xC0:		/* initialization */
		switch (crq->format) {
		case 0x01:	/* Initialization message */
			printk(KERN_INFO "ibmvscsi: partner initialized\n");
			/* Send back a response */
			if (ibmvscsi_send_crq(hostdata,
					      0xC002000000000000LL, 0) == 0) {
				/* Now login */
				send_srp_login(hostdata);
			} else {
				printk(KERN_ERR
				       "ibmvscsi: Unable to send init rsp\n");
			}

			break;
		case 0x02:	/* Initialization response */
			printk(KERN_INFO
			       "ibmvscsi: partner initialization complete\n");

			/* Now login */
			send_srp_login(hostdata);
			break;
		default:
			printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
		}
		return;
	case 0xFF:	/* Hypervisor telling us the connection is closed */
		scsi_block_requests(hostdata->host);
		atomic_set(&hostdata->request_limit, 0);
		if (crq->format == 0x06) {
			/* We need to re-setup the interpartition connection */
			printk(KERN_INFO
			       "ibmvscsi: Re-enabling adapter!\n");
			purge_requests(hostdata, DID_REQUEUE);
			if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
							 hostdata)) ||
			    (ibmvscsi_send_crq(hostdata,
					       0xC001000000000000LL, 0))) {
				atomic_set(&hostdata->request_limit,
					   -1);
				printk(KERN_ERR
				       "ibmvscsi: error after"
				       " enable\n");
			}
		} else {
			printk(KERN_INFO
			       "ibmvscsi: Virtual adapter failed rc %d!\n",
			       crq->format);

			purge_requests(hostdata, DID_ERROR);
			if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
						      hostdata)) ||
			    (ibmvscsi_send_crq(hostdata,
					       0xC001000000000000LL, 0))) {
				atomic_set(&hostdata->request_limit,
					   -1);
				printk(KERN_ERR
				       "ibmvscsi: error after reset\n");
			}
		}
		scsi_unblock_requests(hostdata->host);
		return;
	case 0x80:		/* real payload */
		break;
	default:
		printk(KERN_ERR
		       "ibmvscsi: got an invalid message type 0x%02x\n",
		       crq->valid);
		return;
	}

	/* The only kind of payload CRQs we should get are responses to
	 * things we send.
	 * Make sure this response is to something we actually sent.
	 */
	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
		printk(KERN_ERR
		       "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
		       (void *)crq->IU_data_ptr);
		return;
	}

	if (atomic_read(&evt_struct->free)) {
		printk(KERN_ERR
		       "ibmvscsi: received duplicate correlation_token 0x%p!\n",
		       (void *)crq->IU_data_ptr);
		return;
	}

	if (crq->format == VIOSRP_SRP_FORMAT)
		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
			   &hostdata->request_limit);

	if (evt_struct->done)
		evt_struct->done(evt_struct);
	else
		printk(KERN_ERR
		       "ibmvscsi: returned done() is NULL; not running it!\n");

	/*
	 * Lock the host_lock before messing with these structures, since we
	 * are running in a task context
	 */
	spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
	list_del(&evt_struct->list);
	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
	spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_do_host_config: Send the command to the server to get host
 * configuration data.  The data is opaque to us.
 */
static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
				   unsigned char *buffer, int length)
{
	struct viosrp_host_config *host_config;
	struct srp_event_struct *evt_struct;
	dma_addr_t addr;
	int rc;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR
		       "ibmvscsi: couldn't allocate event for HOST_CONFIG!\n");
		return -1;
	}

	init_event_struct(evt_struct,
			  sync_completion,
			  VIOSRP_MAD_FORMAT,
			  init_timeout * HZ);

	host_config = &evt_struct->iu.mad.host_config;

	/* Set up a HOST_CONFIG mad request */
	memset(host_config, 0x00, sizeof(*host_config));
	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
	host_config->common.length = length;
	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
						    length,
						    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(host_config->buffer)) {
		printk(KERN_ERR
		       "ibmvscsi: dma_mapping error getting host config\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return -1;
	}

	init_completion(&evt_struct->comp);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
	if (rc == 0)
		wait_for_completion(&evt_struct->comp);
	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);

	return rc;
}

/**
 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
 * @sdev:	struct scsi_device device to configure
 *
 * Enable allow_restart for a device if it is a disk.  Adjust the
 * queue_depth here also as is required by the documentation for
 * struct scsi_host_template.
 */
static int ibmvscsi_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (sdev->type == TYPE_DISK)
		sdev->allow_restart = 1;
	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return 0;
}

/**
 * ibmvscsi_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 * 	actual depth set
 **/
static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;

	scsi_adjust_queue_depth(sdev, 0, qdepth);
	return sdev->queue_depth;
}

/* ------------------------------------------------------------
 * sysfs attributes
 */
static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.srp_version);
	return len;
}

static struct class_device_attribute ibmvscsi_host_srp_version = {
	.attr = {
		 .name = "srp_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_srp_version,
};

static ssize_t show_host_partition_name(struct class_device *class_dev,
					char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.partition_name);
	return len;
}

static struct class_device_attribute ibmvscsi_host_partition_name = {
	.attr = {
		 .name = "partition_name",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct class_device *class_dev,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.partition_number);
	return len;
}

static struct class_device_attribute ibmvscsi_host_partition_number = {
	.attr = {
		 .name = "partition_number",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_number,
};

static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.mad_version);
	return len;
}

static struct class_device_attribute ibmvscsi_host_mad_version = {
	.attr = {
		 .name = "mad_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_mad_version,
};

static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
	return len;
}

static struct class_device_attribute ibmvscsi_host_os_type = {
	.attr = {
		 .name = "os_type",
		 .mode = S_IRUGO,
		 },
	.show = show_host_os_type,
};

static ssize_t show_host_config(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;

	/* returns null-terminated host config data */
	if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
		return strlen(buf);
	else
		return 0;
}

static struct class_device_attribute ibmvscsi_host_config = {
	.attr = {
		 .name = "config",
		 .mode = S_IRUGO,
		 },
	.show = show_host_config,
};

static struct class_device_attribute *ibmvscsi_attrs[] = {
	&ibmvscsi_host_srp_version,
	&ibmvscsi_host_partition_name,
	&ibmvscsi_host_partition_number,
	&ibmvscsi_host_mad_version,
	&ibmvscsi_host_os_type,
	&ibmvscsi_host_config,
	NULL
};

/* ------------------------------------------------------------
 * SCSI driver registration
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
	.proc_name = "ibmvscsi",
	.queuecommand = ibmvscsi_queuecommand,
	.eh_abort_handler = ibmvscsi_eh_abort_handler,
	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
	.slave_configure = ibmvscsi_slave_configure,
	.change_queue_depth = ibmvscsi_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ibmvscsi_attrs,
};

/**
 * Called by bus code for each adapter
 */
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvscsi_host_data *hostdata;
	struct Scsi_Host *host;
	struct device *dev = &vdev->dev;
	unsigned long wait_switch = 0;
	int rc;

	vdev->dev.driver_data = NULL;

	driver_template.can_queue = max_requests;
	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
	if (!host) {
		printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
		goto scsi_host_alloc_failed;
	}

	hostdata = (struct ibmvscsi_host_data *)host->hostdata;
	memset(hostdata, 0x00, sizeof(*hostdata));
	INIT_LIST_HEAD(&hostdata->sent);
	hostdata->host = host;
	hostdata->dev = dev;
	atomic_set(&hostdata->request_limit, -1);
	hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */

	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
	if (rc != 0 && rc != H_RESOURCE) {
		printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
		goto init_crq_failed;
	}
	if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
		printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
		goto init_pool_failed;
	}

	host->max_lun = 8;
	host->max_id = max_id;
	host->max_channel = max_channel;

	if (scsi_add_host(hostdata->host, hostdata->dev))
		goto add_host_failed;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we don't
	 * want to scan
	 */
	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
	    || rc == H_RESOURCE) {
		/*
		 * Wait around max init_timeout secs for the adapter to finish
		 * initializing. When we are done initializing, we will have a
		 * valid request_limit.  We don't want Linux scanning before
		 * we are ready.
		 */
		for (wait_switch = jiffies + (init_timeout * HZ);
		     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {

			msleep(10);
		}

		/* if we now have a valid request_limit, initiate a scan */
		if (atomic_read(&hostdata->request_limit) > 0)
			scsi_scan_host(host);
	}

	vdev->dev.driver_data = hostdata;
	return 0;

      add_host_failed:
	release_event_pool(&hostdata->pool, hostdata);
      init_pool_failed:
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
      init_crq_failed:
	scsi_host_put(host);
      scsi_host_alloc_failed:
	return -1;
}

static int ibmvscsi_remove(struct vio_dev *vdev)
{
	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
	release_event_pool(&hostdata->pool, hostdata);
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
				   max_requests);

	scsi_remove_host(hostdata->host);
	scsi_host_put(hostdata->host);

	return 0;
}

/**
 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
 * support.
 */
static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
	{"vscsi", "IBM,v-scsi"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);

static struct vio_driver ibmvscsi_driver = {
	.id_table = ibmvscsi_device_table,
	.probe = ibmvscsi_probe,
	.remove = ibmvscsi_remove,
	.driver = {
		.name = "ibmvscsi",
		.owner = THIS_MODULE,
	}
};

int __init ibmvscsi_module_init(void)
{
	return vio_register_driver(&ibmvscsi_driver);
}

void __exit ibmvscsi_module_exit(void)
{
	vio_unregister_driver(&ibmvscsi_driver);
}

module_init(ibmvscsi_module_init);
module_exit(ibmvscsi_module_exit);