/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"

/**
 *	fib_map_alloc - allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	else
		dev->max_cmd_size = dev->max_fib_size;

	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
		&dev->hw_fib_pa);
	if (dev->hw_fib_va == NULL)
		return -ENOMEM;
	return 0;
}

/**
 *	aac_fib_map_free - free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
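 *
 *	Rough pairing sketch (illustrative only, error handling elided):
 *
 *		if (fib_map_alloc(dev))
 *			return -ENOMEM;
 *		...
 *		aac_fib_map_free(dev);
 *
 *	Note that fib_map_alloc() sizes the region as max_cmd_size plus
 *	sizeof(struct aac_fib_xporthdr) per fib, with ALIGN32 - 1 bytes of
 *	alignment slack on top, so the free side should describe the same
 *	overall region.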
94 */ 95 96 void aac_fib_map_free(struct aac_dev *dev) 97 { 98 if (dev->hw_fib_va && dev->max_cmd_size) { 99 pci_free_consistent(dev->pdev, 100 (dev->max_cmd_size * 101 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)), 102 dev->hw_fib_va, dev->hw_fib_pa); 103 } 104 dev->hw_fib_va = NULL; 105 dev->hw_fib_pa = 0; 106 } 107 108 void aac_fib_vector_assign(struct aac_dev *dev) 109 { 110 u32 i = 0; 111 u32 vector = 1; 112 struct fib *fibptr = NULL; 113 114 for (i = 0, fibptr = &dev->fibs[i]; 115 i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); 116 i++, fibptr++) { 117 if ((dev->max_msix == 1) || 118 (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1) 119 - dev->vector_cap))) { 120 fibptr->vector_no = 0; 121 } else { 122 fibptr->vector_no = vector; 123 vector++; 124 if (vector == dev->max_msix) 125 vector = 1; 126 } 127 } 128 } 129 130 /** 131 * aac_fib_setup - setup the fibs 132 * @dev: Adapter to set up 133 * 134 * Allocate the PCI space for the fibs, map it and then initialise the 135 * fib area, the unmapped fib data and also the free list 136 */ 137 138 int aac_fib_setup(struct aac_dev * dev) 139 { 140 struct fib *fibptr; 141 struct hw_fib *hw_fib; 142 dma_addr_t hw_fib_pa; 143 int i; 144 u32 max_cmds; 145 146 while (((i = fib_map_alloc(dev)) == -ENOMEM) 147 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) { 148 max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1; 149 dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB; 150 if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) 151 dev->init->r7.max_io_commands = cpu_to_le32(max_cmds); 152 } 153 if (i<0) 154 return -ENOMEM; 155 156 /* 32 byte alignment for PMC */ 157 hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1); 158 dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + 159 (hw_fib_pa - dev->hw_fib_pa)); 160 dev->hw_fib_pa = hw_fib_pa; 161 memset(dev->hw_fib_va, 0, 162 (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) * 163 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); 164 165 /* add Xport header */ 166 dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va + 167 sizeof(struct aac_fib_xporthdr)); 168 dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr); 169 170 hw_fib = dev->hw_fib_va; 171 hw_fib_pa = dev->hw_fib_pa; 172 /* 173 * Initialise the fibs 174 */ 175 for (i = 0, fibptr = &dev->fibs[i]; 176 i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); 177 i++, fibptr++) 178 { 179 fibptr->flags = 0; 180 fibptr->size = sizeof(struct fib); 181 fibptr->dev = dev; 182 fibptr->hw_fib_va = hw_fib; 183 fibptr->data = (void *) fibptr->hw_fib_va->data; 184 fibptr->next = fibptr+1; /* Forward chain the fibs */ 185 sema_init(&fibptr->event_wait, 0); 186 spin_lock_init(&fibptr->event_lock); 187 hw_fib->header.XferState = cpu_to_le32(0xffffffff); 188 hw_fib->header.SenderSize = 189 cpu_to_le16(dev->max_fib_size); /* ?? 
max_cmd_size */ 190 fibptr->hw_fib_pa = hw_fib_pa; 191 fibptr->hw_sgl_pa = hw_fib_pa + 192 offsetof(struct aac_hba_cmd_req, sge[2]); 193 /* 194 * one element is for the ptr to the separate sg list, 195 * second element for 32 byte alignment 196 */ 197 fibptr->hw_error_pa = hw_fib_pa + 198 offsetof(struct aac_native_hba, resp.resp_bytes[0]); 199 200 hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + 201 dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)); 202 hw_fib_pa = hw_fib_pa + 203 dev->max_cmd_size + sizeof(struct aac_fib_xporthdr); 204 } 205 206 /* 207 *Assign vector numbers to fibs 208 */ 209 aac_fib_vector_assign(dev); 210 211 /* 212 * Add the fib chain to the free list 213 */ 214 dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL; 215 /* 216 * Set 8 fibs aside for management tools 217 */ 218 dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue]; 219 return 0; 220 } 221 222 /** 223 * aac_fib_alloc_tag-allocate a fib using tags 224 * @dev: Adapter to allocate the fib for 225 * 226 * Allocate a fib from the adapter fib pool using tags 227 * from the blk layer. 228 */ 229 230 struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd) 231 { 232 struct fib *fibptr; 233 234 fibptr = &dev->fibs[scmd->request->tag]; 235 /* 236 * Null out fields that depend on being zero at the start of 237 * each I/O 238 */ 239 fibptr->hw_fib_va->header.XferState = 0; 240 fibptr->type = FSAFS_NTC_FIB_CONTEXT; 241 fibptr->callback_data = NULL; 242 fibptr->callback = NULL; 243 244 return fibptr; 245 } 246 247 /** 248 * aac_fib_alloc - allocate a fib 249 * @dev: Adapter to allocate the fib for 250 * 251 * Allocate a fib from the adapter fib pool. If the pool is empty we 252 * return NULL. 253 */ 254 255 struct fib *aac_fib_alloc(struct aac_dev *dev) 256 { 257 struct fib * fibptr; 258 unsigned long flags; 259 spin_lock_irqsave(&dev->fib_lock, flags); 260 fibptr = dev->free_fib; 261 if(!fibptr){ 262 spin_unlock_irqrestore(&dev->fib_lock, flags); 263 return fibptr; 264 } 265 dev->free_fib = fibptr->next; 266 spin_unlock_irqrestore(&dev->fib_lock, flags); 267 /* 268 * Set the proper node type code and node byte size 269 */ 270 fibptr->type = FSAFS_NTC_FIB_CONTEXT; 271 fibptr->size = sizeof(struct fib); 272 /* 273 * Null out fields that depend on being zero at the start of 274 * each I/O 275 */ 276 fibptr->hw_fib_va->header.XferState = 0; 277 fibptr->flags = 0; 278 fibptr->callback = NULL; 279 fibptr->callback_data = NULL; 280 281 return fibptr; 282 } 283 284 /** 285 * aac_fib_free - free a fib 286 * @fibptr: fib to free up 287 * 288 * Frees up a fib and places it on the appropriate queue 289 */ 290 291 void aac_fib_free(struct fib *fibptr) 292 { 293 unsigned long flags; 294 295 if (fibptr->done == 2) 296 return; 297 298 spin_lock_irqsave(&fibptr->dev->fib_lock, flags); 299 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) 300 aac_config.fib_timeouts++; 301 if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) && 302 fibptr->hw_fib_va->header.XferState != 0) { 303 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 304 (void*)fibptr, 305 le32_to_cpu(fibptr->hw_fib_va->header.XferState)); 306 } 307 fibptr->next = fibptr->dev->free_fib; 308 fibptr->dev->free_fib = fibptr; 309 spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags); 310 } 311 312 /** 313 * aac_fib_init - initialise a fib 314 * @fibptr: The fib to initialize 315 * 316 * Set up the generic fib fields ready for use 317 */ 318 319 void aac_fib_init(struct fib *fibptr) 
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 *	fib_dealloc - deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and these routines are the only ones that have
 *	knowledge of how these queues are implemented.
 */

/**
 *	aac_get_entry - get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	Returns a queue entry if the requested queue has free entries. If the
 *	queue is full (no free entries) no entry is returned and the function
 *	returns 0, otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, atomic_read(&q->numpending));
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}

/**
 *	aac_queue_get - get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */

/**
 *	aac_fib_send - send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response, a
 *	callback routine must be supplied; it is invoked when the response FIB
 *	is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	unsigned long sflags = 0;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
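 *
 *	A quick summary of the wait/reply combinations handled just below
 *	(a sketch of the existing logic, not additional behaviour):
 *
 *		wait=1 reply=0	invalid, rejected with -EINVAL
 *		wait=0 reply=1	Async | ResponseExpected, callback on completion
 *		wait=0 reply=0	NoResponseExpected, fire and forget
 *		wait=1 reply=1	ResponseExpected, caller sleeps on event_wait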
512 */ 513 fibptr->flags = 0; 514 if (wait && !reply) { 515 return -EINVAL; 516 } else if (!wait && reply) { 517 hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected); 518 FIB_COUNTER_INCREMENT(aac_config.AsyncSent); 519 } else if (!wait && !reply) { 520 hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected); 521 FIB_COUNTER_INCREMENT(aac_config.NoResponseSent); 522 } else if (wait && reply) { 523 hw_fib->header.XferState |= cpu_to_le32(ResponseExpected); 524 FIB_COUNTER_INCREMENT(aac_config.NormalSent); 525 } 526 /* 527 * Map the fib into 32bits by using the fib number 528 */ 529 530 hw_fib->header.SenderFibAddress = 531 cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2); 532 533 /* use the same shifted value for handle to be compatible 534 * with the new native hba command handle 535 */ 536 hw_fib->header.Handle = 537 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); 538 539 /* 540 * Set FIB state to indicate where it came from and if we want a 541 * response from the adapter. Also load the command from the 542 * caller. 543 * 544 * Map the hw fib pointer as a 32bit value 545 */ 546 hw_fib->header.Command = cpu_to_le16(command); 547 hw_fib->header.XferState |= cpu_to_le32(SentFromHost); 548 /* 549 * Set the size of the Fib we want to send to the adapter 550 */ 551 hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size); 552 if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) { 553 return -EMSGSIZE; 554 } 555 /* 556 * Get a queue entry connect the FIB to it and send an notify 557 * the adapter a command is ready. 558 */ 559 hw_fib->header.XferState |= cpu_to_le32(NormalPriority); 560 561 /* 562 * Fill in the Callback and CallbackContext if we are not 563 * going to wait. 564 */ 565 if (!wait) { 566 fibptr->callback = callback; 567 fibptr->callback_data = callback_data; 568 fibptr->flags = FIB_CONTEXT_FLAG; 569 } 570 571 fibptr->done = 0; 572 573 FIB_COUNTER_INCREMENT(aac_config.FibsSent); 574 575 dprintk((KERN_DEBUG "Fib contents:.\n")); 576 dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command))); 577 dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command))); 578 dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState))); 579 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va)); 580 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); 581 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); 582 583 if (!dev->queues) 584 return -EBUSY; 585 586 if (wait) { 587 588 spin_lock_irqsave(&dev->manage_lock, mflags); 589 if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { 590 printk(KERN_INFO "No management Fibs Available:%d\n", 591 dev->management_fib_count); 592 spin_unlock_irqrestore(&dev->manage_lock, mflags); 593 return -EBUSY; 594 } 595 dev->management_fib_count++; 596 spin_unlock_irqrestore(&dev->manage_lock, mflags); 597 spin_lock_irqsave(&fibptr->event_lock, flags); 598 } 599 600 if (dev->sync_mode) { 601 if (wait) 602 spin_unlock_irqrestore(&fibptr->event_lock, flags); 603 spin_lock_irqsave(&dev->sync_lock, sflags); 604 if (dev->sync_fib) { 605 list_add_tail(&fibptr->fiblink, &dev->sync_fib_list); 606 spin_unlock_irqrestore(&dev->sync_lock, sflags); 607 } else { 608 dev->sync_fib = fibptr; 609 spin_unlock_irqrestore(&dev->sync_lock, sflags); 610 aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, 611 (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0, 612 NULL, NULL, NULL, NULL, NULL); 613 } 614 if (wait) { 615 
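			/*
			 * Sync-mode wait path: the caller parks on event_wait
			 * until the response handler ups it; an interrupted
			 * sleep drops the WAIT flag and returns -EFAULT.
			 */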
fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; 616 if (down_interruptible(&fibptr->event_wait)) { 617 fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT; 618 return -EFAULT; 619 } 620 return 0; 621 } 622 return -EINPROGRESS; 623 } 624 625 if (aac_adapter_deliver(fibptr) != 0) { 626 printk(KERN_ERR "aac_fib_send: returned -EBUSY\n"); 627 if (wait) { 628 spin_unlock_irqrestore(&fibptr->event_lock, flags); 629 spin_lock_irqsave(&dev->manage_lock, mflags); 630 dev->management_fib_count--; 631 spin_unlock_irqrestore(&dev->manage_lock, mflags); 632 } 633 return -EBUSY; 634 } 635 636 637 /* 638 * If the caller wanted us to wait for response wait now. 639 */ 640 641 if (wait) { 642 spin_unlock_irqrestore(&fibptr->event_lock, flags); 643 /* Only set for first known interruptable command */ 644 if (wait < 0) { 645 /* 646 * *VERY* Dangerous to time out a command, the 647 * assumption is made that we have no hope of 648 * functioning because an interrupt routing or other 649 * hardware failure has occurred. 650 */ 651 unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */ 652 while (down_trylock(&fibptr->event_wait)) { 653 int blink; 654 if (time_is_before_eq_jiffies(timeout)) { 655 struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; 656 atomic_dec(&q->numpending); 657 if (wait == -1) { 658 printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n" 659 "Usually a result of a PCI interrupt routing problem;\n" 660 "update mother board BIOS or consider utilizing one of\n" 661 "the SAFE mode kernel options (acpi, apic etc)\n"); 662 } 663 return -ETIMEDOUT; 664 } 665 if ((blink = aac_adapter_check_health(dev)) > 0) { 666 if (wait == -1) { 667 printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n" 668 "Usually a result of a serious unrecoverable hardware problem\n", 669 blink); 670 } 671 return -EFAULT; 672 } 673 /* 674 * Allow other processes / CPUS to use core 675 */ 676 schedule(); 677 } 678 } else if (down_interruptible(&fibptr->event_wait)) { 679 /* Do nothing ... 
satisfy 680 * down_interruptible must_check */ 681 } 682 683 spin_lock_irqsave(&fibptr->event_lock, flags); 684 if (fibptr->done == 0) { 685 fibptr->done = 2; /* Tell interrupt we aborted */ 686 spin_unlock_irqrestore(&fibptr->event_lock, flags); 687 return -ERESTARTSYS; 688 } 689 spin_unlock_irqrestore(&fibptr->event_lock, flags); 690 BUG_ON(fibptr->done == 0); 691 692 if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) 693 return -ETIMEDOUT; 694 return 0; 695 } 696 /* 697 * If the user does not want a response than return success otherwise 698 * return pending 699 */ 700 if (reply) 701 return -EINPROGRESS; 702 else 703 return 0; 704 } 705 706 int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, 707 void *callback_data) 708 { 709 struct aac_dev *dev = fibptr->dev; 710 int wait; 711 unsigned long flags = 0; 712 unsigned long mflags = 0; 713 714 fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); 715 if (callback) { 716 wait = 0; 717 fibptr->callback = callback; 718 fibptr->callback_data = callback_data; 719 } else 720 wait = 1; 721 722 723 if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { 724 struct aac_hba_cmd_req *hbacmd = 725 (struct aac_hba_cmd_req *)fibptr->hw_fib_va; 726 727 hbacmd->iu_type = command; 728 /* bit1 of request_id must be 0 */ 729 hbacmd->request_id = 730 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); 731 } else 732 return -EINVAL; 733 734 735 if (wait) { 736 spin_lock_irqsave(&dev->manage_lock, mflags); 737 if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { 738 spin_unlock_irqrestore(&dev->manage_lock, mflags); 739 return -EBUSY; 740 } 741 dev->management_fib_count++; 742 spin_unlock_irqrestore(&dev->manage_lock, mflags); 743 spin_lock_irqsave(&fibptr->event_lock, flags); 744 } 745 746 if (aac_adapter_deliver(fibptr) != 0) { 747 if (wait) { 748 spin_unlock_irqrestore(&fibptr->event_lock, flags); 749 spin_lock_irqsave(&dev->manage_lock, mflags); 750 dev->management_fib_count--; 751 spin_unlock_irqrestore(&dev->manage_lock, mflags); 752 } 753 return -EBUSY; 754 } 755 FIB_COUNTER_INCREMENT(aac_config.NativeSent); 756 757 if (wait) { 758 spin_unlock_irqrestore(&fibptr->event_lock, flags); 759 /* Only set for first known interruptable command */ 760 if (down_interruptible(&fibptr->event_wait)) { 761 fibptr->done = 2; 762 up(&fibptr->event_wait); 763 } 764 spin_lock_irqsave(&fibptr->event_lock, flags); 765 if ((fibptr->done == 0) || (fibptr->done == 2)) { 766 fibptr->done = 2; /* Tell interrupt we aborted */ 767 spin_unlock_irqrestore(&fibptr->event_lock, flags); 768 return -ERESTARTSYS; 769 } 770 spin_unlock_irqrestore(&fibptr->event_lock, flags); 771 WARN_ON(fibptr->done == 0); 772 773 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) 774 return -ETIMEDOUT; 775 776 return 0; 777 } 778 779 return -EINPROGRESS; 780 } 781 782 /** 783 * aac_consumer_get - get the top of the queue 784 * @dev: Adapter 785 * @q: Queue 786 * @entry: Return entry 787 * 788 * Will return a pointer to the entry on the top of the queue requested that 789 * we are a consumer of, and return the address of the queue entry. It does 790 * not change the state of the queue. 
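 *
 *	Rough consumer-side usage sketch (process_entry() is a placeholder,
 *	not a driver function):
 *
 *		while (aac_consumer_get(dev, q, &entry)) {
 *			process_entry(entry);
 *			aac_consumer_free(dev, q, qid);
 *		}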
791 */ 792 793 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry) 794 { 795 u32 index; 796 int status; 797 if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) { 798 status = 0; 799 } else { 800 /* 801 * The consumer index must be wrapped if we have reached 802 * the end of the queue, else we just use the entry 803 * pointed to by the header index 804 */ 805 if (le32_to_cpu(*q->headers.consumer) >= q->entries) 806 index = 0; 807 else 808 index = le32_to_cpu(*q->headers.consumer); 809 *entry = q->base + index; 810 status = 1; 811 } 812 return(status); 813 } 814 815 /** 816 * aac_consumer_free - free consumer entry 817 * @dev: Adapter 818 * @q: Queue 819 * @qid: Queue ident 820 * 821 * Frees up the current top of the queue we are a consumer of. If the 822 * queue was full notify the producer that the queue is no longer full. 823 */ 824 825 void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) 826 { 827 int wasfull = 0; 828 u32 notify; 829 830 if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer)) 831 wasfull = 1; 832 833 if (le32_to_cpu(*q->headers.consumer) >= q->entries) 834 *q->headers.consumer = cpu_to_le32(1); 835 else 836 le32_add_cpu(q->headers.consumer, 1); 837 838 if (wasfull) { 839 switch (qid) { 840 841 case HostNormCmdQueue: 842 notify = HostNormCmdNotFull; 843 break; 844 case HostNormRespQueue: 845 notify = HostNormRespNotFull; 846 break; 847 default: 848 BUG(); 849 return; 850 } 851 aac_adapter_notify(dev, notify); 852 } 853 } 854 855 /** 856 * aac_fib_adapter_complete - complete adapter issued fib 857 * @fibptr: fib to complete 858 * @size: size of fib 859 * 860 * Will do all necessary work to complete a FIB that was sent from 861 * the adapter. 862 */ 863 864 int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) 865 { 866 struct hw_fib * hw_fib = fibptr->hw_fib_va; 867 struct aac_dev * dev = fibptr->dev; 868 struct aac_queue * q; 869 unsigned long nointr = 0; 870 unsigned long qflags; 871 872 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || 873 dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 || 874 dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { 875 kfree(hw_fib); 876 return 0; 877 } 878 879 if (hw_fib->header.XferState == 0) { 880 if (dev->comm_interface == AAC_COMM_MESSAGE) 881 kfree(hw_fib); 882 return 0; 883 } 884 /* 885 * If we plan to do anything check the structure type first. 886 */ 887 if (hw_fib->header.StructType != FIB_MAGIC && 888 hw_fib->header.StructType != FIB_MAGIC2 && 889 hw_fib->header.StructType != FIB_MAGIC2_64) { 890 if (dev->comm_interface == AAC_COMM_MESSAGE) 891 kfree(hw_fib); 892 return -EINVAL; 893 } 894 /* 895 * This block handles the case where the adapter had sent us a 896 * command and we have finished processing the command. We 897 * call completeFib when we are done processing the command 898 * and want to send a response back to the adapter. This will 899 * send the completed cdb to the adapter. 
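 *	In practice that means the FIB is placed on AdapNormRespQueue and the
 *	adapter is notified, unless interrupt moderation (nointr) suppresses
 *	the notify.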
900 */ 901 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { 902 if (dev->comm_interface == AAC_COMM_MESSAGE) { 903 kfree (hw_fib); 904 } else { 905 u32 index; 906 hw_fib->header.XferState |= cpu_to_le32(HostProcessed); 907 if (size) { 908 size += sizeof(struct aac_fibhdr); 909 if (size > le16_to_cpu(hw_fib->header.SenderSize)) 910 return -EMSGSIZE; 911 hw_fib->header.Size = cpu_to_le16(size); 912 } 913 q = &dev->queues->queue[AdapNormRespQueue]; 914 spin_lock_irqsave(q->lock, qflags); 915 aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr); 916 *(q->headers.producer) = cpu_to_le32(index + 1); 917 spin_unlock_irqrestore(q->lock, qflags); 918 if (!(nointr & (int)aac_config.irq_mod)) 919 aac_adapter_notify(dev, AdapNormRespQueue); 920 } 921 } else { 922 printk(KERN_WARNING "aac_fib_adapter_complete: " 923 "Unknown xferstate detected.\n"); 924 BUG(); 925 } 926 return 0; 927 } 928 929 /** 930 * aac_fib_complete - fib completion handler 931 * @fib: FIB to complete 932 * 933 * Will do all necessary work to complete a FIB. 934 */ 935 936 int aac_fib_complete(struct fib *fibptr) 937 { 938 struct hw_fib * hw_fib = fibptr->hw_fib_va; 939 940 if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) { 941 fib_dealloc(fibptr); 942 return 0; 943 } 944 945 /* 946 * Check for a fib which has already been completed or with a 947 * status wait timeout 948 */ 949 950 if (hw_fib->header.XferState == 0 || fibptr->done == 2) 951 return 0; 952 /* 953 * If we plan to do anything check the structure type first. 954 */ 955 956 if (hw_fib->header.StructType != FIB_MAGIC && 957 hw_fib->header.StructType != FIB_MAGIC2 && 958 hw_fib->header.StructType != FIB_MAGIC2_64) 959 return -EINVAL; 960 /* 961 * This block completes a cdb which orginated on the host and we 962 * just need to deallocate the cdb or reinit it. At this point the 963 * command is complete that we had sent to the adapter and this 964 * cdb could be reused. 
965 */ 966 967 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && 968 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) 969 { 970 fib_dealloc(fibptr); 971 } 972 else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost)) 973 { 974 /* 975 * This handles the case when the host has aborted the I/O 976 * to the adapter because the adapter is not responding 977 */ 978 fib_dealloc(fibptr); 979 } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) { 980 fib_dealloc(fibptr); 981 } else { 982 BUG(); 983 } 984 return 0; 985 } 986 987 /** 988 * aac_printf - handle printf from firmware 989 * @dev: Adapter 990 * @val: Message info 991 * 992 * Print a message passed to us by the controller firmware on the 993 * Adaptec board 994 */ 995 996 void aac_printf(struct aac_dev *dev, u32 val) 997 { 998 char *cp = dev->printfbuf; 999 if (dev->printf_enabled) 1000 { 1001 int length = val & 0xffff; 1002 int level = (val >> 16) & 0xffff; 1003 1004 /* 1005 * The size of the printfbuf is set in port.c 1006 * There is no variable or define for it 1007 */ 1008 if (length > 255) 1009 length = 255; 1010 if (cp[length] != 0) 1011 cp[length] = 0; 1012 if (level == LOG_AAC_HIGH_ERROR) 1013 printk(KERN_WARNING "%s:%s", dev->name, cp); 1014 else 1015 printk(KERN_INFO "%s:%s", dev->name, cp); 1016 } 1017 memset(cp, 0, 256); 1018 } 1019 1020 static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index) 1021 { 1022 return le32_to_cpu(((__le32 *)aifcmd->data)[index]); 1023 } 1024 1025 1026 static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd) 1027 { 1028 switch (aac_aif_data(aifcmd, 1)) { 1029 case AifBuCacheDataLoss: 1030 if (aac_aif_data(aifcmd, 2)) 1031 dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n", 1032 aac_aif_data(aifcmd, 2)); 1033 else 1034 dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n"); 1035 break; 1036 case AifBuCacheDataRecover: 1037 if (aac_aif_data(aifcmd, 2)) 1038 dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n", 1039 aac_aif_data(aifcmd, 2)); 1040 else 1041 dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n"); 1042 break; 1043 } 1044 } 1045 1046 /** 1047 * aac_handle_aif - Handle a message from the firmware 1048 * @dev: Which adapter this fib is from 1049 * @fibptr: Pointer to fibptr from adapter 1050 * 1051 * This routine handles a driver notify fib from the adapter and 1052 * dispatches it to the appropriate routine for handling. 1053 */ 1054 1055 #define AIF_SNIFF_TIMEOUT (500*HZ) 1056 static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) 1057 { 1058 struct hw_fib * hw_fib = fibptr->hw_fib_va; 1059 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data; 1060 u32 channel, id, lun, container; 1061 struct scsi_device *device; 1062 enum { 1063 NOTHING, 1064 DELETE, 1065 ADD, 1066 CHANGE 1067 } device_config_needed = NOTHING; 1068 1069 /* Sniff for container changes */ 1070 1071 if (!dev || !dev->fsa_dev) 1072 return; 1073 container = channel = id = lun = (u32)-1; 1074 1075 /* 1076 * We have set this up to try and minimize the number of 1077 * re-configures that take place. As a result of this when 1078 * certain AIF's come in we will set a flag waiting for another 1079 * type of AIF before setting the re-config flag. 
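 *
 *	Roughly, the two-step sequence looks like this (a sketch of the state
 *	kept per container, not extra behaviour):
 *
 *		first AIF (add/delete/change):
 *			fsa_dev[ct].config_needed        = ADD / DELETE / CHANGE;
 *			fsa_dev[ct].config_waiting_on    = AifEnConfigChange;
 *			fsa_dev[ct].config_waiting_stamp = jiffies;
 *		AifEnConfigChange within AIF_SNIFF_TIMEOUT:
 *			config_waiting_on is cleared and the re-configure is
 *			carried out on the way out of this routine.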
1080 */ 1081 switch (le32_to_cpu(aifcmd->command)) { 1082 case AifCmdDriverNotify: 1083 switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { 1084 case AifRawDeviceRemove: 1085 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); 1086 if ((container >> 28)) { 1087 container = (u32)-1; 1088 break; 1089 } 1090 channel = (container >> 24) & 0xF; 1091 if (channel >= dev->maximum_num_channels) { 1092 container = (u32)-1; 1093 break; 1094 } 1095 id = container & 0xFFFF; 1096 if (id >= dev->maximum_num_physicals) { 1097 container = (u32)-1; 1098 break; 1099 } 1100 lun = (container >> 16) & 0xFF; 1101 container = (u32)-1; 1102 channel = aac_phys_to_logical(channel); 1103 device_config_needed = DELETE; 1104 break; 1105 1106 /* 1107 * Morph or Expand complete 1108 */ 1109 case AifDenMorphComplete: 1110 case AifDenVolumeExtendComplete: 1111 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); 1112 if (container >= dev->maximum_num_containers) 1113 break; 1114 1115 /* 1116 * Find the scsi_device associated with the SCSI 1117 * address. Make sure we have the right array, and if 1118 * so set the flag to initiate a new re-config once we 1119 * see an AifEnConfigChange AIF come through. 1120 */ 1121 1122 if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) { 1123 device = scsi_device_lookup(dev->scsi_host_ptr, 1124 CONTAINER_TO_CHANNEL(container), 1125 CONTAINER_TO_ID(container), 1126 CONTAINER_TO_LUN(container)); 1127 if (device) { 1128 dev->fsa_dev[container].config_needed = CHANGE; 1129 dev->fsa_dev[container].config_waiting_on = AifEnConfigChange; 1130 dev->fsa_dev[container].config_waiting_stamp = jiffies; 1131 scsi_device_put(device); 1132 } 1133 } 1134 } 1135 1136 /* 1137 * If we are waiting on something and this happens to be 1138 * that thing then set the re-configure flag. 1139 */ 1140 if (container != (u32)-1) { 1141 if (container >= dev->maximum_num_containers) 1142 break; 1143 if ((dev->fsa_dev[container].config_waiting_on == 1144 le32_to_cpu(*(__le32 *)aifcmd->data)) && 1145 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) 1146 dev->fsa_dev[container].config_waiting_on = 0; 1147 } else for (container = 0; 1148 container < dev->maximum_num_containers; ++container) { 1149 if ((dev->fsa_dev[container].config_waiting_on == 1150 le32_to_cpu(*(__le32 *)aifcmd->data)) && 1151 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) 1152 dev->fsa_dev[container].config_waiting_on = 0; 1153 } 1154 break; 1155 1156 case AifCmdEventNotify: 1157 switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { 1158 case AifEnBatteryEvent: 1159 dev->cache_protected = 1160 (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3)); 1161 break; 1162 /* 1163 * Add an Array. 1164 */ 1165 case AifEnAddContainer: 1166 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); 1167 if (container >= dev->maximum_num_containers) 1168 break; 1169 dev->fsa_dev[container].config_needed = ADD; 1170 dev->fsa_dev[container].config_waiting_on = 1171 AifEnConfigChange; 1172 dev->fsa_dev[container].config_waiting_stamp = jiffies; 1173 break; 1174 1175 /* 1176 * Delete an Array. 1177 */ 1178 case AifEnDeleteContainer: 1179 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); 1180 if (container >= dev->maximum_num_containers) 1181 break; 1182 dev->fsa_dev[container].config_needed = DELETE; 1183 dev->fsa_dev[container].config_waiting_on = 1184 AifEnConfigChange; 1185 dev->fsa_dev[container].config_waiting_stamp = jiffies; 1186 break; 1187 1188 /* 1189 * Container change detected. 
If we currently are not 1190 * waiting on something else, setup to wait on a Config Change. 1191 */ 1192 case AifEnContainerChange: 1193 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); 1194 if (container >= dev->maximum_num_containers) 1195 break; 1196 if (dev->fsa_dev[container].config_waiting_on && 1197 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) 1198 break; 1199 dev->fsa_dev[container].config_needed = CHANGE; 1200 dev->fsa_dev[container].config_waiting_on = 1201 AifEnConfigChange; 1202 dev->fsa_dev[container].config_waiting_stamp = jiffies; 1203 break; 1204 1205 case AifEnConfigChange: 1206 break; 1207 1208 case AifEnAddJBOD: 1209 case AifEnDeleteJBOD: 1210 container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); 1211 if ((container >> 28)) { 1212 container = (u32)-1; 1213 break; 1214 } 1215 channel = (container >> 24) & 0xF; 1216 if (channel >= dev->maximum_num_channels) { 1217 container = (u32)-1; 1218 break; 1219 } 1220 id = container & 0xFFFF; 1221 if (id >= dev->maximum_num_physicals) { 1222 container = (u32)-1; 1223 break; 1224 } 1225 lun = (container >> 16) & 0xFF; 1226 container = (u32)-1; 1227 channel = aac_phys_to_logical(channel); 1228 device_config_needed = 1229 (((__le32 *)aifcmd->data)[0] == 1230 cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE; 1231 if (device_config_needed == ADD) { 1232 device = scsi_device_lookup(dev->scsi_host_ptr, 1233 channel, 1234 id, 1235 lun); 1236 if (device) { 1237 scsi_remove_device(device); 1238 scsi_device_put(device); 1239 } 1240 } 1241 break; 1242 1243 case AifEnEnclosureManagement: 1244 /* 1245 * If in JBOD mode, automatic exposure of new 1246 * physical target to be suppressed until configured. 1247 */ 1248 if (dev->jbod) 1249 break; 1250 switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) { 1251 case EM_DRIVE_INSERTION: 1252 case EM_DRIVE_REMOVAL: 1253 case EM_SES_DRIVE_INSERTION: 1254 case EM_SES_DRIVE_REMOVAL: 1255 container = le32_to_cpu( 1256 ((__le32 *)aifcmd->data)[2]); 1257 if ((container >> 28)) { 1258 container = (u32)-1; 1259 break; 1260 } 1261 channel = (container >> 24) & 0xF; 1262 if (channel >= dev->maximum_num_channels) { 1263 container = (u32)-1; 1264 break; 1265 } 1266 id = container & 0xFFFF; 1267 lun = (container >> 16) & 0xFF; 1268 container = (u32)-1; 1269 if (id >= dev->maximum_num_physicals) { 1270 /* legacy dev_t ? */ 1271 if ((0x2000 <= id) || lun || channel || 1272 ((channel = (id >> 7) & 0x3F) >= 1273 dev->maximum_num_channels)) 1274 break; 1275 lun = (id >> 4) & 7; 1276 id &= 0xF; 1277 } 1278 channel = aac_phys_to_logical(channel); 1279 device_config_needed = 1280 ((((__le32 *)aifcmd->data)[3] 1281 == cpu_to_le32(EM_DRIVE_INSERTION)) || 1282 (((__le32 *)aifcmd->data)[3] 1283 == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ? 1284 ADD : DELETE; 1285 break; 1286 } 1287 case AifBuManagerEvent: 1288 aac_handle_aif_bu(dev, aifcmd); 1289 break; 1290 } 1291 1292 /* 1293 * If we are waiting on something and this happens to be 1294 * that thing then set the re-configure flag. 
1295 */ 1296 if (container != (u32)-1) { 1297 if (container >= dev->maximum_num_containers) 1298 break; 1299 if ((dev->fsa_dev[container].config_waiting_on == 1300 le32_to_cpu(*(__le32 *)aifcmd->data)) && 1301 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) 1302 dev->fsa_dev[container].config_waiting_on = 0; 1303 } else for (container = 0; 1304 container < dev->maximum_num_containers; ++container) { 1305 if ((dev->fsa_dev[container].config_waiting_on == 1306 le32_to_cpu(*(__le32 *)aifcmd->data)) && 1307 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) 1308 dev->fsa_dev[container].config_waiting_on = 0; 1309 } 1310 break; 1311 1312 case AifCmdJobProgress: 1313 /* 1314 * These are job progress AIF's. When a Clear is being 1315 * done on a container it is initially created then hidden from 1316 * the OS. When the clear completes we don't get a config 1317 * change so we monitor the job status complete on a clear then 1318 * wait for a container change. 1319 */ 1320 1321 if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) && 1322 (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] || 1323 ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) { 1324 for (container = 0; 1325 container < dev->maximum_num_containers; 1326 ++container) { 1327 /* 1328 * Stomp on all config sequencing for all 1329 * containers? 1330 */ 1331 dev->fsa_dev[container].config_waiting_on = 1332 AifEnContainerChange; 1333 dev->fsa_dev[container].config_needed = ADD; 1334 dev->fsa_dev[container].config_waiting_stamp = 1335 jiffies; 1336 } 1337 } 1338 if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) && 1339 ((__le32 *)aifcmd->data)[6] == 0 && 1340 ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) { 1341 for (container = 0; 1342 container < dev->maximum_num_containers; 1343 ++container) { 1344 /* 1345 * Stomp on all config sequencing for all 1346 * containers? 1347 */ 1348 dev->fsa_dev[container].config_waiting_on = 1349 AifEnContainerChange; 1350 dev->fsa_dev[container].config_needed = DELETE; 1351 dev->fsa_dev[container].config_waiting_stamp = 1352 jiffies; 1353 } 1354 } 1355 break; 1356 } 1357 1358 container = 0; 1359 retry_next: 1360 if (device_config_needed == NOTHING) 1361 for (; container < dev->maximum_num_containers; ++container) { 1362 if ((dev->fsa_dev[container].config_waiting_on == 0) && 1363 (dev->fsa_dev[container].config_needed != NOTHING) && 1364 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) { 1365 device_config_needed = 1366 dev->fsa_dev[container].config_needed; 1367 dev->fsa_dev[container].config_needed = NOTHING; 1368 channel = CONTAINER_TO_CHANNEL(container); 1369 id = CONTAINER_TO_ID(container); 1370 lun = CONTAINER_TO_LUN(container); 1371 break; 1372 } 1373 } 1374 if (device_config_needed == NOTHING) 1375 return; 1376 1377 /* 1378 * If we decided that a re-configuration needs to be done, 1379 * schedule it here on the way out the door, please close the door 1380 * behind you. 1381 */ 1382 1383 /* 1384 * Find the scsi_device associated with the SCSI address, 1385 * and mark it as changed, invalidating the cache. This deals 1386 * with changes to existing device IDs. 
1387 */ 1388 1389 if (!dev || !dev->scsi_host_ptr) 1390 return; 1391 /* 1392 * force reload of disk info via aac_probe_container 1393 */ 1394 if ((channel == CONTAINER_CHANNEL) && 1395 (device_config_needed != NOTHING)) { 1396 if (dev->fsa_dev[container].valid == 1) 1397 dev->fsa_dev[container].valid = 2; 1398 aac_probe_container(dev, container); 1399 } 1400 device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun); 1401 if (device) { 1402 switch (device_config_needed) { 1403 case DELETE: 1404 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE)) 1405 scsi_remove_device(device); 1406 #else 1407 if (scsi_device_online(device)) { 1408 scsi_device_set_state(device, SDEV_OFFLINE); 1409 sdev_printk(KERN_INFO, device, 1410 "Device offlined - %s\n", 1411 (channel == CONTAINER_CHANNEL) ? 1412 "array deleted" : 1413 "enclosure services event"); 1414 } 1415 #endif 1416 break; 1417 case ADD: 1418 if (!scsi_device_online(device)) { 1419 sdev_printk(KERN_INFO, device, 1420 "Device online - %s\n", 1421 (channel == CONTAINER_CHANNEL) ? 1422 "array created" : 1423 "enclosure services event"); 1424 scsi_device_set_state(device, SDEV_RUNNING); 1425 } 1426 /* FALLTHRU */ 1427 case CHANGE: 1428 if ((channel == CONTAINER_CHANNEL) 1429 && (!dev->fsa_dev[container].valid)) { 1430 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE)) 1431 scsi_remove_device(device); 1432 #else 1433 if (!scsi_device_online(device)) 1434 break; 1435 scsi_device_set_state(device, SDEV_OFFLINE); 1436 sdev_printk(KERN_INFO, device, 1437 "Device offlined - %s\n", 1438 "array failed"); 1439 #endif 1440 break; 1441 } 1442 scsi_rescan_device(&device->sdev_gendev); 1443 1444 default: 1445 break; 1446 } 1447 scsi_device_put(device); 1448 device_config_needed = NOTHING; 1449 } 1450 if (device_config_needed == ADD) 1451 scsi_add_device(dev->scsi_host_ptr, channel, id, lun); 1452 if (channel == CONTAINER_CHANNEL) { 1453 container++; 1454 device_config_needed = NOTHING; 1455 goto retry_next; 1456 } 1457 } 1458 1459 static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) 1460 { 1461 int index, quirks; 1462 int retval; 1463 struct Scsi_Host *host; 1464 struct scsi_device *dev; 1465 struct scsi_cmnd *command; 1466 struct scsi_cmnd *command_list; 1467 int jafo = 0; 1468 int bled; 1469 1470 /* 1471 * Assumptions: 1472 * - host is locked, unless called by the aacraid thread. 1473 * (a matter of convenience, due to legacy issues surrounding 1474 * eh_host_adapter_reset). 1475 * - in_reset is asserted, so no new i/o is getting to the 1476 * card. 1477 * - The card is dead, or will be very shortly ;-/ so no new 1478 * commands are completing in the interrupt service. 1479 */ 1480 host = aac->scsi_host_ptr; 1481 scsi_block_requests(host); 1482 aac_adapter_disable_int(aac); 1483 if (aac->thread->pid != current->pid) { 1484 spin_unlock_irq(host->host_lock); 1485 kthread_stop(aac->thread); 1486 jafo = 1; 1487 } 1488 1489 /* 1490 * If a positive health, means in a known DEAD PANIC 1491 * state and the adapter could be reset to `try again'. 1492 */ 1493 bled = forced ? 
0 : aac_adapter_check_health(aac); 1494 retval = aac_adapter_restart(aac, bled, reset_type); 1495 1496 if (retval) 1497 goto out; 1498 1499 /* 1500 * Loop through the fibs, close the synchronous FIBS 1501 */ 1502 for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) { 1503 struct fib *fib = &aac->fibs[index]; 1504 if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) && 1505 (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) { 1506 unsigned long flagv; 1507 spin_lock_irqsave(&fib->event_lock, flagv); 1508 up(&fib->event_wait); 1509 spin_unlock_irqrestore(&fib->event_lock, flagv); 1510 schedule(); 1511 retval = 0; 1512 } 1513 } 1514 /* Give some extra time for ioctls to complete. */ 1515 if (retval == 0) 1516 ssleep(2); 1517 index = aac->cardtype; 1518 1519 /* 1520 * Re-initialize the adapter, first free resources, then carefully 1521 * apply the initialization sequence to come back again. Only risk 1522 * is a change in Firmware dropping cache, it is assumed the caller 1523 * will ensure that i/o is queisced and the card is flushed in that 1524 * case. 1525 */ 1526 aac_fib_map_free(aac); 1527 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 1528 aac->comm_addr = NULL; 1529 aac->comm_phys = 0; 1530 kfree(aac->queues); 1531 aac->queues = NULL; 1532 aac_free_irq(aac); 1533 kfree(aac->fsa_dev); 1534 aac->fsa_dev = NULL; 1535 quirks = aac_get_driver_ident(index)->quirks; 1536 if (quirks & AAC_QUIRK_31BIT) { 1537 if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) || 1538 ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31))))) 1539 goto out; 1540 } else { 1541 if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) || 1542 ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32))))) 1543 goto out; 1544 } 1545 if ((retval = (*(aac_get_driver_ident(index)->init))(aac))) 1546 goto out; 1547 if (quirks & AAC_QUIRK_31BIT) 1548 if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) 1549 goto out; 1550 if (jafo) { 1551 aac->thread = kthread_run(aac_command_thread, aac, "%s", 1552 aac->name); 1553 if (IS_ERR(aac->thread)) { 1554 retval = PTR_ERR(aac->thread); 1555 goto out; 1556 } 1557 } 1558 (void)aac_get_adapter_info(aac); 1559 if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) { 1560 host->sg_tablesize = 34; 1561 host->max_sectors = (host->sg_tablesize * 8) + 112; 1562 } 1563 if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) { 1564 host->sg_tablesize = 17; 1565 host->max_sectors = (host->sg_tablesize * 8) + 112; 1566 } 1567 aac_get_config_status(aac, 1); 1568 aac_get_containers(aac); 1569 /* 1570 * This is where the assumption that the Adapter is quiesced 1571 * is important. 
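 *	Every command still marked AAC_OWNER_FIRMWARE below is handed back to
 *	the midlayer with SAM_STAT_TASK_SET_FULL so it gets retried once the
 *	host is unblocked; nothing may complete from interrupt context while
 *	this list is walked.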
1572 */ 1573 command_list = NULL; 1574 __shost_for_each_device(dev, host) { 1575 unsigned long flags; 1576 spin_lock_irqsave(&dev->list_lock, flags); 1577 list_for_each_entry(command, &dev->cmd_list, list) 1578 if (command->SCp.phase == AAC_OWNER_FIRMWARE) { 1579 command->SCp.buffer = (struct scatterlist *)command_list; 1580 command_list = command; 1581 } 1582 spin_unlock_irqrestore(&dev->list_lock, flags); 1583 } 1584 while ((command = command_list)) { 1585 command_list = (struct scsi_cmnd *)command->SCp.buffer; 1586 command->SCp.buffer = NULL; 1587 command->result = DID_OK << 16 1588 | COMMAND_COMPLETE << 8 1589 | SAM_STAT_TASK_SET_FULL; 1590 command->SCp.phase = AAC_OWNER_ERROR_HANDLER; 1591 command->scsi_done(command); 1592 } 1593 retval = 0; 1594 1595 out: 1596 aac->in_reset = 0; 1597 scsi_unblock_requests(host); 1598 if (jafo) { 1599 spin_lock_irq(host->host_lock); 1600 } 1601 return retval; 1602 } 1603 1604 int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) 1605 { 1606 unsigned long flagv = 0; 1607 int retval; 1608 struct Scsi_Host * host; 1609 int bled; 1610 1611 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) 1612 return -EBUSY; 1613 1614 if (aac->in_reset) { 1615 spin_unlock_irqrestore(&aac->fib_lock, flagv); 1616 return -EBUSY; 1617 } 1618 aac->in_reset = 1; 1619 spin_unlock_irqrestore(&aac->fib_lock, flagv); 1620 1621 /* 1622 * Wait for all commands to complete to this specific 1623 * target (block maximum 60 seconds). Although not necessary, 1624 * it does make us a good storage citizen. 1625 */ 1626 host = aac->scsi_host_ptr; 1627 scsi_block_requests(host); 1628 if (forced < 2) for (retval = 60; retval; --retval) { 1629 struct scsi_device * dev; 1630 struct scsi_cmnd * command; 1631 int active = 0; 1632 1633 __shost_for_each_device(dev, host) { 1634 spin_lock_irqsave(&dev->list_lock, flagv); 1635 list_for_each_entry(command, &dev->cmd_list, list) { 1636 if (command->SCp.phase == AAC_OWNER_FIRMWARE) { 1637 active++; 1638 break; 1639 } 1640 } 1641 spin_unlock_irqrestore(&dev->list_lock, flagv); 1642 if (active) 1643 break; 1644 1645 } 1646 /* 1647 * We can exit If all the commands are complete 1648 */ 1649 if (active == 0) 1650 break; 1651 ssleep(1); 1652 } 1653 1654 /* Quiesce build, flush cache, write through mode */ 1655 if (forced < 2) 1656 aac_send_shutdown(aac); 1657 spin_lock_irqsave(host->host_lock, flagv); 1658 bled = forced ? 
forced : 1659 (aac_check_reset != 0 && aac_check_reset != 1); 1660 retval = _aac_reset_adapter(aac, bled, reset_type); 1661 spin_unlock_irqrestore(host->host_lock, flagv); 1662 1663 if ((forced < 2) && (retval == -ENODEV)) { 1664 /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */ 1665 struct fib * fibctx = aac_fib_alloc(aac); 1666 if (fibctx) { 1667 struct aac_pause *cmd; 1668 int status; 1669 1670 aac_fib_init(fibctx); 1671 1672 cmd = (struct aac_pause *) fib_data(fibctx); 1673 1674 cmd->command = cpu_to_le32(VM_ContainerConfig); 1675 cmd->type = cpu_to_le32(CT_PAUSE_IO); 1676 cmd->timeout = cpu_to_le32(1); 1677 cmd->min = cpu_to_le32(1); 1678 cmd->noRescan = cpu_to_le32(1); 1679 cmd->count = cpu_to_le32(0); 1680 1681 status = aac_fib_send(ContainerCommand, 1682 fibctx, 1683 sizeof(struct aac_pause), 1684 FsaNormal, 1685 -2 /* Timeout silently */, 1, 1686 NULL, NULL); 1687 1688 if (status >= 0) 1689 aac_fib_complete(fibctx); 1690 /* FIB should be freed only after getting 1691 * the response from the F/W */ 1692 if (status != -ERESTARTSYS) 1693 aac_fib_free(fibctx); 1694 } 1695 } 1696 1697 return retval; 1698 } 1699 1700 int aac_check_health(struct aac_dev * aac) 1701 { 1702 int BlinkLED; 1703 unsigned long time_now, flagv = 0; 1704 struct list_head * entry; 1705 struct Scsi_Host * host; 1706 int bled; 1707 1708 /* Extending the scope of fib_lock slightly to protect aac->in_reset */ 1709 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) 1710 return 0; 1711 1712 if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) { 1713 spin_unlock_irqrestore(&aac->fib_lock, flagv); 1714 return 0; /* OK */ 1715 } 1716 1717 aac->in_reset = 1; 1718 1719 /* Fake up an AIF: 1720 * aac_aifcmd.command = AifCmdEventNotify = 1 1721 * aac_aifcmd.seqnum = 0xFFFFFFFF 1722 * aac_aifcmd.data[0] = AifEnExpEvent = 23 1723 * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3 1724 * aac.aifcmd.data[2] = AifHighPriority = 3 1725 * aac.aifcmd.data[3] = BlinkLED 1726 */ 1727 1728 time_now = jiffies/HZ; 1729 entry = aac->fib_list.next; 1730 1731 /* 1732 * For each Context that is on the 1733 * fibctxList, make a copy of the 1734 * fib, and then set the event to wake up the 1735 * thread that is waiting for it. 1736 */ 1737 while (entry != &aac->fib_list) { 1738 /* 1739 * Extract the fibctx 1740 */ 1741 struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next); 1742 struct hw_fib * hw_fib; 1743 struct fib * fib; 1744 /* 1745 * Check if the queue is getting 1746 * backlogged 1747 */ 1748 if (fibctx->count > 20) { 1749 /* 1750 * It's *not* jiffies folks, 1751 * but jiffies / HZ, so do not 1752 * panic ... 1753 */ 1754 u32 time_last = fibctx->jiffies; 1755 /* 1756 * Has it been > 2 minutes 1757 * since the last read off 1758 * the queue? 
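 *	(time_now and fibctx->jiffies are both jiffies / HZ, i.e. seconds,
 *	so aif_timeout is compared in seconds as well.)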
1759 */ 1760 if ((time_now - time_last) > aif_timeout) { 1761 entry = entry->next; 1762 aac_close_fib_context(aac, fibctx); 1763 continue; 1764 } 1765 } 1766 /* 1767 * Warning: no sleep allowed while 1768 * holding spinlock 1769 */ 1770 hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC); 1771 fib = kzalloc(sizeof(struct fib), GFP_ATOMIC); 1772 if (fib && hw_fib) { 1773 struct aac_aifcmd * aif; 1774 1775 fib->hw_fib_va = hw_fib; 1776 fib->dev = aac; 1777 aac_fib_init(fib); 1778 fib->type = FSAFS_NTC_FIB_CONTEXT; 1779 fib->size = sizeof (struct fib); 1780 fib->data = hw_fib->data; 1781 aif = (struct aac_aifcmd *)hw_fib->data; 1782 aif->command = cpu_to_le32(AifCmdEventNotify); 1783 aif->seqnum = cpu_to_le32(0xFFFFFFFF); 1784 ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent); 1785 ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic); 1786 ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority); 1787 ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED); 1788 1789 /* 1790 * Put the FIB onto the 1791 * fibctx's fibs 1792 */ 1793 list_add_tail(&fib->fiblink, &fibctx->fib_list); 1794 fibctx->count++; 1795 /* 1796 * Set the event to wake up the 1797 * thread that will waiting. 1798 */ 1799 up(&fibctx->wait_sem); 1800 } else { 1801 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); 1802 kfree(fib); 1803 kfree(hw_fib); 1804 } 1805 entry = entry->next; 1806 } 1807 1808 spin_unlock_irqrestore(&aac->fib_lock, flagv); 1809 1810 if (BlinkLED < 0) { 1811 printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED); 1812 goto out; 1813 } 1814 1815 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); 1816 1817 if (!aac_check_reset || ((aac_check_reset == 1) && 1818 (aac->supplement_adapter_info.SupportedOptions2 & 1819 AAC_OPTION_IGNORE_RESET))) 1820 goto out; 1821 host = aac->scsi_host_ptr; 1822 if (aac->thread->pid != current->pid) 1823 spin_lock_irqsave(host->host_lock, flagv); 1824 bled = aac_check_reset != 1 ? 
1 : 0; 1825 _aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET); 1826 if (aac->thread->pid != current->pid) 1827 spin_unlock_irqrestore(host->host_lock, flagv); 1828 return BlinkLED; 1829 1830 out: 1831 aac->in_reset = 0; 1832 return BlinkLED; 1833 } 1834 1835 1836 static void aac_resolve_luns(struct aac_dev *dev) 1837 { 1838 int bus, target, channel; 1839 struct scsi_device *sdev; 1840 u8 devtype; 1841 u8 new_devtype; 1842 1843 for (bus = 0; bus < AAC_MAX_BUSES; bus++) { 1844 for (target = 0; target < AAC_MAX_TARGETS; target++) { 1845 1846 if (aac_phys_to_logical(bus) == ENCLOSURE_CHANNEL) 1847 continue; 1848 1849 if (bus == CONTAINER_CHANNEL) 1850 channel = CONTAINER_CHANNEL; 1851 else 1852 channel = aac_phys_to_logical(bus); 1853 1854 devtype = dev->hba_map[bus][target].devtype; 1855 new_devtype = dev->hba_map[bus][target].new_devtype; 1856 1857 sdev = scsi_device_lookup(dev->scsi_host_ptr, channel, 1858 target, 0); 1859 1860 if (!sdev && devtype) 1861 scsi_add_device(dev->scsi_host_ptr, channel, 1862 target, 0); 1863 else if (sdev && new_devtype != devtype) 1864 scsi_remove_device(sdev); 1865 else if (sdev && new_devtype == devtype) 1866 scsi_rescan_device(&sdev->sdev_gendev); 1867 1868 if (sdev) 1869 scsi_device_put(sdev); 1870 1871 dev->hba_map[bus][target].devtype = new_devtype; 1872 } 1873 } 1874 } 1875 1876 /** 1877 * aac_handle_sa_aif Handle a message from the firmware 1878 * @dev: Which adapter this fib is from 1879 * @fibptr: Pointer to fibptr from adapter 1880 * 1881 * This routine handles a driver notify fib from the adapter and 1882 * dispatches it to the appropriate routine for handling. 1883 */ 1884 static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr) 1885 { 1886 int i, bus, target, container, rcode = 0; 1887 u32 events = 0; 1888 struct fib *fib; 1889 struct scsi_device *sdev; 1890 1891 if (fibptr->hbacmd_size & SA_AIF_HOTPLUG) 1892 events = SA_AIF_HOTPLUG; 1893 else if (fibptr->hbacmd_size & SA_AIF_HARDWARE) 1894 events = SA_AIF_HARDWARE; 1895 else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE) 1896 events = SA_AIF_PDEV_CHANGE; 1897 else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE) 1898 events = SA_AIF_LDEV_CHANGE; 1899 else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE) 1900 events = SA_AIF_BPSTAT_CHANGE; 1901 else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE) 1902 events = SA_AIF_BPCFG_CHANGE; 1903 1904 switch (events) { 1905 case SA_AIF_HOTPLUG: 1906 case SA_AIF_HARDWARE: 1907 case SA_AIF_PDEV_CHANGE: 1908 case SA_AIF_LDEV_CHANGE: 1909 case SA_AIF_BPCFG_CHANGE: 1910 1911 fib = aac_fib_alloc(dev); 1912 if (!fib) { 1913 pr_err("aac_handle_sa_aif: out of memory\n"); 1914 return; 1915 } 1916 for (bus = 0; bus < AAC_MAX_BUSES; bus++) 1917 for (target = 0; target < AAC_MAX_TARGETS; target++) 1918 dev->hba_map[bus][target].new_devtype = 0; 1919 1920 rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN); 1921 1922 if (rcode != -ERESTARTSYS) 1923 aac_fib_free(fib); 1924 1925 aac_resolve_luns(dev); 1926 1927 if (events == SA_AIF_LDEV_CHANGE || 1928 events == SA_AIF_BPCFG_CHANGE) { 1929 aac_get_containers(dev); 1930 for (container = 0; container < 1931 dev->maximum_num_containers; ++container) { 1932 sdev = scsi_device_lookup(dev->scsi_host_ptr, 1933 CONTAINER_CHANNEL, 1934 container, 0); 1935 if (dev->fsa_dev[container].valid && !sdev) { 1936 scsi_add_device(dev->scsi_host_ptr, 1937 CONTAINER_CHANNEL, 1938 container, 0); 1939 } else if (!dev->fsa_dev[container].valid && 1940 sdev) { 1941 scsi_remove_device(sdev); 1942 scsi_device_put(sdev); 1943 } else if (sdev) { 
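					/*
					 * Container still present and already
					 * exposed: just rescan it for changes.
					 */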
					scsi_rescan_device(&sdev->sdev_gendev);
					scsi_device_put(sdev);
				}
			}
		}
		break;

	case SA_AIF_BPSTAT_CHANGE:
		/* currently do nothing */
		break;
	}

	for (i = 1; i <= 10; ++i) {
		events = src_readl(dev, MUnit.IDR);
		if (events & (1<<23)) {
			pr_warn(" AIF not cleared by firmware - %d/%d\n",
				i, 10);
			ssleep(1);
		}
	}
}

static int get_fib_count(struct aac_dev *dev)
{
	unsigned int num = 0;
	struct list_head *entry;
	unsigned long flagv;

	/*
	 * Warning: no sleep allowed while
	 * holding spinlock. We take the estimate
	 * and pre-allocate a set of fibs outside the
	 * lock.
	 */
	num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
		/ sizeof(struct hw_fib); /* some extra */
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	while (entry != &dev->fib_list) {
		entry = entry->next;
		++num;
	}
	spin_unlock_irqrestore(&dev->fib_lock, flagv);

	return num;
}

static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
						struct fib **fib_pool,
						unsigned int num)
{
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;
	int rcode = 1;

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (hw_fib_p < &hw_fib_pool[num]) {
		*(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
		if (!(*(hw_fib_p++))) {
			--hw_fib_p;
			break;
		}

		*(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
		if (!(*(fib_p++))) {
			kfree(*(--hw_fib_p));
			break;
		}
	}

	/* Count the fully allocated pairs; zero means the pools are unusable */
	num = hw_fib_p - hw_fib_pool;
	if (!num)
		rcode = 0;

	return rcode;
}

static void wakeup_fibctx_threads(struct aac_dev *dev,
						struct hw_fib **hw_fib_pool,
						struct fib **fib_pool,
						struct fib *fib,
						struct hw_fib *hw_fib,
						unsigned int num)
{
	unsigned long flagv;
	struct list_head *entry;
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;
	u32 time_now, time_last;
	struct hw_fib *hw_newfib;
	struct fib *newfib;
	struct aac_fib_context *fibctx;

	time_now = jiffies/HZ;
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (entry != &dev->fib_list) {
		/*
		 * Extract the fibctx
		 */
		fibctx = list_entry(entry, struct aac_fib_context,
				next);
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ so do not
			 * panic ...
			 */
			time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(dev, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		if (hw_fib_p >= &hw_fib_pool[num]) {
			pr_warn("aifd: didn't allocate NewFib\n");
			entry = entry->next;
			continue;
		}

		hw_newfib = *hw_fib_p;
		*(hw_fib_p++) = NULL;
		newfib = *fib_p;
		*(fib_p++) = NULL;
		/*
		 * Make the copy of the FIB
		 */
		memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
		memcpy(newfib, fib, sizeof(struct fib));
		newfib->hw_fib_va = hw_newfib;
		/*
		 * Put the FIB onto the
		 * fibctx's fibs
		 */
		list_add_tail(&newfib->fiblink, &fibctx->fib_list);
		fibctx->count++;
		/*
		 * Set the event to wake up the
		 * thread that is waiting.
		 */
		up(&fibctx->wait_sem);

		entry = entry->next;
	}
	/*
	 * Set the status of this FIB
	 */
	*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
	aac_fib_adapter_complete(fib, sizeof(u32));
	spin_unlock_irqrestore(&dev->fib_lock, flagv);

}

static void aac_process_events(struct aac_dev *dev)
{
	struct hw_fib *hw_fib;
	struct fib *fib;
	unsigned long flags;
	spinlock_t *t_lock;
	unsigned int rcode;

	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_lock_irqsave(t_lock, flags);

	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
		struct list_head *entry;
		struct aac_aifcmd *aifcmd;
		unsigned int num;
		struct hw_fib **hw_fib_pool, **hw_fib_p;
		struct fib **fib_pool, **fib_p;

		set_current_state(TASK_RUNNING);

		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
		list_del(entry);

		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_unlock_irqrestore(t_lock, flags);

		fib = list_entry(entry, struct fib, fiblink);
		hw_fib = fib->hw_fib_va;
		if (dev->sa_firmware) {
			/* Thor AIF */
			aac_handle_sa_aif(dev, fib);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			/*
			 * Re-take the queue lock before re-testing the loop
			 * condition; every other path through this loop holds
			 * it when the condition is evaluated.
			 */
			t_lock = dev->queues->queue[HostNormCmdQueue].lock;
			spin_lock_irqsave(t_lock, flags);
			continue;
		}
		/*
		 * We will process the FIB here or pass it to a
		 * worker thread that is TBD. We really can't
		 * do anything at this point since we don't have
		 * anything defined for this thread to do.
		 */
		memset(fib, 0, sizeof(struct fib));
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		/*
		 * We only handle AifRequest fibs from the adapter.
		 */

		aifcmd = (struct aac_aifcmd *) hw_fib->data;
		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
			/* Handle Driver Notify Events */
			aac_handle_aif(dev, fib);
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * The u32 here is important and intended. We are using
		 * 32bit wrapping time to fit the adapter field
		 */

		/* Sniff events */
		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
		    || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
			aac_handle_aif(dev, fib);
		}

		/*
		 * Get the number of fibs to process
		 */
		num = get_fib_count(dev);
		if (!num)
			goto free_fib;

		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
						GFP_KERNEL);
		if (!hw_fib_pool)
			goto free_fib;

		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
		if (!fib_pool)
			goto free_hw_fib_pool;

		/*
		 * Fill up fib pointer pools with actual fibs
		 * and hw_fibs
		 */
		rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num);
		if (!rcode)
			goto free_mem;

		/*
		 * Wake up the thread that is waiting for
		 * the response from fw (ioctl)
		 */
		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
				fib, hw_fib, num);

free_mem:
		/* Free up the remaining resources */
		hw_fib_p = hw_fib_pool;
		fib_p = fib_pool;
		while (hw_fib_p < &hw_fib_pool[num]) {
			kfree(*hw_fib_p);
			kfree(*fib_p);
			++fib_p;
			++hw_fib_p;
		}
		kfree(fib_pool);
free_hw_fib_pool:
		kfree(hw_fib_pool);
free_fib:
		kfree(fib);
		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_lock_irqsave(t_lock, flags);
	}
	/*
	 * There are no more AIFs
	 */
	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_unlock_irqrestore(t_lock, flags);
}

static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
						u32 datasize)
{
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	dma_addr_t addr;
	char *dma_buf;
	struct fib *fibptr;
	int ret = -ENOMEM;
	u32 vbus, vid;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	dma_buf = pci_alloc_consistent(dev->pdev, datasize, &addr);
	if (!dma_buf)
		goto fib_free_out;

	aac_fib_init(fibptr);

	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);

	srbcmd = (struct aac_srb *)fib_data(fibptr);

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(vbus);
	srbcmd->id = cpu_to_le32(vid);
	srbcmd->lun = 0;
	srbcmd->flags = cpu_to_le32(SRB_DataOut);
	srbcmd->timeout = cpu_to_le32(10);
	srbcmd->retry_limit = 0;
	srbcmd->cdb_size = cpu_to_le32(12);
	srbcmd->count = cpu_to_le32(datasize);

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	srbcmd->cdb[0] = BMIC_OUT;
	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
	memcpy(dma_buf, (char *)wellness_str, datasize);

	/*
	 * Single 64-bit s/g element: program the upper and lower
	 * 32 bits of the DMA address separately.
	 */
	sg64 = (struct sgmap64 *)&srbcmd->sg;
	sg64->count = cpu_to_le32(1);
	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
	sg64->sg[0].count = cpu_to_le32(datasize);

	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
				FsaNormal, 1, 1, NULL, NULL);

	pci_free_consistent(dev->pdev, datasize, (void *)dma_buf, addr);

	/*
	 * Do not set XferState to zero unless
	 * we receive a response from the F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * The FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		goto fib_free_out;

out:
	return ret;
fib_free_out:
	aac_fib_free(fibptr);
	goto out;
}

int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now)
{
	struct tm cur_tm;
	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
	u32 datasize = sizeof(wellness_str);
	unsigned long local_time;
	int ret = -ENODEV;

	if (!dev->sa_firmware)
		goto out;

	local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60));
	time_to_tm(local_time, 0, &cur_tm);
	cur_tm.tm_mon += 1;
	cur_tm.tm_year += 1900;
	/*
	 * BCD-encode the local time into the wellness template:
	 * hour/minute/second at bytes 8-10, month/day at 12-13,
	 * century/year at 14-15.
	 */
	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
	wellness_str[9] = bin2bcd(cur_tm.tm_min);
	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);

	ret = aac_send_wellness_command(dev, wellness_str, datasize);

out:
	return ret;
}

int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
{
	int ret = -ENOMEM;
	struct fib *fibptr;
	__le32 *info;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	aac_fib_init(fibptr);
	info = (__le32 *)fib_data(fibptr);
	*info = cpu_to_le32(now->tv_sec);
	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
				1, 1, NULL, NULL);

	/*
	 * Do not set XferState to zero unless
	 * we receive a response from the F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * The FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		aac_fib_free(fibptr);

out:
	return ret;
}

/**
 * aac_command_thread - command processing thread
 * @dev: Adapter to monitor
 *
 * Waits on the commandready event in its queue. When the event gets set
 * it will pull FIBs off its queue. It will continue to pull FIBs off
 * until the queue is empty. When the queue is empty it will wait for
 * more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 * We can only have one thread per adapter for AIFs.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 * Let the DPC know it has a place to send the AIFs to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk((KERN_INFO "aac_command_thread start\n"));
	while (1) {

		aac_process_events(dev);

		/*
		 * Background activity
		 */
		if ((time_before(next_check_jiffies,next_jiffies))
		    && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					* HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies,next_jiffies)
		    && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timeval now;
			int ret;

			/* Don't even try to talk to adapter if it's sick */
			ret = aac_check_health(dev);
			if (!dev->queues)
				break;
			next_check_jiffies = jiffies
				+ ((long)(unsigned)check_interval)
				* HZ;
			do_gettimeofday(&now);

			/* Synchronize our watches */
			if (((1000000 - (1000000 / HZ)) > now.tv_usec)
			    && (now.tv_usec > (1000000 / HZ)))
				difference = (((1000000 - now.tv_usec) * HZ)
					+ 500000) / 1000000;
			else if (ret == 0) {

				if (now.tv_usec > 500000)
					++now.tv_sec;

				if (dev->sa_firmware)
					ret =
					aac_send_safw_hostttime(dev, &now);
				else
					ret = aac_send_hosttime(dev, &now);

				difference = (long)(unsigned)update_interval*HZ;
			} else {
				/* retry shortly */
				difference = 10 * HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies,next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			break;

		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}

int aac_acquire_irq(struct aac_dev *dev)
{
	int i;
	int j;
	int ret = 0;

	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;
			if (request_irq(pci_irq_vector(dev->pdev, i),
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
					dev->name, dev->id, i);
				for (j = 0 ; j < i ; j++)
					free_irq(pci_irq_vector(dev->pdev, j),
						&(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				ret = -1;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
				IRQF_SHARED, "aacraid",
				&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
				dev->name, dev->id);
			ret = -1;
		}
	}
	return ret;
}

void aac_free_irq(struct aac_dev *dev)
{
	int i;
	int cpu;

	cpu = cpumask_first(cpu_online_mask);
	if (dev->pdev->device == PMC_DEVICE_S6 ||
	    dev->pdev->device == PMC_DEVICE_S7 ||
	    dev->pdev->device == PMC_DEVICE_S8 ||
	    dev->pdev->device == PMC_DEVICE_S9) {
		if (dev->max_msix > 1) {
			for (i = 0; i < dev->max_msix; i++)
				free_irq(pci_irq_vector(dev->pdev, i),
					&(dev->aac_msix[i]));
		} else {
			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
		}
	} else {
		free_irq(dev->pdev->irq, dev);
	}
	if (dev->msi)
		pci_disable_msi(dev->pdev);
	else if (dev->max_msix > 1)
		pci_disable_msix(dev->pdev);
}
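
/*
 * Illustrative sketch only: how a bring-up/teardown path would typically
 * pair aac_acquire_irq() with aac_free_irq().  The wrapper below
 * (aac_irq_pairing_example) is hypothetical and not part of the driver;
 * it merely shows the call ordering implied by the two routines above,
 * treating any non-zero return from aac_acquire_irq() as failure.
 */
#if 0
static int aac_irq_pairing_example(struct aac_dev *dev)
{
	/* Register per-vector MSI-X handlers, or one legacy/MSI handler. */
	if (aac_acquire_irq(dev))
		return -EINVAL;

	/*
	 * ... normal operation: interrupts are delivered to
	 * dev->a_ops.adapter_intr with the matching aac_msix entry ...
	 */

	/* Release the vectors again on teardown, suspend or reset. */
	aac_free_irq(dev);
	return 0;
}
#endif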