1 /* 2 ******************************************************************************* 3 ** O.S : Linux 4 ** FILE NAME : arcmsr_hba.c 5 ** BY : Nick Cheng, C.L. Huang 6 ** Description: SCSI RAID Device Driver for Areca RAID Controller 7 ******************************************************************************* 8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved 9 ** 10 ** Web site: www.areca.com.tw 11 ** E-mail: support@areca.com.tw 12 ** 13 ** This program is free software; you can redistribute it and/or modify 14 ** it under the terms of the GNU General Public License version 2 as 15 ** published by the Free Software Foundation. 16 ** This program is distributed in the hope that it will be useful, 17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of 18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 ** GNU General Public License for more details. 20 ******************************************************************************* 21 ** Redistribution and use in source and binary forms, with or without 22 ** modification, are permitted provided that the following conditions 23 ** are met: 24 ** 1. Redistributions of source code must retain the above copyright 25 ** notice, this list of conditions and the following disclaimer. 26 ** 2. Redistributions in binary form must reproduce the above copyright 27 ** notice, this list of conditions and the following disclaimer in the 28 ** documentation and/or other materials provided with the distribution. 29 ** 3. The name of the author may not be used to endorse or promote products 30 ** derived from this software without specific prior written permission. 31 ** 32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT 37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 42 ******************************************************************************* 43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr 44 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt 45 ******************************************************************************* 46 */ 47 #include <linux/module.h> 48 #include <linux/reboot.h> 49 #include <linux/spinlock.h> 50 #include <linux/pci_ids.h> 51 #include <linux/interrupt.h> 52 #include <linux/moduleparam.h> 53 #include <linux/errno.h> 54 #include <linux/types.h> 55 #include <linux/delay.h> 56 #include <linux/dma-mapping.h> 57 #include <linux/timer.h> 58 #include <linux/slab.h> 59 #include <linux/pci.h> 60 #include <linux/aer.h> 61 #include <linux/circ_buf.h> 62 #include <asm/dma.h> 63 #include <asm/io.h> 64 #include <linux/uaccess.h> 65 #include <scsi/scsi_host.h> 66 #include <scsi/scsi.h> 67 #include <scsi/scsi_cmnd.h> 68 #include <scsi/scsi_tcq.h> 69 #include <scsi/scsi_device.h> 70 #include <scsi/scsi_transport.h> 71 #include <scsi/scsicam.h> 72 #include "arcmsr.h" 73 MODULE_AUTHOR("Nick Cheng, C.L. 
Huang <support@areca.com.tw>"); 74 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver"); 75 MODULE_LICENSE("Dual BSD/GPL"); 76 MODULE_VERSION(ARCMSR_DRIVER_VERSION); 77 78 static int msix_enable = 1; 79 module_param(msix_enable, int, S_IRUGO); 80 MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)"); 81 82 static int msi_enable = 1; 83 module_param(msi_enable, int, S_IRUGO); 84 MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)"); 85 86 static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD; 87 module_param(host_can_queue, int, S_IRUGO); 88 MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128"); 89 90 static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN; 91 module_param(cmd_per_lun, int, S_IRUGO); 92 MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32"); 93 94 static int set_date_time = 0; 95 module_param(set_date_time, int, S_IRUGO); 96 MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable"); 97 98 #define ARCMSR_SLEEPTIME 10 99 #define ARCMSR_RETRYCOUNT 12 100 101 static wait_queue_head_t wait_q; 102 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, 103 struct scsi_cmnd *cmd); 104 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb); 105 static int arcmsr_abort(struct scsi_cmnd *); 106 static int arcmsr_bus_reset(struct scsi_cmnd *); 107 static int arcmsr_bios_param(struct scsi_device *sdev, 108 struct block_device *bdev, sector_t capacity, int *info); 109 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); 110 static int arcmsr_probe(struct pci_dev *pdev, 111 const struct pci_device_id *id); 112 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state); 113 static int arcmsr_resume(struct pci_dev *pdev); 114 static void arcmsr_remove(struct pci_dev *pdev); 115 static void 
arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(struct timer_list *t);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
static void arcmsr_set_iop_datetime(struct timer_list *);

/*
 * arcmsr_adjust_disk_queue_depth - ->change_queue_depth hook for the SCSI
 * mid-layer. Clamps the requested per-LUN depth to the driver's hard limit
 * before handing it to scsi_change_queue_depth().
 */
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	/* never allow more outstanding commands per LUN than the driver limit */
	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	return scsi_change_queue_depth(sdev, queue_depth);
}

/*
 * SCSI mid-layer host template: wires this driver's entry points
 * (queuecommand, error handlers, BIOS geometry, queue-depth control)
 * and the default queue/SG limits into the SCSI core.
 */
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "Areca SAS/SATA RAID driver",
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_DEFAULT_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_DEFAULT_CMD_PERLUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= arcmsr_host_attrs,
	.no_write_same		= 1,
};

/*
 * PCI ID table: .driver_data selects the adapter's message-unit layout
 * (ACB_ADAPTER_TYPE_A..E), which drives every hardware-access switch below.
 */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
		.driver_data = ACB_ADAPTER_TYPE_D},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
		.driver_data = ACB_ADAPTER_TYPE_E},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);

/* PCI driver glue: probe/remove plus legacy suspend/resume/shutdown hooks. */
static struct pci_driver arcmsr_pci_driver = {
	.name = "arcmsr",
	.id_table = arcmsr_device_id_table,
	.probe = arcmsr_probe,
	.remove = arcmsr_remove,
	.suspend = arcmsr_suspend,
	.resume = arcmsr_resume,
	.shutdown = arcmsr_shutdown,
};
/*
****************************************************************************
****************************************************************************
*/

/*
 * arcmsr_free_mu - release the secondary DMA-coherent buffer
 * (dma_coherent2/dma_coherent_handle2) that arcmsr_alloc_io_queue()
 * allocated for type B/D/E adapters. Types A and C allocate none, so
 * the switch intentionally has no case for them.
 */
static void arcmsr_free_mu(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
	case ACB_ADAPTER_TYPE_E: {
		dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
			acb->dma_coherent2, acb->dma_coherent_handle2);
		break;
	}
	}
}

/*
 * arcmsr_remap_pciregion - ioremap the adapter's register BAR(s) per
 * adapter type and stash the mappings in the ACB.
 * Returns true on success, false if any mapping failed (type B unmaps
 * its first BAR before failing).
 */
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		/* single register window in BAR 0 */
		acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		/* type B uses two windows: BAR 0 (doorbells) and BAR 2 (message buffers) */
		void __iomem *mem_base0, *mem_base1;
		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		/* ack any message-done interrupt left pending from before the remap */
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		void __iomem *mem_base0;
		unsigned long addr, range, flags;

		addr = (unsigned long)pci_resource_start(pdev, 0);
		range = pci_resource_len(pdev, 0);
		flags = pci_resource_flags(pdev, 0);
		mem_base0 = ioremap(addr, range);
		if (!mem_base0) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		acb->pmuE = ioremap(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1));
		if (!acb->pmuE) {
			pr_notice("arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);	/* synchronize doorbell to 0 */
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
	}
	}
	return true;
}

/*
 * arcmsr_unmap_pciregion - undo arcmsr_remap_pciregion(), unmapping
 * whichever BAR mapping(s) the adapter type owns.
 */
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:{
		iounmap(acb->pmuA);
		}
		break;
	case ACB_ADAPTER_TYPE_B:{
		iounmap(acb->mem_base0);
		iounmap(acb->mem_base1);
		}

		break;
	case ACB_ADAPTER_TYPE_C:{
		iounmap(acb->pmuC);
		}
		break;
	case ACB_ADAPTER_TYPE_D:
		iounmap(acb->mem_base0);
		break;
	case ACB_ADAPTER_TYPE_E:
		iounmap(acb->pmuE);
		break;
	}
}

/*
 * arcmsr_do_interrupt - IRQ handler registered via request_irq();
 * thin wrapper that forwards to the adapter-type-aware
 * arcmsr_interrupt() and propagates its irqreturn_t.
 */
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	irqreturn_t handle_state;
	struct AdapterControlBlock *acb = dev_id;

	handle_state = arcmsr_interrupt(acb);
	return handle_state;
}

/*
 * arcmsr_bios_param - report BIOS disk geometry (heads/sectors/cylinders)
 * for fdisk compatibility. Prefers the geometry implied by an existing
 * partition table; otherwise synthesizes 64/32 or, for >1024 cylinders,
 * 255/63 geometry from the capacity.
 * geom[0] = heads, geom[1] = sectors, geom[2] = cylinders.
 * NOTE(review): total_capacity is int while capacity is sector_t — the
 * fallback path truncates for very large devices; confirm acceptable.
 */
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer;/* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		if (ret != -1)
			return ret;
	}
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}

/*
 * arcmsr_hbaA_wait_msgint_ready - poll (10 ms x 2000, i.e. up to ~20 s)
 * for the type A IOP to raise MESSAGE0, then write-1-to-clear it.
 * Returns true when the message interrupt arrived, false on timeout.
 */
static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int i;

	for
(i = 0; i < 2000; i++) {
		if (readl(&reg->outbound_intstatus) &
			ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
			/* write-1-to-clear the message interrupt */
			writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
				&reg->outbound_intstatus);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */

	return false;
}

/*
 * arcmsr_hbaB_wait_msgint_ready - poll up to ~20 s for the type B IOP's
 * MESSAGE_CMD_DONE doorbell, then clear it and signal end-of-interrupt
 * back to the IOP. Returns true on completion, false on timeout.
 */
static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(reg->iop2drv_doorbell)
			& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
				reg->iop2drv_doorbell);
			writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
				reg->drv2iop_doorbell);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */

	return false;
}

/*
 * arcmsr_hbaC_wait_msgint_ready - poll up to ~20 s for the type C IOP's
 * MESSAGE_CMD_DONE doorbell and clear it via the dedicated clear register.
 * Returns true on completion, false on timeout.
 */
static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&phbcmu->outbound_doorbell)
			& ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&phbcmu->outbound_doorbell_clear); /*clear interrupt*/
			return true;
		}
		msleep(10);
	} /* max 20 seconds */

	return false;
}

/*
 * arcmsr_hbaD_wait_msgint_ready - poll up to ~20 s for the type D IOP's
 * MESSAGE_CMD_DONE doorbell; the same bit is written back to clear it.
 * Returns true on completion, false on timeout.
 */
static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(reg->outbound_doorbell)
			& ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
				reg->outbound_doorbell);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}

/*
 * arcmsr_hbaE_wait_msgint_ready - type E signals message completion by
 * toggling MESSAGE_CMD_DONE in iobound_doorbell; poll up to ~20 s for a
 * toggle relative to the cached in_doorbell, clear host_int_status and
 * remember the new doorbell value. Returns true on toggle, false on timeout.
 */
static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	int i;
	uint32_t read_doorbell;
	struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;

	for (i = 0; i < 2000; i++) {
		read_doorbell = readl(&phbcmu->iobound_doorbell);
		if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(0, &phbcmu->host_int_status); /*clear interrupt*/
			pACB->in_doorbell = read_doorbell;
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}

/*
 * arcmsr_hbaA_flush_cache - ask the type A IOP to flush its cache and
 * retry (up to 30 polls of ~20 s each) until acknowledged.
 */
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int retry_count = 30;
	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	do {
		if (arcmsr_hbaA_wait_msgint_ready(acb))
			break;
		else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
			timeout, retry count down = %d \n", acb->host->host_no, retry_count);
		}
	} while (retry_count != 0);
}

/*
 * arcmsr_hbaB_flush_cache - type B variant: flush request goes through
 * the drv2iop doorbell; retried up to 30 times.
 */
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int retry_count = 30;
	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
	do {
		if (arcmsr_hbaB_wait_msgint_ready(acb))
			break;
		else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
			timeout,retry count down = %d \n", acb->host->host_no, retry_count);
		}
	} while (retry_count != 0);
}

/*
 * arcmsr_hbaC_flush_cache - type C variant: write the flush message,
 * then ring the inbound doorbell so the IOP picks it up.
 */
static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	do {
		if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
			break;
		} else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
			timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
		}
	} while (retry_count != 0);
	return;
}

/*
 * arcmsr_hbaD_flush_cache - type D variant; note the smaller retry
 * budget (15) compared to the other types.
 */
static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
{
	int retry_count = 15;
	struct MessageUnit_D *reg = pACB->pmuD;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
	do {
		if (arcmsr_hbaD_wait_msgint_ready(pACB))
			break;

		retry_count--;
		pr_notice("arcmsr%d: wait 'flush adapter "
			"cache' timeout, retry count down = %d\n",
			pACB->host->host_no, retry_count);
	} while (retry_count != 0);
}

/*
 * arcmsr_hbaE_flush_cache - type E variant: the flush message is
 * announced by toggling MESSAGE_CMD_DONE in the cached out_doorbell
 * and writing it to iobound_doorbell.
 */
static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
{
	int retry_count = 30;
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	do {
		if (arcmsr_hbaE_wait_msgint_ready(pACB))
			break;
		retry_count--;
		pr_notice("arcmsr%d: wait 'flush adapter "
			"cache' timeout, retry count down = %d\n",
			pACB->host->host_no, retry_count);
	} while (retry_count != 0);
}

/*
 * arcmsr_flush_adapter_cache - dispatch the cache-flush request to the
 * adapter-type-specific implementation above.
 */
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		arcmsr_hbaA_flush_cache(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		arcmsr_hbaB_flush_cache(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		arcmsr_hbaC_flush_cache(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		arcmsr_hbaE_flush_cache(acb);
		break;
	}
}

/*
 * arcmsr_alloc_io_queue - allocate the secondary DMA-coherent area used
 * by type B (MessageUnit_B with ioremapped register pointers), type D
 * (MessageUnit_D likewise) and type E (completion queue). Types A and C
 * need nothing extra and fall through to the default case.
 * Returns true on success, false on DMA allocation failure.
 */
static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
{
	bool rtn = true;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct pci_dev *pdev = acb->pdev;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg;
		acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
			&dma_coherent_handle, GFP_KERNEL);
		if (!dma_coherent) {
			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
			return false;
		}
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = dma_coherent;
		reg = (struct MessageUnit_B *)dma_coherent;
		acb->pmuB = reg;
		/* the 1203 uses a different doorbell register layout in BAR 0 */
		if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
			reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
			reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
			reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
			reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
		} else {
			reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
			reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
			reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
			reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
		}
		reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
		reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
		reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg;

		acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
			&dma_coherent_handle, GFP_KERNEL);
		if (!dma_coherent) {
			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
			return false;
		}
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = dma_coherent;
		reg = (struct MessageUnit_D *)dma_coherent;
		acb->pmuD = reg;
		/* resolve every ARC-1214 register to its ioremapped BAR 0 address */
		reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
		reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
		reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
		reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
		reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
		reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
		reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
		reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
		reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
		reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
		reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
		reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
		reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
		reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
		reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
		reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
		reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
		reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
		reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
		reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
		reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
		reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
		reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
		reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
		reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
		reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		/* type E needs a host-memory completion queue instead */
		uint32_t completeQ_size;
		completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
		acb->roundup_ccbsize = roundup(completeQ_size, 32);
		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
			&dma_coherent_handle, GFP_KERNEL);
		if (!dma_coherent){
			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
			return false;
		}
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = dma_coherent;
		acb->pCompletionQ = dma_coherent;
		acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
		acb->doneq_index = 0;
		}
		break;
	default:
		break;
	}
	return rtn;
}

/*
 * arcmsr_alloc_ccb_pool - carve one DMA-coherent region into the pool of
 * command control blocks (CCBs). Sizes each CCB from the firmware-reported
 * SG capability (firm_cfg_version), records the virtual-to-physical offset
 * for later address translation, and links every CCB onto ccb_free_list.
 * Types A/B store the CDB physical address pre-shifted by 5 (32-byte units);
 * C/D/E store it as-is. Returns 0 on success or -ENOMEM.
 */
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	dma_addr_t cdb_phyaddr;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version;

	/* until the device map is read, every target/LUN is considered gone */
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;

	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	if((firm_config_version & 0xFF) >= 3){
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	/* CCB struct already contains one SG64ENTRY, hence max_sg_entrys - 1 */
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if(!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	acb->ccbsize = roundup_ccbsize;
	ccb_tmp = dma_coherent;
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for(i = 0; i < acb->maxFreeCCB; i++){
		cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
		case ACB_ADAPTER_TYPE_B:
			ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
			break;
		case ACB_ADAPTER_TYPE_C:
		case ACB_ADAPTER_TYPE_D:
		case ACB_ADAPTER_TYPE_E:
			ccb_tmp->cdb_phyaddr = cdb_phyaddr;
			break;
		}
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		ccb_tmp->smid = (u32)i << 16;
		INIT_LIST_HEAD(&ccb_tmp->list);
		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
	}
	return 0;
}

/*
 * arcmsr_message_isr_bh_fn - workqueue bottom half for "get config"
 * message interrupts. Reads the firmware's device bitmap from the
 * adapter-type-specific rwbuffer and hot-adds/removes SCSI devices
 * for every target/LUN bit that changed since the cached device_map.
 */
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
{
	struct AdapterControlBlock *acb = container_of(work,
		struct AdapterControlBlock, arcmsr_do_message_isr_bh);
	char *acb_dev_map = (char *)acb->device_map;
	uint32_t __iomem *signature = NULL;
	char __iomem *devicemap = NULL;
	int target, lun;
	struct scsi_device *psdev;
	char diff, temp;

	acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
	/* locate signature and device map within this adapter's message buffer */
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg =
acb->pmuC;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	}
	atomic_inc(&acb->rq_map_token);
	/* ignore the buffer unless the firmware stamped it as GET_CONFIG data */
	if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
		return;
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
		target++) {
		temp = readb(devicemap);
		diff = (*acb_dev_map) ^ temp;	/* bits that changed for this target */
		if (diff != 0) {
			*acb_dev_map = temp;
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
				lun++) {
				if ((diff & 0x01) == 1 &&
					(temp & 0x01) == 1) {
					/* LUN appeared */
					scsi_add_device(acb->host,
						0, target, lun);
				} else if ((diff & 0x01) == 1
					&& (temp & 0x01) == 0) {
					/* LUN disappeared */
					psdev = scsi_device_lookup(acb->host,
						0, target, lun);
					if (psdev != NULL) {
						scsi_remove_device(psdev);
						scsi_device_put(psdev);
					}
				}
				temp >>= 1;
				diff >>= 1;
			}
		}
		devicemap++;
		acb_dev_map++;
	}
}

/*
 * arcmsr_request_irq - allocate interrupt vectors (MSI-X if permitted by
 * the msix_enable module param, else MSI, else legacy INTx) and register
 * arcmsr_do_interrupt on each. Legacy/MSI vectors are requested shared.
 * Returns SUCCESS or FAILED; on partial failure, already-registered
 * vectors are freed.
 */
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
	unsigned long flags;
	int nvec, i;

	if (msix_enable == 0)
		goto msi_int0;
	nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
			PCI_IRQ_MSIX);
	if (nvec > 0) {
		pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
		flags = 0;
	} else {
msi_int0:
		if (msi_enable == 1) {
			nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
			if (nvec == 1) {
				dev_info(&pdev->dev, "msi enabled\n");
				goto msi_int1;
			}
		}
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (nvec < 1)
			return FAILED;
msi_int1:
		flags = IRQF_SHARED;
	}

	acb->vector_count = nvec;
	for (i = 0; i < nvec; i++) {
		if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
				flags, "arcmsr", acb)) {
			pr_warn("arcmsr%d: request_irq =%d failed!\n",
				acb->host->host_no, pci_irq_vector(pdev, i));
			goto out_free_irq;
		}
	}

	return SUCCESS;
out_free_irq:
	/* unwind only the vectors that were successfully registered */
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), acb);
	pci_free_irq_vectors(pdev);
	return FAILED;
}

/*
 * arcmsr_init_get_devmap_timer - set up the device-map polling machinery:
 * the bottom-half work item plus the periodic timer that requests the
 * device map from the firmware (first fire after 6 s).
 */
static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
{
	INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	atomic_set(&pacb->rq_map_token, 16);
	atomic_set(&pacb->ante_token_value, 16);
	pacb->fw_flag = FW_NORMAL;
	timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
	pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	add_timer(&pacb->eternal_timer);
}

/*
 * arcmsr_init_set_datetime_timer - arm the timer that periodically pushes
 * the host date/time to the IOP (only used when set_date_time=1);
 * first fire after 60 s.
 */
static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
{
	timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
	pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
	add_timer(&pacb->refresh_timer);
}

/*
 * arcmsr_probe - PCI probe: enable the device, allocate the Scsi_Host
 * with the ACB embedded in hostdata, set the DMA mask (64-bit with
 * 32-bit fallback), map BARs, allocate message unit and CCB pool, read
 * the firmware spec, register with the SCSI mid-layer, request IRQs,
 * start the adapter and its timers, then scan the host.
 * Unwinds in reverse order through the goto ladder on any failure and
 * returns -ENODEV.
 */
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus,dev_fun;
	int error;
	error = pci_enable_device(pdev);
	if(error){
		return -ENODEV;
	}
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if(!host){
		goto pci_disable_dev;
	}
	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if(error){
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if(error){
			printk(KERN_WARNING
			       "scsi%d: No suitable DMA mask available\n",
			       host->host_no);
			goto scsi_host_release;
		}
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb,0,sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
	host->max_cmd_len = 16;	 /*this is issue of 64bit LBA ,over 2T byte*/
	/* clamp module parameters back to defaults when out of range */
	if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
		host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
	host->can_queue = host_can_queue;	/* max simultaneous cmds */
	if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
		cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
	host->cmd_per_lun = cmd_per_lun;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if(error){
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	spin_lock_init(&acb->postq_lock);
	spin_lock_init(&acb->doneq_lock);
	spin_lock_init(&acb->rqbuffer_lock);
	spin_lock_init(&acb->wqbuffer_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	acb->adapter_type = id->driver_data;
	error = arcmsr_remap_pciregion(acb);
	if(!error){
		goto pci_release_regs;
	}
	error = arcmsr_alloc_io_queue(acb);
	if (!error)
		goto unmap_pci_region;
	error = arcmsr_get_firmware_spec(acb);
	if(!error){
		goto free_hbb_mu;
	}
	error = arcmsr_alloc_ccb_pool(acb);
	if(error){
		goto free_ccb_pool;
	}
	error = scsi_add_host(host, &pdev->dev);
	if(error){
		goto free_ccb_pool;
	}
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto scsi_host_remove;
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	if(arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	scsi_scan_host(host);
	return 0;
out_free_sysfs:
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	del_timer_sync(&acb->eternal_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_irq(pdev, acb);
scsi_host_remove:
	scsi_remove_host(host);
free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
free_hbb_mu:
	arcmsr_free_mu(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}

/*
 * arcmsr_free_irq - free every IRQ vector registered by
 * arcmsr_request_irq() and release the vector allocation.
 */
static void arcmsr_free_irq(struct pci_dev *pdev,
		struct AdapterControlBlock *acb)
{
	int i;

	for (i = 0; i < acb->vector_count; i++)
		free_irq(pci_irq_vector(pdev, i), acb);
	pci_free_irq_vectors(pdev);
}

/*
 * arcmsr_suspend - legacy PM suspend: mask adapter interrupts, free
 * IRQs, stop timers and background rebuild, flush the adapter cache,
 * then save PCI state and put the device into the target power state.
 */
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
{
	uint32_t intmask_org;
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	pci_set_drvdata(pdev, host);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int
arcmsr_resume(struct pci_dev *pdev) 1039 { 1040 int error; 1041 struct Scsi_Host *host = pci_get_drvdata(pdev); 1042 struct AdapterControlBlock *acb = 1043 (struct AdapterControlBlock *)host->hostdata; 1044 1045 pci_set_power_state(pdev, PCI_D0); 1046 pci_enable_wake(pdev, PCI_D0, 0); 1047 pci_restore_state(pdev); 1048 if (pci_enable_device(pdev)) { 1049 pr_warn("%s: pci_enable_device error\n", __func__); 1050 return -ENODEV; 1051 } 1052 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1053 if (error) { 1054 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1055 if (error) { 1056 pr_warn("scsi%d: No suitable DMA mask available\n", 1057 host->host_no); 1058 goto controller_unregister; 1059 } 1060 } 1061 pci_set_master(pdev); 1062 if (arcmsr_request_irq(pdev, acb) == FAILED) 1063 goto controller_stop; 1064 arcmsr_iop_init(acb); 1065 arcmsr_init_get_devmap_timer(acb); 1066 if (set_date_time) 1067 arcmsr_init_set_datetime_timer(acb); 1068 return 0; 1069 controller_stop: 1070 arcmsr_stop_adapter_bgrb(acb); 1071 arcmsr_flush_adapter_cache(acb); 1072 controller_unregister: 1073 scsi_remove_host(host); 1074 arcmsr_free_ccb_pool(acb); 1075 arcmsr_unmap_pciregion(acb); 1076 pci_release_regions(pdev); 1077 scsi_host_put(host); 1078 pci_disable_device(pdev); 1079 return -ENODEV; 1080 } 1081 1082 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb) 1083 { 1084 struct MessageUnit_A __iomem *reg = acb->pmuA; 1085 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); 1086 if (!arcmsr_hbaA_wait_msgint_ready(acb)) { 1087 printk(KERN_NOTICE 1088 "arcmsr%d: wait 'abort all outstanding command' timeout\n" 1089 , acb->host->host_no); 1090 return false; 1091 } 1092 return true; 1093 } 1094 1095 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb) 1096 { 1097 struct MessageUnit_B *reg = acb->pmuB; 1098 1099 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell); 1100 if (!arcmsr_hbaB_wait_msgint_ready(acb)) { 1101 printk(KERN_NOTICE 1102 
"arcmsr%d: wait 'abort all outstanding command' timeout\n" 1103 , acb->host->host_no); 1104 return false; 1105 } 1106 return true; 1107 } 1108 static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB) 1109 { 1110 struct MessageUnit_C __iomem *reg = pACB->pmuC; 1111 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); 1112 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); 1113 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { 1114 printk(KERN_NOTICE 1115 "arcmsr%d: wait 'abort all outstanding command' timeout\n" 1116 , pACB->host->host_no); 1117 return false; 1118 } 1119 return true; 1120 } 1121 1122 static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB) 1123 { 1124 struct MessageUnit_D *reg = pACB->pmuD; 1125 1126 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0); 1127 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { 1128 pr_notice("arcmsr%d: wait 'abort all outstanding " 1129 "command' timeout\n", pACB->host->host_no); 1130 return false; 1131 } 1132 return true; 1133 } 1134 1135 static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB) 1136 { 1137 struct MessageUnit_E __iomem *reg = pACB->pmuE; 1138 1139 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); 1140 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 1141 writel(pACB->out_doorbell, ®->iobound_doorbell); 1142 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { 1143 pr_notice("arcmsr%d: wait 'abort all outstanding " 1144 "command' timeout\n", pACB->host->host_no); 1145 return false; 1146 } 1147 return true; 1148 } 1149 1150 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 1151 { 1152 uint8_t rtnval = 0; 1153 switch (acb->adapter_type) { 1154 case ACB_ADAPTER_TYPE_A: { 1155 rtnval = arcmsr_hbaA_abort_allcmd(acb); 1156 } 1157 break; 1158 1159 case ACB_ADAPTER_TYPE_B: { 1160 rtnval = arcmsr_hbaB_abort_allcmd(acb); 1161 } 1162 break; 1163 1164 case ACB_ADAPTER_TYPE_C: { 1165 rtnval = 
arcmsr_hbaC_abort_allcmd(acb); 1166 } 1167 break; 1168 1169 case ACB_ADAPTER_TYPE_D: 1170 rtnval = arcmsr_hbaD_abort_allcmd(acb); 1171 break; 1172 case ACB_ADAPTER_TYPE_E: 1173 rtnval = arcmsr_hbaE_abort_allcmd(acb); 1174 break; 1175 } 1176 return rtnval; 1177 } 1178 1179 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) 1180 { 1181 struct scsi_cmnd *pcmd = ccb->pcmd; 1182 1183 scsi_dma_unmap(pcmd); 1184 } 1185 1186 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb) 1187 { 1188 struct AdapterControlBlock *acb = ccb->acb; 1189 struct scsi_cmnd *pcmd = ccb->pcmd; 1190 unsigned long flags; 1191 atomic_dec(&acb->ccboutstandingcount); 1192 arcmsr_pci_unmap_dma(ccb); 1193 ccb->startdone = ARCMSR_CCB_DONE; 1194 spin_lock_irqsave(&acb->ccblist_lock, flags); 1195 list_add_tail(&ccb->list, &acb->ccb_free_list); 1196 spin_unlock_irqrestore(&acb->ccblist_lock, flags); 1197 pcmd->scsi_done(pcmd); 1198 } 1199 1200 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb) 1201 { 1202 1203 struct scsi_cmnd *pcmd = ccb->pcmd; 1204 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer; 1205 pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1); 1206 if (sensebuffer) { 1207 int sense_data_length = 1208 sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE 1209 ? 
sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE; 1210 memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE); 1211 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length); 1212 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; 1213 sensebuffer->Valid = 1; 1214 pcmd->result |= (DRIVER_SENSE << 24); 1215 } 1216 } 1217 1218 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) 1219 { 1220 u32 orig_mask = 0; 1221 switch (acb->adapter_type) { 1222 case ACB_ADAPTER_TYPE_A : { 1223 struct MessageUnit_A __iomem *reg = acb->pmuA; 1224 orig_mask = readl(®->outbound_intmask); 1225 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ 1226 ®->outbound_intmask); 1227 } 1228 break; 1229 case ACB_ADAPTER_TYPE_B : { 1230 struct MessageUnit_B *reg = acb->pmuB; 1231 orig_mask = readl(reg->iop2drv_doorbell_mask); 1232 writel(0, reg->iop2drv_doorbell_mask); 1233 } 1234 break; 1235 case ACB_ADAPTER_TYPE_C:{ 1236 struct MessageUnit_C __iomem *reg = acb->pmuC; 1237 /* disable all outbound interrupt */ 1238 orig_mask = readl(®->host_int_mask); /* disable outbound message0 int */ 1239 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask); 1240 } 1241 break; 1242 case ACB_ADAPTER_TYPE_D: { 1243 struct MessageUnit_D *reg = acb->pmuD; 1244 /* disable all outbound interrupt */ 1245 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable); 1246 } 1247 break; 1248 case ACB_ADAPTER_TYPE_E: { 1249 struct MessageUnit_E __iomem *reg = acb->pmuE; 1250 orig_mask = readl(®->host_int_mask); 1251 writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, ®->host_int_mask); 1252 readl(®->host_int_mask); /* Dummy readl to force pci flush */ 1253 } 1254 break; 1255 } 1256 return orig_mask; 1257 } 1258 1259 static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, 1260 struct CommandControlBlock *ccb, bool error) 1261 { 1262 uint8_t id, lun; 1263 id = ccb->pcmd->device->id; 1264 lun = ccb->pcmd->device->lun; 1265 if 
(!error) { 1266 if (acb->devstate[id][lun] == ARECA_RAID_GONE) 1267 acb->devstate[id][lun] = ARECA_RAID_GOOD; 1268 ccb->pcmd->result = DID_OK << 16; 1269 arcmsr_ccb_complete(ccb); 1270 }else{ 1271 switch (ccb->arcmsr_cdb.DeviceStatus) { 1272 case ARCMSR_DEV_SELECT_TIMEOUT: { 1273 acb->devstate[id][lun] = ARECA_RAID_GONE; 1274 ccb->pcmd->result = DID_NO_CONNECT << 16; 1275 arcmsr_ccb_complete(ccb); 1276 } 1277 break; 1278 1279 case ARCMSR_DEV_ABORTED: 1280 1281 case ARCMSR_DEV_INIT_FAIL: { 1282 acb->devstate[id][lun] = ARECA_RAID_GONE; 1283 ccb->pcmd->result = DID_BAD_TARGET << 16; 1284 arcmsr_ccb_complete(ccb); 1285 } 1286 break; 1287 1288 case ARCMSR_DEV_CHECK_CONDITION: { 1289 acb->devstate[id][lun] = ARECA_RAID_GOOD; 1290 arcmsr_report_sense_info(ccb); 1291 arcmsr_ccb_complete(ccb); 1292 } 1293 break; 1294 1295 default: 1296 printk(KERN_NOTICE 1297 "arcmsr%d: scsi id = %d lun = %d isr get command error done, \ 1298 but got unknown DeviceStatus = 0x%x \n" 1299 , acb->host->host_no 1300 , id 1301 , lun 1302 , ccb->arcmsr_cdb.DeviceStatus); 1303 acb->devstate[id][lun] = ARECA_RAID_GONE; 1304 ccb->pcmd->result = DID_NO_CONNECT << 16; 1305 arcmsr_ccb_complete(ccb); 1306 break; 1307 } 1308 } 1309 } 1310 1311 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error) 1312 { 1313 int id, lun; 1314 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { 1315 if (pCCB->startdone == ARCMSR_CCB_ABORTED) { 1316 struct scsi_cmnd *abortcmd = pCCB->pcmd; 1317 if (abortcmd) { 1318 id = abortcmd->device->id; 1319 lun = abortcmd->device->lun; 1320 abortcmd->result |= DID_ABORT << 16; 1321 arcmsr_ccb_complete(pCCB); 1322 printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n", 1323 acb->host->host_no, pCCB); 1324 } 1325 return; 1326 } 1327 printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \ 1328 done acb = '0x%p'" 1329 "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x" 1330 " 
ccboutstandingcount = %d \n" 1331 , acb->host->host_no 1332 , acb 1333 , pCCB 1334 , pCCB->acb 1335 , pCCB->startdone 1336 , atomic_read(&acb->ccboutstandingcount)); 1337 return; 1338 } 1339 arcmsr_report_ccb_state(acb, pCCB, error); 1340 } 1341 1342 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) 1343 { 1344 int i = 0; 1345 uint32_t flag_ccb, ccb_cdb_phy; 1346 struct ARCMSR_CDB *pARCMSR_CDB; 1347 bool error; 1348 struct CommandControlBlock *pCCB; 1349 switch (acb->adapter_type) { 1350 1351 case ACB_ADAPTER_TYPE_A: { 1352 struct MessageUnit_A __iomem *reg = acb->pmuA; 1353 uint32_t outbound_intstatus; 1354 outbound_intstatus = readl(®->outbound_intstatus) & 1355 acb->outbound_int_enable; 1356 /*clear and abort all outbound posted Q*/ 1357 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/ 1358 while(((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) 1359 && (i++ < acb->maxOutstanding)) { 1360 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ 1361 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 1362 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; 1363 arcmsr_drain_donequeue(acb, pCCB, error); 1364 } 1365 } 1366 break; 1367 1368 case ACB_ADAPTER_TYPE_B: { 1369 struct MessageUnit_B *reg = acb->pmuB; 1370 /*clear all outbound posted Q*/ 1371 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */ 1372 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { 1373 flag_ccb = reg->done_qbuffer[i]; 1374 if (flag_ccb != 0) { 1375 reg->done_qbuffer[i] = 0; 1376 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/ 1377 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 1378 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; 1379 arcmsr_drain_donequeue(acb, pCCB, error); 1380 } 1381 reg->post_qbuffer[i] = 0; 1382 } 1383 reg->doneq_index = 0; 1384 reg->postq_index = 0; 1385 } 1386 break; 1387 case ACB_ADAPTER_TYPE_C: { 1388 struct MessageUnit_C __iomem *reg = acb->pmuC; 1389 while ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) { 1390 /*need to do*/ 1391 flag_ccb = readl(®->outbound_queueport_low); 1392 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); 1393 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/ 1394 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 1395 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; 1396 arcmsr_drain_donequeue(acb, pCCB, error); 1397 } 1398 } 1399 break; 1400 case ACB_ADAPTER_TYPE_D: { 1401 struct MessageUnit_D *pmu = acb->pmuD; 1402 uint32_t outbound_write_pointer; 1403 uint32_t doneq_index, index_stripped, addressLow, residual, toggle; 1404 unsigned long flags; 1405 1406 residual = atomic_read(&acb->ccboutstandingcount); 1407 for (i = 0; i < residual; i++) { 1408 spin_lock_irqsave(&acb->doneq_lock, flags); 1409 outbound_write_pointer = 1410 pmu->done_qbuffer[0].addressLow + 1; 1411 doneq_index = pmu->doneq_index; 1412 if ((doneq_index & 0xFFF) != 1413 (outbound_write_pointer & 0xFFF)) { 1414 toggle = doneq_index & 0x4000; 1415 index_stripped = (doneq_index & 0xFFF) + 1; 1416 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; 1417 pmu->doneq_index = index_stripped ? 
(index_stripped | toggle) : 1418 ((toggle ^ 0x4000) + 1); 1419 doneq_index = pmu->doneq_index; 1420 spin_unlock_irqrestore(&acb->doneq_lock, flags); 1421 addressLow = pmu->done_qbuffer[doneq_index & 1422 0xFFF].addressLow; 1423 ccb_cdb_phy = (addressLow & 0xFFFFFFF0); 1424 pARCMSR_CDB = (struct ARCMSR_CDB *) 1425 (acb->vir2phy_offset + ccb_cdb_phy); 1426 pCCB = container_of(pARCMSR_CDB, 1427 struct CommandControlBlock, arcmsr_cdb); 1428 error = (addressLow & 1429 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? 1430 true : false; 1431 arcmsr_drain_donequeue(acb, pCCB, error); 1432 writel(doneq_index, 1433 pmu->outboundlist_read_pointer); 1434 } else { 1435 spin_unlock_irqrestore(&acb->doneq_lock, flags); 1436 mdelay(10); 1437 } 1438 } 1439 pmu->postq_index = 0; 1440 pmu->doneq_index = 0x40FF; 1441 } 1442 break; 1443 case ACB_ADAPTER_TYPE_E: 1444 arcmsr_hbaE_postqueue_isr(acb); 1445 break; 1446 } 1447 } 1448 1449 static void arcmsr_remove(struct pci_dev *pdev) 1450 { 1451 struct Scsi_Host *host = pci_get_drvdata(pdev); 1452 struct AdapterControlBlock *acb = 1453 (struct AdapterControlBlock *) host->hostdata; 1454 int poll_count = 0; 1455 arcmsr_free_sysfs_attr(acb); 1456 scsi_remove_host(host); 1457 flush_work(&acb->arcmsr_do_message_isr_bh); 1458 del_timer_sync(&acb->eternal_timer); 1459 if (set_date_time) 1460 del_timer_sync(&acb->refresh_timer); 1461 arcmsr_disable_outbound_ints(acb); 1462 arcmsr_stop_adapter_bgrb(acb); 1463 arcmsr_flush_adapter_cache(acb); 1464 acb->acb_flags |= ACB_F_SCSISTOPADAPTER; 1465 acb->acb_flags &= ~ACB_F_IOP_INITED; 1466 1467 for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){ 1468 if (!atomic_read(&acb->ccboutstandingcount)) 1469 break; 1470 arcmsr_interrupt(acb);/* FIXME: need spinlock */ 1471 msleep(25); 1472 } 1473 1474 if (atomic_read(&acb->ccboutstandingcount)) { 1475 int i; 1476 1477 arcmsr_abort_allcmd(acb); 1478 arcmsr_done4abort_postqueue(acb); 1479 for (i = 0; i < acb->maxFreeCCB; i++) { 1480 struct CommandControlBlock 
*ccb = acb->pccb_pool[i]; 1481 if (ccb->startdone == ARCMSR_CCB_START) { 1482 ccb->startdone = ARCMSR_CCB_ABORTED; 1483 ccb->pcmd->result = DID_ABORT << 16; 1484 arcmsr_ccb_complete(ccb); 1485 } 1486 } 1487 } 1488 arcmsr_free_irq(pdev, acb); 1489 arcmsr_free_ccb_pool(acb); 1490 arcmsr_free_mu(acb); 1491 arcmsr_unmap_pciregion(acb); 1492 pci_release_regions(pdev); 1493 scsi_host_put(host); 1494 pci_disable_device(pdev); 1495 } 1496 1497 static void arcmsr_shutdown(struct pci_dev *pdev) 1498 { 1499 struct Scsi_Host *host = pci_get_drvdata(pdev); 1500 struct AdapterControlBlock *acb = 1501 (struct AdapterControlBlock *)host->hostdata; 1502 del_timer_sync(&acb->eternal_timer); 1503 if (set_date_time) 1504 del_timer_sync(&acb->refresh_timer); 1505 arcmsr_disable_outbound_ints(acb); 1506 arcmsr_free_irq(pdev, acb); 1507 flush_work(&acb->arcmsr_do_message_isr_bh); 1508 arcmsr_stop_adapter_bgrb(acb); 1509 arcmsr_flush_adapter_cache(acb); 1510 } 1511 1512 static int arcmsr_module_init(void) 1513 { 1514 int error = 0; 1515 error = pci_register_driver(&arcmsr_pci_driver); 1516 return error; 1517 } 1518 1519 static void arcmsr_module_exit(void) 1520 { 1521 pci_unregister_driver(&arcmsr_pci_driver); 1522 } 1523 module_init(arcmsr_module_init); 1524 module_exit(arcmsr_module_exit); 1525 1526 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, 1527 u32 intmask_org) 1528 { 1529 u32 mask; 1530 switch (acb->adapter_type) { 1531 1532 case ACB_ADAPTER_TYPE_A: { 1533 struct MessageUnit_A __iomem *reg = acb->pmuA; 1534 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | 1535 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE| 1536 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); 1537 writel(mask, ®->outbound_intmask); 1538 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; 1539 } 1540 break; 1541 1542 case ACB_ADAPTER_TYPE_B: { 1543 struct MessageUnit_B *reg = acb->pmuB; 1544 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | 1545 
ARCMSR_IOP2DRV_DATA_READ_OK | 1546 ARCMSR_IOP2DRV_CDB_DONE | 1547 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); 1548 writel(mask, reg->iop2drv_doorbell_mask); 1549 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; 1550 } 1551 break; 1552 case ACB_ADAPTER_TYPE_C: { 1553 struct MessageUnit_C __iomem *reg = acb->pmuC; 1554 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); 1555 writel(intmask_org & mask, ®->host_int_mask); 1556 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f; 1557 } 1558 break; 1559 case ACB_ADAPTER_TYPE_D: { 1560 struct MessageUnit_D *reg = acb->pmuD; 1561 1562 mask = ARCMSR_ARC1214_ALL_INT_ENABLE; 1563 writel(intmask_org | mask, reg->pcief0_int_enable); 1564 break; 1565 } 1566 case ACB_ADAPTER_TYPE_E: { 1567 struct MessageUnit_E __iomem *reg = acb->pmuE; 1568 1569 mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR); 1570 writel(intmask_org & mask, ®->host_int_mask); 1571 break; 1572 } 1573 } 1574 } 1575 1576 static int arcmsr_build_ccb(struct AdapterControlBlock *acb, 1577 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd) 1578 { 1579 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; 1580 int8_t *psge = (int8_t *)&arcmsr_cdb->u; 1581 __le32 address_lo, address_hi; 1582 int arccdbsize = 0x30; 1583 __le32 length = 0; 1584 int i; 1585 struct scatterlist *sg; 1586 int nseg; 1587 ccb->pcmd = pcmd; 1588 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); 1589 arcmsr_cdb->TargetID = pcmd->device->id; 1590 arcmsr_cdb->LUN = pcmd->device->lun; 1591 arcmsr_cdb->Function = 1; 1592 arcmsr_cdb->msgContext = 0; 1593 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); 1594 1595 nseg = scsi_dma_map(pcmd); 1596 if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0)) 1597 return FAILED; 1598 scsi_for_each_sg(pcmd, sg, nseg, i) { 1599 /* Get the physical address of the current data pointer */ 1600 length = 
cpu_to_le32(sg_dma_len(sg)); 1601 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg))); 1602 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg))); 1603 if (address_hi == 0) { 1604 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; 1605 1606 pdma_sg->address = address_lo; 1607 pdma_sg->length = length; 1608 psge += sizeof (struct SG32ENTRY); 1609 arccdbsize += sizeof (struct SG32ENTRY); 1610 } else { 1611 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge; 1612 1613 pdma_sg->addresshigh = address_hi; 1614 pdma_sg->address = address_lo; 1615 pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR); 1616 psge += sizeof (struct SG64ENTRY); 1617 arccdbsize += sizeof (struct SG64ENTRY); 1618 } 1619 } 1620 arcmsr_cdb->sgcount = (uint8_t)nseg; 1621 arcmsr_cdb->DataLength = scsi_bufflen(pcmd); 1622 arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0); 1623 if ( arccdbsize > 256) 1624 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; 1625 if (pcmd->sc_data_direction == DMA_TO_DEVICE) 1626 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; 1627 ccb->arc_cdb_size = arccdbsize; 1628 return SUCCESS; 1629 } 1630 1631 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) 1632 { 1633 uint32_t cdb_phyaddr = ccb->cdb_phyaddr; 1634 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; 1635 atomic_inc(&acb->ccboutstandingcount); 1636 ccb->startdone = ARCMSR_CCB_START; 1637 switch (acb->adapter_type) { 1638 case ACB_ADAPTER_TYPE_A: { 1639 struct MessageUnit_A __iomem *reg = acb->pmuA; 1640 1641 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) 1642 writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, 1643 ®->inbound_queueport); 1644 else 1645 writel(cdb_phyaddr, ®->inbound_queueport); 1646 break; 1647 } 1648 1649 case ACB_ADAPTER_TYPE_B: { 1650 struct MessageUnit_B *reg = acb->pmuB; 1651 uint32_t ending_index, index = reg->postq_index; 1652 1653 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); 1654 
reg->post_qbuffer[ending_index] = 0; 1655 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { 1656 reg->post_qbuffer[index] = 1657 cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE; 1658 } else { 1659 reg->post_qbuffer[index] = cdb_phyaddr; 1660 } 1661 index++; 1662 index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */ 1663 reg->postq_index = index; 1664 writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell); 1665 } 1666 break; 1667 case ACB_ADAPTER_TYPE_C: { 1668 struct MessageUnit_C __iomem *phbcmu = acb->pmuC; 1669 uint32_t ccb_post_stamp, arc_cdb_size; 1670 1671 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size; 1672 ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1); 1673 if (acb->cdb_phyaddr_hi32) { 1674 writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high); 1675 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); 1676 } else { 1677 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); 1678 } 1679 } 1680 break; 1681 case ACB_ADAPTER_TYPE_D: { 1682 struct MessageUnit_D *pmu = acb->pmuD; 1683 u16 index_stripped; 1684 u16 postq_index, toggle; 1685 unsigned long flags; 1686 struct InBound_SRB *pinbound_srb; 1687 1688 spin_lock_irqsave(&acb->postq_lock, flags); 1689 postq_index = pmu->postq_index; 1690 pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]); 1691 pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr); 1692 pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr); 1693 pinbound_srb->length = ccb->arc_cdb_size >> 2; 1694 arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr); 1695 toggle = postq_index & 0x4000; 1696 index_stripped = postq_index + 1; 1697 index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1); 1698 pmu->postq_index = index_stripped ? 
(index_stripped | toggle) : 1699 (toggle ^ 0x4000); 1700 writel(postq_index, pmu->inboundlist_write_pointer); 1701 spin_unlock_irqrestore(&acb->postq_lock, flags); 1702 break; 1703 } 1704 case ACB_ADAPTER_TYPE_E: { 1705 struct MessageUnit_E __iomem *pmu = acb->pmuE; 1706 u32 ccb_post_stamp, arc_cdb_size; 1707 1708 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size; 1709 ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6)); 1710 writel(0, &pmu->inbound_queueport_high); 1711 writel(ccb_post_stamp, &pmu->inbound_queueport_low); 1712 break; 1713 } 1714 } 1715 } 1716 1717 static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb) 1718 { 1719 struct MessageUnit_A __iomem *reg = acb->pmuA; 1720 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1721 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); 1722 if (!arcmsr_hbaA_wait_msgint_ready(acb)) { 1723 printk(KERN_NOTICE 1724 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n" 1725 , acb->host->host_no); 1726 } 1727 } 1728 1729 static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb) 1730 { 1731 struct MessageUnit_B *reg = acb->pmuB; 1732 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1733 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell); 1734 1735 if (!arcmsr_hbaB_wait_msgint_ready(acb)) { 1736 printk(KERN_NOTICE 1737 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n" 1738 , acb->host->host_no); 1739 } 1740 } 1741 1742 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB) 1743 { 1744 struct MessageUnit_C __iomem *reg = pACB->pmuC; 1745 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1746 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); 1747 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); 1748 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { 1749 printk(KERN_NOTICE 1750 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n" 1751 , pACB->host->host_no); 1752 } 1753 return; 1754 } 1755 1756 static void 
arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB) 1757 { 1758 struct MessageUnit_D *reg = pACB->pmuD; 1759 1760 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1761 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0); 1762 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) 1763 pr_notice("arcmsr%d: wait 'stop adapter background rebulid' " 1764 "timeout\n", pACB->host->host_no); 1765 } 1766 1767 static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB) 1768 { 1769 struct MessageUnit_E __iomem *reg = pACB->pmuE; 1770 1771 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1772 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); 1773 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 1774 writel(pACB->out_doorbell, ®->iobound_doorbell); 1775 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { 1776 pr_notice("arcmsr%d: wait 'stop adapter background rebulid' " 1777 "timeout\n", pACB->host->host_no); 1778 } 1779 } 1780 1781 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 1782 { 1783 switch (acb->adapter_type) { 1784 case ACB_ADAPTER_TYPE_A: { 1785 arcmsr_hbaA_stop_bgrb(acb); 1786 } 1787 break; 1788 1789 case ACB_ADAPTER_TYPE_B: { 1790 arcmsr_hbaB_stop_bgrb(acb); 1791 } 1792 break; 1793 case ACB_ADAPTER_TYPE_C: { 1794 arcmsr_hbaC_stop_bgrb(acb); 1795 } 1796 break; 1797 case ACB_ADAPTER_TYPE_D: 1798 arcmsr_hbaD_stop_bgrb(acb); 1799 break; 1800 case ACB_ADAPTER_TYPE_E: 1801 arcmsr_hbaE_stop_bgrb(acb); 1802 break; 1803 } 1804 } 1805 1806 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) 1807 { 1808 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle); 1809 } 1810 1811 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) 1812 { 1813 switch (acb->adapter_type) { 1814 case ACB_ADAPTER_TYPE_A: { 1815 struct MessageUnit_A __iomem *reg = acb->pmuA; 1816 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); 1817 } 1818 break; 1819 1820 case ACB_ADAPTER_TYPE_B: 
{ 1821 struct MessageUnit_B *reg = acb->pmuB; 1822 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); 1823 } 1824 break; 1825 case ACB_ADAPTER_TYPE_C: { 1826 struct MessageUnit_C __iomem *reg = acb->pmuC; 1827 1828 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell); 1829 } 1830 break; 1831 case ACB_ADAPTER_TYPE_D: { 1832 struct MessageUnit_D *reg = acb->pmuD; 1833 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, 1834 reg->inbound_doorbell); 1835 } 1836 break; 1837 case ACB_ADAPTER_TYPE_E: { 1838 struct MessageUnit_E __iomem *reg = acb->pmuE; 1839 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; 1840 writel(acb->out_doorbell, ®->iobound_doorbell); 1841 } 1842 break; 1843 } 1844 } 1845 1846 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) 1847 { 1848 switch (acb->adapter_type) { 1849 case ACB_ADAPTER_TYPE_A: { 1850 struct MessageUnit_A __iomem *reg = acb->pmuA; 1851 /* 1852 ** push inbound doorbell tell iop, driver data write ok 1853 ** and wait reply on next hwinterrupt for next Qbuffer post 1854 */ 1855 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, ®->inbound_doorbell); 1856 } 1857 break; 1858 1859 case ACB_ADAPTER_TYPE_B: { 1860 struct MessageUnit_B *reg = acb->pmuB; 1861 /* 1862 ** push inbound doorbell tell iop, driver data write ok 1863 ** and wait reply on next hwinterrupt for next Qbuffer post 1864 */ 1865 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell); 1866 } 1867 break; 1868 case ACB_ADAPTER_TYPE_C: { 1869 struct MessageUnit_C __iomem *reg = acb->pmuC; 1870 /* 1871 ** push inbound doorbell tell iop, driver data write ok 1872 ** and wait reply on next hwinterrupt for next Qbuffer post 1873 */ 1874 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, ®->inbound_doorbell); 1875 } 1876 break; 1877 case ACB_ADAPTER_TYPE_D: { 1878 struct MessageUnit_D *reg = acb->pmuD; 1879 writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY, 1880 reg->inbound_doorbell); 1881 } 1882 break; 1883 case ACB_ADAPTER_TYPE_E: { 1884 struct 
MessageUnit_E __iomem *reg = acb->pmuE; 1885 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK; 1886 writel(acb->out_doorbell, ®->iobound_doorbell); 1887 } 1888 break; 1889 } 1890 } 1891 1892 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) 1893 { 1894 struct QBUFFER __iomem *qbuffer = NULL; 1895 switch (acb->adapter_type) { 1896 1897 case ACB_ADAPTER_TYPE_A: { 1898 struct MessageUnit_A __iomem *reg = acb->pmuA; 1899 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; 1900 } 1901 break; 1902 1903 case ACB_ADAPTER_TYPE_B: { 1904 struct MessageUnit_B *reg = acb->pmuB; 1905 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; 1906 } 1907 break; 1908 case ACB_ADAPTER_TYPE_C: { 1909 struct MessageUnit_C __iomem *phbcmu = acb->pmuC; 1910 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer; 1911 } 1912 break; 1913 case ACB_ADAPTER_TYPE_D: { 1914 struct MessageUnit_D *reg = acb->pmuD; 1915 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; 1916 } 1917 break; 1918 case ACB_ADAPTER_TYPE_E: { 1919 struct MessageUnit_E __iomem *reg = acb->pmuE; 1920 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; 1921 } 1922 break; 1923 } 1924 return qbuffer; 1925 } 1926 1927 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) 1928 { 1929 struct QBUFFER __iomem *pqbuffer = NULL; 1930 switch (acb->adapter_type) { 1931 1932 case ACB_ADAPTER_TYPE_A: { 1933 struct MessageUnit_A __iomem *reg = acb->pmuA; 1934 pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer; 1935 } 1936 break; 1937 1938 case ACB_ADAPTER_TYPE_B: { 1939 struct MessageUnit_B *reg = acb->pmuB; 1940 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; 1941 } 1942 break; 1943 case ACB_ADAPTER_TYPE_C: { 1944 struct MessageUnit_C __iomem *reg = acb->pmuC; 1945 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; 1946 } 1947 break; 1948 case ACB_ADAPTER_TYPE_D: { 1949 struct MessageUnit_D *reg = acb->pmuD; 1950 
pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; 1951 } 1952 break; 1953 case ACB_ADAPTER_TYPE_E: { 1954 struct MessageUnit_E __iomem *reg = acb->pmuE; 1955 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; 1956 } 1957 break; 1958 } 1959 return pqbuffer; 1960 } 1961 1962 static uint32_t 1963 arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb, 1964 struct QBUFFER __iomem *prbuffer) 1965 { 1966 uint8_t *pQbuffer; 1967 uint8_t *buf1 = NULL; 1968 uint32_t __iomem *iop_data; 1969 uint32_t iop_len, data_len, *buf2 = NULL; 1970 1971 iop_data = (uint32_t __iomem *)prbuffer->data; 1972 iop_len = readl(&prbuffer->data_len); 1973 if (iop_len > 0) { 1974 buf1 = kmalloc(128, GFP_ATOMIC); 1975 buf2 = (uint32_t *)buf1; 1976 if (buf1 == NULL) 1977 return 0; 1978 data_len = iop_len; 1979 while (data_len >= 4) { 1980 *buf2++ = readl(iop_data); 1981 iop_data++; 1982 data_len -= 4; 1983 } 1984 if (data_len) 1985 *buf2 = readl(iop_data); 1986 buf2 = (uint32_t *)buf1; 1987 } 1988 while (iop_len > 0) { 1989 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; 1990 *pQbuffer = *buf1; 1991 acb->rqbuf_putIndex++; 1992 /* if last, index number set it to 0 */ 1993 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; 1994 buf1++; 1995 iop_len--; 1996 } 1997 kfree(buf2); 1998 /* let IOP know data has been read */ 1999 arcmsr_iop_message_read(acb); 2000 return 1; 2001 } 2002 2003 uint32_t 2004 arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, 2005 struct QBUFFER __iomem *prbuffer) { 2006 2007 uint8_t *pQbuffer; 2008 uint8_t __iomem *iop_data; 2009 uint32_t iop_len; 2010 2011 if (acb->adapter_type > ACB_ADAPTER_TYPE_B) 2012 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer); 2013 iop_data = (uint8_t __iomem *)prbuffer->data; 2014 iop_len = readl(&prbuffer->data_len); 2015 while (iop_len > 0) { 2016 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; 2017 *pQbuffer = readb(iop_data); 2018 acb->rqbuf_putIndex++; 2019 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; 2020 
iop_data++; 2021 iop_len--; 2022 } 2023 arcmsr_iop_message_read(acb); 2024 return 1; 2025 } 2026 2027 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) 2028 { 2029 unsigned long flags; 2030 struct QBUFFER __iomem *prbuffer; 2031 int32_t buf_empty_len; 2032 2033 spin_lock_irqsave(&acb->rqbuffer_lock, flags); 2034 prbuffer = arcmsr_get_iop_rqbuffer(acb); 2035 buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) & 2036 (ARCMSR_MAX_QBUFFER - 1); 2037 if (buf_empty_len >= readl(&prbuffer->data_len)) { 2038 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) 2039 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 2040 } else 2041 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 2042 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); 2043 } 2044 2045 static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb) 2046 { 2047 uint8_t *pQbuffer; 2048 struct QBUFFER __iomem *pwbuffer; 2049 uint8_t *buf1 = NULL; 2050 uint32_t __iomem *iop_data; 2051 uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data; 2052 2053 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 2054 buf1 = kmalloc(128, GFP_ATOMIC); 2055 buf2 = (uint32_t *)buf1; 2056 if (buf1 == NULL) 2057 return; 2058 2059 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 2060 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 2061 iop_data = (uint32_t __iomem *)pwbuffer->data; 2062 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) 2063 && (allxfer_len < 124)) { 2064 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; 2065 *buf1 = *pQbuffer; 2066 acb->wqbuf_getIndex++; 2067 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; 2068 buf1++; 2069 allxfer_len++; 2070 } 2071 data_len = allxfer_len; 2072 buf1 = (uint8_t *)buf2; 2073 while (data_len >= 4) { 2074 data = *buf2++; 2075 writel(data, iop_data); 2076 iop_data++; 2077 data_len -= 4; 2078 } 2079 if (data_len) { 2080 data = *buf2; 2081 writel(data, iop_data); 2082 } 2083 writel(allxfer_len, &pwbuffer->data_len); 2084 kfree(buf1); 2085 
arcmsr_iop_message_wrote(acb); 2086 } 2087 } 2088 2089 void 2090 arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb) 2091 { 2092 uint8_t *pQbuffer; 2093 struct QBUFFER __iomem *pwbuffer; 2094 uint8_t __iomem *iop_data; 2095 int32_t allxfer_len = 0; 2096 2097 if (acb->adapter_type > ACB_ADAPTER_TYPE_B) { 2098 arcmsr_write_ioctldata2iop_in_DWORD(acb); 2099 return; 2100 } 2101 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 2102 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 2103 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 2104 iop_data = (uint8_t __iomem *)pwbuffer->data; 2105 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) 2106 && (allxfer_len < 124)) { 2107 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; 2108 writeb(*pQbuffer, iop_data); 2109 acb->wqbuf_getIndex++; 2110 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; 2111 iop_data++; 2112 allxfer_len++; 2113 } 2114 writel(allxfer_len, &pwbuffer->data_len); 2115 arcmsr_iop_message_wrote(acb); 2116 } 2117 } 2118 2119 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) 2120 { 2121 unsigned long flags; 2122 2123 spin_lock_irqsave(&acb->wqbuffer_lock, flags); 2124 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; 2125 if (acb->wqbuf_getIndex != acb->wqbuf_putIndex) 2126 arcmsr_write_ioctldata2iop(acb); 2127 if (acb->wqbuf_getIndex == acb->wqbuf_putIndex) 2128 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; 2129 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); 2130 } 2131 2132 static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb) 2133 { 2134 uint32_t outbound_doorbell; 2135 struct MessageUnit_A __iomem *reg = acb->pmuA; 2136 outbound_doorbell = readl(®->outbound_doorbell); 2137 do { 2138 writel(outbound_doorbell, ®->outbound_doorbell); 2139 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) 2140 arcmsr_iop2drv_data_wrote_handle(acb); 2141 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) 2142 arcmsr_iop2drv_data_read_handle(acb); 2143 
outbound_doorbell = readl(®->outbound_doorbell); 2144 } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 2145 | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)); 2146 } 2147 static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB) 2148 { 2149 uint32_t outbound_doorbell; 2150 struct MessageUnit_C __iomem *reg = pACB->pmuC; 2151 /* 2152 ******************************************************************* 2153 ** Maybe here we need to check wrqbuffer_lock is lock or not 2154 ** DOORBELL: din! don! 2155 ** check if there are any mail need to pack from firmware 2156 ******************************************************************* 2157 */ 2158 outbound_doorbell = readl(®->outbound_doorbell); 2159 do { 2160 writel(outbound_doorbell, ®->outbound_doorbell_clear); 2161 readl(®->outbound_doorbell_clear); 2162 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) 2163 arcmsr_iop2drv_data_wrote_handle(pACB); 2164 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) 2165 arcmsr_iop2drv_data_read_handle(pACB); 2166 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) 2167 arcmsr_hbaC_message_isr(pACB); 2168 outbound_doorbell = readl(®->outbound_doorbell); 2169 } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 2170 | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 2171 | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)); 2172 } 2173 2174 static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB) 2175 { 2176 uint32_t outbound_doorbell; 2177 struct MessageUnit_D *pmu = pACB->pmuD; 2178 2179 outbound_doorbell = readl(pmu->outbound_doorbell); 2180 do { 2181 writel(outbound_doorbell, pmu->outbound_doorbell); 2182 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) 2183 arcmsr_hbaD_message_isr(pACB); 2184 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) 2185 arcmsr_iop2drv_data_wrote_handle(pACB); 2186 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK) 2187 arcmsr_iop2drv_data_read_handle(pACB); 2188 
outbound_doorbell = readl(pmu->outbound_doorbell); 2189 } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK 2190 | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK 2191 | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)); 2192 } 2193 2194 static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB) 2195 { 2196 uint32_t outbound_doorbell, in_doorbell, tmp; 2197 struct MessageUnit_E __iomem *reg = pACB->pmuE; 2198 2199 in_doorbell = readl(®->iobound_doorbell); 2200 outbound_doorbell = in_doorbell ^ pACB->in_doorbell; 2201 do { 2202 writel(0, ®->host_int_status); /* clear interrupt */ 2203 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) { 2204 arcmsr_iop2drv_data_wrote_handle(pACB); 2205 } 2206 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) { 2207 arcmsr_iop2drv_data_read_handle(pACB); 2208 } 2209 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) { 2210 arcmsr_hbaE_message_isr(pACB); 2211 } 2212 tmp = in_doorbell; 2213 in_doorbell = readl(®->iobound_doorbell); 2214 outbound_doorbell = tmp ^ in_doorbell; 2215 } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 2216 | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 2217 | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE)); 2218 pACB->in_doorbell = in_doorbell; 2219 } 2220 2221 static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb) 2222 { 2223 uint32_t flag_ccb; 2224 struct MessageUnit_A __iomem *reg = acb->pmuA; 2225 struct ARCMSR_CDB *pARCMSR_CDB; 2226 struct CommandControlBlock *pCCB; 2227 bool error; 2228 while ((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) { 2229 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ 2230 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 2231 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; 2232 arcmsr_drain_donequeue(acb, pCCB, error); 2233 } 2234 } 2235 static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb) 2236 { 2237 uint32_t index; 2238 uint32_t flag_ccb; 2239 struct MessageUnit_B *reg = acb->pmuB; 2240 struct ARCMSR_CDB *pARCMSR_CDB; 2241 struct CommandControlBlock *pCCB; 2242 bool error; 2243 index = reg->doneq_index; 2244 while ((flag_ccb = reg->done_qbuffer[index]) != 0) { 2245 reg->done_qbuffer[index] = 0; 2246 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/ 2247 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 2248 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; 2249 arcmsr_drain_donequeue(acb, pCCB, error); 2250 index++; 2251 index %= ARCMSR_MAX_HBB_POSTQUEUE; 2252 reg->doneq_index = index; 2253 } 2254 } 2255 2256 static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) 2257 { 2258 struct MessageUnit_C __iomem *phbcmu; 2259 struct ARCMSR_CDB *arcmsr_cdb; 2260 struct CommandControlBlock *ccb; 2261 uint32_t flag_ccb, ccb_cdb_phy, throttling = 0; 2262 int error; 2263 2264 phbcmu = acb->pmuC; 2265 /* areca cdb command done */ 2266 /* Use correct offset and size for syncing */ 2267 2268 while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) != 2269 0xFFFFFFFF) { 2270 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); 2271 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset 2272 + ccb_cdb_phy); 2273 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, 2274 arcmsr_cdb); 2275 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) 2276 ? 
true : false; 2277 /* check if command done with no error */ 2278 arcmsr_drain_donequeue(acb, ccb, error); 2279 throttling++; 2280 if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { 2281 writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, 2282 &phbcmu->inbound_doorbell); 2283 throttling = 0; 2284 } 2285 } 2286 } 2287 2288 static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) 2289 { 2290 u32 outbound_write_pointer, doneq_index, index_stripped, toggle; 2291 uint32_t addressLow, ccb_cdb_phy; 2292 int error; 2293 struct MessageUnit_D *pmu; 2294 struct ARCMSR_CDB *arcmsr_cdb; 2295 struct CommandControlBlock *ccb; 2296 unsigned long flags; 2297 2298 spin_lock_irqsave(&acb->doneq_lock, flags); 2299 pmu = acb->pmuD; 2300 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; 2301 doneq_index = pmu->doneq_index; 2302 if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) { 2303 do { 2304 toggle = doneq_index & 0x4000; 2305 index_stripped = (doneq_index & 0xFFF) + 1; 2306 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; 2307 pmu->doneq_index = index_stripped ? (index_stripped | toggle) : 2308 ((toggle ^ 0x4000) + 1); 2309 doneq_index = pmu->doneq_index; 2310 addressLow = pmu->done_qbuffer[doneq_index & 2311 0xFFF].addressLow; 2312 ccb_cdb_phy = (addressLow & 0xFFFFFFF0); 2313 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset 2314 + ccb_cdb_phy); 2315 ccb = container_of(arcmsr_cdb, 2316 struct CommandControlBlock, arcmsr_cdb); 2317 error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) 2318 ? 
true : false; 2319 arcmsr_drain_donequeue(acb, ccb, error); 2320 writel(doneq_index, pmu->outboundlist_read_pointer); 2321 } while ((doneq_index & 0xFFF) != 2322 (outbound_write_pointer & 0xFFF)); 2323 } 2324 writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR, 2325 pmu->outboundlist_interrupt_cause); 2326 readl(pmu->outboundlist_interrupt_cause); 2327 spin_unlock_irqrestore(&acb->doneq_lock, flags); 2328 } 2329 2330 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb) 2331 { 2332 uint32_t doneq_index; 2333 uint16_t cmdSMID; 2334 int error; 2335 struct MessageUnit_E __iomem *pmu; 2336 struct CommandControlBlock *ccb; 2337 unsigned long flags; 2338 2339 spin_lock_irqsave(&acb->doneq_lock, flags); 2340 doneq_index = acb->doneq_index; 2341 pmu = acb->pmuE; 2342 while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) { 2343 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; 2344 ccb = acb->pccb_pool[cmdSMID]; 2345 error = (acb->pCompletionQ[doneq_index].cmdFlag 2346 & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; 2347 arcmsr_drain_donequeue(acb, ccb, error); 2348 doneq_index++; 2349 if (doneq_index >= acb->completionQ_entry) 2350 doneq_index = 0; 2351 } 2352 acb->doneq_index = doneq_index; 2353 writel(doneq_index, &pmu->reply_post_consumer_index); 2354 spin_unlock_irqrestore(&acb->doneq_lock, flags); 2355 } 2356 2357 /* 2358 ********************************************************************************** 2359 ** Handle a message interrupt 2360 ** 2361 ** The only message interrupt we expect is in response to a query for the current adapter config. 2362 ** We want this in order to compare the drivemap so that we can detect newly-attached drives. 
2363 ********************************************************************************** 2364 */ 2365 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb) 2366 { 2367 struct MessageUnit_A __iomem *reg = acb->pmuA; 2368 /*clear interrupt and message state*/ 2369 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus); 2370 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2371 schedule_work(&acb->arcmsr_do_message_isr_bh); 2372 } 2373 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb) 2374 { 2375 struct MessageUnit_B *reg = acb->pmuB; 2376 2377 /*clear interrupt and message state*/ 2378 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); 2379 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2380 schedule_work(&acb->arcmsr_do_message_isr_bh); 2381 } 2382 /* 2383 ********************************************************************************** 2384 ** Handle a message interrupt 2385 ** 2386 ** The only message interrupt we expect is in response to a query for the 2387 ** current adapter config. 2388 ** We want this in order to compare the drivemap so that we can detect newly-attached drives. 
2389 ********************************************************************************** 2390 */ 2391 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb) 2392 { 2393 struct MessageUnit_C __iomem *reg = acb->pmuC; 2394 /*clear interrupt and message state*/ 2395 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear); 2396 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2397 schedule_work(&acb->arcmsr_do_message_isr_bh); 2398 } 2399 2400 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb) 2401 { 2402 struct MessageUnit_D *reg = acb->pmuD; 2403 2404 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell); 2405 readl(reg->outbound_doorbell); 2406 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2407 schedule_work(&acb->arcmsr_do_message_isr_bh); 2408 } 2409 2410 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb) 2411 { 2412 struct MessageUnit_E __iomem *reg = acb->pmuE; 2413 2414 writel(0, ®->host_int_status); 2415 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2416 schedule_work(&acb->arcmsr_do_message_isr_bh); 2417 } 2418 2419 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb) 2420 { 2421 uint32_t outbound_intstatus; 2422 struct MessageUnit_A __iomem *reg = acb->pmuA; 2423 outbound_intstatus = readl(®->outbound_intstatus) & 2424 acb->outbound_int_enable; 2425 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) 2426 return IRQ_NONE; 2427 do { 2428 writel(outbound_intstatus, ®->outbound_intstatus); 2429 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) 2430 arcmsr_hbaA_doorbell_isr(acb); 2431 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) 2432 arcmsr_hbaA_postqueue_isr(acb); 2433 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) 2434 arcmsr_hbaA_message_isr(acb); 2435 outbound_intstatus = readl(®->outbound_intstatus) & 2436 acb->outbound_int_enable; 2437 } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT 2438 | 
ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 2439 | ARCMSR_MU_OUTBOUND_MESSAGE0_INT)); 2440 return IRQ_HANDLED; 2441 } 2442 2443 static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb) 2444 { 2445 uint32_t outbound_doorbell; 2446 struct MessageUnit_B *reg = acb->pmuB; 2447 outbound_doorbell = readl(reg->iop2drv_doorbell) & 2448 acb->outbound_int_enable; 2449 if (!outbound_doorbell) 2450 return IRQ_NONE; 2451 do { 2452 writel(~outbound_doorbell, reg->iop2drv_doorbell); 2453 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); 2454 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) 2455 arcmsr_iop2drv_data_wrote_handle(acb); 2456 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) 2457 arcmsr_iop2drv_data_read_handle(acb); 2458 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) 2459 arcmsr_hbaB_postqueue_isr(acb); 2460 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) 2461 arcmsr_hbaB_message_isr(acb); 2462 outbound_doorbell = readl(reg->iop2drv_doorbell) & 2463 acb->outbound_int_enable; 2464 } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK 2465 | ARCMSR_IOP2DRV_DATA_READ_OK 2466 | ARCMSR_IOP2DRV_CDB_DONE 2467 | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)); 2468 return IRQ_HANDLED; 2469 } 2470 2471 static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB) 2472 { 2473 uint32_t host_interrupt_status; 2474 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; 2475 /* 2476 ********************************************* 2477 ** check outbound intstatus 2478 ********************************************* 2479 */ 2480 host_interrupt_status = readl(&phbcmu->host_int_status) & 2481 (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | 2482 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR); 2483 if (!host_interrupt_status) 2484 return IRQ_NONE; 2485 do { 2486 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) 2487 arcmsr_hbaC_doorbell_isr(pACB); 2488 /* MU post queue interrupts*/ 2489 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) 2490 
arcmsr_hbaC_postqueue_isr(pACB); 2491 host_interrupt_status = readl(&phbcmu->host_int_status); 2492 } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | 2493 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); 2494 return IRQ_HANDLED; 2495 } 2496 2497 static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB) 2498 { 2499 u32 host_interrupt_status; 2500 struct MessageUnit_D *pmu = pACB->pmuD; 2501 2502 host_interrupt_status = readl(pmu->host_int_status) & 2503 (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR | 2504 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR); 2505 if (!host_interrupt_status) 2506 return IRQ_NONE; 2507 do { 2508 /* MU post queue interrupts*/ 2509 if (host_interrupt_status & 2510 ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR) 2511 arcmsr_hbaD_postqueue_isr(pACB); 2512 if (host_interrupt_status & 2513 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR) 2514 arcmsr_hbaD_doorbell_isr(pACB); 2515 host_interrupt_status = readl(pmu->host_int_status); 2516 } while (host_interrupt_status & 2517 (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR | 2518 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)); 2519 return IRQ_HANDLED; 2520 } 2521 2522 static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB) 2523 { 2524 uint32_t host_interrupt_status; 2525 struct MessageUnit_E __iomem *pmu = pACB->pmuE; 2526 2527 host_interrupt_status = readl(&pmu->host_int_status) & 2528 (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | 2529 ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR); 2530 if (!host_interrupt_status) 2531 return IRQ_NONE; 2532 do { 2533 /* MU ioctl transfer doorbell interrupts*/ 2534 if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) { 2535 arcmsr_hbaE_doorbell_isr(pACB); 2536 } 2537 /* MU post queue interrupts*/ 2538 if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) { 2539 arcmsr_hbaE_postqueue_isr(pACB); 2540 } 2541 host_interrupt_status = readl(&pmu->host_int_status); 2542 } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | 2543 
ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)); 2544 return IRQ_HANDLED; 2545 } 2546 2547 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) 2548 { 2549 switch (acb->adapter_type) { 2550 case ACB_ADAPTER_TYPE_A: 2551 return arcmsr_hbaA_handle_isr(acb); 2552 break; 2553 case ACB_ADAPTER_TYPE_B: 2554 return arcmsr_hbaB_handle_isr(acb); 2555 break; 2556 case ACB_ADAPTER_TYPE_C: 2557 return arcmsr_hbaC_handle_isr(acb); 2558 case ACB_ADAPTER_TYPE_D: 2559 return arcmsr_hbaD_handle_isr(acb); 2560 case ACB_ADAPTER_TYPE_E: 2561 return arcmsr_hbaE_handle_isr(acb); 2562 default: 2563 return IRQ_NONE; 2564 } 2565 } 2566 2567 static void arcmsr_iop_parking(struct AdapterControlBlock *acb) 2568 { 2569 if (acb) { 2570 /* stop adapter background rebuild */ 2571 if (acb->acb_flags & ACB_F_MSG_START_BGRB) { 2572 uint32_t intmask_org; 2573 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 2574 intmask_org = arcmsr_disable_outbound_ints(acb); 2575 arcmsr_stop_adapter_bgrb(acb); 2576 arcmsr_flush_adapter_cache(acb); 2577 arcmsr_enable_outbound_ints(acb, intmask_org); 2578 } 2579 } 2580 } 2581 2582 2583 void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb) 2584 { 2585 uint32_t i; 2586 2587 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2588 for (i = 0; i < 15; i++) { 2589 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2590 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2591 acb->rqbuf_getIndex = 0; 2592 acb->rqbuf_putIndex = 0; 2593 arcmsr_iop_message_read(acb); 2594 mdelay(30); 2595 } else if (acb->rqbuf_getIndex != 2596 acb->rqbuf_putIndex) { 2597 acb->rqbuf_getIndex = 0; 2598 acb->rqbuf_putIndex = 0; 2599 mdelay(30); 2600 } else 2601 break; 2602 } 2603 } 2604 } 2605 2606 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, 2607 struct scsi_cmnd *cmd) 2608 { 2609 char *buffer; 2610 unsigned short use_sg; 2611 int retvalue = 0, transfer_len = 0; 2612 unsigned long flags; 2613 struct CMD_MESSAGE_FIELD *pcmdmessagefld; 2614 uint32_t controlcode = 
(uint32_t)cmd->cmnd[5] << 24 | 2615 (uint32_t)cmd->cmnd[6] << 16 | 2616 (uint32_t)cmd->cmnd[7] << 8 | 2617 (uint32_t)cmd->cmnd[8]; 2618 struct scatterlist *sg; 2619 2620 use_sg = scsi_sg_count(cmd); 2621 sg = scsi_sglist(cmd); 2622 buffer = kmap_atomic(sg_page(sg)) + sg->offset; 2623 if (use_sg > 1) { 2624 retvalue = ARCMSR_MESSAGE_FAIL; 2625 goto message_out; 2626 } 2627 transfer_len += sg->length; 2628 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 2629 retvalue = ARCMSR_MESSAGE_FAIL; 2630 pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__); 2631 goto message_out; 2632 } 2633 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer; 2634 switch (controlcode) { 2635 case ARCMSR_MESSAGE_READ_RQBUFFER: { 2636 unsigned char *ver_addr; 2637 uint8_t *ptmpQbuffer; 2638 uint32_t allxfer_len = 0; 2639 ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); 2640 if (!ver_addr) { 2641 retvalue = ARCMSR_MESSAGE_FAIL; 2642 pr_info("%s: memory not enough!\n", __func__); 2643 goto message_out; 2644 } 2645 ptmpQbuffer = ver_addr; 2646 spin_lock_irqsave(&acb->rqbuffer_lock, flags); 2647 if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) { 2648 unsigned int tail = acb->rqbuf_getIndex; 2649 unsigned int head = acb->rqbuf_putIndex; 2650 unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); 2651 2652 allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER); 2653 if (allxfer_len > ARCMSR_API_DATA_BUFLEN) 2654 allxfer_len = ARCMSR_API_DATA_BUFLEN; 2655 2656 if (allxfer_len <= cnt_to_end) 2657 memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len); 2658 else { 2659 memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end); 2660 memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end); 2661 } 2662 acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER; 2663 } 2664 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, 2665 allxfer_len); 2666 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2667 struct QBUFFER __iomem *prbuffer; 2668 
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2669 prbuffer = arcmsr_get_iop_rqbuffer(acb); 2670 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) 2671 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 2672 } 2673 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); 2674 kfree(ver_addr); 2675 pcmdmessagefld->cmdmessage.Length = allxfer_len; 2676 if (acb->fw_flag == FW_DEADLOCK) 2677 pcmdmessagefld->cmdmessage.ReturnCode = 2678 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; 2679 else 2680 pcmdmessagefld->cmdmessage.ReturnCode = 2681 ARCMSR_MESSAGE_RETURNCODE_OK; 2682 break; 2683 } 2684 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 2685 unsigned char *ver_addr; 2686 uint32_t user_len; 2687 int32_t cnt2end; 2688 uint8_t *pQbuffer, *ptmpuserbuffer; 2689 2690 user_len = pcmdmessagefld->cmdmessage.Length; 2691 if (user_len > ARCMSR_API_DATA_BUFLEN) { 2692 retvalue = ARCMSR_MESSAGE_FAIL; 2693 goto message_out; 2694 } 2695 2696 ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); 2697 if (!ver_addr) { 2698 retvalue = ARCMSR_MESSAGE_FAIL; 2699 goto message_out; 2700 } 2701 ptmpuserbuffer = ver_addr; 2702 2703 memcpy(ptmpuserbuffer, 2704 pcmdmessagefld->messagedatabuffer, user_len); 2705 spin_lock_irqsave(&acb->wqbuffer_lock, flags); 2706 if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) { 2707 struct SENSE_DATA *sensebuffer = 2708 (struct SENSE_DATA *)cmd->sense_buffer; 2709 arcmsr_write_ioctldata2iop(acb); 2710 /* has error report sensedata */ 2711 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; 2712 sensebuffer->SenseKey = ILLEGAL_REQUEST; 2713 sensebuffer->AdditionalSenseLength = 0x0A; 2714 sensebuffer->AdditionalSenseCode = 0x20; 2715 sensebuffer->Valid = 1; 2716 retvalue = ARCMSR_MESSAGE_FAIL; 2717 } else { 2718 pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex]; 2719 cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex; 2720 if (user_len > cnt2end) { 2721 memcpy(pQbuffer, ptmpuserbuffer, cnt2end); 2722 ptmpuserbuffer += cnt2end; 2723 user_len -= cnt2end; 2724 acb->wqbuf_putIndex = 0; 2725 
				/* wrapped past end of ring: continue at start */
				pQbuffer = acb->wqbuffer;
			}
			memcpy(pQbuffer, ptmpuserbuffer, user_len);
			acb->wqbuf_putIndex += user_len;
			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
			/*
			 * If the IOP previously drained the queue empty, kick a
			 * fresh transfer so the new data is pushed to the IOP.
			 */
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_write_ioctldata2iop(acb);
			}
		}
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		kfree(ver_addr);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		/* Reset the IOP-to-driver (read) ring under its lock. */
		uint8_t *pQbuffer = acb->rqbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		/* Reset the driver-to-IOP (write) ring under its lock. */
		uint8_t *pQbuffer = acb->wqbuffer;
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		/* Reset both rings; each is cleared under its own lock. */
		uint8_t *pQbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		pQbuffer = acb->rqbuffer;
		/*
		 * NOTE(review): the single-queue cases above clear
		 * ARCMSR_MAX_QBUFFER bytes but this path clears
		 * sizeof(struct QBUFFER) — confirm the two sizes agree.
		 */
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		/* Pure status query: report deadlock state or code 3F. */
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		/* Identification handshake: copy greeting into reply buffer. */
		int8_t *hello_string = "Hello! I am ARCMSR";
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		memcpy(pcmdmessagefld->messagedatabuffer,
			hello_string, (int16_t)strlen(hello_string));
		break;
	}
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
		/* Ask the IOP to park (quiesce) after reporting status. */
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_iop_parking(acb);
		break;
	}
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
		/* Request a controller cache flush. */
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_flush_adapter_cache(acb);
		break;
	}
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: unknown controlcode!\n", __func__);
	}
	message_out:
	if (use_sg) {
		/* Undo the kmap_atomic done when the SG buffer was mapped. */
		struct scatterlist *sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
	}
	return retvalue;
}

/*
 * arcmsr_get_freeccb - pop one free CCB from the adapter's free list.
 *
 * Returns a command control block detached from ccb_free_list, or NULL
 * if the list is empty.  The list is protected by ccblist_lock.
 */
static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
	struct list_head *head = &acb->ccb_free_list;
	struct CommandControlBlock *ccb = NULL;
	unsigned long flags;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	if (!list_empty(head)) {
		ccb = list_entry(head->next, struct CommandControlBlock, list);
		list_del_init(&ccb->list);
	}else{
		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	return ccb;
}

/*
 * arcmsr_handle_virtual_command - service SCSI commands addressed to the
 * virtual device (target id 16) used for IOP message transfer.  INQUIRY is
 * answered locally; READ/WRITE BUFFER is routed to the message interface.
 */
static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY:
	{
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		/* Only LUN 0 exists on the virtual target. */
		if (cmd->device->lun) {
			cmd->result = (DID_TIME_OUT << 16);
			cmd->scsi_done(cmd);
			return;
		}
		/*
		 * Build a minimal standard INQUIRY response.
		 * NOTE(review): inqdata[3], [5], [6], [7] are never written,
		 * so uninitialized stack bytes are copied to the caller's
		 * buffer — verify whether they should be zeroed.
		 */
		inqdata[0] = TYPE_PROCESSOR;
		/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;
		/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;
		/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;
		/* length of additional data */
		strncpy(&inqdata[8], "Areca   ", 8);
		/* Vendor Identification */
		strncpy(&inqdata[16], "RAID controller ", 16);
		/* Product Identification */
		strncpy(&inqdata[32], "R001", 4); /* Product Revision */

		/* Copy the response into the command's first SG segment. */
		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg)) + sg->offset;

		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);

		cmd->scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		/* Tunnel the buffer through the IOP message interface. */
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
	}
	break;
	default:
		/* Anything else completes immediately with the default result. */
		cmd->scsi_done(cmd);
	}
}

/*
 * arcmsr_queue_command_lck - queuecommand entry (wrapped by DEF_SCSI_QCMD).
 *
 * Target id 16 is the virtual IOP-message device and is handled inline;
 * real commands get a free CCB, are built, and posted to the adapter.
 * Returns SCSI_MLQUEUE_HOST_BUSY when no CCB is available so the midlayer
 * retries later.
 */
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;
	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}

static DEF_SCSI_QCMD(arcmsr_queue_command)

/*
 * arcmsr_get_adapter_config - copy firmware identification data out of the
 * adapter's message r/w buffer (memory-mapped; hence readl per 32-bit word).
 *
 * Layout read from rwbuffer: [15..16] model (8 bytes), [17..20] version
 * (16 bytes), [21..24] device map (16 bytes), plus scalar config words.
 */
static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
	int count;
	uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
	uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
	uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
	uint32_t *firm_model = &rwbuffer[15];
	uint32_t *firm_version = &rwbuffer[17];
	uint32_t *device_map = &rwbuffer[21];

	/* 2 words = 8-byte model string */
	count = 2;
	while (count) {
		*acb_firm_model = readl(firm_model);
		acb_firm_model++;
		firm_model++;
		count--;
	}
	/* 4 words = 16-byte version string */
	count = 4;
	while (count) {
		*acb_firm_version = readl(firm_version);
		acb_firm_version++;
		firm_version++;
		count--;
	}
	/* 4 words = 16-byte device map */
	count = 4;
	while (count) {
		*acb_device_map = readl(device_map);
		acb_device_map++;
		device_map++;
		count--;
	}
	pACB->signature = readl(&rwbuffer[0]);
	pACB->firm_request_len = readl(&rwbuffer[1]);
	pACB->firm_numbers_queue = readl(&rwbuffer[2]);
	pACB->firm_sdram_size = readl(&rwbuffer[3]);
	pACB->firm_hd_channels = readl(&rwbuffer[4]);
	pACB->firm_cfg_version = readl(&rwbuffer[25]);
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		pACB->host->host_no,
		pACB->firm_model,
		pACB->firm_version);
}

/*
 * arcmsr_hbaA_get_config - fetch firmware config on type A message units.
 * Posts GET_CONFIG via inbound_msgaddr0 and waits for the message ack.
 * Returns false on timeout.
 */
static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}
static bool
arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	/*
	 * Type B uses doorbell registers instead of a message address:
	 * first switch firmware into driver mode, then request the config.
	 */
	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
		return false;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}

/*
 * arcmsr_hbaC_get_config - fetch firmware config on type C message units.
 * Masks all outbound interrupts first, then posts GET_CONFIG and rings the
 * inbound doorbell.  Returns false on message-ack timeout.
 */
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
	uint32_t intmask_org;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}

/*
 * arcmsr_hbaD_get_config - fetch firmware config on type D (ARC1214)
 * message units.  Clears any pending IOP2DRV message interrupt before
 * issuing GET_CONFIG.  Returns false on timeout.
 */
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	if (readl(acb->pmuD->outbound_doorbell) &
		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
			acb->pmuD->outbound_doorbell);/*clear interrupt*/
	}
	arcmsr_wait_firmware_ready(acb);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
	/* wait message ready */
	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
	return true;
}

/*
 * arcmsr_hbaE_get_config - fetch firmware config on type E (1884) message
 * units.  The out_doorbell value is XOR-toggled each time, per this
 * adapter's doorbell protocol.  Returns false on timeout.
 */
static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	mdelay(20);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}

/*
 * arcmsr_get_firmware_spec - dispatch to the adapter-type-specific
 * get_config routine, then derive queue-depth limits from the reported
 * firmware queue size.
 */
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	bool rtn = false;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		rtn = arcmsr_hbaE_get_config(acb);
		break;
	default:
		break;
	}
	/*
	 * NOTE(review): the limits below are computed from
	 * firm_numbers_queue even when get_config failed (rtn == false) —
	 * confirm callers bail out on failure before these values are used.
	 */
	acb->maxOutstanding = acb->firm_numbers_queue - 1;
	if (acb->host->can_queue >= acb->firm_numbers_queue)
		acb->host->can_queue = acb->maxOutstanding;
	else
		acb->maxOutstanding = acb->host->can_queue;
	acb->maxFreeCCB = acb->host->can_queue;
	if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
		acb->maxFreeCCB += 64;
	return rtn;
}

/*
 * arcmsr_hbaA_polling_ccbdone - poll the type A outbound queue until the
 * given CCB completes (or ~100 * 25ms passes).  Used by error handling
 * paths where interrupts cannot be relied on.  Completions for other CCBs
 * encountered while polling are reported normally.  Returns SUCCESS or
 * FAILED.
 */
static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
	polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		/* 0xFFFFFFFF means the outbound queue is empty */
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		/* flag_ccb carries the CDB physical address >> 5 */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, (u32)ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}

/*
 * arcmsr_hbaB_polling_ccbdone - type B variant of the polling loop.
 * Completions arrive through the in-memory done_qbuffer ring rather than
 * a hardware queue port; flag_ccb == 0 means the ring slot is empty.
 * Returns SUCCESS or FAILED.
 */
static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;
	polling_hbb_ccb_retry:

	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while(1){
		index = reg->doneq_index;
		flag_ccb = reg->done_qbuffer[index];
		if (flag_ccb == 0) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		/* consume the slot and advance the ring index */
		reg->done_qbuffer[index] = 0;
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,(u32)ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}

/*
 * arcmsr_hbaC_polling_ccbdone - type C variant.  Queue-empty is detected
 * via the POSTQUEUE bit in host_int_status; the low queue port word holds
 * the CDB physical address (32-byte aligned, low 4 bits are flags).
 * Returns SUCCESS or FAILED.
 */
static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	uint32_t flag_ccb, ccb_cdb_phy;
	struct ARCMSR_CDB *arcmsr_cdb;
	bool error;
	struct CommandControlBlock *pCCB;
	uint32_t poll_ccb_done = 0, poll_count = 0;
	int rtn;
	polling_hbc_ccb_retry:
	poll_count++;
	while (1) {
		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hbc_ccb_retry;
			}
		}
		flag_ccb = readl(&reg->outbound_queueport_low);
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ?
			1 : 0;
		/* check ifcommand done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}

/*
 * arcmsr_hbaD_polling_ccbdone - type D (ARC1214) variant.  The done queue
 * lives in host memory; slot 0's addressLow doubles as the IOP's write
 * pointer, and doneq_index carries a toggle bit (0x4000) alongside the
 * 12-bit ring position.  doneq_lock protects index manipulation against
 * the interrupt path.  Returns SUCCESS or FAILED.
 */
static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
	unsigned long flags;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_D *pmu = acb->pmuD;

polling_hbaD_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
		doneq_index = pmu->doneq_index;
		/* ring empty when producer and consumer positions match */
		if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaD_ccb_retry;
			}
		}
		/* advance the 12-bit index, flipping the toggle bit on wrap */
		toggle = doneq_index & 0x4000;
		index_stripped = (doneq_index & 0xFFF) + 1;
		index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
			((toggle ^ 0x4000) + 1);
		doneq_index = pmu->doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
			ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		if ((pCCB->acb != acb) ||
			(pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			?
			true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}

/*
 * arcmsr_hbaE_polling_ccbdone - type E (1884) variant.  Completions are
 * read from the host-memory completion queue (pCompletionQ); the CCB is
 * located by its SMID rather than by physical address.  The consumer
 * index is written back to the adapter when polling finishes.
 * Returns SUCCESS or FAILED.
 */
static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
	uint16_t cmdSMID;
	unsigned long flags;
	int rtn;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_E __iomem *reg = acb->pmuE;

polling_hbaC_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		doneq_index = acb->doneq_index;
		/* queue empty when producer index equals our consumer index */
		if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
				doneq_index) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaC_ccb_retry;
			}
		}
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
		acb->doneq_index = doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		pCCB = acb->pccb_pool[cmdSMID];
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check if command done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/*
		 * NOTE(review): cmdFlag is read at the already-incremented
		 * doneq_index, i.e. from the NEXT queue entry, while cmdSMID
		 * above was read before the increment.  This looks like an
		 * off-by-one — verify against the interrupt-path handler,
		 * which reads cmdFlag before advancing the index.
		 */
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	/* publish our consumer position back to the adapter */
	writel(doneq_index, &reg->reply_post_consumer_index);
	return rtn;
}

/*
 * arcmsr_polling_ccbdone - dispatch to the adapter-type-specific polling
 * routine.  Returns SUCCESS or FAILED from the underlying implementation.
 */
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
		struct CommandControlBlock *poll_ccb)
{
	int rtn = 0;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
		}
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_E:
		rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
		break;
	}
	return rtn;
}

/*
 * arcmsr_set_iop_datetime - timer callback that pushes the current wall
 * clock time to the adapter, encoded into two 32-bit message words via
 * the union below, then re-arms itself.
 */
static void arcmsr_set_iop_datetime(struct timer_list *t)
{
	struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
	unsigned int next_time;
	struct tm tm;

	/* overlay: six date/time bytes viewed as two 32-bit message words */
	union {
		struct {
			uint16_t signature;
			uint8_t year;
			uint8_t month;
			uint8_t date;
			uint8_t hour;
			uint8_t minute;
			uint8_t second;
		} a;
		struct {
			uint32_t msg_time[2];
		} b;
	} datetime;

	/* local time: offset UTC by the system timezone */
	time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);

	datetime.a.signature = 0x55AA;
	datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
	datetime.a.month = tm.tm_mon;
	datetime.a.date = tm.tm_mday;
	datetime.a.hour = tm.tm_hour;
	datetime.a.minute = tm.tm_min;
	datetime.a.second = tm.tm_sec;

	/* each adapter family has its own rwbuffer + notify mechanism */
	switch (pacb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = pacb->pmuA;
		writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_B *reg = pacb->pmuB;
		rwbuffer = reg->message_rwbuffer;
		writel(datetime.b.msg_time[0], rwbuffer++);
		writel(datetime.b.msg_time[1], rwbuffer++);
		writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = pacb->pmuC;
		writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = pacb->pmuD;
		rwbuffer = reg->msgcode_rwbuffer;
		writel(datetime.b.msg_time[0], rwbuffer++);
		writel(datetime.b.msg_time[1], rwbuffer++);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = pacb->pmuE;
		writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		/* type E doorbell protocol: toggle the CMD_DONE bit */
		pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(pacb->out_doorbell, &reg->iobound_doorbell);
		break;
	}
	}
	/* refresh hourly when a timezone offset applies, else every minute */
	if (sys_tz.tz_minuteswest)
		next_time = ARCMSR_HOURS;
	else
		next_time = ARCMSR_MINUTES;
	mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
}

/*
 * arcmsr_iop_confirm - tell the IOP where the driver's command structures
 * live in bus (DMA) address space, using the adapter-family-specific
 * "set config" message protocol.  Returns 0 on success, 1 on timeout.
 */
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
	dma_addr_t dma_coherent_handle;

	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
		/* B/D post queues live in the second coherent region */
		dma_coherent_handle = acb->dma_coherent_handle2;
		break;
	case ACB_ADAPTER_TYPE_E:
		dma_coherent_handle = acb->dma_coherent_handle +
			offsetof(struct CommandControlBlock, arcmsr_cdb);
		break;
	default:
		dma_coherent_handle = acb->dma_coherent_handle;
		break;
	}
	cdb_phyaddr = lower_32_bits(dma_coherent_handle);
	cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	/*
	***********************************************************************
	** if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		/* type A only needs the high 32 bits, and only if non-zero */
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			writel(ARCMSR_SIGNATURE_SET_CONFIG, \
						&reg->message_rwbuffer[0]);
			writel(cdb_phyaddr_hi32,
					&reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
						&reg->inbound_msgaddr0);
			if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
				part physical address timeout\n",
				acb->host->host_no);
				return 1;
			}
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		/*
		 * Type B programs the full post/done queue window:
		 * signature, high address, postQ base, doneQ base, size.
		 */
		uint32_t __iomem *rwbuffer;

		struct MessageUnit_B *reg = acb->pmuB;
		reg->postq_index = 0;
		reg->doneq_index = 0;
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
				acb->host->host_no);
			return 1;
		}
		rwbuffer = reg->message_rwbuffer;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normal should be zero */
		writel(cdb_phyaddr_hi32, rwbuffer++);
		/* postQ size (256 + 8)*4 */
		writel(cdb_phyaddr, rwbuffer++);
		/* doneQ size (256 + 8)*4 */
		writel(cdb_phyaddr + 1056, rwbuffer++);
		/* ccb maxQ size must be --> [(256 + 8)*4]*/
		writel(1056, rwbuffer);

		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
			timeout \n",acb->host->host_no);
			return 1;
		}
		writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			pr_err("arcmsr%d: can't set driver mode.\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		/* like type A, but acked through the inbound doorbell too */
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_C __iomem *reg = acb->pmuC;

			printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
					acb->adapter_index, cdb_phyaddr_hi32);
			writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
				timeout \n", acb->host->host_no);
				return 1;
			}
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		/* program ARC1214 post/done queue bases and depth (0x100) */
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = acb->pmuD;
		reg->postq_index = 0;
		reg->doneq_index = 0;
		rwbuffer = reg->msgcode_rwbuffer;
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		writel(cdb_phyaddr_hi32, rwbuffer++);
		writel(cdb_phyaddr, rwbuffer++);
		writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
			sizeof(struct InBound_SRB)), rwbuffer++);
		writel(0x100, rwbuffer);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
		if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		/*
		 * Type E sends both regions: the CDB pool address/size in
		 * words 2-4, then the completion queue (second coherent
		 * region) address/size in words 5-7.
		 */
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
		writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
		writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
		writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
		dma_coherent_handle = acb->dma_coherent_handle2;
		cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
		/* double shift avoids UB when dma_addr_t is 32-bit */
		cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
		writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
		writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout \n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	}
	return 0;
}

/*
 * arcmsr_wait_firmware_ready - busy-wait (no timeout) until the adapter
 * firmware reports ready via its family-specific status register.
 */
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		do {
			firmware_state = readl(reg->iop2drv_doorbell);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		/* acknowledge so the IOP stops asserting the doorbell */
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		do {
			firmware_state = readl(reg->outbound_msgaddr1);
		} while ((firmware_state &
			ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	}
}

/*
 * arcmsr_request_device_map - periodic timer callback that asks the IOP
 * for its current device map (hot plug detection).  Skips the request and
 * just re-arms while a bus reset/abort is in progress or the firmware is
 * suspected dead (rq_map_token exhausted).
 */
static void arcmsr_request_device_map(struct timer_list *t)
{
	struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
	if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
			(acb->acb_flags & ACB_F_BUS_RESET) ||
			(acb->acb_flags & ACB_F_ABORT)) {
		mod_timer(&acb->eternal_timer,
			jiffies + msecs_to_jiffies(6 * HZ));
	} else {
		acb->fw_flag = FW_NORMAL;
		/* token unchanged since last tick: firmware answered; refill */
		if (atomic_read(&acb->ante_token_value) ==
			atomic_read(&acb->rq_map_token)) {
			atomic_set(&acb->rq_map_token, 16);
		}
		/* remember current token so next tick can detect progress */
		atomic_set(&acb->ante_token_value,
			atomic_read(&acb->rq_map_token));
		/* token exhausted: skip this poll, only re-arm the timer */
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies +
				msecs_to_jiffies(6 * HZ));
			return;
		}
		/* issue GET_CONFIG through the adapter-specific message unit */
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			break;
			}
		case ACB_ADAPTER_TYPE_B: {
			struct MessageUnit_B *reg = acb->pmuB;
			writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_C: {
			struct MessageUnit_C __iomem *reg = acb->pmuC;
			/* type C needs a doorbell kick after posting the message */
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_D: {
			struct MessageUnit_D *reg = acb->pmuD;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
			break;
			}
		case ACB_ADAPTER_TYPE_E: {
			struct MessageUnit_E __iomem *reg = acb->pmuE;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			/* type E toggles a bit in a shadow doorbell value */
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
			}
		default:
			return;
		}
		acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
}

/*
 * arcmsr_hbaA_start_bgrb - tell a type A adapter to start its background
 * rebuild, then wait for the message-interrupt acknowledgement.
 * NOTE(review): "rebulid" typo in the log text left untouched (doc-only edit).
 */
static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
rebulid' timeout \n", acb->host->host_no);
	}
}

/* arcmsr_hbaB_start_bgrb - type B variant: START_BGRB via drv2iop doorbell */
static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
rebulid' timeout \n",acb->host->host_no);
	}
}

/* arcmsr_hbaC_start_bgrb - type C variant: message plus doorbell kick */
static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
rebulid' timeout \n", pACB->host->host_no);
	}
	return;
}

/* arcmsr_hbaD_start_bgrb - type D variant */
static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *pmu = pACB->pmuD;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebulid' timeout\n", pACB->host->host_no);
	}
}

/* arcmsr_hbaE_start_bgrb - type E variant: message plus toggled doorbell */
static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &pmu->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebulid' timeout \n", pACB->host->host_no);
	}
}

static void
arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	/* dispatch to the per-adapter-type START_BGRB helper */
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		arcmsr_hbaE_start_bgrb(acb);
		break;
	}
}

/*
 * arcmsr_clear_doorbell_queue_buffer - drain any pending doorbell data from
 * the IOP.  Types B/C/D/E re-check for up to 200 * 20ms (~4s) because the
 * IOP may keep posting data while we are acknowledging.
 */
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/*clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t outbound_doorbell, i;
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		/* let IOP know data has been read */
		for(i=0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(reg->iop2drv_doorbell);
			if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
				/* IOP wrote more data meanwhile: ack again */
				writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
				writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
			} else
				break;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		uint32_t outbound_doorbell, i;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/* type C clears via a dedicated doorbell_clear register */
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(&reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					&reg->outbound_doorbell_clear);
				writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
					&reg->inbound_doorbell);
			} else
				break;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		uint32_t outbound_doorbell, i;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(reg->outbound_doorbell);
		/* write-back to the same register clears the pending bits */
		writel(outbound_doorbell, reg->outbound_doorbell);
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					reg->outbound_doorbell);
				writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
					reg->inbound_doorbell);
			} else
				break;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		uint32_t i, tmp;

		/* type E detects new data by XOR-ing successive doorbell reads */
		acb->in_doorbell = readl(&reg->iobound_doorbell);
		writel(0, &reg->host_int_status); /*clear interrupt*/
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		for(i=0; i < 200; i++) {
			msleep(20);
			tmp = acb->in_doorbell;
			acb->in_doorbell = readl(&reg->iobound_doorbell);
			if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
				writel(0, &reg->host_int_status); /*clear interrupt*/
				acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
				writel(acb->out_doorbell, &reg->iobound_doorbell);
			} else
				break;
		}
		}
		break;
	}
}

/*
 * arcmsr_enable_eoi_mode - enable end-of-interrupt mode; only type B
 * adapters require this, all other types return immediately.
 */
static void arcmsr_enable_eoi_mode(struct AdapterControlBlock
*acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return;
	case ACB_ADAPTER_TYPE_B:
		{
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
			return;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C:
		return;
	}
	return;
}

/*
 * arcmsr_hardware_reset - hard-reset the controller.  Saves the first 64
 * bytes of PCI config space, triggers the device-specific reset mechanism,
 * waits, then restores the config space.
 *
 * NOTE(review): pmuA/pmuC/pmuD are dereferenced per dev_id branch — assumes
 * the matching pmu pointer is valid for the matching dev_id; confirm
 * against probe-time setup.
 */
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	struct MessageUnit_D *pmuD = acb->pmuD;

	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if ((acb->dev_id == 0x1680)) {
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if ((acb->dev_id == 0x1880)) {
		/* unlock the diagnostic register with the magic write
		 * sequence (up to 5 attempts), then request the reset */
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else if (acb->dev_id == 0x1884) {
		struct MessageUnit_E __iomem *pmuE = acb->pmuE;
		/* same unlock scheme, 3xxx register block, with settle delay */
		do {
			count++;
			writel(0x4, &pmuE->write_sequence_3xxx);
			writel(0xB, &pmuE->write_sequence_3xxx);
			writel(0x2, &pmuE->write_sequence_3xxx);
			writel(0x7, &pmuE->write_sequence_3xxx);
			writel(0xD, &pmuE->write_sequence_3xxx);
			mdelay(10);
		} while (((readl(&pmuE->host_diagnostic_3xxx) &
			ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
	} else if ((acb->dev_id == 0x1214)) {
		writel(0x20, pmuD->reset_request);
	} else {
		/* fallback: reset via vendor-specific PCI config write */
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
	return;
}

/*
 * arcmsr_reset_in_progress - return true while the adapter is still
 * resetting (firmware-OK bit not yet set, or reset/diagnostic bit still
 * asserted, depending on adapter type).
 */
static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
{
	bool rtn = true;

	switch(acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:{
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		rtn = ((readl(&reg->outbound_msgaddr1) &
			ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
		}
		break;
	case ACB_ADAPTER_TYPE_B:{
		struct MessageUnit_B *reg = acb->pmuB;
		rtn = ((readl(reg->iop2drv_doorbell) &
			ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
		}
		break;
	case ACB_ADAPTER_TYPE_C:{
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/* bit 2 of host_diagnostic indicates reset still active */
		rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
		}
		break;
	case ACB_ADAPTER_TYPE_D:{
		struct MessageUnit_D *reg = acb->pmuD;
		rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
			true : false;
		}
		break;
	case ACB_ADAPTER_TYPE_E:{
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		rtn = (readl(&reg->host_diagnostic_3xxx) &
			ARCMSR_ARC188X_RESET_ADAPTER) ?
			true : false;
		}
		break;
	}
	return rtn;
}

/*
 * arcmsr_iop_init - bring the IOP to operational state: wait for firmware,
 * confirm the command queue setup, start background rebuild, drain stale
 * doorbell data, then re-enable interrupts and mark the ACB initialized.
 */
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}

/*
 * arcmsr_iop_reset - abort all outstanding commands and return every
 * in-flight CCB to the free list.  Returns the arcmsr_abort_allcmd()
 * result, or 0 when nothing was outstanding.
 */
static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	uint8_t rtnval = 0x00;
	int i = 0;
	unsigned long flags;

	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* talk to iop 331 outstanding command aborted */
		rtnval = arcmsr_abort_allcmd(acb);
		/* clear all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		/* recycle every CCB that was still marked in-flight */
		for (i = 0; i < acb->maxFreeCCB; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				scsi_dma_unmap(ccb->pcmd);
				ccb->startdone = ARCMSR_CCB_DONE;
				ccb->ccb_flags = 0;
				spin_lock_irqsave(&acb->ccblist_lock, flags);
				list_add_tail(&ccb->list, &acb->ccb_free_list);
				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
			}
		}
		atomic_set(&acb->ccboutstandingcount, 0);
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
		return rtnval;
	}
	return rtnval;
}

/*
 * arcmsr_bus_reset - SCSI EH bus-reset handler.  If a soft IOP reset does
 * not suffice (arcmsr_iop_reset() returned 0), performs a hardware reset
 * and polls (ARCMSR_SLEEPTIME per try, up to ARCMSR_RETRYCOUNT retries)
 * until the adapter leaves the resetting state, then re-initializes it.
 * Returns SUCCESS or FAILED per the SCSI EH contract.
 */
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb;
	int retry_count = 0;
	int rtn = FAILED;
	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
	pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
		" num_aborts = %d \n", acb->num_resets, acb->num_aborts);
	acb->num_resets++;

	/* another reset already in progress: wait for it instead */
	if (acb->acb_flags & ACB_F_BUS_RESET) {
		long timeout;
		pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
		timeout = wait_event_timeout(wait_q, (acb->acb_flags
			& ACB_F_BUS_RESET) == 0, 220 * HZ);
		if (timeout)
			return SUCCESS;
	}
	acb->acb_flags |= ACB_F_BUS_RESET;
	if (!arcmsr_iop_reset(acb)) {
		arcmsr_hardware_reset(acb);
		acb->acb_flags &= ~ACB_F_IOP_INITED;
wait_reset_done:
		ssleep(ARCMSR_SLEEPTIME);
		if (arcmsr_reset_in_progress(acb)) {
			if (retry_count > ARCMSR_RETRYCOUNT) {
				acb->fw_flag = FW_DEADLOCK;
				pr_notice("arcmsr%d: waiting for hw bus reset"
					" return, RETRY TERMINATED!!\n",
					acb->host->host_no);
				return FAILED;
			}
			retry_count++;
			goto wait_reset_done;
		}
		/* adapter back: re-init and restart the device-map timer */
		arcmsr_iop_init(acb);
		atomic_set(&acb->rq_map_token, 16);
		atomic_set(&acb->ante_token_value, 16);
		acb->fw_flag = FW_NORMAL;
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		rtn = SUCCESS;
		pr_notice("arcmsr: scsi bus reset eh returns with success\n");
	} else {
		/* soft IOP reset was enough */
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		atomic_set(&acb->rq_map_token, 16);
		atomic_set(&acb->ante_token_value, 16);
		acb->fw_flag = FW_NORMAL;
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		rtn = SUCCESS;
	}
	return rtn;
}

/* arcmsr_abort_one_cmd - poll until the given (aborted) CCB completes */
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb)
{
	int rtn;
	rtn = arcmsr_polling_ccbdone(acb, ccb);
	return rtn;
}

/*
 * arcmsr_abort - SCSI EH abort handler: locate the CCB carrying @cmd,
 * mark it aborted, and poll for its completion.
 */
static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct
		AdapterControlBlock *)cmd->device->host->hostdata;
	int i = 0;
	int rtn = FAILED;
	uint32_t intmask_org;

	printk(KERN_NOTICE
		"arcmsr%d: abort device command of scsi id = %d lun = %d\n",
		acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
	acb->acb_flags |= ACB_F_ABORT;
	acb->num_aborts++;
	/*
	************************************************
	** the all interrupt service routine is locked
	** we need to handle it as soon as possible and exit
	************************************************
	*/
	/* nothing outstanding: nothing to abort (returns FAILED here) */
	if (!atomic_read(&acb->ccboutstandingcount)) {
		acb->acb_flags &= ~ACB_F_ABORT;
		return rtn;
	}

	intmask_org = arcmsr_disable_outbound_ints(acb);
	/* find the in-flight CCB that carries this scsi_cmnd */
	for (i = 0; i < acb->maxFreeCCB; i++) {
		struct CommandControlBlock *ccb = acb->pccb_pool[i];
		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
			ccb->startdone = ARCMSR_CCB_ABORTED;
			rtn = arcmsr_abort_one_cmd(acb, ccb);
			break;
		}
	}
	acb->acb_flags &= ~ACB_F_ABORT;
	arcmsr_enable_outbound_ints(acb, intmask_org);
	return rtn;
}

/*
 * arcmsr_info - scsi_host_template .info callback: build a one-shot
 * description string from the PCI device ID.
 * NOTE(review): the returned buffer is a function-local static, so
 * concurrent callers would share it — presumably acceptable for .info.
 */
static const char *arcmsr_info(struct Scsi_Host *host)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	static char buf[256];
	char *type;
	int raid6 = 1;
	switch (acb->pdev->device) {
	/* SATA models without RAID6 support fall through into the
	 * generic SATA list after clearing the raid6 flag */
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1200:
	case PCI_DEVICE_ID_ARECA_1202:
	case PCI_DEVICE_ID_ARECA_1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1201:
	case PCI_DEVICE_ID_ARECA_1203:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1214:
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
	case PCI_DEVICE_ID_ARECA_1880:
	case PCI_DEVICE_ID_ARECA_1884:
		type = "SAS/SATA";
		break;
	default:
		type = "unknown";
		raid6 = 0;
		break;
	}
	sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
		type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
	return buf;
}