1 /* 2 ******************************************************************************* 3 ** O.S : Linux 4 ** FILE NAME : arcmsr_hba.c 5 ** BY : Nick Cheng, C.L. Huang 6 ** Description: SCSI RAID Device Driver for Areca RAID Controller 7 ******************************************************************************* 8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved 9 ** 10 ** Web site: www.areca.com.tw 11 ** E-mail: support@areca.com.tw 12 ** 13 ** This program is free software; you can redistribute it and/or modify 14 ** it under the terms of the GNU General Public License version 2 as 15 ** published by the Free Software Foundation. 16 ** This program is distributed in the hope that it will be useful, 17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of 18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 ** GNU General Public License for more details. 20 ******************************************************************************* 21 ** Redistribution and use in source and binary forms, with or without 22 ** modification, are permitted provided that the following conditions 23 ** are met: 24 ** 1. Redistributions of source code must retain the above copyright 25 ** notice, this list of conditions and the following disclaimer. 26 ** 2. Redistributions in binary form must reproduce the above copyright 27 ** notice, this list of conditions and the following disclaimer in the 28 ** documentation and/or other materials provided with the distribution. 29 ** 3. The name of the author may not be used to endorse or promote products 30 ** derived from this software without specific prior written permission. 31 ** 32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT 37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY 39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF 41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 42 ******************************************************************************* 43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr 44 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst 45 ******************************************************************************* 46 */ 47 #include <linux/module.h> 48 #include <linux/reboot.h> 49 #include <linux/spinlock.h> 50 #include <linux/pci_ids.h> 51 #include <linux/interrupt.h> 52 #include <linux/moduleparam.h> 53 #include <linux/errno.h> 54 #include <linux/types.h> 55 #include <linux/delay.h> 56 #include <linux/dma-mapping.h> 57 #include <linux/timer.h> 58 #include <linux/slab.h> 59 #include <linux/pci.h> 60 #include <linux/aer.h> 61 #include <linux/circ_buf.h> 62 #include <asm/dma.h> 63 #include <asm/io.h> 64 #include <linux/uaccess.h> 65 #include <scsi/scsi_host.h> 66 #include <scsi/scsi.h> 67 #include <scsi/scsi_cmnd.h> 68 #include <scsi/scsi_tcq.h> 69 #include <scsi/scsi_device.h> 70 #include <scsi/scsi_transport.h> 71 #include <scsi/scsicam.h> 72 #include "arcmsr.h" 73 MODULE_AUTHOR("Nick Cheng, C.L. 
Huang <support@areca.com.tw>"); 74 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver"); 75 MODULE_LICENSE("Dual BSD/GPL"); 76 MODULE_VERSION(ARCMSR_DRIVER_VERSION); 77 78 static int msix_enable = 1; 79 module_param(msix_enable, int, S_IRUGO); 80 MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)"); 81 82 static int msi_enable = 1; 83 module_param(msi_enable, int, S_IRUGO); 84 MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)"); 85 86 static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD; 87 module_param(host_can_queue, int, S_IRUGO); 88 MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128"); 89 90 static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN; 91 module_param(cmd_per_lun, int, S_IRUGO); 92 MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32"); 93 94 static int dma_mask_64 = 0; 95 module_param(dma_mask_64, int, S_IRUGO); 96 MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)"); 97 98 static int set_date_time = 0; 99 module_param(set_date_time, int, S_IRUGO); 100 MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable"); 101 102 #define ARCMSR_SLEEPTIME 10 103 #define ARCMSR_RETRYCOUNT 12 104 105 static wait_queue_head_t wait_q; 106 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, 107 struct scsi_cmnd *cmd); 108 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb); 109 static int arcmsr_abort(struct scsi_cmnd *); 110 static int arcmsr_bus_reset(struct scsi_cmnd *); 111 static int arcmsr_bios_param(struct scsi_device *sdev, 112 struct block_device *bdev, sector_t capacity, int *info); 113 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); 114 static int arcmsr_probe(struct pci_dev *pdev, 115 const struct pci_device_id *id); 116 
/* Remaining forward declarations for PCI/SCSI entry points and helpers. */
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
static int arcmsr_resume(struct pci_dev *pdev);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(struct timer_list *t);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
static void arcmsr_set_iop_datetime(struct timer_list *);

/*
 * Midlayer ->change_queue_depth hook: clamp the requested per-LUN queue
 * depth to the adapter's maximum before handing it to the SCSI core.
 */
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	return scsi_change_queue_depth(sdev, queue_depth);
}

/* SCSI host template shared by all supported Areca adapter generations. */
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "Areca SAS/SATA RAID driver",
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_DEFAULT_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_DEFAULT_CMD_PERLUN,
	.shost_attrs		= arcmsr_host_attrs,
	.no_write_same		= 1,
};

/*
 * PCI ID table.  driver_data selects the adapter generation, which in
 * turn selects the message-unit register layout (types A through F).
 */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
		.driver_data = ACB_ADAPTER_TYPE_D},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
		.driver_data = ACB_ADAPTER_TYPE_E},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);

/*
 * NOTE(review): .suspend/.resume use the legacy PCI PM callbacks rather
 * than dev_pm_ops; kept as-is since arcmsr_suspend/arcmsr_resume are
 * declared with the legacy signatures.
 */
static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table		= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.suspend		= arcmsr_suspend,
	.resume			= arcmsr_resume,
	.shutdown		= arcmsr_shutdown,
};
/*
****************************************************************************
****************************************************************************
*/

/*
 * Release the secondary coherent DMA area (message unit / completion
 * queue).  Only adapter types B, D, E and F allocate dma_coherent2;
 * types A and C have nothing to free here.
 */
static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
			acb->dma_coherent2, acb->dma_coherent_handle2);
		break;
	}
}
ACB_ADAPTER_TYPE_F: 240 dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size, 241 acb->dma_coherent2, acb->dma_coherent_handle2); 242 break; 243 } 244 } 245 246 static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) 247 { 248 struct pci_dev *pdev = acb->pdev; 249 switch (acb->adapter_type){ 250 case ACB_ADAPTER_TYPE_A:{ 251 acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0)); 252 if (!acb->pmuA) { 253 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); 254 return false; 255 } 256 break; 257 } 258 case ACB_ADAPTER_TYPE_B:{ 259 void __iomem *mem_base0, *mem_base1; 260 mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); 261 if (!mem_base0) { 262 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); 263 return false; 264 } 265 mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); 266 if (!mem_base1) { 267 iounmap(mem_base0); 268 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); 269 return false; 270 } 271 acb->mem_base0 = mem_base0; 272 acb->mem_base1 = mem_base1; 273 break; 274 } 275 case ACB_ADAPTER_TYPE_C:{ 276 acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); 277 if (!acb->pmuC) { 278 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); 279 return false; 280 } 281 if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { 282 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/ 283 return true; 284 } 285 break; 286 } 287 case ACB_ADAPTER_TYPE_D: { 288 void __iomem *mem_base0; 289 unsigned long addr, range; 290 291 addr = (unsigned long)pci_resource_start(pdev, 0); 292 range = pci_resource_len(pdev, 0); 293 mem_base0 = ioremap(addr, range); 294 if (!mem_base0) { 295 pr_notice("arcmsr%d: memory mapping region fail\n", 296 acb->host->host_no); 297 
return false; 298 } 299 acb->mem_base0 = mem_base0; 300 break; 301 } 302 case ACB_ADAPTER_TYPE_E: { 303 acb->pmuE = ioremap(pci_resource_start(pdev, 1), 304 pci_resource_len(pdev, 1)); 305 if (!acb->pmuE) { 306 pr_notice("arcmsr%d: memory mapping region fail \n", 307 acb->host->host_no); 308 return false; 309 } 310 writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/ 311 writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */ 312 acb->in_doorbell = 0; 313 acb->out_doorbell = 0; 314 break; 315 } 316 case ACB_ADAPTER_TYPE_F: { 317 acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); 318 if (!acb->pmuF) { 319 pr_notice("arcmsr%d: memory mapping region fail\n", 320 acb->host->host_no); 321 return false; 322 } 323 writel(0, &acb->pmuF->host_int_status); /* clear interrupt */ 324 writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell); 325 acb->in_doorbell = 0; 326 acb->out_doorbell = 0; 327 break; 328 } 329 } 330 return true; 331 } 332 333 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb) 334 { 335 switch (acb->adapter_type) { 336 case ACB_ADAPTER_TYPE_A: 337 iounmap(acb->pmuA); 338 break; 339 case ACB_ADAPTER_TYPE_B: 340 iounmap(acb->mem_base0); 341 iounmap(acb->mem_base1); 342 break; 343 case ACB_ADAPTER_TYPE_C: 344 iounmap(acb->pmuC); 345 break; 346 case ACB_ADAPTER_TYPE_D: 347 iounmap(acb->mem_base0); 348 break; 349 case ACB_ADAPTER_TYPE_E: 350 iounmap(acb->pmuE); 351 break; 352 case ACB_ADAPTER_TYPE_F: 353 iounmap(acb->pmuF); 354 break; 355 } 356 } 357 358 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) 359 { 360 irqreturn_t handle_state; 361 struct AdapterControlBlock *acb = dev_id; 362 363 handle_state = arcmsr_interrupt(acb); 364 return handle_state; 365 } 366 367 static int arcmsr_bios_param(struct scsi_device *sdev, 368 struct block_device *bdev, sector_t capacity, int *geom) 369 { 370 int heads, sectors, cylinders, total_capacity; 371 372 if 
(scsi_partsize(bdev, capacity, geom)) 373 return 0; 374 375 total_capacity = capacity; 376 heads = 64; 377 sectors = 32; 378 cylinders = total_capacity / (heads * sectors); 379 if (cylinders > 1024) { 380 heads = 255; 381 sectors = 63; 382 cylinders = total_capacity / (heads * sectors); 383 } 384 geom[0] = heads; 385 geom[1] = sectors; 386 geom[2] = cylinders; 387 return 0; 388 } 389 390 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb) 391 { 392 struct MessageUnit_A __iomem *reg = acb->pmuA; 393 int i; 394 395 for (i = 0; i < 2000; i++) { 396 if (readl(®->outbound_intstatus) & 397 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 398 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, 399 ®->outbound_intstatus); 400 return true; 401 } 402 msleep(10); 403 } /* max 20 seconds */ 404 405 return false; 406 } 407 408 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb) 409 { 410 struct MessageUnit_B *reg = acb->pmuB; 411 int i; 412 413 for (i = 0; i < 2000; i++) { 414 if (readl(reg->iop2drv_doorbell) 415 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 416 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, 417 reg->iop2drv_doorbell); 418 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, 419 reg->drv2iop_doorbell); 420 return true; 421 } 422 msleep(10); 423 } /* max 20 seconds */ 424 425 return false; 426 } 427 428 static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB) 429 { 430 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC; 431 int i; 432 433 for (i = 0; i < 2000; i++) { 434 if (readl(&phbcmu->outbound_doorbell) 435 & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { 436 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, 437 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/ 438 return true; 439 } 440 msleep(10); 441 } /* max 20 seconds */ 442 443 return false; 444 } 445 446 static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB) 447 { 448 struct MessageUnit_D *reg = pACB->pmuD; 449 int i; 450 451 for (i = 0; i < 2000; 
i++) { 452 if (readl(reg->outbound_doorbell) 453 & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) { 454 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, 455 reg->outbound_doorbell); 456 return true; 457 } 458 msleep(10); 459 } /* max 20 seconds */ 460 return false; 461 } 462 463 static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB) 464 { 465 int i; 466 uint32_t read_doorbell; 467 struct MessageUnit_E __iomem *phbcmu = pACB->pmuE; 468 469 for (i = 0; i < 2000; i++) { 470 read_doorbell = readl(&phbcmu->iobound_doorbell); 471 if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) { 472 writel(0, &phbcmu->host_int_status); /*clear interrupt*/ 473 pACB->in_doorbell = read_doorbell; 474 return true; 475 } 476 msleep(10); 477 } /* max 20 seconds */ 478 return false; 479 } 480 481 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb) 482 { 483 struct MessageUnit_A __iomem *reg = acb->pmuA; 484 int retry_count = 30; 485 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); 486 do { 487 if (arcmsr_hbaA_wait_msgint_ready(acb)) 488 break; 489 else { 490 retry_count--; 491 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ 492 timeout, retry count down = %d \n", acb->host->host_no, retry_count); 493 } 494 } while (retry_count != 0); 495 } 496 497 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb) 498 { 499 struct MessageUnit_B *reg = acb->pmuB; 500 int retry_count = 30; 501 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell); 502 do { 503 if (arcmsr_hbaB_wait_msgint_ready(acb)) 504 break; 505 else { 506 retry_count--; 507 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ 508 timeout,retry count down = %d \n", acb->host->host_no, retry_count); 509 } 510 } while (retry_count != 0); 511 } 512 513 static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB) 514 { 515 struct MessageUnit_C __iomem *reg = pACB->pmuC; 516 int retry_count = 30;/* enlarge wait flush adapter 
cache time: 10 minute */ 517 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); 518 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); 519 do { 520 if (arcmsr_hbaC_wait_msgint_ready(pACB)) { 521 break; 522 } else { 523 retry_count--; 524 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ 525 timeout,retry count down = %d \n", pACB->host->host_no, retry_count); 526 } 527 } while (retry_count != 0); 528 return; 529 } 530 531 static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB) 532 { 533 int retry_count = 15; 534 struct MessageUnit_D *reg = pACB->pmuD; 535 536 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0); 537 do { 538 if (arcmsr_hbaD_wait_msgint_ready(pACB)) 539 break; 540 541 retry_count--; 542 pr_notice("arcmsr%d: wait 'flush adapter " 543 "cache' timeout, retry count down = %d\n", 544 pACB->host->host_no, retry_count); 545 } while (retry_count != 0); 546 } 547 548 static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB) 549 { 550 int retry_count = 30; 551 struct MessageUnit_E __iomem *reg = pACB->pmuE; 552 553 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); 554 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 555 writel(pACB->out_doorbell, ®->iobound_doorbell); 556 do { 557 if (arcmsr_hbaE_wait_msgint_ready(pACB)) 558 break; 559 retry_count--; 560 pr_notice("arcmsr%d: wait 'flush adapter " 561 "cache' timeout, retry count down = %d\n", 562 pACB->host->host_no, retry_count); 563 } while (retry_count != 0); 564 } 565 566 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 567 { 568 switch (acb->adapter_type) { 569 570 case ACB_ADAPTER_TYPE_A: 571 arcmsr_hbaA_flush_cache(acb); 572 break; 573 case ACB_ADAPTER_TYPE_B: 574 arcmsr_hbaB_flush_cache(acb); 575 break; 576 case ACB_ADAPTER_TYPE_C: 577 arcmsr_hbaC_flush_cache(acb); 578 break; 579 case ACB_ADAPTER_TYPE_D: 580 arcmsr_hbaD_flush_cache(acb); 581 break; 582 case ACB_ADAPTER_TYPE_E: 583 
case ACB_ADAPTER_TYPE_F: 584 arcmsr_hbaE_flush_cache(acb); 585 break; 586 } 587 } 588 589 static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb) 590 { 591 struct MessageUnit_B *reg = acb->pmuB; 592 593 if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) { 594 reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203); 595 reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203); 596 reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203); 597 reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203); 598 } else { 599 reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL); 600 reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK); 601 reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL); 602 reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK); 603 } 604 reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER); 605 reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER); 606 reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER); 607 } 608 609 static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb) 610 { 611 struct MessageUnit_D *reg = acb->pmuD; 612 613 reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID); 614 reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION); 615 reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK); 616 reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET); 617 reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST); 618 reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS); 619 reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE); 620 reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0); 621 reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1); 622 reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0); 623 reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1); 624 
reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL); 625 reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL); 626 reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE); 627 reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW); 628 reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH); 629 reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER); 630 reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW); 631 reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH); 632 reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER); 633 reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER); 634 reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE); 635 reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE); 636 reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER); 637 reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER); 638 reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER); 639 } 640 641 static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb) 642 { 643 dma_addr_t host_buffer_dma; 644 struct MessageUnit_F __iomem *pmuF; 645 646 memset(acb->dma_coherent2, 0xff, acb->completeQ_size); 647 acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 + 648 acb->completeQ_size, 4); 649 acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100; 650 acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200; 651 memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE); 652 host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4); 653 pmuF = acb->pmuF; 654 /* host buffer low address, bit0:1 all buffer active */ 655 writel(lower_32_bits(host_buffer_dma | 1), 
&pmuF->inbound_msgaddr0); 656 /* host buffer high address */ 657 writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1); 658 /* set host buffer physical address */ 659 writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell); 660 } 661 662 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) 663 { 664 bool rtn = true; 665 void *dma_coherent; 666 dma_addr_t dma_coherent_handle; 667 struct pci_dev *pdev = acb->pdev; 668 669 switch (acb->adapter_type) { 670 case ACB_ADAPTER_TYPE_B: { 671 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32); 672 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, 673 &dma_coherent_handle, GFP_KERNEL); 674 if (!dma_coherent) { 675 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 676 return false; 677 } 678 acb->dma_coherent_handle2 = dma_coherent_handle; 679 acb->dma_coherent2 = dma_coherent; 680 acb->pmuB = (struct MessageUnit_B *)dma_coherent; 681 arcmsr_hbaB_assign_regAddr(acb); 682 } 683 break; 684 case ACB_ADAPTER_TYPE_D: { 685 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32); 686 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, 687 &dma_coherent_handle, GFP_KERNEL); 688 if (!dma_coherent) { 689 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 690 return false; 691 } 692 acb->dma_coherent_handle2 = dma_coherent_handle; 693 acb->dma_coherent2 = dma_coherent; 694 acb->pmuD = (struct MessageUnit_D *)dma_coherent; 695 arcmsr_hbaD_assign_regAddr(acb); 696 } 697 break; 698 case ACB_ADAPTER_TYPE_E: { 699 uint32_t completeQ_size; 700 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; 701 acb->ioqueue_size = roundup(completeQ_size, 32); 702 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, 703 &dma_coherent_handle, GFP_KERNEL); 704 if (!dma_coherent){ 705 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 706 return false; 707 } 708 acb->dma_coherent_handle2 = 
dma_coherent_handle; 709 acb->dma_coherent2 = dma_coherent; 710 acb->pCompletionQ = dma_coherent; 711 acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ); 712 acb->doneq_index = 0; 713 } 714 break; 715 case ACB_ADAPTER_TYPE_F: { 716 uint32_t QueueDepth; 717 uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32}; 718 719 arcmsr_wait_firmware_ready(acb); 720 QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7]; 721 acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128; 722 acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32); 723 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, 724 &dma_coherent_handle, GFP_KERNEL); 725 if (!dma_coherent) { 726 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 727 return false; 728 } 729 acb->dma_coherent_handle2 = dma_coherent_handle; 730 acb->dma_coherent2 = dma_coherent; 731 acb->pCompletionQ = dma_coherent; 732 acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ); 733 acb->doneq_index = 0; 734 arcmsr_hbaF_assign_regAddr(acb); 735 } 736 break; 737 default: 738 break; 739 } 740 return rtn; 741 } 742 743 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) 744 { 745 struct pci_dev *pdev = acb->pdev; 746 void *dma_coherent; 747 dma_addr_t dma_coherent_handle; 748 struct CommandControlBlock *ccb_tmp; 749 int i = 0, j = 0; 750 unsigned long cdb_phyaddr, next_ccb_phy; 751 unsigned long roundup_ccbsize; 752 unsigned long max_xfer_len; 753 unsigned long max_sg_entrys; 754 uint32_t firm_config_version, curr_phy_upper32; 755 756 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 757 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 758 acb->devstate[i][j] = ARECA_RAID_GONE; 759 760 max_xfer_len = ARCMSR_MAX_XFER_LEN; 761 max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES; 762 firm_config_version = acb->firm_cfg_version; 763 if((firm_config_version & 0xFF) >= 3){ 764 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << 
/*
 * Allocate one contiguous coherent DMA area holding every command
 * control block (CCB), plus (for non type F adapters) the io-queue
 * area appended at the end, and thread the CCBs onto the free list.
 * Returns 0 on success or -ENOMEM.
 */
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	unsigned long cdb_phyaddr, next_ccb_phy;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version, curr_phy_upper32;

	/* Until the firmware reports devices, mark every (id,lun) gone. */
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;

	/*
	 * Firmware config version >= 3 reports the SG page shift in bits
	 * 8..15; scale max transfer length (and SG entries at 4K each)
	 * accordingly.
	 */
	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	if((firm_config_version & 0xFF) >= 3){
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	/* Each CCB is padded to a 32-byte boundary (type A/B need >>5 addressing). */
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		acb->uncache_size += acb->ioqueue_size;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if(!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	acb->ccbsize = roundup_ccbsize;
	ccb_tmp = dma_coherent;
	curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
	/* Constant offset used later to translate bus address -> virtual. */
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for(i = 0; i < acb->maxFreeCCB; i++){
		cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
		case ACB_ADAPTER_TYPE_B:
			/* Types A/B post the CDB address pre-shifted by 5 (32-byte units). */
			ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
			break;
		case ACB_ADAPTER_TYPE_C:
		case ACB_ADAPTER_TYPE_D:
		case ACB_ADAPTER_TYPE_E:
		case ACB_ADAPTER_TYPE_F:
			ccb_tmp->cdb_phyaddr = cdb_phyaddr;
			break;
		}
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		ccb_tmp->smid = (u32)i << 16;
		INIT_LIST_HEAD(&ccb_tmp->list);
		next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
		/*
		 * The firmware addresses CCBs with a single 32-bit low
		 * word per queue; if the next CCB would cross a 4 GB
		 * boundary, stop early and shrink the usable pool.
		 */
		if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
			acb->maxFreeCCB = i;
			acb->host->can_queue = i;
			break;
		}
		else
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = next_ccb_phy;
	}
	/* For non-F types the io-queue area follows the last CCB. */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = ccb_tmp;
	}
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
		acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
		arcmsr_hbaB_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
		arcmsr_hbaD_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		acb->pCompletionQ = acb->dma_coherent2;
		acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
		acb->doneq_index = 0;
		break;
	}
	return 0;
}

/*
 * Bottom-half work: after a GET_CONFIG message completes, diff the
 * firmware's device bitmap (one byte per target id, one bit per LUN)
 * against our cached copy and hot-add/remove SCSI devices accordingly.
 */
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
{
	struct AdapterControlBlock *acb = container_of(work,
		struct AdapterControlBlock, arcmsr_do_message_isr_bh);
	char *acb_dev_map = (char *)acb->device_map;
	uint32_t __iomem *signature = NULL;
	char __iomem *devicemap = NULL;
	int target, lun;
	struct scsi_device *psdev;
	char diff, temp;

	/* Locate signature and device map inside the per-type message buffer. */
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		/* Type F buffers are in host memory, not BAR space. */
		signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
		break;
	}
	}
	/* Buffer only valid when the firmware stamped the GET_CONFIG signature. */
	if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
		return;
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
		target++) {
		temp = readb(devicemap);
		diff = (*acb_dev_map) ^ temp;
		if (diff != 0) {
			*acb_dev_map = temp;
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
				lun++) {
				if ((diff & 0x01) == 1 &&
					(temp & 0x01) == 1) {
					/* bit changed and now set: device appeared */
					scsi_add_device(acb->host,
						0, target, lun);
				} else if ((diff & 0x01) == 1
					&& (temp & 0x01) == 0) {
					/* bit changed and now clear: device vanished */
					psdev = scsi_device_lookup(acb->host,
						0, target, lun);
					if (psdev != NULL) {
						scsi_remove_device(psdev);
						scsi_device_put(psdev);
					}
				}
				temp >>= 1;
				diff >>= 1;
			}
		}
		devicemap++;
		acb_dev_map++;
	}
	acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
}
*)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
		break;
	}
	}
	/* Buffer is only valid when the firmware stamped it with GET_CONFIG. */
	if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
		return;
	/* One byte per target id; each bit is one LUN's presence flag. */
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
		target++) {
		temp = readb(devicemap);
		diff = (*acb_dev_map) ^ temp;
		if (diff != 0) {
			*acb_dev_map = temp;
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
				lun++) {
				if ((diff & 0x01) == 1 &&
					(temp & 0x01) == 1) {
					/* bit changed 0 -> 1: device appeared */
					scsi_add_device(acb->host,
						0, target, lun);
				} else if ((diff & 0x01) == 1
					&& (temp & 0x01) == 0) {
					/* bit changed 1 -> 0: device vanished */
					psdev = scsi_device_lookup(acb->host,
						0, target, lun);
					if (psdev != NULL) {
						scsi_remove_device(psdev);
						scsi_device_put(psdev);
					}
				}
				temp >>= 1;
				diff >>= 1;
			}
		}
		devicemap++;
		acb_dev_map++;
	}
	acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
}

/*
 * Allocate interrupt vectors (MSI-X if enabled, else MSI, else legacy INTx)
 * and install arcmsr_do_interrupt on each. Returns SUCCESS or FAILED; on
 * partial failure every already-requested vector is released.
 */
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
	unsigned long flags;
	int nvec, i;

	if (msix_enable == 0)
		goto msi_int0;
	nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
			PCI_IRQ_MSIX);
	if (nvec > 0) {
		pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
		flags = 0;
	} else {
msi_int0:
		if (msi_enable == 1) {
			nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
			if (nvec == 1) {
				dev_info(&pdev->dev, "msi enabled\n");
				goto msi_int1;
			}
		}
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (nvec < 1)
			return FAILED;
msi_int1:
		/* legacy/MSI line may be shared with other devices */
		flags = IRQF_SHARED;
	}

	acb->vector_count = nvec;
	for (i = 0; i < nvec; i++) {
		if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
				flags, "arcmsr", acb)) {
			pr_warn("arcmsr%d: request_irq =%d failed!\n",
				acb->host->host_no, pci_irq_vector(pdev, i));
			goto out_free_irq;
		}
	}

	return SUCCESS;
out_free_irq:
	/* undo the vectors requested so far */
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), acb);
	pci_free_irq_vectors(pdev);
	return FAILED;
}

/* Start the periodic device-map poll (arcmsr_request_device_map) and the
 * bottom-half work used to apply map changes. First fire after 6 seconds. */
static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
{
	INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	pacb->fw_flag = FW_NORMAL;
	timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
	pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	add_timer(&pacb->eternal_timer);
}

/* Start the timer that pushes the host date/time to the IOP (first fire 60 s). */
static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
{
	timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
	pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
	add_timer(&pacb->refresh_timer);
}

/*
 * Choose the DMA mask: try 64-bit (unless this is a type A adapter with the
 * dma_mask_64 module parameter cleared), falling back to 32-bit.
 * Returns 0 on success, -ENXIO if no mask could be set.
 */
static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
{
	struct pci_dev *pcidev = acb->pdev;

	if (IS_DMA64) {
		if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
		    dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
			goto dma32;
		if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
		    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
			printk("arcmsr: set DMA 64 mask failed\n");
			return -ENXIO;
		}
	} else {
dma32:
		if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
		    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
			printk("arcmsr: set DMA 32-bit mask failed\n");
			return -ENXIO;
		}
	}
	return 0;
}

/*
 * PCI probe: bring up one Areca controller — enable the device, allocate the
 * Scsi_Host, map BARs, allocate the I/O queue and CCB pool, register with the
 * SCSI mid-layer, request IRQs, initialize the IOP and start the timers.
 */
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus,dev_fun;
	int error;
	error = pci_enable_device(pdev);
	if(error){
		return -ENODEV;
	}
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if(!host){
		goto pci_disable_dev;
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb,0,sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	/* adapter type (A..F) is encoded in the pci_device_id table */
	acb->adapter_type = id->driver_data;
	if (arcmsr_set_dma_mask(acb))
		goto scsi_host_release;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
	host->max_cmd_len = 16;			/*this is issue of 64bit LBA ,over 2T byte*/
	/* clamp module parameters to their documented ranges */
	if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
		host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
	host->can_queue = host_can_queue;	/* max simultaneous cmds */
	if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
		cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
	host->cmd_per_lun = cmd_per_lun;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if(error){
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	spin_lock_init(&acb->postq_lock);
	spin_lock_init(&acb->doneq_lock);
	spin_lock_init(&acb->rqbuffer_lock);
	spin_lock_init(&acb->wqbuffer_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	/* NOTE: remap/alloc helpers return nonzero on SUCCESS here, hence if(!error) */
	error = arcmsr_remap_pciregion(acb);
	if(!error){
		goto pci_release_regs;
	}
	error = arcmsr_alloc_io_queue(acb);
	if (!error)
		goto unmap_pci_region;
	error = arcmsr_get_firmware_spec(acb);
	if(!error){
		goto free_hbb_mu;
	}
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	error = arcmsr_alloc_ccb_pool(acb);
	if(error){
		goto unmap_pci_region;
	}
	error = scsi_add_host(host, &pdev->dev);
	if(error){
		goto free_ccb_pool;
	}
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto scsi_host_remove;
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	if(arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	scsi_scan_host(host);
	return 0;
	/* error unwind: labels release resources in reverse order of acquisition */
out_free_sysfs:
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	del_timer_sync(&acb->eternal_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_irq(pdev, acb);
scsi_host_remove:
	scsi_remove_host(host);
free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
	goto unmap_pci_region;
free_hbb_mu:
	arcmsr_free_io_queue(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}

/* Release every vector requested by arcmsr_request_irq. */
static void arcmsr_free_irq(struct pci_dev *pdev,
		struct AdapterControlBlock *acb)
{
	int i;

	for (i = 0; i < acb->vector_count; i++)
		free_irq(pci_irq_vector(pdev, i), acb);
	pci_free_irq_vectors(pdev);
}

/*
 * Legacy PM suspend: quiesce interrupts, timers and background rebuild,
 * flush the adapter cache, then power the PCI device down.
 */
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	pci_set_drvdata(pdev, host);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

/*
 * Legacy PM resume: power the device back up, restore the DMA mask and IRQs,
 * reset the per-type message-unit queue state, then re-initialize the IOP
 * and restart the timers. On failure the host is torn down completely.
 */
static int arcmsr_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		pr_warn("%s: pci_enable_device error\n", __func__);
		return -ENODEV;
	}
	if (arcmsr_set_dma_mask(acb))
		goto controller_unregister;
	pci_set_master(pdev);
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto controller_stop;
	/* reset queue indices / doorbells that live in host memory or lost state */
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t i;
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			reg->post_qbuffer[i] = 0;
			reg->done_qbuffer[i] = 0;
		}
		reg->postq_index = 0;
		reg->doneq_index = 0;
		break;
	}
	case ACB_ADAPTER_TYPE_E:
		writel(0, &acb->pmuE->host_int_status);
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		break;
	case ACB_ADAPTER_TYPE_F:
		writel(0, &acb->pmuF->host_int_status);
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		arcmsr_hbaF_assign_regAddr(acb);
		break;
	}
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	return 0;
controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
controller_unregister:
	scsi_remove_host(host);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
	return -ENODEV;
}

/* Type A: post ABORT_CMD via message register 0 and wait for the IOP ack. */
static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
			, acb->host->host_no);
		return false;
	}
	return true;
}

/* Type B: post ABORT_CMD through the driver-to-IOP doorbell. */
static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
			, acb->host->host_no);
		return false;
	}
	return true;
}
/* Type C: ABORT_CMD needs both the message register and a doorbell kick. */
static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
			, pACB->host->host_no);
		return false;
	}
	return true;
}

/* Type D: post ABORT_CMD via message register 0. */
static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'abort all outstanding "
			"command' timeout\n", pACB->host->host_no);
		return false;
	}
	return true;
}

/* Type E/F: message doorbell is a toggle bit tracked in out_doorbell. */
static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'abort all outstanding "
			"command' timeout\n", pACB->host->host_no);
		return false;
	}
	return true;
}

/* Dispatch "abort all outstanding commands" to the adapter-type handler. */
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	uint8_t rtnval = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtnval = arcmsr_hbaA_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtnval = arcmsr_hbaB_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtnval = arcmsr_hbaC_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtnval = arcmsr_hbaD_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		rtnval = arcmsr_hbaE_abort_allcmd(acb);
		break;
	}
	return rtnval;
}

/* Unmap the scatter/gather DMA mapping of a completed command. */
static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;

	scsi_dma_unmap(pcmd);
}

/*
 * Finish a command: unmap DMA, mark the CCB done, return it to the free
 * list under ccblist_lock, and complete the scsi_cmnd to the mid-layer.
 */
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long flags;
	atomic_dec(&acb->ccboutstandingcount);
	arcmsr_pci_unmap_dma(ccb);
	ccb->startdone = ARCMSR_CCB_DONE;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	pcmd->scsi_done(pcmd);
}

/*
 * Copy the firmware's sense data from the CCB into the scsi_cmnd's sense
 * buffer and flag the command result as CHECK CONDITION with valid sense.
 */
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{

	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
	pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
	if (sensebuffer) {
		/* never copy more than the mid-layer's sense buffer can hold */
		int sense_data_length =
			sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
			? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
		memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
		memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
		pcmd->result |= (DRIVER_SENSE << 24);
	}
}

/*
 * Mask all outbound interrupts for the adapter and return the previous mask
 * so it can be restored by arcmsr_enable_outbound_ints().
 */
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	u32 orig_mask = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A : {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		orig_mask = readl(&reg->outbound_intmask);
		writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
			&reg->outbound_intmask);
		}
		break;
	case ACB_ADAPTER_TYPE_B : {
		struct MessageUnit_B *reg = acb->pmuB;
		orig_mask = readl(reg->iop2drv_doorbell_mask);
		writel(0, reg->iop2drv_doorbell_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_C:{
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/* disable all outbound interrupt */
		orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
		writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		/* disable all outbound interrupt */
		writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		orig_mask = readl(&reg->host_int_mask);
		writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
		readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
		}
		break;
	}
	return orig_mask;
}

/*
 * Translate the firmware completion status of a CCB into a SCSI result,
 * update the cached per-device RAID state, and complete the command.
 */
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	}else{
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_ABORTED:
			/* fallthrough: aborted and init-fail are both reported as bad target */
		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_CHECK_CONDITION: {
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			}
			break;

		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
				but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		}
	}
}

/*
 * Consume one completion: sanity-check that the CCB belongs to this adapter
 * and is actually in flight, then report its state. An aborted CCB is
 * completed with DID_ABORT; anything else inconsistent is only logged.
 */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if
		(pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
					acb->host->host_no, pCCB);
			}
			return;
		}
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
			done acb = '0x%p'"
			"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
			" ccboutstandingcount = %d \n"
			, acb->host->host_no
			, acb
			, pCCB
			, pCCB->acb
			, pCCB->startdone
			, atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}

/*
 * Drain and complete everything still sitting in the outbound (done) queue
 * after an abort, using the per-adapter-type queue protocol. Each completed
 * entry is converted back from its CDB bus address (via vir2phy_offset and
 * cdb_phyadd_hipart) to the owning CCB and passed to arcmsr_drain_donequeue.
 */
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *pARCMSR_CDB;
	bool error;
	struct CommandControlBlock *pCCB;
	unsigned long ccb_cdb_phy;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
				&& (i++ < acb->maxOutstanding)) {
			/* queue entry holds the CDB address >> 5 plus status bits */
			ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
				true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			flag_ccb = reg->done_qbuffer[i];
			if (flag_ccb != 0) {
				reg->done_qbuffer[i] = 0;
				ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
				if (acb->cdb_phyadd_hipart)
					ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			}
			reg->post_qbuffer[i] = 0;
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
			/*need to do*/
			flag_ccb = readl(&reg->outbound_queueport_low);
			/* low 4 bits of the reply word are status, not address */
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
				true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *pmu = acb->pmuD;
		uint32_t outbound_write_pointer;
		uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
		unsigned long flags;

		residual = atomic_read(&acb->ccboutstandingcount);
		for (i = 0; i < residual; i++) {
			spin_lock_irqsave(&acb->doneq_lock, flags);
			/* entry 0 holds the firmware's write pointer for the done queue */
			outbound_write_pointer =
				pmu->done_qbuffer[0].addressLow + 1;
			doneq_index = pmu->doneq_index;
			if ((doneq_index & 0xFFF) !=
				(outbound_write_pointer & 0xFFF)) {
				/* advance index; bit 0x4000 is the wrap toggle */
				toggle = doneq_index & 0x4000;
				index_stripped = (doneq_index & 0xFFF) + 1;
				index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
				pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
					((toggle ^ 0x4000) + 1);
				doneq_index = pmu->doneq_index;
				spin_unlock_irqrestore(&acb->doneq_lock, flags);
				addressLow = pmu->done_qbuffer[doneq_index &
					0xFFF].addressLow;
				ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
				if (acb->cdb_phyadd_hipart)
					ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
				pARCMSR_CDB = (struct ARCMSR_CDB *)
					(acb->vir2phy_offset + ccb_cdb_phy);
				pCCB = container_of(pARCMSR_CDB,
					struct CommandControlBlock, arcmsr_cdb);
				error = (addressLow &
					ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
					true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
				writel(doneq_index,
					pmu->outboundlist_read_pointer);
			} else {
				spin_unlock_irqrestore(&acb->doneq_lock, flags);
				mdelay(10);
			}
		}
		pmu->postq_index = 0;
		pmu->doneq_index = 0x40FF;
		}
		break;
	case ACB_ADAPTER_TYPE_E:
		arcmsr_hbaE_postqueue_isr(acb);
		break;
	case ACB_ADAPTER_TYPE_F:
		arcmsr_hbaF_postqueue_isr(acb);
		break;
	}
}

/*
 * Surprise-removal path: fail every in-flight command with DID_NO_CONNECT,
 * then remove every SCSI device recorded in the cached device map.
 */
static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
{
	char *acb_dev_map = (char *)acb->device_map;
	int target, lun, i;
	struct scsi_device *psdev;
	struct CommandControlBlock *ccb;
	char temp;

	for (i = 0; i < acb->maxFreeCCB; i++) {
		ccb = acb->pccb_pool[i];
		if (ccb->startdone == ARCMSR_CCB_START) {
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_pci_unmap_dma(ccb);
			ccb->pcmd->scsi_done(ccb->pcmd);
		}
	}
	for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
		temp = *acb_dev_map;
		if (temp) {
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
				if (temp & 1) {
					psdev = scsi_device_lookup(acb->host,
						0, target, lun);
					if (psdev != NULL) {
						scsi_remove_device(psdev);
						scsi_device_put(psdev);
					}
				}
				temp >>= 1;
			}
			*acb_dev_map = 0;
		}
		acb_dev_map++;
	}
}

/*
 * Tear down host resources when the adapter has disappeared from the bus
 * (no register access is attempted): sysfs, host, work, timers, IRQs,
 * CCB pool, and finally the PCI mappings and device.
 */
static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev;
	struct Scsi_Host *host;

	host = acb->host;
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	pdev = acb->pdev;
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}

/*
 * PCI remove: if config space reads back 0xffff the adapter is physically
 * gone (surprise removal) and only host-side teardown is done; otherwise
 * quiesce the IOP, wait for / abort outstanding commands, then release
 * all resources.
 */
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;
	uint16_t dev_id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	if (dev_id == 0xffff) {
		/* device no longer answers config cycles: surprise removal */
		acb->acb_flags &= ~ACB_F_IOP_INITED;
		acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
		arcmsr_remove_scsi_devices(acb);
		arcmsr_free_pcidev(acb);
		return;
	}
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	/* poll completions until the outstanding count drains or we give up */
	for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		/* still stuck: abort on the IOP and complete everything as aborted */
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < acb->maxFreeCCB; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}

/*
 * Shutdown hook: quiesce timers, interrupts, the bottom-half worker and
 * background rebuild, and flush the adapter cache. Skipped entirely if the
 * adapter was already surprise-removed.
 */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return;
	del_timer_sync(&acb->eternal_timer);
	if (set_date_time)
		del_timer_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}

/* Module entry: register the PCI driver. */
static int arcmsr_module_init(void)
{
	int error = 0;
	error = pci_register_driver(&arcmsr_pci_driver);
	return error;
}

/* Module exit: unregister the PCI driver. */
static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);

/*
 * Re-enable the outbound interrupts that arcmsr_disable_outbound_ints()
 * masked, starting from the saved mask value intmask_org.
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org)
{
	u32 mask;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
			ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/* type B mask register is enable-bits, not mask-bits */
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
			ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
		writel(intmask_org | mask, reg->pcief0_int_enable);
		break;
	}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
		writel(intmask_org & mask, &reg->host_int_mask);
		break;
	}
	}
}

/*
 * Fill a CCB's ARCMSR_CDB from a scsi_cmnd: copy the CDB bytes, map the
 * scatter/gather list and emit 32- or 64-bit SG entries depending on each
 * segment's high address bits. Returns SUCCESS, or FAILED if the SG mapping
 * fails or exceeds the host's sg_tablesize.
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed CDB header size before the SG list */
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;
	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->msgContext = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			/* segment fits in 32 bits: compact SG entry */
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			/* 64-bit segment: IS_SG64_ADDR flags the wide format */
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}

/*
 * Hand a built CCB to the firmware using the adapter type's inbound queue
 * protocol, bumping the outstanding-command count first.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else
			writel(cdb_phyaddr, &reg->inbound_queueport);
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		/* zero the slot after ours so the firmware sees the queue end */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		reg->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			reg->post_qbuffer[index] =
				cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
		} else {
			reg->post_qbuffer[index] = cdb_phyaddr;
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		/* low bits of the post word encode the CDB size in 64-byte units */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
		writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
		writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *pmu = acb->pmuD;
		u16 index_stripped;
		u16 postq_index, toggle;
		unsigned long flags;
		struct InBound_SRB *pinbound_srb;

		/* in-memory inbound SRB ring; postq_lock serializes producers */
		spin_lock_irqsave(&acb->postq_lock, flags);
		postq_index = pmu->postq_index;
		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
		pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
		pinbound_srb->addressLow = cdb_phyaddr;
		pinbound_srb->length = ccb->arc_cdb_size >> 2;
		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
		/* advance index; bit 0x4000 is the wrap toggle */
		toggle = postq_index & 0x4000;
		index_stripped = postq_index + 1;
		index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
		pmu->postq_index = index_stripped ? (index_stripped | toggle) :
			(toggle ^ 0x4000);
		writel(postq_index, pmu->inboundlist_write_pointer);
		spin_unlock_irqrestore(&acb->postq_lock, flags);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *pmu = acb->pmuE;
		u32 ccb_post_stamp, arc_cdb_size;

		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ?
0x300 : ccb->arc_cdb_size; 1920 ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6)); 1921 writel(0, &pmu->inbound_queueport_high); 1922 writel(ccb_post_stamp, &pmu->inbound_queueport_low); 1923 break; 1924 } 1925 case ACB_ADAPTER_TYPE_F: { 1926 struct MessageUnit_F __iomem *pmu = acb->pmuF; 1927 u32 ccb_post_stamp, arc_cdb_size; 1928 1929 if (ccb->arc_cdb_size <= 0x300) 1930 arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1; 1931 else 1932 arc_cdb_size = (((ccb->arc_cdb_size + 0xff) >> 8) + 2) << 1 | 1; 1933 ccb_post_stamp = (ccb->smid | arc_cdb_size); 1934 writel(0, &pmu->inbound_queueport_high); 1935 writel(ccb_post_stamp, &pmu->inbound_queueport_low); 1936 break; 1937 } 1938 } 1939 } 1940 1941 static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb) 1942 { 1943 struct MessageUnit_A __iomem *reg = acb->pmuA; 1944 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1945 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); 1946 if (!arcmsr_hbaA_wait_msgint_ready(acb)) { 1947 printk(KERN_NOTICE 1948 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" 1949 , acb->host->host_no); 1950 } 1951 } 1952 1953 static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb) 1954 { 1955 struct MessageUnit_B *reg = acb->pmuB; 1956 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1957 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell); 1958 1959 if (!arcmsr_hbaB_wait_msgint_ready(acb)) { 1960 printk(KERN_NOTICE 1961 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n" 1962 , acb->host->host_no); 1963 } 1964 } 1965 1966 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB) 1967 { 1968 struct MessageUnit_C __iomem *reg = pACB->pmuC; 1969 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1970 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); 1971 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell); 1972 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { 1973 printk(KERN_NOTICE 1974 "arcmsr%d: wait 'stop adapter background rebuild' 
timeout\n" 1975 , pACB->host->host_no); 1976 } 1977 return; 1978 } 1979 1980 static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB) 1981 { 1982 struct MessageUnit_D *reg = pACB->pmuD; 1983 1984 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1985 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0); 1986 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) 1987 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' " 1988 "timeout\n", pACB->host->host_no); 1989 } 1990 1991 static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB) 1992 { 1993 struct MessageUnit_E __iomem *reg = pACB->pmuE; 1994 1995 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1996 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); 1997 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 1998 writel(pACB->out_doorbell, ®->iobound_doorbell); 1999 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { 2000 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' " 2001 "timeout\n", pACB->host->host_no); 2002 } 2003 } 2004 2005 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 2006 { 2007 switch (acb->adapter_type) { 2008 case ACB_ADAPTER_TYPE_A: 2009 arcmsr_hbaA_stop_bgrb(acb); 2010 break; 2011 case ACB_ADAPTER_TYPE_B: 2012 arcmsr_hbaB_stop_bgrb(acb); 2013 break; 2014 case ACB_ADAPTER_TYPE_C: 2015 arcmsr_hbaC_stop_bgrb(acb); 2016 break; 2017 case ACB_ADAPTER_TYPE_D: 2018 arcmsr_hbaD_stop_bgrb(acb); 2019 break; 2020 case ACB_ADAPTER_TYPE_E: 2021 case ACB_ADAPTER_TYPE_F: 2022 arcmsr_hbaE_stop_bgrb(acb); 2023 break; 2024 } 2025 } 2026 2027 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) 2028 { 2029 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle); 2030 } 2031 2032 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) 2033 { 2034 switch (acb->adapter_type) { 2035 case ACB_ADAPTER_TYPE_A: { 2036 struct MessageUnit_A __iomem *reg = acb->pmuA; 2037 
writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); 2038 } 2039 break; 2040 case ACB_ADAPTER_TYPE_B: { 2041 struct MessageUnit_B *reg = acb->pmuB; 2042 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); 2043 } 2044 break; 2045 case ACB_ADAPTER_TYPE_C: { 2046 struct MessageUnit_C __iomem *reg = acb->pmuC; 2047 2048 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell); 2049 } 2050 break; 2051 case ACB_ADAPTER_TYPE_D: { 2052 struct MessageUnit_D *reg = acb->pmuD; 2053 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ, 2054 reg->inbound_doorbell); 2055 } 2056 break; 2057 case ACB_ADAPTER_TYPE_E: 2058 case ACB_ADAPTER_TYPE_F: { 2059 struct MessageUnit_E __iomem *reg = acb->pmuE; 2060 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; 2061 writel(acb->out_doorbell, ®->iobound_doorbell); 2062 } 2063 break; 2064 } 2065 } 2066 2067 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) 2068 { 2069 switch (acb->adapter_type) { 2070 case ACB_ADAPTER_TYPE_A: { 2071 struct MessageUnit_A __iomem *reg = acb->pmuA; 2072 /* 2073 ** push inbound doorbell tell iop, driver data write ok 2074 ** and wait reply on next hwinterrupt for next Qbuffer post 2075 */ 2076 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, ®->inbound_doorbell); 2077 } 2078 break; 2079 2080 case ACB_ADAPTER_TYPE_B: { 2081 struct MessageUnit_B *reg = acb->pmuB; 2082 /* 2083 ** push inbound doorbell tell iop, driver data write ok 2084 ** and wait reply on next hwinterrupt for next Qbuffer post 2085 */ 2086 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell); 2087 } 2088 break; 2089 case ACB_ADAPTER_TYPE_C: { 2090 struct MessageUnit_C __iomem *reg = acb->pmuC; 2091 /* 2092 ** push inbound doorbell tell iop, driver data write ok 2093 ** and wait reply on next hwinterrupt for next Qbuffer post 2094 */ 2095 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, ®->inbound_doorbell); 2096 } 2097 break; 2098 case ACB_ADAPTER_TYPE_D: { 2099 struct MessageUnit_D *reg = acb->pmuD; 2100 
writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY, 2101 reg->inbound_doorbell); 2102 } 2103 break; 2104 case ACB_ADAPTER_TYPE_E: 2105 case ACB_ADAPTER_TYPE_F: { 2106 struct MessageUnit_E __iomem *reg = acb->pmuE; 2107 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK; 2108 writel(acb->out_doorbell, ®->iobound_doorbell); 2109 } 2110 break; 2111 } 2112 } 2113 2114 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) 2115 { 2116 struct QBUFFER __iomem *qbuffer = NULL; 2117 switch (acb->adapter_type) { 2118 2119 case ACB_ADAPTER_TYPE_A: { 2120 struct MessageUnit_A __iomem *reg = acb->pmuA; 2121 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; 2122 } 2123 break; 2124 case ACB_ADAPTER_TYPE_B: { 2125 struct MessageUnit_B *reg = acb->pmuB; 2126 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; 2127 } 2128 break; 2129 case ACB_ADAPTER_TYPE_C: { 2130 struct MessageUnit_C __iomem *phbcmu = acb->pmuC; 2131 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer; 2132 } 2133 break; 2134 case ACB_ADAPTER_TYPE_D: { 2135 struct MessageUnit_D *reg = acb->pmuD; 2136 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; 2137 } 2138 break; 2139 case ACB_ADAPTER_TYPE_E: { 2140 struct MessageUnit_E __iomem *reg = acb->pmuE; 2141 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; 2142 } 2143 break; 2144 case ACB_ADAPTER_TYPE_F: { 2145 qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer; 2146 } 2147 break; 2148 } 2149 return qbuffer; 2150 } 2151 2152 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) 2153 { 2154 struct QBUFFER __iomem *pqbuffer = NULL; 2155 switch (acb->adapter_type) { 2156 2157 case ACB_ADAPTER_TYPE_A: { 2158 struct MessageUnit_A __iomem *reg = acb->pmuA; 2159 pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer; 2160 } 2161 break; 2162 case ACB_ADAPTER_TYPE_B: { 2163 struct MessageUnit_B *reg = acb->pmuB; 2164 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; 
2165 } 2166 break; 2167 case ACB_ADAPTER_TYPE_C: { 2168 struct MessageUnit_C __iomem *reg = acb->pmuC; 2169 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; 2170 } 2171 break; 2172 case ACB_ADAPTER_TYPE_D: { 2173 struct MessageUnit_D *reg = acb->pmuD; 2174 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; 2175 } 2176 break; 2177 case ACB_ADAPTER_TYPE_E: { 2178 struct MessageUnit_E __iomem *reg = acb->pmuE; 2179 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; 2180 } 2181 break; 2182 case ACB_ADAPTER_TYPE_F: 2183 pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer; 2184 break; 2185 } 2186 return pqbuffer; 2187 } 2188 2189 static uint32_t 2190 arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb, 2191 struct QBUFFER __iomem *prbuffer) 2192 { 2193 uint8_t *pQbuffer; 2194 uint8_t *buf1 = NULL; 2195 uint32_t __iomem *iop_data; 2196 uint32_t iop_len, data_len, *buf2 = NULL; 2197 2198 iop_data = (uint32_t __iomem *)prbuffer->data; 2199 iop_len = readl(&prbuffer->data_len); 2200 if (iop_len > 0) { 2201 buf1 = kmalloc(128, GFP_ATOMIC); 2202 buf2 = (uint32_t *)buf1; 2203 if (buf1 == NULL) 2204 return 0; 2205 data_len = iop_len; 2206 while (data_len >= 4) { 2207 *buf2++ = readl(iop_data); 2208 iop_data++; 2209 data_len -= 4; 2210 } 2211 if (data_len) 2212 *buf2 = readl(iop_data); 2213 buf2 = (uint32_t *)buf1; 2214 } 2215 while (iop_len > 0) { 2216 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; 2217 *pQbuffer = *buf1; 2218 acb->rqbuf_putIndex++; 2219 /* if last, index number set it to 0 */ 2220 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; 2221 buf1++; 2222 iop_len--; 2223 } 2224 kfree(buf2); 2225 /* let IOP know data has been read */ 2226 arcmsr_iop_message_read(acb); 2227 return 1; 2228 } 2229 2230 uint32_t 2231 arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, 2232 struct QBUFFER __iomem *prbuffer) { 2233 2234 uint8_t *pQbuffer; 2235 uint8_t __iomem *iop_data; 2236 uint32_t iop_len; 2237 2238 if (acb->adapter_type > 
ACB_ADAPTER_TYPE_B) 2239 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer); 2240 iop_data = (uint8_t __iomem *)prbuffer->data; 2241 iop_len = readl(&prbuffer->data_len); 2242 while (iop_len > 0) { 2243 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex]; 2244 *pQbuffer = readb(iop_data); 2245 acb->rqbuf_putIndex++; 2246 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER; 2247 iop_data++; 2248 iop_len--; 2249 } 2250 arcmsr_iop_message_read(acb); 2251 return 1; 2252 } 2253 2254 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) 2255 { 2256 unsigned long flags; 2257 struct QBUFFER __iomem *prbuffer; 2258 int32_t buf_empty_len; 2259 2260 spin_lock_irqsave(&acb->rqbuffer_lock, flags); 2261 prbuffer = arcmsr_get_iop_rqbuffer(acb); 2262 buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) & 2263 (ARCMSR_MAX_QBUFFER - 1); 2264 if (buf_empty_len >= readl(&prbuffer->data_len)) { 2265 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) 2266 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 2267 } else 2268 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 2269 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags); 2270 } 2271 2272 static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb) 2273 { 2274 uint8_t *pQbuffer; 2275 struct QBUFFER __iomem *pwbuffer; 2276 uint8_t *buf1 = NULL; 2277 uint32_t __iomem *iop_data; 2278 uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data; 2279 2280 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 2281 buf1 = kmalloc(128, GFP_ATOMIC); 2282 buf2 = (uint32_t *)buf1; 2283 if (buf1 == NULL) 2284 return; 2285 2286 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 2287 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 2288 iop_data = (uint32_t __iomem *)pwbuffer->data; 2289 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) 2290 && (allxfer_len < 124)) { 2291 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; 2292 *buf1 = *pQbuffer; 2293 acb->wqbuf_getIndex++; 2294 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; 2295 
buf1++; 2296 allxfer_len++; 2297 } 2298 data_len = allxfer_len; 2299 buf1 = (uint8_t *)buf2; 2300 while (data_len >= 4) { 2301 data = *buf2++; 2302 writel(data, iop_data); 2303 iop_data++; 2304 data_len -= 4; 2305 } 2306 if (data_len) { 2307 data = *buf2; 2308 writel(data, iop_data); 2309 } 2310 writel(allxfer_len, &pwbuffer->data_len); 2311 kfree(buf1); 2312 arcmsr_iop_message_wrote(acb); 2313 } 2314 } 2315 2316 void 2317 arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb) 2318 { 2319 uint8_t *pQbuffer; 2320 struct QBUFFER __iomem *pwbuffer; 2321 uint8_t __iomem *iop_data; 2322 int32_t allxfer_len = 0; 2323 2324 if (acb->adapter_type > ACB_ADAPTER_TYPE_B) { 2325 arcmsr_write_ioctldata2iop_in_DWORD(acb); 2326 return; 2327 } 2328 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { 2329 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 2330 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 2331 iop_data = (uint8_t __iomem *)pwbuffer->data; 2332 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex) 2333 && (allxfer_len < 124)) { 2334 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex]; 2335 writeb(*pQbuffer, iop_data); 2336 acb->wqbuf_getIndex++; 2337 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER; 2338 iop_data++; 2339 allxfer_len++; 2340 } 2341 writel(allxfer_len, &pwbuffer->data_len); 2342 arcmsr_iop_message_wrote(acb); 2343 } 2344 } 2345 2346 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) 2347 { 2348 unsigned long flags; 2349 2350 spin_lock_irqsave(&acb->wqbuffer_lock, flags); 2351 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; 2352 if (acb->wqbuf_getIndex != acb->wqbuf_putIndex) 2353 arcmsr_write_ioctldata2iop(acb); 2354 if (acb->wqbuf_getIndex == acb->wqbuf_putIndex) 2355 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; 2356 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags); 2357 } 2358 2359 static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb) 2360 { 2361 uint32_t outbound_doorbell; 2362 struct MessageUnit_A __iomem *reg 
= acb->pmuA; 2363 outbound_doorbell = readl(®->outbound_doorbell); 2364 do { 2365 writel(outbound_doorbell, ®->outbound_doorbell); 2366 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) 2367 arcmsr_iop2drv_data_wrote_handle(acb); 2368 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) 2369 arcmsr_iop2drv_data_read_handle(acb); 2370 outbound_doorbell = readl(®->outbound_doorbell); 2371 } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 2372 | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)); 2373 } 2374 static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB) 2375 { 2376 uint32_t outbound_doorbell; 2377 struct MessageUnit_C __iomem *reg = pACB->pmuC; 2378 /* 2379 ******************************************************************* 2380 ** Maybe here we need to check wrqbuffer_lock is lock or not 2381 ** DOORBELL: din! don! 2382 ** check if there are any mail need to pack from firmware 2383 ******************************************************************* 2384 */ 2385 outbound_doorbell = readl(®->outbound_doorbell); 2386 do { 2387 writel(outbound_doorbell, ®->outbound_doorbell_clear); 2388 readl(®->outbound_doorbell_clear); 2389 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) 2390 arcmsr_iop2drv_data_wrote_handle(pACB); 2391 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) 2392 arcmsr_iop2drv_data_read_handle(pACB); 2393 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) 2394 arcmsr_hbaC_message_isr(pACB); 2395 outbound_doorbell = readl(®->outbound_doorbell); 2396 } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 2397 | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 2398 | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)); 2399 } 2400 2401 static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB) 2402 { 2403 uint32_t outbound_doorbell; 2404 struct MessageUnit_D *pmu = pACB->pmuD; 2405 2406 outbound_doorbell = readl(pmu->outbound_doorbell); 2407 do { 2408 writel(outbound_doorbell, 
pmu->outbound_doorbell); 2409 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) 2410 arcmsr_hbaD_message_isr(pACB); 2411 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) 2412 arcmsr_iop2drv_data_wrote_handle(pACB); 2413 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK) 2414 arcmsr_iop2drv_data_read_handle(pACB); 2415 outbound_doorbell = readl(pmu->outbound_doorbell); 2416 } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK 2417 | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK 2418 | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)); 2419 } 2420 2421 static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB) 2422 { 2423 uint32_t outbound_doorbell, in_doorbell, tmp; 2424 struct MessageUnit_E __iomem *reg = pACB->pmuE; 2425 2426 in_doorbell = readl(®->iobound_doorbell); 2427 outbound_doorbell = in_doorbell ^ pACB->in_doorbell; 2428 do { 2429 writel(0, ®->host_int_status); /* clear interrupt */ 2430 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) { 2431 arcmsr_iop2drv_data_wrote_handle(pACB); 2432 } 2433 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) { 2434 arcmsr_iop2drv_data_read_handle(pACB); 2435 } 2436 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) { 2437 arcmsr_hbaE_message_isr(pACB); 2438 } 2439 tmp = in_doorbell; 2440 in_doorbell = readl(®->iobound_doorbell); 2441 outbound_doorbell = tmp ^ in_doorbell; 2442 } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 2443 | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 2444 | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE)); 2445 pACB->in_doorbell = in_doorbell; 2446 } 2447 2448 static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb) 2449 { 2450 uint32_t flag_ccb; 2451 struct MessageUnit_A __iomem *reg = acb->pmuA; 2452 struct ARCMSR_CDB *pARCMSR_CDB; 2453 struct CommandControlBlock *pCCB; 2454 bool error; 2455 unsigned long cdb_phy_addr; 2456 2457 while ((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) { 2458 
cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; 2459 if (acb->cdb_phyadd_hipart) 2460 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; 2461 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); 2462 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 2463 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; 2464 arcmsr_drain_donequeue(acb, pCCB, error); 2465 } 2466 } 2467 static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb) 2468 { 2469 uint32_t index; 2470 uint32_t flag_ccb; 2471 struct MessageUnit_B *reg = acb->pmuB; 2472 struct ARCMSR_CDB *pARCMSR_CDB; 2473 struct CommandControlBlock *pCCB; 2474 bool error; 2475 unsigned long cdb_phy_addr; 2476 2477 index = reg->doneq_index; 2478 while ((flag_ccb = reg->done_qbuffer[index]) != 0) { 2479 cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; 2480 if (acb->cdb_phyadd_hipart) 2481 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; 2482 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); 2483 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 2484 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; 2485 arcmsr_drain_donequeue(acb, pCCB, error); 2486 reg->done_qbuffer[index] = 0; 2487 index++; 2488 index %= ARCMSR_MAX_HBB_POSTQUEUE; 2489 reg->doneq_index = index; 2490 } 2491 } 2492 2493 static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) 2494 { 2495 struct MessageUnit_C __iomem *phbcmu; 2496 struct ARCMSR_CDB *arcmsr_cdb; 2497 struct CommandControlBlock *ccb; 2498 uint32_t flag_ccb, throttling = 0; 2499 unsigned long ccb_cdb_phy; 2500 int error; 2501 2502 phbcmu = acb->pmuC; 2503 /* areca cdb command done */ 2504 /* Use correct offset and size for syncing */ 2505 2506 while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) != 2507 0xFFFFFFFF) { 2508 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); 2509 if (acb->cdb_phyadd_hipart) 2510 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; 2511 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset 2512 + ccb_cdb_phy); 2513 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, 2514 arcmsr_cdb); 2515 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) 2516 ? 
true : false; 2517 /* check if command done with no error */ 2518 arcmsr_drain_donequeue(acb, ccb, error); 2519 throttling++; 2520 if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { 2521 writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, 2522 &phbcmu->inbound_doorbell); 2523 throttling = 0; 2524 } 2525 } 2526 } 2527 2528 static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) 2529 { 2530 u32 outbound_write_pointer, doneq_index, index_stripped, toggle; 2531 uint32_t addressLow; 2532 int error; 2533 struct MessageUnit_D *pmu; 2534 struct ARCMSR_CDB *arcmsr_cdb; 2535 struct CommandControlBlock *ccb; 2536 unsigned long flags, ccb_cdb_phy; 2537 2538 spin_lock_irqsave(&acb->doneq_lock, flags); 2539 pmu = acb->pmuD; 2540 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1; 2541 doneq_index = pmu->doneq_index; 2542 if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) { 2543 do { 2544 toggle = doneq_index & 0x4000; 2545 index_stripped = (doneq_index & 0xFFF) + 1; 2546 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE; 2547 pmu->doneq_index = index_stripped ? (index_stripped | toggle) : 2548 ((toggle ^ 0x4000) + 1); 2549 doneq_index = pmu->doneq_index; 2550 addressLow = pmu->done_qbuffer[doneq_index & 2551 0xFFF].addressLow; 2552 ccb_cdb_phy = (addressLow & 0xFFFFFFF0); 2553 if (acb->cdb_phyadd_hipart) 2554 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; 2555 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset 2556 + ccb_cdb_phy); 2557 ccb = container_of(arcmsr_cdb, 2558 struct CommandControlBlock, arcmsr_cdb); 2559 error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) 2560 ? 
true : false; 2561 arcmsr_drain_donequeue(acb, ccb, error); 2562 writel(doneq_index, pmu->outboundlist_read_pointer); 2563 } while ((doneq_index & 0xFFF) != 2564 (outbound_write_pointer & 0xFFF)); 2565 } 2566 writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR, 2567 pmu->outboundlist_interrupt_cause); 2568 readl(pmu->outboundlist_interrupt_cause); 2569 spin_unlock_irqrestore(&acb->doneq_lock, flags); 2570 } 2571 2572 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb) 2573 { 2574 uint32_t doneq_index; 2575 uint16_t cmdSMID; 2576 int error; 2577 struct MessageUnit_E __iomem *pmu; 2578 struct CommandControlBlock *ccb; 2579 unsigned long flags; 2580 2581 spin_lock_irqsave(&acb->doneq_lock, flags); 2582 doneq_index = acb->doneq_index; 2583 pmu = acb->pmuE; 2584 while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) { 2585 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; 2586 ccb = acb->pccb_pool[cmdSMID]; 2587 error = (acb->pCompletionQ[doneq_index].cmdFlag 2588 & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; 2589 arcmsr_drain_donequeue(acb, ccb, error); 2590 doneq_index++; 2591 if (doneq_index >= acb->completionQ_entry) 2592 doneq_index = 0; 2593 } 2594 acb->doneq_index = doneq_index; 2595 writel(doneq_index, &pmu->reply_post_consumer_index); 2596 spin_unlock_irqrestore(&acb->doneq_lock, flags); 2597 } 2598 2599 static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb) 2600 { 2601 uint32_t doneq_index; 2602 uint16_t cmdSMID; 2603 int error; 2604 struct MessageUnit_F __iomem *phbcmu; 2605 struct CommandControlBlock *ccb; 2606 unsigned long flags; 2607 2608 spin_lock_irqsave(&acb->doneq_lock, flags); 2609 doneq_index = acb->doneq_index; 2610 phbcmu = acb->pmuF; 2611 while (1) { 2612 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; 2613 if (cmdSMID == 0xffff) 2614 break; 2615 ccb = acb->pccb_pool[cmdSMID]; 2616 error = (acb->pCompletionQ[doneq_index].cmdFlag & 2617 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? 
true : false; 2618 arcmsr_drain_donequeue(acb, ccb, error); 2619 acb->pCompletionQ[doneq_index].cmdSMID = 0xffff; 2620 doneq_index++; 2621 if (doneq_index >= acb->completionQ_entry) 2622 doneq_index = 0; 2623 } 2624 acb->doneq_index = doneq_index; 2625 writel(doneq_index, &phbcmu->reply_post_consumer_index); 2626 spin_unlock_irqrestore(&acb->doneq_lock, flags); 2627 } 2628 2629 /* 2630 ********************************************************************************** 2631 ** Handle a message interrupt 2632 ** 2633 ** The only message interrupt we expect is in response to a query for the current adapter config. 2634 ** We want this in order to compare the drivemap so that we can detect newly-attached drives. 2635 ********************************************************************************** 2636 */ 2637 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb) 2638 { 2639 struct MessageUnit_A __iomem *reg = acb->pmuA; 2640 /*clear interrupt and message state*/ 2641 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus); 2642 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2643 schedule_work(&acb->arcmsr_do_message_isr_bh); 2644 } 2645 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb) 2646 { 2647 struct MessageUnit_B *reg = acb->pmuB; 2648 2649 /*clear interrupt and message state*/ 2650 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); 2651 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2652 schedule_work(&acb->arcmsr_do_message_isr_bh); 2653 } 2654 /* 2655 ********************************************************************************** 2656 ** Handle a message interrupt 2657 ** 2658 ** The only message interrupt we expect is in response to a query for the 2659 ** current adapter config. 2660 ** We want this in order to compare the drivemap so that we can detect newly-attached drives. 
2661 ********************************************************************************** 2662 */ 2663 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb) 2664 { 2665 struct MessageUnit_C __iomem *reg = acb->pmuC; 2666 /*clear interrupt and message state*/ 2667 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear); 2668 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2669 schedule_work(&acb->arcmsr_do_message_isr_bh); 2670 } 2671 2672 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb) 2673 { 2674 struct MessageUnit_D *reg = acb->pmuD; 2675 2676 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell); 2677 readl(reg->outbound_doorbell); 2678 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2679 schedule_work(&acb->arcmsr_do_message_isr_bh); 2680 } 2681 2682 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb) 2683 { 2684 struct MessageUnit_E __iomem *reg = acb->pmuE; 2685 2686 writel(0, ®->host_int_status); 2687 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG) 2688 schedule_work(&acb->arcmsr_do_message_isr_bh); 2689 } 2690 2691 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb) 2692 { 2693 uint32_t outbound_intstatus; 2694 struct MessageUnit_A __iomem *reg = acb->pmuA; 2695 outbound_intstatus = readl(®->outbound_intstatus) & 2696 acb->outbound_int_enable; 2697 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) 2698 return IRQ_NONE; 2699 do { 2700 writel(outbound_intstatus, ®->outbound_intstatus); 2701 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) 2702 arcmsr_hbaA_doorbell_isr(acb); 2703 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) 2704 arcmsr_hbaA_postqueue_isr(acb); 2705 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) 2706 arcmsr_hbaA_message_isr(acb); 2707 outbound_intstatus = readl(®->outbound_intstatus) & 2708 acb->outbound_int_enable; 2709 } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT 2710 | 
		ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
		| ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
	return IRQ_HANDLED;
}

/*
 * Type B adapter interrupt service routine.
 *
 * Reads the iop2drv doorbell (masked by the causes the driver has
 * enabled), acknowledges each batch of causes, dispatches the matching
 * handlers, and loops until no enabled cause remains asserted.
 */
static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;

	outbound_doorbell = readl(reg->iop2drv_doorbell) &
				acb->outbound_int_enable;
	if (!outbound_doorbell)
		return IRQ_NONE;	/* not ours */
	do {
		/* ack the causes just sampled, then signal end-of-interrupt */
		writel(~outbound_doorbell, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
			arcmsr_hbaB_postqueue_isr(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaB_message_isr(acb);
		/* re-sample: more causes may have arrived while handling */
		outbound_doorbell = readl(reg->iop2drv_doorbell) &
			acb->outbound_int_enable;
	} while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_IOP2DRV_DATA_READ_OK
		| ARCMSR_IOP2DRV_CDB_DONE
		| ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
	return IRQ_HANDLED;
}

/*
 * Type C adapter interrupt service routine: dispatch doorbell and
 * post-queue causes until the host interrupt status register is quiet.
 */
static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	/*
	*********************************************
	**   check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;	/* not ours */
	do {
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaC_doorbell_isr(pACB);
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaC_postqueue_isr(pACB);
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}

/*
 * Type D (ARC-1214) adapter interrupt service routine.
 */
static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
{
	u32 host_interrupt_status;
	struct MessageUnit_D *pmu = pACB->pmuD;

	host_interrupt_status = readl(pmu->host_int_status) &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;	/* not ours */
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaD_postqueue_isr(pACB);
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaD_doorbell_isr(pACB);
		host_interrupt_status = readl(pmu->host_int_status);
	} while (host_interrupt_status &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}

/*
 * Type E adapter interrupt service routine.
 */
static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	host_interrupt_status = readl(&pmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;	/* not ours */
	do {
		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
			arcmsr_hbaE_doorbell_isr(pACB);
		}
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
			arcmsr_hbaE_postqueue_isr(pACB);
		}
		host_interrupt_status = readl(&pmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}

/*
 * Type F adapter interrupt service routine.
 *
 * Type F shares the HBEMU interrupt bit layout and the type E doorbell
 * handler (arcmsr_hbaE_doorbell_isr); only the post-queue handling is
 * type-F specific.
 */
static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;

	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;	/* not ours */
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaF_postqueue_isr(pACB);

		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaE_doorbell_isr(pACB);

		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}

/*
 * Top-level interrupt dispatcher: route to the per-adapter-type ISR.
 */
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return arcmsr_hbaA_handle_isr(acb);
	case ACB_ADAPTER_TYPE_B:
		return arcmsr_hbaB_handle_isr(acb);
	case ACB_ADAPTER_TYPE_C:
		return arcmsr_hbaC_handle_isr(acb);
	case ACB_ADAPTER_TYPE_D:
		return arcmsr_hbaD_handle_isr(acb);
	case ACB_ADAPTER_TYPE_E:
		return arcmsr_hbaE_handle_isr(acb);
	case ACB_ADAPTER_TYPE_F:
		return arcmsr_hbaF_handle_isr(acb);
	default:
		return IRQ_NONE;
	}
}

/*
 * Park the IOP: stop background rebuild and flush the adapter cache,
 * with outbound interrupts masked for the duration.
 */
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	if (acb) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;
			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
	}
}


/*
 * Drain the IOP-to-driver message queue until it is empty.
 *
 * Bounded at 15 iterations with a 30 ms delay each, so this waits at
 * most ~450 ms for the IOP to stop producing data.
 */
void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
{
	uint32_t i;

	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		for (i = 0; i < 15; i++) {
			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				/* IOP still has data pending: reset ring and re-read */
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				arcmsr_iop_message_read(acb);
				mdelay(30);
			} else if (acb->rqbuf_getIndex !=
				acb->rqbuf_putIndex) {
				/* ring not empty yet: discard and wait */
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				mdelay(30);
			} else
				break;
		}
	}
}

/*
 * Handle a driver-private message carried in a WRITE_BUFFER/READ_BUFFER
 * command to the virtual device (see arcmsr_handle_virtual_command).
 *
 * The control code is decoded big-endian from CDB bytes 5..8.  The data
 * buffer must be a single scatter-gather element no larger than
 * struct CMD_MESSAGE_FIELD; it is mapped with kmap_atomic, so no
 * sleeping is allowed until message_out unmaps it (hence GFP_ATOMIC
 * for the temporary buffers below).
 *
 * Returns 0 on success or ARCMSR_MESSAGE_FAIL.
 */
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	char *buffer;
	unsigned short use_sg;
	int retvalue = 0, transfer_len = 0;
	unsigned long flags;
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
		(uint32_t)cmd->cmnd[6] << 16 |
		(uint32_t)cmd->cmnd[7] << 8 |
		(uint32_t)cmd->cmnd[8];
	struct scatterlist *sg;

	use_sg = scsi_sg_count(cmd);
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
	if (use_sg > 1) {
		/* only single-element SG lists are supported here */
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		/* copy up to ARCMSR_API_DATA_BUFLEN bytes out of the
		 * circular rqbuffer into the caller's message buffer */
		unsigned char *ver_addr;
		uint8_t *ptmpQbuffer;
		uint32_t allxfer_len = 0;
		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			pr_info("%s: memory not enough!\n", __func__);
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
			unsigned int tail = acb->rqbuf_getIndex;
			unsigned int head = acb->rqbuf_putIndex;
			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);

			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
				allxfer_len = ARCMSR_API_DATA_BUFLEN;

			/* ring may wrap: copy in one or two chunks */
			if (allxfer_len <= cnt_to_end)
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
			else {
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
			}
			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
			allxfer_len);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			/* space was freed: pull more data from the IOP */
			struct QBUFFER __iomem *prbuffer;
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
		}
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		kfree(ver_addr);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		/* queue user data into the circular wqbuffer; fails with
		 * ILLEGAL_REQUEST sense if a previous write is still pending */
		unsigned char *ver_addr;
		uint32_t user_len;
		int32_t cnt2end;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		user_len = pcmdmessagefld->cmdmessage.Length;
		if (user_len > ARCMSR_API_DATA_BUFLEN) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}

		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = ver_addr;

		memcpy(ptmpuserbuffer,
			pcmdmessagefld->messagedatabuffer, user_len);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_write_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
			/* wrap-around copy into the circular buffer */
			if (user_len > cnt2end) {
				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
				ptmpuserbuffer += cnt2end;
				user_len -= cnt2end;
				acb->wqbuf_putIndex = 0;
				pQbuffer = acb->wqbuffer;
			}
			memcpy(pQbuffer, ptmpuserbuffer, user_len);
			acb->wqbuf_putIndex += user_len;
			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_write_ioctldata2iop(acb);
			}
		}
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		kfree(ver_addr);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		uint8_t *pQbuffer = acb->rqbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		uint8_t *pQbuffer = acb->wqbuffer;
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		/* clear both rings; each under its own lock */
		uint8_t *pQbuffer;
		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		int8_t *hello_string = "Hello! I am ARCMSR";
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		memcpy(pcmdmessagefld->messagedatabuffer,
			hello_string, (int16_t)strlen(hello_string));
		break;
	}
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_iop_parking(acb);
		break;
	}
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_flush_adapter_cache(acb);
		break;
	}
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: unknown controlcode!\n", __func__);
	}
message_out:
	if (use_sg) {
		/* undo the kmap_atomic done above */
		struct scatterlist *sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
	}
	return retvalue;
}

/*
 * Pop a free CCB from the free list, or NULL when the pool is empty.
 */
static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
	struct list_head *head = &acb->ccb_free_list;
	struct CommandControlBlock *ccb = NULL;
	unsigned long flags;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	if (!list_empty(head)) {
		ccb = list_entry(head->next, struct CommandControlBlock, list);
		list_del_init(&ccb->list);
	}else{
		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	return ccb;
}

/*
 * Service commands addressed to the driver's virtual device (target 16,
 * see arcmsr_queue_command_lck): a synthetic INQUIRY for LUN 0, and
 * WRITE_BUFFER/READ_BUFFER as the transport for driver-private messages
 * (arcmsr_iop_message_xfer).  Everything else completes immediately.
 */
static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY: {
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		if (cmd->device->lun) {
			/* only LUN 0 exists on the virtual target */
			cmd->result = (DID_TIME_OUT << 16);
			cmd->scsi_done(cmd);
			return;
		}
		inqdata[0] = TYPE_PROCESSOR;
		/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;
		/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;
		/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;
		/* length of additional data */
		/* INQUIRY fields are fixed-width, space padded, not
		 * NUL-terminated - strncpy without terminator is intended */
		strncpy(&inqdata[8], "Areca   ", 8);
		/* Vendor Identification */
		strncpy(&inqdata[16], "RAID controller ", 16);
		/* Product Identification */
		strncpy(&inqdata[32], "R001", 4); /* Product Revision */

		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg)) + sg->offset;

		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);

		cmd->scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
	}
	break;
	default:
		cmd->scsi_done(cmd);
	}
}

/*
 * SCSI mid-layer queuecommand entry (lck variant, wrapped by
 * DEF_SCSI_QCMD below).  Target 16 is the driver's virtual device for
 * IOP message transfer; real commands get a CCB and are posted to the
 * adapter.  Returns SCSI_MLQUEUE_HOST_BUSY when no free CCB is
 * available so the mid-layer will retry.
 */
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;

	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
		cmd->result = (DID_NO_CONNECT << 16);
		cmd->scsi_done(cmd);
		return 0;
	}
	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}

static DEF_SCSI_QCMD(arcmsr_queue_command)

/*
 * Copy firmware identification out of the message rwbuffer:
 * words 15-16 model string, 17-20 firmware version, 21-24 device map,
 * plus the scalar config words (signature, request length, queue
 * depth, SDRAM size, channel count, config version).
 */
static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
	int count;
	uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
	uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
	uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
	uint32_t *firm_model = &rwbuffer[15];
	uint32_t *firm_version = &rwbuffer[17];
	uint32_t *device_map = &rwbuffer[21];

	/* 2 words (8 bytes) of model name */
	count = 2;
	while (count) {
		*acb_firm_model = readl(firm_model);
		acb_firm_model++;
		firm_model++;
		count--;
	}
	/* 4 words (16 bytes) of firmware version */
	count = 4;
	while (count) {
		*acb_firm_version = readl(firm_version);
		acb_firm_version++;
		firm_version++;
		count--;
	}
	/* 4 words (16 bytes) of device map */
	count = 4;
	while (count) {
		*acb_device_map = readl(device_map);
		acb_device_map++;
		device_map++;
		count--;
	}
	pACB->signature = readl(&rwbuffer[0]);
	pACB->firm_request_len = readl(&rwbuffer[1]);
	pACB->firm_numbers_queue = readl(&rwbuffer[2]);
	pACB->firm_sdram_size = readl(&rwbuffer[3]);
	pACB->firm_hd_channels = readl(&rwbuffer[4]);
	pACB->firm_cfg_version = readl(&rwbuffer[25]);
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		pACB->host->host_no,
		pACB->firm_model,
		pACB->firm_version);
}

/*
 * Type A: issue "get config" and read the reply from message_rwbuffer.
 */
static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}

/*
 * Type B: enter driver mode, then issue "get config" and read the
 * reply from message_rwbuffer.
 */
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
		return false;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}

/*
 * Type C: mask outbound interrupts, issue "get config" via message
 * register plus doorbell, read the reply from msgcode_rwbuffer.
 */
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
	uint32_t intmask_org;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}

/*
 * Type D (ARC-1214): clear any stale message-done interrupt, then
 * issue "get config" and read the reply from msgcode_rwbuffer.
 */
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	if (readl(acb->pmuD->outbound_doorbell) &
		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
			acb->pmuD->outbound_doorbell);/*clear interrupt*/
	}
	arcmsr_wait_firmware_ready(acb);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
	/* wait message ready */
	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
	return true;
}

/*
 * Type E: mask outbound interrupts, toggle the message-done doorbell
 * bit, issue "get config", read the reply from msgcode_rwbuffer.
 */
static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	mdelay(20);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}

/*
 * Type F: like type E (shares HBEMU doorbell bits and the type E
 * message-wait helper) but the reply lands in the host-resident
 * pACB->msgcode_rwbuffer rather than a BAR-mapped buffer.
 */
static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_F __iomem *reg = pACB->pmuF;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
			pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
	return true;
}

/*
 * Query firmware configuration for the adapter type, then derive the
 * host queue depth and free-CCB pool size from the firmware's queue
 * count, clamped to the host's can_queue.
 */
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	bool rtn = false;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		rtn = arcmsr_hbaE_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_F:
		rtn = arcmsr_hbaF_get_config(acb);
		break;
	default:
		break;
	}
	/* one slot is reserved, hence queue - 1 */
	acb->maxOutstanding = acb->firm_numbers_queue - 1;
	if (acb->host->can_queue >= acb->firm_numbers_queue)
		acb->host->can_queue = acb->maxOutstanding;
	else
		acb->maxOutstanding = acb->host->can_queue;
	acb->maxFreeCCB = acb->host->can_queue;
	if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
		acb->maxFreeCCB += 64;
	return rtn;
}

/*
 * Type A: poll the outbound queue for completed CCBs until poll_ccb is
 * seen (SUCCESS) or ~100 x 25 ms elapse (FAILED).
 */
static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
	struct
CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		/* 0xFFFFFFFF means the outbound queue is empty */
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		/* flag_ccb carries the CDB physical address >> 5 */
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, (u32)ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
			true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}

/*
 * Type B: poll the memory-resident done queue for completed CCBs until
 * poll_ccb is seen (SUCCESS) or ~100 x 25 ms elapse (FAILED).
 */
static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while(1){
		index = reg->doneq_index;
		flag_ccb = reg->done_qbuffer[index];
		if (flag_ccb == 0) {
			/* done queue empty */
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		reg->done_qbuffer[index] = 0;
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,(u32)ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}

/*
 * Type C: poll outbound_queueport_low for completed CCBs until
 * poll_ccb is seen (SUCCESS) or ~100 x 25 ms elapse (FAILED).
 */
static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	bool error;
	struct CommandControlBlock *pCCB;
	uint32_t poll_ccb_done = 0, poll_count = 0;
	int rtn;
	unsigned long ccb_cdb_phy;

polling_hbc_ccb_retry:
	poll_count++;
	while (1) {
		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hbc_ccb_retry;
			}
		}
		flag_ccb = readl(&reg->outbound_queueport_low);
		/* low 4 bits are status flags, the rest is the CDB address */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check ifcommand done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}

/*
 * Type D (ARC-1214): poll the done queue (index with toggle bit 0x4000)
 * under doneq_lock until poll_ccb is seen (SUCCESS) or ~40 x 25 ms
 * elapse (FAILED).
 */
static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
	unsigned long flags, ccb_cdb_phy;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_D *pmu = acb->pmuD;

polling_hbaD_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
		doneq_index = pmu->doneq_index;
		if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
			/* queue empty */
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaD_ccb_retry;
			}
		}
		/* advance doneq_index, preserving the 0x4000 wrap toggle */
		toggle = doneq_index & 0x4000;
		index_stripped = (doneq_index & 0xFFF) + 1;
		index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
			((toggle ^ 0x4000) + 1);
		doneq_index = pmu->doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
			ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		if ((pCCB->acb != acb) ||
			(pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}

/*
 * Type E (also used for type F): poll the completion queue, matching
 * entries back to CCBs via cmdSMID, until poll_ccb is seen (SUCCESS)
 * or ~40 x 25 ms elapse (FAILED).
 */
static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
	uint16_t cmdSMID;
	unsigned long flags;
	int rtn;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_E __iomem *reg = acb->pmuE;

polling_hbaC_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		doneq_index = acb->doneq_index;
		if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
			doneq_index) {
			/* producer caught up: queue empty */
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaC_ccb_retry;
			}
		}
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
		acb->doneq_index = doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		pCCB = acb->pccb_pool[cmdSMID];
		poll_ccb_done |= (pCCB == poll_ccb) ?
			1 : 0;
		/* check if command done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* NOTE(review): cmdFlag is read at the already-advanced
		 * doneq_index, i.e. the entry AFTER the one cmdSMID was
		 * taken from - verify against the firmware spec whether
		 * this is intentional */
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	writel(doneq_index, &reg->reply_post_consumer_index);
	return rtn;
}

/*
 * Dispatch polling-mode completion handling to the per-type routine.
 * Type F reuses the type E poller (same completion queue layout).
 */
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	int rtn = 0;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
		break;
	}
	return rtn;
}

/*
 * Periodic timer callback: push the current local wall-clock time to
 * the IOP, then re-arm the refresh timer.  The two-word message is a
 * packed date/time record (signature 0x55AA, year offset from 2000).
 */
static void arcmsr_set_iop_datetime(struct timer_list *t)
{
	struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
	unsigned int next_time;
	struct tm tm;

	union {
		struct {
			uint16_t signature;
			uint8_t year;
			uint8_t month;
			uint8_t date;
			uint8_t hour;
			uint8_t minute;
			uint8_t second;
		} a;
		struct {
			uint32_t msg_time[2];
		} b;
	} datetime;

	/* local time: UTC adjusted by the system timezone offset */
	time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);

	datetime.a.signature = 0x55AA;
	datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
	/* tm_mon is 0..11; presumably the firmware expects that range -
	 * verify against the firmware spec */
	datetime.a.month = tm.tm_mon;
	datetime.a.date = tm.tm_mday;
	datetime.a.hour = tm.tm_hour;
	datetime.a.minute = tm.tm_min;
	datetime.a.second = tm.tm_sec;

	switch (pacb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = pacb->pmuA;
		writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_B *reg = pacb->pmuB;
		rwbuffer = reg->message_rwbuffer;
		writel(datetime.b.msg_time[0], rwbuffer++);
		writel(datetime.b.msg_time[1], rwbuffer++);
		writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = pacb->pmuC;
		writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = pacb->pmuD;
		rwbuffer = reg->msgcode_rwbuffer;
		writel(datetime.b.msg_time[0], rwbuffer++);
		writel(datetime.b.msg_time[1], rwbuffer++);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = pacb->pmuE;
		writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(pacb->out_doorbell, &reg->iobound_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *reg = pacb->pmuF;

		/* type F message buffer is host memory, not MMIO */
		pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0];
		pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1];
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(pacb->out_doorbell, &reg->iobound_doorbell);
		break;
	}
	}
	/* refresh hourly when a timezone offset exists, else every minute */
	if (sys_tz.tz_minuteswest)
		next_time = ARCMSR_HOURS;
	else
		next_time = ARCMSR_MINUTES;
	mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
}

static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
	dma_addr_t dma_coherent_handle;

	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
		dma_coherent_handle = acb->dma_coherent_handle2;
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		dma_coherent_handle = acb->dma_coherent_handle +
			offsetof(struct CommandControlBlock, arcmsr_cdb);
		break;
	default:
		dma_coherent_handle = acb->dma_coherent_handle;
		break;
	}
	cdb_phyaddr = lower_32_bits(dma_coherent_handle);
	cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	acb->cdb_phyadd_hipart = 
		((uint64_t)cdb_phyaddr_hi32) << 32;
	/*
	***********************************************************************
	**    if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		/* only needed when the CCB pool sits above 4 GiB */
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			writel(ARCMSR_SIGNATURE_SET_CONFIG, \
						&reg->message_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
							&reg->inbound_msgaddr0);
			if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
				part physical address timeout\n",
				acb->host->host_no);
				return 1;
			}
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		uint32_t __iomem *rwbuffer;

		struct MessageUnit_B *reg = acb->pmuB;
		reg->postq_index = 0;
		reg->doneq_index = 0;
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
				acb->host->host_no);
			return 1;
		}
		rwbuffer = reg->message_rwbuffer;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normal should be zero */
		writel(cdb_phyaddr_hi32, rwbuffer++);
		/* postQ size (256 + 8)*4 */
		writel(cdb_phyaddr, rwbuffer++);
		/* doneQ size (256 + 8)*4 */
		writel(cdb_phyaddr + 1056, rwbuffer++);
		/* ccb maxQ size must be --> [(256 + 8)*4]*/
		writel(1056, rwbuffer);

		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
			timeout \n",acb->host->host_no);
			return 1;
		}
		writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			pr_err("arcmsr%d: can't set driver mode.\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		/* NOTE(review): this message uses adapter_index while the other
		 * branches use host->host_no - confirm which id is intended */
		printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
				acb->adapter_index, cdb_phyaddr_hi32);
		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
			timeout \n", acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = acb->pmuD;
		reg->postq_index = 0;
		reg->doneq_index = 0;
		rwbuffer = reg->msgcode_rwbuffer;
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		writel(cdb_phyaddr_hi32, rwbuffer++);
		writel(cdb_phyaddr, rwbuffer++);
		/* doneQ base follows the post queue in the same DMA area */
		writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
			sizeof(struct InBound_SRB)), rwbuffer++);
		writel(0x100, rwbuffer);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
		if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
		writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
		writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
		writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
		writel(lower_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[5]);
		writel(upper_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[6]);
		writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout \n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *reg = acb->pmuF;

		/* type F: parameters go through the host-memory message buffer */
		acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
		acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
		acb->msgcode_rwbuffer[2] = cdb_phyaddr;
		acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
		acb->msgcode_rwbuffer[4] = acb->ccbsize;
		acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
		acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
		acb->msgcode_rwbuffer[7] = acb->completeQ_size;
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	}
	return 0;
}

/*
 * arcmsr_wait_firmware_ready - busy-wait (with 20 ms sleeps until the first
 * init completes) for the per-family "firmware OK" status bit.
 */
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		do {
			if
			(!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(reg->iop2drv_doorbell);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		/* type B must acknowledge end-of-interrupt once firmware is up */
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(reg->outbound_msgaddr1);
		} while ((firmware_state &
			ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		/* E and F share the MessageUnit_E register layout here */
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	}
}

/*
 * arcmsr_request_device_map - periodic timer callback that asks the IOP for
 * its current device map (GET_CONFIG). Skips the request while a reset,
 * abort, or a previous GET_CONFIG is still in flight, and always re-arms
 * itself for 6 seconds later.
 */
static void arcmsr_request_device_map(struct timer_list *t)
{
	struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
	if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	} else {
		acb->fw_flag = FW_NORMAL;
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			break;
			}
		case ACB_ADAPTER_TYPE_B: {
			struct MessageUnit_B *reg = acb->pmuB;
			writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_C: {
			struct MessageUnit_C __iomem *reg = acb->pmuC;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_D: {
			struct MessageUnit_D *reg = acb->pmuD;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
			break;
			}
		case ACB_ADAPTER_TYPE_E: {
			struct MessageUnit_E __iomem *reg = acb->pmuE;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_F: {
			struct MessageUnit_F __iomem *reg = acb->pmuF;
			uint32_t outMsg1 = readl(&reg->outbound_msgaddr1);

			/* skip (and do not set GET_CONFIG pending) if firmware is
			 * not ready or reports no volume change */
			if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
				(outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
				goto nxt6s;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
			}
		default:
			return;
		}
		acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
nxt6s:
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
}

/*
 * arcmsr_hbaA_start_bgrb - ask type A firmware to start background rebuild.
 */
static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
			rebuild' timeout \n", acb->host->host_no);
	}
}

/*
 * arcmsr_hbaB_start_bgrb - ask type B firmware to start background rebuild.
 */
static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
			rebuild' timeout \n",acb->host->host_no);
	}
}

/*
 * arcmsr_hbaC_start_bgrb - ask type C firmware to start background rebuild.
 */
static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
			rebuild' timeout \n", pACB->host->host_no);
	}
	return;
}

/*
 * arcmsr_hbaD_start_bgrb - ask type D firmware to start background rebuild.
 */
static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *pmu = pACB->pmuD;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebuild' timeout\n", pACB->host->host_no);
	}
}

/*
 * arcmsr_hbaE_start_bgrb - ask type E/F firmware to start background rebuild.
 */
static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &pmu->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebuild' timeout \n", pACB->host->host_no);
	}
}

/*
 * arcmsr_start_adapter_bgrb - dispatch "start background rebuild" to the
 * per-adapter-family helper; E and F share the hbaE helper.
 */
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_start_bgrb(acb);
		break;
	case
	ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		arcmsr_hbaE_start_bgrb(acb);
		break;
	}
}

/*
 * arcmsr_clear_doorbell_queue_buffer - drain any pending doorbell/Qbuffer
 * data left over from before a (re)initialization, acknowledging the IOP
 * until it stops signalling "data write OK" (bounded at 200 x 20 ms).
 */
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/*clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t outbound_doorbell, i;
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		/* let IOP know data has been read */
		for(i=0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(reg->iop2drv_doorbell);
			if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
				writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
				writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
			} else
				break;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		uint32_t outbound_doorbell, i;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(&reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					&reg->outbound_doorbell_clear);
				writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
					&reg->inbound_doorbell);
			} else
				break;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		uint32_t outbound_doorbell, i;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(reg->outbound_doorbell);
		writel(outbound_doorbell, reg->outbound_doorbell);
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					reg->outbound_doorbell);
				writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
					reg->inbound_doorbell);
			} else
				break;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		uint32_t i, tmp;

		/* E/F detect new data by XOR-ing successive doorbell snapshots */
		acb->in_doorbell = readl(&reg->iobound_doorbell);
		writel(0, &reg->host_int_status); /*clear interrupt*/
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		for(i=0; i < 200; i++) {
			msleep(20);
			tmp = acb->in_doorbell;
			acb->in_doorbell = readl(&reg->iobound_doorbell);
			if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
				writel(0, &reg->host_int_status); /*clear interrupt*/
				acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
				writel(acb->out_doorbell, &reg->iobound_doorbell);
			} else
				break;
		}
		}
		break;
	}
}

/*
 * arcmsr_enable_eoi_mode - enable end-of-interrupt mode; only type B
 * adapters need this message, other families return immediately.
 */
static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return;
	case ACB_ADAPTER_TYPE_B:
		{
			struct MessageUnit_B *reg = acb->pmuB;
			writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
			if
			(!arcmsr_hbaB_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
				return;
			}
		}
		break;
	case ACB_ADAPTER_TYPE_C:
		return;
	}
	return;
}

/*
 * arcmsr_hardware_reset - force a hard reset of the controller. Saves the
 * first 64 bytes of PCI config space, triggers the device-specific reset
 * (unlock sequence for 1880/1884, reset register for 1680/1214, PCI config
 * write otherwise), then restores config space after the chip settles.
 */
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	struct MessageUnit_D *pmuD = acb->pmuD;

	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if (acb->dev_id == 0x1680) {
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if (acb->dev_id == 0x1880) {
		/* magic unlock sequence enables the diagnostic write register */
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else if (acb->dev_id == 0x1884) {
		struct MessageUnit_E __iomem *pmuE = acb->pmuE;
		do {
			count++;
			writel(0x4, &pmuE->write_sequence_3xxx);
			writel(0xB, &pmuE->write_sequence_3xxx);
			writel(0x2, &pmuE->write_sequence_3xxx);
			writel(0x7, &pmuE->write_sequence_3xxx);
			writel(0xD, &pmuE->write_sequence_3xxx);
			mdelay(10);
		} while (((readl(&pmuE->host_diagnostic_3xxx) &
			ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
	} else if (acb->dev_id == 0x1214) {
		writel(0x20, pmuD->reset_request);
	} else {
		/* NOTE(review): generic fallback pokes PCI config offset 0x84 -
		 * device-specific reset register, per Areca firmware spec */
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
	return;
}

/*
 * arcmsr_reset_in_progress - return true while the controller is still
 * resetting (per-family "firmware OK" / diagnostic bit not yet settled).
 */
static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
{
	bool rtn = true;

	switch(acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:{
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		rtn = ((readl(&reg->outbound_msgaddr1) &
			ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
		}
		break;
	case ACB_ADAPTER_TYPE_B:{
		struct MessageUnit_B *reg = acb->pmuB;
		rtn = ((readl(reg->iop2drv_doorbell) &
			ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
		}
		break;
	case ACB_ADAPTER_TYPE_C:{
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
		}
		break;
	case ACB_ADAPTER_TYPE_D:{
		struct MessageUnit_D *reg = acb->pmuD;
		rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
			true : false;
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:{
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		rtn = (readl(&reg->host_diagnostic_3xxx) &
			ARCMSR_ARC188X_RESET_ADAPTER) ?
			true : false;
		}
		break;
	}
	return rtn;
}

/*
 * arcmsr_iop_init - bring the IOP online: quiesce interrupts, wait for
 * firmware, push the queue configuration, start background rebuild, drain
 * stale doorbell data, enable EOI mode (type B), then re-enable interrupts
 * and mark the adapter initialized.
 */
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}

/*
 * arcmsr_iop_reset - abort all outstanding commands on the IOP and recycle
 * their CCBs back onto the free list. Returns the status of the abort-all
 * firmware message (0x00 when there was nothing outstanding).
 */
static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	uint8_t rtnval = 0x00;
	int i = 0;
	unsigned long flags;

	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* talk to iop 331 outstanding command aborted */
		rtnval = arcmsr_abort_allcmd(acb);
		/* clear all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < acb->maxFreeCCB; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				scsi_dma_unmap(ccb->pcmd);
				ccb->startdone = ARCMSR_CCB_DONE;
				ccb->ccb_flags = 0;
				spin_lock_irqsave(&acb->ccblist_lock, flags);
				list_add_tail(&ccb->list, &acb->ccb_free_list);
				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
			}
		}
		atomic_set(&acb->ccboutstandingcount, 0);
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
		return rtnval;
	}
	return rtnval;
}

/*
 * arcmsr_bus_reset - SCSI EH bus-reset handler. Serializes against a reset
 * already in progress, performs a soft IOP reset and, if commands were
 * outstanding, a full hardware reset with bounded retries while waiting for
 * the controller to come back. Returns SUCCESS or FAILED.
 */
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb;
	int retry_count = 0;
	int rtn = FAILED;
	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return SUCCESS;
	pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
		" num_aborts = %d \n", acb->num_resets, acb->num_aborts);
	acb->num_resets++;

	if (acb->acb_flags & ACB_F_BUS_RESET) {
		long timeout;
		pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
		timeout = wait_event_timeout(wait_q, (acb->acb_flags
			& ACB_F_BUS_RESET) == 0, 220 * HZ);
		if (timeout)
			return SUCCESS;
	}
	acb->acb_flags |= ACB_F_BUS_RESET;
	if (!arcmsr_iop_reset(acb)) {
		arcmsr_hardware_reset(acb);
		acb->acb_flags &= ~ACB_F_IOP_INITED;
wait_reset_done:
		ssleep(ARCMSR_SLEEPTIME);
		if (arcmsr_reset_in_progress(acb)) {
			if (retry_count > ARCMSR_RETRYCOUNT) {
				acb->fw_flag = FW_DEADLOCK;
				pr_notice("arcmsr%d: waiting for hw bus reset"
					" return, RETRY TERMINATED!!\n",
					acb->host->host_no);
				return FAILED;
			}
			retry_count++;
			goto wait_reset_done;
		}
		arcmsr_iop_init(acb);
		acb->fw_flag = FW_NORMAL;
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		rtn = SUCCESS;
		pr_notice("arcmsr: scsi bus reset eh returns with success\n");
	} else {
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		acb->fw_flag = FW_NORMAL;
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		rtn = SUCCESS;
	}
	return rtn;
}

/*
 * arcmsr_abort_one_cmd - poll the IOP for completion of one aborted CCB.
 */
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb)
{
	int rtn;
	rtn = arcmsr_polling_ccbdone(acb, ccb);
	return rtn;
}

/*
 * arcmsr_abort - SCSI EH abort handler: find the outstanding CCB carrying
 * @cmd, mark it aborted and poll it to completion with outbound interrupts
 * masked. Returns SUCCESS or FAILED.
 */
static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i = 0;
	int rtn = FAILED;
	uint32_t
intmask_org; 4627 4628 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) 4629 return SUCCESS; 4630 printk(KERN_NOTICE 4631 "arcmsr%d: abort device command of scsi id = %d lun = %d\n", 4632 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun); 4633 acb->acb_flags |= ACB_F_ABORT; 4634 acb->num_aborts++; 4635 /* 4636 ************************************************ 4637 ** the all interrupt service routine is locked 4638 ** we need to handle it as soon as possible and exit 4639 ************************************************ 4640 */ 4641 if (!atomic_read(&acb->ccboutstandingcount)) { 4642 acb->acb_flags &= ~ACB_F_ABORT; 4643 return rtn; 4644 } 4645 4646 intmask_org = arcmsr_disable_outbound_ints(acb); 4647 for (i = 0; i < acb->maxFreeCCB; i++) { 4648 struct CommandControlBlock *ccb = acb->pccb_pool[i]; 4649 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { 4650 ccb->startdone = ARCMSR_CCB_ABORTED; 4651 rtn = arcmsr_abort_one_cmd(acb, ccb); 4652 break; 4653 } 4654 } 4655 acb->acb_flags &= ~ACB_F_ABORT; 4656 arcmsr_enable_outbound_ints(acb, intmask_org); 4657 return rtn; 4658 } 4659 4660 static const char *arcmsr_info(struct Scsi_Host *host) 4661 { 4662 struct AdapterControlBlock *acb = 4663 (struct AdapterControlBlock *) host->hostdata; 4664 static char buf[256]; 4665 char *type; 4666 int raid6 = 1; 4667 switch (acb->pdev->device) { 4668 case PCI_DEVICE_ID_ARECA_1110: 4669 case PCI_DEVICE_ID_ARECA_1200: 4670 case PCI_DEVICE_ID_ARECA_1202: 4671 case PCI_DEVICE_ID_ARECA_1210: 4672 raid6 = 0; 4673 fallthrough; 4674 case PCI_DEVICE_ID_ARECA_1120: 4675 case PCI_DEVICE_ID_ARECA_1130: 4676 case PCI_DEVICE_ID_ARECA_1160: 4677 case PCI_DEVICE_ID_ARECA_1170: 4678 case PCI_DEVICE_ID_ARECA_1201: 4679 case PCI_DEVICE_ID_ARECA_1203: 4680 case PCI_DEVICE_ID_ARECA_1220: 4681 case PCI_DEVICE_ID_ARECA_1230: 4682 case PCI_DEVICE_ID_ARECA_1260: 4683 case PCI_DEVICE_ID_ARECA_1270: 4684 case PCI_DEVICE_ID_ARECA_1280: 4685 type = "SATA"; 4686 break; 4687 case 
PCI_DEVICE_ID_ARECA_1214: 4688 case PCI_DEVICE_ID_ARECA_1380: 4689 case PCI_DEVICE_ID_ARECA_1381: 4690 case PCI_DEVICE_ID_ARECA_1680: 4691 case PCI_DEVICE_ID_ARECA_1681: 4692 case PCI_DEVICE_ID_ARECA_1880: 4693 case PCI_DEVICE_ID_ARECA_1884: 4694 type = "SAS/SATA"; 4695 break; 4696 case PCI_DEVICE_ID_ARECA_1886: 4697 type = "NVMe/SAS/SATA"; 4698 break; 4699 default: 4700 type = "unknown"; 4701 raid6 = 0; 4702 break; 4703 } 4704 sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n", 4705 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); 4706 return buf; 4707 } 4708