1 /* qlogicpti.c: Performance Technologies QlogicISP sbus card driver. 2 * 3 * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net) 4 * 5 * A lot of this driver was directly stolen from Erik H. Moe's PCI 6 * Qlogic ISP driver. Mucho kudos to him for this code. 7 * 8 * An even bigger kudos to John Grana at Performance Technologies 9 * for providing me with the hardware to write this driver, you rule 10 * John you really do. 11 * 12 * May, 2, 1997: Added support for QLGC,isp --jj 13 */ 14 15 #include <linux/kernel.h> 16 #include <linux/delay.h> 17 #include <linux/types.h> 18 #include <linux/string.h> 19 #include <linux/slab.h> 20 #include <linux/blkdev.h> 21 #include <linux/proc_fs.h> 22 #include <linux/stat.h> 23 #include <linux/init.h> 24 #include <linux/spinlock.h> 25 #include <linux/interrupt.h> 26 #include <linux/module.h> 27 #include <linux/jiffies.h> 28 #include <linux/dma-mapping.h> 29 #include <linux/of.h> 30 #include <linux/of_device.h> 31 #include <linux/firmware.h> 32 33 #include <asm/byteorder.h> 34 35 #include "qlogicpti.h" 36 37 #include <asm/dma.h> 38 #include <asm/system.h> 39 #include <asm/ptrace.h> 40 #include <asm/pgtable.h> 41 #include <asm/oplib.h> 42 #include <asm/io.h> 43 #include <asm/irq.h> 44 45 #include <scsi/scsi.h> 46 #include <scsi/scsi_cmnd.h> 47 #include <scsi/scsi_device.h> 48 #include <scsi/scsi_eh.h> 49 #include <scsi/scsi_tcq.h> 50 #include <scsi/scsi_host.h> 51 52 #define MAX_TARGETS 16 53 #define MAX_LUNS 8 /* 32 for 1.31 F/W */ 54 55 #define DEFAULT_LOOP_COUNT 10000 56 57 static struct qlogicpti *qptichain = NULL; 58 static DEFINE_SPINLOCK(qptichain_lock); 59 60 #define PACKB(a, b) (((a)<<4)|(b)) 61 62 static const u_char mbox_param[] = { 63 PACKB(1, 1), /* MBOX_NO_OP */ 64 PACKB(5, 5), /* MBOX_LOAD_RAM */ 65 PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */ 66 PACKB(5, 5), /* MBOX_DUMP_RAM */ 67 PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */ 68 PACKB(2, 3), /* MBOX_READ_RAM_WORD */ 69 PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */ 70 PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */ 71 PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */ 72 PACKB(0, 0), /* 0x0009 */ 73 PACKB(0, 0), /* 0x000a */ 74 PACKB(0, 0), /* 0x000b */ 75 PACKB(0, 0), /* 0x000c */ 76 PACKB(0, 0), /* 0x000d */ 77 PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */ 78 PACKB(0, 0), /* 0x000f */ 79 PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */ 80 PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */ 81 PACKB(4, 4), /* MBOX_EXECUTE_IOCB */ 82 PACKB(2, 2), /* MBOX_WAKE_UP */ 83 PACKB(1, 6), /* MBOX_STOP_FIRMWARE */ 84 PACKB(4, 4), /* MBOX_ABORT */ 85 PACKB(2, 2), /* MBOX_ABORT_DEVICE */ 86 PACKB(3, 3), /* MBOX_ABORT_TARGET */ 87 PACKB(2, 2), /* MBOX_BUS_RESET */ 88 PACKB(2, 3), /* MBOX_STOP_QUEUE */ 89 PACKB(2, 3), /* MBOX_START_QUEUE */ 90 PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */ 91 PACKB(2, 3), /* MBOX_ABORT_QUEUE */ 92 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */ 93 PACKB(0, 0), /* 0x001e */ 94 PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */ 95 PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */ 96 PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */ 97 PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */ 98 PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */ 99 PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */ 100 PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */ 101 PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */ 102 PACKB(1, 3), /* MBOX_GET_SBUS_PARAMS */ 103 PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */ 104 PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */ 105 PACKB(0, 0), /* 0x002a */ 106 PACKB(0, 0), /* 0x002b */ 107 PACKB(0, 0), /* 0x002c */ 108 PACKB(0, 0), /* 0x002d */ 109 PACKB(0, 0), /* 0x002e */ 110 PACKB(0, 0), /* 0x002f */ 111 PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */ 112 PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */ 113 PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */ 114 PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */ 115 PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */ 116 PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */ 117 PACKB(2, 2), /* MBOX_SET_ASYNC_DATA_SETUP_TIME */ 118 PACKB(3, 3), /* MBOX_SET_SBUS_CONTROL_PARAMS */ 119 PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */ 120 PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */ 121 PACKB(0, 0), /* 0x003a */ 122 PACKB(0, 0), /* 0x003b */ 123 PACKB(0, 0), /* 0x003c */ 124 PACKB(0, 0), /* 0x003d */ 125 PACKB(0, 0), /* 0x003e */ 126 PACKB(0, 0), /* 0x003f */ 127 PACKB(0, 0), /* 0x0040 */ 128 PACKB(0, 0), /* 0x0041 */ 129 PACKB(0, 0) /* 0x0042 */ 130 }; 131 132 #define MAX_MBOX_COMMAND ARRAY_SIZE(mbox_param) 133 134 /* queue length's _must_ be power of two: */ 135 #define QUEUE_DEPTH(in, out, ql) ((in - out) & (ql)) 136 #define REQ_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, \ 137 QLOGICPTI_REQ_QUEUE_LEN) 138 #define RES_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, RES_QUEUE_LEN) 139 140 static inline void qlogicpti_enable_irqs(struct qlogicpti *qpti) 141 { 142 sbus_writew(SBUS_CTRL_ERIRQ | SBUS_CTRL_GENAB, 143 qpti->qregs + SBUS_CTRL); 144 } 145 146 static inline void qlogicpti_disable_irqs(struct qlogicpti *qpti) 147 { 148 sbus_writew(0, qpti->qregs + SBUS_CTRL); 149 } 150 151 static inline void set_sbus_cfg1(struct qlogicpti *qpti) 152 { 153 u16 val; 154 u8 bursts = qpti->bursts; 155 156 #if 0 /* It appears that at least PTI cards do not support 157 * 64-byte bursts and that setting the B64 bit actually 158 * is a nop and the chip ends up using the smallest burst 159 * size. -DaveM 160 */ 161 if (sbus_can_burst64() && (bursts & DMA_BURST64)) { 162 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B64); 163 } else 164 #endif 165 if (bursts & DMA_BURST32) { 166 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B32); 167 } else if (bursts & DMA_BURST16) { 168 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B16); 169 } else if (bursts & DMA_BURST8) { 170 val = (SBUS_CFG1_BENAB | SBUS_CFG1_B8); 171 } else { 172 val = 0; /* No sbus bursts for you... */ 173 } 174 sbus_writew(val, qpti->qregs + SBUS_CFG1); 175 } 176 177 static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int force) 178 { 179 int loop_count; 180 u16 tmp; 181 182 if (mbox_param[param[0]] == 0) 183 return 1; 184 185 /* Set SBUS semaphore. */ 186 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE); 187 tmp |= SBUS_SEMAPHORE_LCK; 188 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE); 189 190 /* Wait for host IRQ bit to clear. */ 191 loop_count = DEFAULT_LOOP_COUNT; 192 while (--loop_count && (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_HIRQ)) { 193 barrier(); 194 cpu_relax(); 195 } 196 if (!loop_count) 197 printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n", 198 qpti->qpti_id); 199 200 /* Write mailbox command registers. */ 201 switch (mbox_param[param[0]] >> 4) { 202 case 6: sbus_writew(param[5], qpti->qregs + MBOX5); 203 case 5: sbus_writew(param[4], qpti->qregs + MBOX4); 204 case 4: sbus_writew(param[3], qpti->qregs + MBOX3); 205 case 3: sbus_writew(param[2], qpti->qregs + MBOX2); 206 case 2: sbus_writew(param[1], qpti->qregs + MBOX1); 207 case 1: sbus_writew(param[0], qpti->qregs + MBOX0); 208 } 209 210 /* Clear RISC interrupt. */ 211 tmp = sbus_readw(qpti->qregs + HCCTRL); 212 tmp |= HCCTRL_CRIRQ; 213 sbus_writew(tmp, qpti->qregs + HCCTRL); 214 215 /* Clear SBUS semaphore. */ 216 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); 217 218 /* Set HOST interrupt. */ 219 tmp = sbus_readw(qpti->qregs + HCCTRL); 220 tmp |= HCCTRL_SHIRQ; 221 sbus_writew(tmp, qpti->qregs + HCCTRL); 222 223 /* Wait for HOST interrupt clears. */ 224 loop_count = DEFAULT_LOOP_COUNT; 225 while (--loop_count && 226 (sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ)) 227 udelay(20); 228 if (!loop_count) 229 printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n", 230 qpti->qpti_id, param[0]); 231 232 /* Wait for SBUS semaphore to get set. */ 233 loop_count = DEFAULT_LOOP_COUNT; 234 while (--loop_count && 235 !(sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK)) { 236 udelay(20); 237 238 /* Workaround for some buggy chips. */ 239 if (sbus_readw(qpti->qregs + MBOX0) & 0x4000) 240 break; 241 } 242 if (!loop_count) 243 printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n", 244 qpti->qpti_id, param[0]); 245 246 /* Wait for MBOX busy condition to go away. */ 247 loop_count = DEFAULT_LOOP_COUNT; 248 while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04)) 249 udelay(20); 250 if (!loop_count) 251 printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n", 252 qpti->qpti_id, param[0]); 253 254 /* Read back output parameters. */ 255 switch (mbox_param[param[0]] & 0xf) { 256 case 6: param[5] = sbus_readw(qpti->qregs + MBOX5); 257 case 5: param[4] = sbus_readw(qpti->qregs + MBOX4); 258 case 4: param[3] = sbus_readw(qpti->qregs + MBOX3); 259 case 3: param[2] = sbus_readw(qpti->qregs + MBOX2); 260 case 2: param[1] = sbus_readw(qpti->qregs + MBOX1); 261 case 1: param[0] = sbus_readw(qpti->qregs + MBOX0); 262 } 263 264 /* Clear RISC interrupt. */ 265 tmp = sbus_readw(qpti->qregs + HCCTRL); 266 tmp |= HCCTRL_CRIRQ; 267 sbus_writew(tmp, qpti->qregs + HCCTRL); 268 269 /* Release SBUS semaphore. */ 270 tmp = sbus_readw(qpti->qregs + SBUS_SEMAPHORE); 271 tmp &= ~(SBUS_SEMAPHORE_LCK); 272 sbus_writew(tmp, qpti->qregs + SBUS_SEMAPHORE); 273 274 /* We're done. */ 275 return 0; 276 } 277 278 static inline void qlogicpti_set_hostdev_defaults(struct qlogicpti *qpti) 279 { 280 int i; 281 282 qpti->host_param.initiator_scsi_id = qpti->scsi_id; 283 qpti->host_param.bus_reset_delay = 3; 284 qpti->host_param.retry_count = 0; 285 qpti->host_param.retry_delay = 5; 286 qpti->host_param.async_data_setup_time = 3; 287 qpti->host_param.req_ack_active_negation = 1; 288 qpti->host_param.data_line_active_negation = 1; 289 qpti->host_param.data_dma_burst_enable = 1; 290 qpti->host_param.command_dma_burst_enable = 1; 291 qpti->host_param.tag_aging = 8; 292 qpti->host_param.selection_timeout = 250; 293 qpti->host_param.max_queue_depth = 256; 294 295 for(i = 0; i < MAX_TARGETS; i++) { 296 /* 297 * disconnect, parity, arq, reneg on reset, and, oddly enough 298 * tags...the midlayer's notion of tagged support has to match 299 * our device settings, and since we base whether we enable a 300 * tag on a per-cmnd basis upon what the midlayer sez, we 301 * actually enable the capability here. 302 */ 303 qpti->dev_param[i].device_flags = 0xcd; 304 qpti->dev_param[i].execution_throttle = 16; 305 if (qpti->ultra) { 306 qpti->dev_param[i].synchronous_period = 12; 307 qpti->dev_param[i].synchronous_offset = 8; 308 } else { 309 qpti->dev_param[i].synchronous_period = 25; 310 qpti->dev_param[i].synchronous_offset = 12; 311 } 312 qpti->dev_param[i].device_enable = 1; 313 } 314 } 315 316 static int qlogicpti_reset_hardware(struct Scsi_Host *host) 317 { 318 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 319 u_short param[6]; 320 unsigned short risc_code_addr; 321 int loop_count, i; 322 unsigned long flags; 323 324 risc_code_addr = 0x1000; /* all load addresses are at 0x1000 */ 325 326 spin_lock_irqsave(host->host_lock, flags); 327 328 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); 329 330 /* Only reset the scsi bus if it is not free. */ 331 if (sbus_readw(qpti->qregs + CPU_PCTRL) & CPU_PCTRL_BSY) { 332 sbus_writew(CPU_ORIDE_RMOD, qpti->qregs + CPU_ORIDE); 333 sbus_writew(CPU_CMD_BRESET, qpti->qregs + CPU_CMD); 334 udelay(400); 335 } 336 337 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL); 338 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL); 339 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL); 340 341 loop_count = DEFAULT_LOOP_COUNT; 342 while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04)) 343 udelay(20); 344 if (!loop_count) 345 printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n", 346 qpti->qpti_id); 347 348 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); 349 set_sbus_cfg1(qpti); 350 qlogicpti_enable_irqs(qpti); 351 352 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) { 353 qpti->ultra = 1; 354 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA), 355 qpti->qregs + RISC_MTREG); 356 } else { 357 qpti->ultra = 0; 358 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT), 359 qpti->qregs + RISC_MTREG); 360 } 361 362 /* reset adapter and per-device default values. */ 363 /* do it after finding out whether we're ultra mode capable */ 364 qlogicpti_set_hostdev_defaults(qpti); 365 366 /* Release the RISC processor. */ 367 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); 368 369 /* Get RISC to start executing the firmware code. */ 370 param[0] = MBOX_EXEC_FIRMWARE; 371 param[1] = risc_code_addr; 372 if (qlogicpti_mbox_command(qpti, param, 1)) { 373 printk(KERN_EMERG "qlogicpti%d: Cannot execute ISP firmware.\n", 374 qpti->qpti_id); 375 spin_unlock_irqrestore(host->host_lock, flags); 376 return 1; 377 } 378 379 /* Set initiator scsi ID. */ 380 param[0] = MBOX_SET_INIT_SCSI_ID; 381 param[1] = qpti->host_param.initiator_scsi_id; 382 if (qlogicpti_mbox_command(qpti, param, 1) || 383 (param[0] != MBOX_COMMAND_COMPLETE)) { 384 printk(KERN_EMERG "qlogicpti%d: Cannot set initiator SCSI ID.\n", 385 qpti->qpti_id); 386 spin_unlock_irqrestore(host->host_lock, flags); 387 return 1; 388 } 389 390 /* Initialize state of the queues, both hw and sw. */ 391 qpti->req_in_ptr = qpti->res_out_ptr = 0; 392 393 param[0] = MBOX_INIT_RES_QUEUE; 394 param[1] = RES_QUEUE_LEN + 1; 395 param[2] = (u_short) (qpti->res_dvma >> 16); 396 param[3] = (u_short) (qpti->res_dvma & 0xffff); 397 param[4] = param[5] = 0; 398 if (qlogicpti_mbox_command(qpti, param, 1)) { 399 printk(KERN_EMERG "qlogicpti%d: Cannot init response queue.\n", 400 qpti->qpti_id); 401 spin_unlock_irqrestore(host->host_lock, flags); 402 return 1; 403 } 404 405 param[0] = MBOX_INIT_REQ_QUEUE; 406 param[1] = QLOGICPTI_REQ_QUEUE_LEN + 1; 407 param[2] = (u_short) (qpti->req_dvma >> 16); 408 param[3] = (u_short) (qpti->req_dvma & 0xffff); 409 param[4] = param[5] = 0; 410 if (qlogicpti_mbox_command(qpti, param, 1)) { 411 printk(KERN_EMERG "qlogicpti%d: Cannot init request queue.\n", 412 qpti->qpti_id); 413 spin_unlock_irqrestore(host->host_lock, flags); 414 return 1; 415 } 416 417 param[0] = MBOX_SET_RETRY_COUNT; 418 param[1] = qpti->host_param.retry_count; 419 param[2] = qpti->host_param.retry_delay; 420 qlogicpti_mbox_command(qpti, param, 0); 421 422 param[0] = MBOX_SET_TAG_AGE_LIMIT; 423 param[1] = qpti->host_param.tag_aging; 424 qlogicpti_mbox_command(qpti, param, 0); 425 426 for (i = 0; i < MAX_TARGETS; i++) { 427 param[0] = MBOX_GET_DEV_QUEUE_PARAMS; 428 param[1] = (i << 8); 429 qlogicpti_mbox_command(qpti, param, 0); 430 } 431 432 param[0] = MBOX_GET_FIRMWARE_STATUS; 433 qlogicpti_mbox_command(qpti, param, 0); 434 435 param[0] = MBOX_SET_SELECT_TIMEOUT; 436 param[1] = qpti->host_param.selection_timeout; 437 qlogicpti_mbox_command(qpti, param, 0); 438 439 for (i = 0; i < MAX_TARGETS; i++) { 440 param[0] = MBOX_SET_TARGET_PARAMS; 441 param[1] = (i << 8); 442 param[2] = (qpti->dev_param[i].device_flags << 8); 443 /* 444 * Since we're now loading 1.31 f/w, force narrow/async. 445 */ 446 param[2] |= 0xc0; 447 param[3] = 0; /* no offset, we do not have sync mode yet */ 448 qlogicpti_mbox_command(qpti, param, 0); 449 } 450 451 /* 452 * Always (sigh) do an initial bus reset (kicks f/w). 453 */ 454 param[0] = MBOX_BUS_RESET; 455 param[1] = qpti->host_param.bus_reset_delay; 456 qlogicpti_mbox_command(qpti, param, 0); 457 qpti->send_marker = 1; 458 459 spin_unlock_irqrestore(host->host_lock, flags); 460 return 0; 461 } 462 463 #define PTI_RESET_LIMIT 400 464 465 static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti) 466 { 467 const struct firmware *fw; 468 const char fwname[] = "qlogic/isp1000.bin"; 469 const __le16 *fw_data; 470 struct Scsi_Host *host = qpti->qhost; 471 unsigned short csum = 0; 472 unsigned short param[6]; 473 unsigned short risc_code_addr, risc_code_length; 474 int err; 475 unsigned long flags; 476 int i, timeout; 477 478 err = request_firmware(&fw, fwname, &qpti->op->dev); 479 if (err) { 480 printk(KERN_ERR "Failed to load image \"%s\" err %d\n", 481 fwname, err); 482 return err; 483 } 484 if (fw->size % 2) { 485 printk(KERN_ERR "Bogus length %zu in image \"%s\"\n", 486 fw->size, fwname); 487 err = -EINVAL; 488 goto outfirm; 489 } 490 fw_data = (const __le16 *)&fw->data[0]; 491 risc_code_addr = 0x1000; /* all f/w modules load at 0x1000 */ 492 risc_code_length = fw->size / 2; 493 494 spin_lock_irqsave(host->host_lock, flags); 495 496 /* Verify the checksum twice, one before loading it, and once 497 * afterwards via the mailbox commands. 498 */ 499 for (i = 0; i < risc_code_length; i++) 500 csum += __le16_to_cpu(fw_data[i]); 501 if (csum) { 502 printk(KERN_EMERG "qlogicpti%d: Aieee, firmware checksum failed!", 503 qpti->qpti_id); 504 err = 1; 505 goto out; 506 } 507 sbus_writew(SBUS_CTRL_RESET, qpti->qregs + SBUS_CTRL); 508 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + CMD_DMA_CTRL); 509 sbus_writew((DMA_CTRL_CCLEAR | DMA_CTRL_CIRQ), qpti->qregs + DATA_DMA_CTRL); 510 timeout = PTI_RESET_LIMIT; 511 while (--timeout && (sbus_readw(qpti->qregs + SBUS_CTRL) & SBUS_CTRL_RESET)) 512 udelay(20); 513 if (!timeout) { 514 printk(KERN_EMERG "qlogicpti%d: Cannot reset the ISP.", qpti->qpti_id); 515 err = 1; 516 goto out; 517 } 518 519 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL); 520 mdelay(1); 521 522 sbus_writew((SBUS_CTRL_GENAB | SBUS_CTRL_ERIRQ), qpti->qregs + SBUS_CTRL); 523 set_sbus_cfg1(qpti); 524 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); 525 526 if (sbus_readw(qpti->qregs + RISC_PSR) & RISC_PSR_ULTRA) { 527 qpti->ultra = 1; 528 sbus_writew((RISC_MTREG_P0ULTRA | RISC_MTREG_P1ULTRA), 529 qpti->qregs + RISC_MTREG); 530 } else { 531 qpti->ultra = 0; 532 sbus_writew((RISC_MTREG_P0DFLT | RISC_MTREG_P1DFLT), 533 qpti->qregs + RISC_MTREG); 534 } 535 536 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); 537 538 /* Pin lines are only stable while RISC is paused. */ 539 sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL); 540 if (sbus_readw(qpti->qregs + CPU_PDIFF) & CPU_PDIFF_MODE) 541 qpti->differential = 1; 542 else 543 qpti->differential = 0; 544 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); 545 546 /* This shouldn't be necessary- we've reset things so we should be 547 running from the ROM now.. */ 548 549 param[0] = MBOX_STOP_FIRMWARE; 550 param[1] = param[2] = param[3] = param[4] = param[5] = 0; 551 if (qlogicpti_mbox_command(qpti, param, 1)) { 552 printk(KERN_EMERG "qlogicpti%d: Cannot stop firmware for reload.\n", 553 qpti->qpti_id); 554 err = 1; 555 goto out; 556 } 557 558 /* Load it up.. */ 559 for (i = 0; i < risc_code_length; i++) { 560 param[0] = MBOX_WRITE_RAM_WORD; 561 param[1] = risc_code_addr + i; 562 param[2] = __le16_to_cpu(fw_data[i]); 563 if (qlogicpti_mbox_command(qpti, param, 1) || 564 param[0] != MBOX_COMMAND_COMPLETE) { 565 printk("qlogicpti%d: Firmware dload failed, I'm bolixed!\n", 566 qpti->qpti_id); 567 err = 1; 568 goto out; 569 } 570 } 571 572 /* Reset the ISP again. */ 573 sbus_writew(HCCTRL_RESET, qpti->qregs + HCCTRL); 574 mdelay(1); 575 576 qlogicpti_enable_irqs(qpti); 577 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); 578 sbus_writew(HCCTRL_REL, qpti->qregs + HCCTRL); 579 580 /* Ask ISP to verify the checksum of the new code. */ 581 param[0] = MBOX_VERIFY_CHECKSUM; 582 param[1] = risc_code_addr; 583 if (qlogicpti_mbox_command(qpti, param, 1) || 584 (param[0] != MBOX_COMMAND_COMPLETE)) { 585 printk(KERN_EMERG "qlogicpti%d: New firmware csum failure!\n", 586 qpti->qpti_id); 587 err = 1; 588 goto out; 589 } 590 591 /* Start using newly downloaded firmware. */ 592 param[0] = MBOX_EXEC_FIRMWARE; 593 param[1] = risc_code_addr; 594 qlogicpti_mbox_command(qpti, param, 1); 595 596 param[0] = MBOX_ABOUT_FIRMWARE; 597 if (qlogicpti_mbox_command(qpti, param, 1) || 598 (param[0] != MBOX_COMMAND_COMPLETE)) { 599 printk(KERN_EMERG "qlogicpti%d: AboutFirmware cmd fails.\n", 600 qpti->qpti_id); 601 err = 1; 602 goto out; 603 } 604 605 /* Snag the major and minor revisions from the result. */ 606 qpti->fware_majrev = param[1]; 607 qpti->fware_minrev = param[2]; 608 qpti->fware_micrev = param[3]; 609 610 /* Set the clock rate */ 611 param[0] = MBOX_SET_CLOCK_RATE; 612 param[1] = qpti->clock; 613 if (qlogicpti_mbox_command(qpti, param, 1) || 614 (param[0] != MBOX_COMMAND_COMPLETE)) { 615 printk(KERN_EMERG "qlogicpti%d: could not set clock rate.\n", 616 qpti->qpti_id); 617 err = 1; 618 goto out; 619 } 620 621 if (qpti->is_pti != 0) { 622 /* Load scsi initiator ID and interrupt level into sbus static ram. */ 623 param[0] = MBOX_WRITE_RAM_WORD; 624 param[1] = 0xff80; 625 param[2] = (unsigned short) qpti->scsi_id; 626 qlogicpti_mbox_command(qpti, param, 1); 627 628 param[0] = MBOX_WRITE_RAM_WORD; 629 param[1] = 0xff00; 630 param[2] = (unsigned short) 3; 631 qlogicpti_mbox_command(qpti, param, 1); 632 } 633 634 out: 635 spin_unlock_irqrestore(host->host_lock, flags); 636 outfirm: 637 release_firmware(fw); 638 return err; 639 } 640 641 static int qlogicpti_verify_tmon(struct qlogicpti *qpti) 642 { 643 int curstat = sbus_readb(qpti->sreg); 644 645 curstat &= 0xf0; 646 if (!(curstat & SREG_FUSE) && (qpti->swsreg & SREG_FUSE)) 647 printk("qlogicpti%d: Fuse returned to normal state.\n", qpti->qpti_id); 648 if (!(curstat & SREG_TPOWER) && (qpti->swsreg & SREG_TPOWER)) 649 printk("qlogicpti%d: termpwr back to normal state.\n", qpti->qpti_id); 650 if (curstat != qpti->swsreg) { 651 int error = 0; 652 if (curstat & SREG_FUSE) { 653 error++; 654 printk("qlogicpti%d: Fuse is open!\n", qpti->qpti_id); 655 } 656 if (curstat & SREG_TPOWER) { 657 error++; 658 printk("qlogicpti%d: termpwr failure\n", qpti->qpti_id); 659 } 660 if (qpti->differential && 661 (curstat & SREG_DSENSE) != SREG_DSENSE) { 662 error++; 663 printk("qlogicpti%d: You have a single ended device on a " 664 "differential bus! Please fix!\n", qpti->qpti_id); 665 } 666 qpti->swsreg = curstat; 667 return error; 668 } 669 return 0; 670 } 671 672 static irqreturn_t qpti_intr(int irq, void *dev_id); 673 674 static void __devinit qpti_chain_add(struct qlogicpti *qpti) 675 { 676 spin_lock_irq(&qptichain_lock); 677 if (qptichain != NULL) { 678 struct qlogicpti *qlink = qptichain; 679 680 while(qlink->next) 681 qlink = qlink->next; 682 qlink->next = qpti; 683 } else { 684 qptichain = qpti; 685 } 686 qpti->next = NULL; 687 spin_unlock_irq(&qptichain_lock); 688 } 689 690 static void __devexit qpti_chain_del(struct qlogicpti *qpti) 691 { 692 spin_lock_irq(&qptichain_lock); 693 if (qptichain == qpti) { 694 qptichain = qpti->next; 695 } else { 696 struct qlogicpti *qlink = qptichain; 697 while(qlink->next != qpti) 698 qlink = qlink->next; 699 qlink->next = qpti->next; 700 } 701 qpti->next = NULL; 702 spin_unlock_irq(&qptichain_lock); 703 } 704 705 static int __devinit qpti_map_regs(struct qlogicpti *qpti) 706 { 707 struct of_device *op = qpti->op; 708 709 qpti->qregs = of_ioremap(&op->resource[0], 0, 710 resource_size(&op->resource[0]), 711 "PTI Qlogic/ISP"); 712 if (!qpti->qregs) { 713 printk("PTI: Qlogic/ISP registers are unmappable\n"); 714 return -1; 715 } 716 if (qpti->is_pti) { 717 qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096), 718 sizeof(unsigned char), 719 "PTI Qlogic/ISP statreg"); 720 if (!qpti->sreg) { 721 printk("PTI: Qlogic/ISP status register is unmappable\n"); 722 return -1; 723 } 724 } 725 return 0; 726 } 727 728 static int __devinit qpti_register_irq(struct qlogicpti *qpti) 729 { 730 struct of_device *op = qpti->op; 731 732 qpti->qhost->irq = qpti->irq = op->irqs[0]; 733 734 /* We used to try various overly-clever things to 735 * reduce the interrupt processing overhead on 736 * sun4c/sun4m when multiple PTI's shared the 737 * same IRQ. It was too complex and messy to 738 * sanely maintain. 739 */ 740 if (request_irq(qpti->irq, qpti_intr, 741 IRQF_SHARED, "Qlogic/PTI", qpti)) 742 goto fail; 743 744 printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq); 745 746 return 0; 747 748 fail: 749 printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id); 750 return -1; 751 } 752 753 static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti) 754 { 755 struct of_device *op = qpti->op; 756 struct device_node *dp; 757 758 dp = op->node; 759 760 qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1); 761 if (qpti->scsi_id == -1) 762 qpti->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 763 -1); 764 if (qpti->scsi_id == -1) 765 qpti->scsi_id = 766 of_getintprop_default(dp->parent, 767 "scsi-initiator-id", 7); 768 qpti->qhost->this_id = qpti->scsi_id; 769 qpti->qhost->max_sectors = 64; 770 771 printk("SCSI ID %d ", qpti->scsi_id); 772 } 773 774 static void qpti_get_bursts(struct qlogicpti *qpti) 775 { 776 struct of_device *op = qpti->op; 777 u8 bursts, bmask; 778 779 bursts = of_getintprop_default(op->node, "burst-sizes", 0xff); 780 bmask = of_getintprop_default(op->node->parent, "burst-sizes", 0xff); 781 if (bmask != 0xff) 782 bursts &= bmask; 783 if (bursts == 0xff || 784 (bursts & DMA_BURST16) == 0 || 785 (bursts & DMA_BURST32) == 0) 786 bursts = (DMA_BURST32 - 1); 787 788 qpti->bursts = bursts; 789 } 790 791 static void qpti_get_clock(struct qlogicpti *qpti) 792 { 793 unsigned int cfreq; 794 795 /* Check for what the clock input to this card is. 796 * Default to 40Mhz. 797 */ 798 cfreq = prom_getintdefault(qpti->prom_node,"clock-frequency",40000000); 799 qpti->clock = (cfreq + 500000)/1000000; 800 if (qpti->clock == 0) /* bullshit */ 801 qpti->clock = 40; 802 } 803 804 /* The request and response queues must each be aligned 805 * on a page boundary. 806 */ 807 static int __devinit qpti_map_queues(struct qlogicpti *qpti) 808 { 809 struct of_device *op = qpti->op; 810 811 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 812 qpti->res_cpu = dma_alloc_coherent(&op->dev, 813 QSIZE(RES_QUEUE_LEN), 814 &qpti->res_dvma, GFP_ATOMIC); 815 if (qpti->res_cpu == NULL || 816 qpti->res_dvma == 0) { 817 printk("QPTI: Cannot map response queue.\n"); 818 return -1; 819 } 820 821 qpti->req_cpu = dma_alloc_coherent(&op->dev, 822 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 823 &qpti->req_dvma, GFP_ATOMIC); 824 if (qpti->req_cpu == NULL || 825 qpti->req_dvma == 0) { 826 dma_free_coherent(&op->dev, QSIZE(RES_QUEUE_LEN), 827 qpti->res_cpu, qpti->res_dvma); 828 printk("QPTI: Cannot map request queue.\n"); 829 return -1; 830 } 831 memset(qpti->res_cpu, 0, QSIZE(RES_QUEUE_LEN)); 832 memset(qpti->req_cpu, 0, QSIZE(QLOGICPTI_REQ_QUEUE_LEN)); 833 return 0; 834 } 835 836 const char *qlogicpti_info(struct Scsi_Host *host) 837 { 838 static char buf[80]; 839 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 840 841 sprintf(buf, "PTI Qlogic,ISP SBUS SCSI irq %d regs at %p", 842 qpti->qhost->irq, qpti->qregs); 843 return buf; 844 } 845 846 /* I am a certified frobtronicist. */ 847 static inline void marker_frob(struct Command_Entry *cmd) 848 { 849 struct Marker_Entry *marker = (struct Marker_Entry *) cmd; 850 851 memset(marker, 0, sizeof(struct Marker_Entry)); 852 marker->hdr.entry_cnt = 1; 853 marker->hdr.entry_type = ENTRY_MARKER; 854 marker->modifier = SYNC_ALL; 855 marker->rsvd = 0; 856 } 857 858 static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd, 859 struct qlogicpti *qpti) 860 { 861 memset(cmd, 0, sizeof(struct Command_Entry)); 862 cmd->hdr.entry_cnt = 1; 863 cmd->hdr.entry_type = ENTRY_COMMAND; 864 cmd->target_id = Cmnd->device->id; 865 cmd->target_lun = Cmnd->device->lun; 866 cmd->cdb_length = Cmnd->cmd_len; 867 cmd->control_flags = 0; 868 if (Cmnd->device->tagged_supported) { 869 if (qpti->cmd_count[Cmnd->device->id] == 0) 870 qpti->tag_ages[Cmnd->device->id] = jiffies; 871 if (time_after(jiffies, qpti->tag_ages[Cmnd->device->id] + (5*HZ))) { 872 cmd->control_flags = CFLAG_ORDERED_TAG; 873 qpti->tag_ages[Cmnd->device->id] = jiffies; 874 } else 875 cmd->control_flags = CFLAG_SIMPLE_TAG; 876 } 877 if ((Cmnd->cmnd[0] == WRITE_6) || 878 (Cmnd->cmnd[0] == WRITE_10) || 879 (Cmnd->cmnd[0] == WRITE_12)) 880 cmd->control_flags |= CFLAG_WRITE; 881 else 882 cmd->control_flags |= CFLAG_READ; 883 cmd->time_out = 30; 884 memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len); 885 } 886 887 /* Do it to it baby. */ 888 static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, 889 struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr) 890 { 891 struct dataseg *ds; 892 struct scatterlist *sg, *s; 893 int i, n; 894 895 if (scsi_bufflen(Cmnd)) { 896 int sg_count; 897 898 sg = scsi_sglist(Cmnd); 899 sg_count = dma_map_sg(&qpti->op->dev, sg, 900 scsi_sg_count(Cmnd), 901 Cmnd->sc_data_direction); 902 903 ds = cmd->dataseg; 904 cmd->segment_cnt = sg_count; 905 906 /* Fill in first four sg entries: */ 907 n = sg_count; 908 if (n > 4) 909 n = 4; 910 for_each_sg(sg, s, n, i) { 911 ds[i].d_base = sg_dma_address(s); 912 ds[i].d_count = sg_dma_len(s); 913 } 914 sg_count -= 4; 915 sg = s; 916 while (sg_count > 0) { 917 struct Continuation_Entry *cont; 918 919 ++cmd->hdr.entry_cnt; 920 cont = (struct Continuation_Entry *) &qpti->req_cpu[in_ptr]; 921 in_ptr = NEXT_REQ_PTR(in_ptr); 922 if (in_ptr == out_ptr) 923 return -1; 924 925 cont->hdr.entry_type = ENTRY_CONTINUATION; 926 cont->hdr.entry_cnt = 0; 927 cont->hdr.sys_def_1 = 0; 928 cont->hdr.flags = 0; 929 cont->reserved = 0; 930 ds = cont->dataseg; 931 n = sg_count; 932 if (n > 7) 933 n = 7; 934 for_each_sg(sg, s, n, i) { 935 ds[i].d_base = sg_dma_address(s); 936 ds[i].d_count = sg_dma_len(s); 937 } 938 sg_count -= n; 939 sg = s; 940 } 941 } else { 942 cmd->dataseg[0].d_base = 0; 943 cmd->dataseg[0].d_count = 0; 944 cmd->segment_cnt = 1; /* Shouldn't this be 0? */ 945 } 946 947 /* Committed, record Scsi_Cmd so we can find it later. */ 948 cmd->handle = in_ptr; 949 qpti->cmd_slots[in_ptr] = Cmnd; 950 951 qpti->cmd_count[Cmnd->device->id]++; 952 sbus_writew(in_ptr, qpti->qregs + MBOX4); 953 qpti->req_in_ptr = in_ptr; 954 955 return in_ptr; 956 } 957 958 static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int out_ptr) 959 { 960 /* Temporary workaround until bug is found and fixed (one bug has been found 961 already, but fixing it makes things even worse) -jj */ 962 int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64; 963 host->can_queue = host->host_busy + num_free; 964 host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); 965 } 966 967 static int qlogicpti_slave_configure(struct scsi_device *sdev) 968 { 969 struct qlogicpti *qpti = shost_priv(sdev->host); 970 int tgt = sdev->id; 971 u_short param[6]; 972 973 /* tags handled in midlayer */ 974 /* enable sync mode? */ 975 if (sdev->sdtr) { 976 qpti->dev_param[tgt].device_flags |= 0x10; 977 } else { 978 qpti->dev_param[tgt].synchronous_offset = 0; 979 qpti->dev_param[tgt].synchronous_period = 0; 980 } 981 /* are we wide capable? */ 982 if (sdev->wdtr) 983 qpti->dev_param[tgt].device_flags |= 0x20; 984 985 param[0] = MBOX_SET_TARGET_PARAMS; 986 param[1] = (tgt << 8); 987 param[2] = (qpti->dev_param[tgt].device_flags << 8); 988 if (qpti->dev_param[tgt].device_flags & 0x10) { 989 param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) | 990 qpti->dev_param[tgt].synchronous_period; 991 } else { 992 param[3] = 0; 993 } 994 qlogicpti_mbox_command(qpti, param, 0); 995 return 0; 996 } 997 998 /* 999 * The middle SCSI layer ensures that queuecommand never gets invoked 1000 * concurrently with itself or the interrupt handler (though the 1001 * interrupt handler may call this routine as part of 1002 * request-completion handling). 1003 * 1004 * "This code must fly." -davem 1005 */ 1006 static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *)) 1007 { 1008 struct Scsi_Host *host = Cmnd->device->host; 1009 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 1010 struct Command_Entry *cmd; 1011 u_int out_ptr; 1012 int in_ptr; 1013 1014 Cmnd->scsi_done = done; 1015 1016 in_ptr = qpti->req_in_ptr; 1017 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr]; 1018 out_ptr = sbus_readw(qpti->qregs + MBOX4); 1019 in_ptr = NEXT_REQ_PTR(in_ptr); 1020 if (in_ptr == out_ptr) 1021 goto toss_command; 1022 1023 if (qpti->send_marker) { 1024 marker_frob(cmd); 1025 qpti->send_marker = 0; 1026 if (NEXT_REQ_PTR(in_ptr) == out_ptr) { 1027 sbus_writew(in_ptr, qpti->qregs + MBOX4); 1028 qpti->req_in_ptr = in_ptr; 1029 goto toss_command; 1030 } 1031 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr]; 1032 in_ptr = NEXT_REQ_PTR(in_ptr); 1033 } 1034 cmd_frob(cmd, Cmnd, qpti); 1035 if ((in_ptr = load_cmd(Cmnd, cmd, qpti, in_ptr, out_ptr)) == -1) 1036 goto toss_command; 1037 1038 update_can_queue(host, in_ptr, out_ptr); 1039 1040 return 0; 1041 1042 toss_command: 1043 printk(KERN_EMERG "qlogicpti%d: request queue overflow\n", 1044 qpti->qpti_id); 1045 1046 /* Unfortunately, unless you use the new EH code, which 1047 * we don't, the midlayer will ignore the return value, 1048 * which is insane. We pick up the pieces like this. 1049 */ 1050 Cmnd->result = DID_BUS_BUSY; 1051 done(Cmnd); 1052 return 1; 1053 } 1054 1055 static int qlogicpti_return_status(struct Status_Entry *sts, int id) 1056 { 1057 int host_status = DID_ERROR; 1058 1059 switch (sts->completion_status) { 1060 case CS_COMPLETE: 1061 host_status = DID_OK; 1062 break; 1063 case CS_INCOMPLETE: 1064 if (!(sts->state_flags & SF_GOT_BUS)) 1065 host_status = DID_NO_CONNECT; 1066 else if (!(sts->state_flags & SF_GOT_TARGET)) 1067 host_status = DID_BAD_TARGET; 1068 else if (!(sts->state_flags & SF_SENT_CDB)) 1069 host_status = DID_ERROR; 1070 else if (!(sts->state_flags & SF_TRANSFERRED_DATA)) 1071 host_status = DID_ERROR; 1072 else if (!(sts->state_flags & SF_GOT_STATUS)) 1073 host_status = DID_ERROR; 1074 else if (!(sts->state_flags & SF_GOT_SENSE)) 1075 host_status = DID_ERROR; 1076 break; 1077 case CS_DMA_ERROR: 1078 case CS_TRANSPORT_ERROR: 1079 host_status = DID_ERROR; 1080 break; 1081 case CS_RESET_OCCURRED: 1082 case CS_BUS_RESET: 1083 host_status = DID_RESET; 1084 break; 1085 case CS_ABORTED: 1086 host_status = DID_ABORT; 1087 break; 1088 case CS_TIMEOUT: 1089 host_status = DID_TIME_OUT; 1090 break; 1091 case CS_DATA_OVERRUN: 1092 case CS_COMMAND_OVERRUN: 1093 case CS_STATUS_OVERRUN: 1094 case CS_BAD_MESSAGE: 1095 case CS_NO_MESSAGE_OUT: 1096 case CS_EXT_ID_FAILED: 1097 case CS_IDE_MSG_FAILED: 1098 case CS_ABORT_MSG_FAILED: 1099 case CS_NOP_MSG_FAILED: 1100 case CS_PARITY_ERROR_MSG_FAILED: 1101 case CS_DEVICE_RESET_MSG_FAILED: 1102 case CS_ID_MSG_FAILED: 1103 case CS_UNEXP_BUS_FREE: 1104 host_status = DID_ERROR; 1105 break; 1106 case CS_DATA_UNDERRUN: 1107 host_status = DID_OK; 1108 break; 1109 default: 1110 printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n", 1111 id, sts->completion_status); 1112 host_status = DID_ERROR; 1113 break; 1114 } 1115 1116 return (sts->scsi_status & STATUS_MASK) | (host_status << 16); 1117 } 1118 1119 static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti) 1120 { 1121 struct scsi_cmnd *Cmnd, *done_queue = NULL; 1122 struct Status_Entry *sts; 1123 u_int in_ptr, out_ptr; 1124 1125 if (!(sbus_readw(qpti->qregs + SBUS_STAT) & SBUS_STAT_RINT)) 1126 return NULL; 1127 1128 in_ptr = sbus_readw(qpti->qregs + MBOX5); 1129 sbus_writew(HCCTRL_CRIRQ, qpti->qregs + HCCTRL); 1130 if (sbus_readw(qpti->qregs + SBUS_SEMAPHORE) & SBUS_SEMAPHORE_LCK) { 1131 switch (sbus_readw(qpti->qregs + MBOX0)) { 1132 case ASYNC_SCSI_BUS_RESET: 1133 case EXECUTION_TIMEOUT_RESET: 1134 qpti->send_marker = 1; 1135 break; 1136 case INVALID_COMMAND: 1137 case HOST_INTERFACE_ERROR: 1138 case COMMAND_ERROR: 1139 case COMMAND_PARAM_ERROR: 1140 break; 1141 }; 1142 sbus_writew(0, qpti->qregs + SBUS_SEMAPHORE); 1143 } 1144 1145 /* This looks like a network driver! */ 1146 out_ptr = qpti->res_out_ptr; 1147 while (out_ptr != in_ptr) { 1148 u_int cmd_slot; 1149 1150 sts = (struct Status_Entry *) &qpti->res_cpu[out_ptr]; 1151 out_ptr = NEXT_RES_PTR(out_ptr); 1152 1153 /* We store an index in the handle, not the pointer in 1154 * some form. This avoids problems due to the fact 1155 * that the handle provided is only 32-bits. -DaveM 1156 */ 1157 cmd_slot = sts->handle; 1158 Cmnd = qpti->cmd_slots[cmd_slot]; 1159 qpti->cmd_slots[cmd_slot] = NULL; 1160 1161 if (sts->completion_status == CS_RESET_OCCURRED || 1162 sts->completion_status == CS_ABORTED || 1163 (sts->status_flags & STF_BUS_RESET)) 1164 qpti->send_marker = 1; 1165 1166 if (sts->state_flags & SF_GOT_SENSE) 1167 memcpy(Cmnd->sense_buffer, sts->req_sense_data, 1168 SCSI_SENSE_BUFFERSIZE); 1169 1170 if (sts->hdr.entry_type == ENTRY_STATUS) 1171 Cmnd->result = 1172 qlogicpti_return_status(sts, qpti->qpti_id); 1173 else 1174 Cmnd->result = DID_ERROR << 16; 1175 1176 if (scsi_bufflen(Cmnd)) 1177 dma_unmap_sg(&qpti->op->dev, 1178 scsi_sglist(Cmnd), scsi_sg_count(Cmnd), 1179 Cmnd->sc_data_direction); 1180 1181 qpti->cmd_count[Cmnd->device->id]--; 1182 sbus_writew(out_ptr, qpti->qregs + MBOX5); 1183 Cmnd->host_scribble = (unsigned char *) done_queue; 1184 done_queue = Cmnd; 1185 } 1186 qpti->res_out_ptr = out_ptr; 1187 1188 return done_queue; 1189 } 1190 1191 static irqreturn_t qpti_intr(int irq, void *dev_id) 1192 { 1193 struct qlogicpti *qpti = dev_id; 1194 unsigned long flags; 1195 struct scsi_cmnd *dq; 1196 1197 spin_lock_irqsave(qpti->qhost->host_lock, flags); 1198 dq = qlogicpti_intr_handler(qpti); 1199 1200 if (dq != NULL) { 1201 do { 1202 struct scsi_cmnd *next; 1203 1204 next = (struct scsi_cmnd *) dq->host_scribble; 1205 dq->scsi_done(dq); 1206 dq = next; 1207 } while (dq != NULL); 1208 } 1209 spin_unlock_irqrestore(qpti->qhost->host_lock, flags); 1210 1211 return IRQ_HANDLED; 1212 } 1213 1214 static int qlogicpti_abort(struct scsi_cmnd *Cmnd) 1215 { 1216 u_short param[6]; 1217 struct Scsi_Host *host = Cmnd->device->host; 1218 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 1219 int return_status = SUCCESS; 1220 u32 cmd_cookie; 1221 int i; 1222 1223 printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n", 1224 qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun); 1225 1226 qlogicpti_disable_irqs(qpti); 1227 1228 /* Find the 32-bit cookie we gave to the firmware for 1229 * this command. 1230 */ 1231 for (i = 0; i < QLOGICPTI_REQ_QUEUE_LEN + 1; i++) 1232 if (qpti->cmd_slots[i] == Cmnd) 1233 break; 1234 cmd_cookie = i; 1235 1236 param[0] = MBOX_ABORT; 1237 param[1] = (((u_short) Cmnd->device->id) << 8) | Cmnd->device->lun; 1238 param[2] = cmd_cookie >> 16; 1239 param[3] = cmd_cookie & 0xffff; 1240 if (qlogicpti_mbox_command(qpti, param, 0) || 1241 (param[0] != MBOX_COMMAND_COMPLETE)) { 1242 printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n", 1243 qpti->qpti_id, param[0]); 1244 return_status = FAILED; 1245 } 1246 1247 qlogicpti_enable_irqs(qpti); 1248 1249 return return_status; 1250 } 1251 1252 static int qlogicpti_reset(struct scsi_cmnd *Cmnd) 1253 { 1254 u_short param[6]; 1255 struct Scsi_Host *host = Cmnd->device->host; 1256 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 1257 int return_status = SUCCESS; 1258 1259 printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n", 1260 qpti->qpti_id); 1261 1262 qlogicpti_disable_irqs(qpti); 1263 1264 param[0] = MBOX_BUS_RESET; 1265 param[1] = qpti->host_param.bus_reset_delay; 1266 if (qlogicpti_mbox_command(qpti, param, 0) || 1267 (param[0] != MBOX_COMMAND_COMPLETE)) { 1268 printk(KERN_EMERG "qlogicisp%d: scsi bus reset failure: %x\n", 1269 qpti->qpti_id, param[0]); 1270 return_status = FAILED; 1271 } 1272 1273 qlogicpti_enable_irqs(qpti); 1274 1275 return return_status; 1276 } 1277 1278 static struct scsi_host_template qpti_template = { 1279 .module = THIS_MODULE, 1280 .name = "qlogicpti", 1281 .info = qlogicpti_info, 1282 .queuecommand = qlogicpti_queuecommand, 1283 .slave_configure = qlogicpti_slave_configure, 1284 .eh_abort_handler = qlogicpti_abort, 1285 .eh_bus_reset_handler = qlogicpti_reset, 1286 .can_queue = QLOGICPTI_REQ_QUEUE_LEN, 1287 .this_id = 7, 1288 .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), 1289 .cmd_per_lun = 1, 1290 .use_clustering = ENABLE_CLUSTERING, 1291 }; 1292 1293 static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match) 1294 { 1295 struct scsi_host_template *tpnt = match->data; 1296 struct device_node *dp = op->node; 1297 struct Scsi_Host *host; 1298 struct qlogicpti *qpti; 1299 static int nqptis; 1300 const char *fcode; 1301 1302 /* Sometimes Antares cards come up not completely 1303 * setup, and we get a report of a zero IRQ. 1304 */ 1305 if (op->irqs[0] == 0) 1306 return -ENODEV; 1307 1308 host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti)); 1309 if (!host) 1310 return -ENOMEM; 1311 1312 qpti = shost_priv(host); 1313 1314 host->max_id = MAX_TARGETS; 1315 qpti->qhost = host; 1316 qpti->op = op; 1317 qpti->qpti_id = nqptis; 1318 strcpy(qpti->prom_name, op->node->name); 1319 qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp"); 1320 1321 if (qpti_map_regs(qpti) < 0) 1322 goto fail_unlink; 1323 1324 if (qpti_register_irq(qpti) < 0) 1325 goto fail_unmap_regs; 1326 1327 qpti_get_scsi_id(qpti); 1328 qpti_get_bursts(qpti); 1329 qpti_get_clock(qpti); 1330 1331 /* Clear out scsi_cmnd array. */ 1332 memset(qpti->cmd_slots, 0, sizeof(qpti->cmd_slots)); 1333 1334 if (qpti_map_queues(qpti) < 0) 1335 goto fail_free_irq; 1336 1337 /* Load the firmware. */ 1338 if (qlogicpti_load_firmware(qpti)) 1339 goto fail_unmap_queues; 1340 if (qpti->is_pti) { 1341 /* Check the PTI status reg. */ 1342 if (qlogicpti_verify_tmon(qpti)) 1343 goto fail_unmap_queues; 1344 } 1345 1346 /* Reset the ISP and init res/req queues. */ 1347 if (qlogicpti_reset_hardware(host)) 1348 goto fail_unmap_queues; 1349 1350 printk("(Firmware v%d.%d.%d)", qpti->fware_majrev, 1351 qpti->fware_minrev, qpti->fware_micrev); 1352 1353 fcode = of_get_property(dp, "isp-fcode", NULL); 1354 if (fcode && fcode[0]) 1355 printk("(FCode %s)", fcode); 1356 if (of_find_property(dp, "differential", NULL) != NULL) 1357 qpti->differential = 1; 1358 1359 printk("\nqlogicpti%d: [%s Wide, using %s interface]\n", 1360 qpti->qpti_id, 1361 (qpti->ultra ? "Ultra" : "Fast"), 1362 (qpti->differential ? "differential" : "single ended")); 1363 1364 if (scsi_add_host(host, &op->dev)) { 1365 printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id); 1366 goto fail_unmap_queues; 1367 } 1368 1369 dev_set_drvdata(&op->dev, qpti); 1370 1371 qpti_chain_add(qpti); 1372 1373 scsi_scan_host(host); 1374 nqptis++; 1375 1376 return 0; 1377 1378 fail_unmap_queues: 1379 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 1380 dma_free_coherent(&op->dev, 1381 QSIZE(RES_QUEUE_LEN), 1382 qpti->res_cpu, qpti->res_dvma); 1383 dma_free_coherent(&op->dev, 1384 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 1385 qpti->req_cpu, qpti->req_dvma); 1386 #undef QSIZE 1387 1388 fail_unmap_regs: 1389 of_iounmap(&op->resource[0], qpti->qregs, 1390 resource_size(&op->resource[0])); 1391 if (qpti->is_pti) 1392 of_iounmap(&op->resource[0], qpti->sreg, 1393 sizeof(unsigned char)); 1394 1395 fail_free_irq: 1396 free_irq(qpti->irq, qpti); 1397 1398 fail_unlink: 1399 scsi_host_put(host); 1400 1401 return -ENODEV; 1402 } 1403 1404 static int __devexit qpti_sbus_remove(struct of_device *op) 1405 { 1406 struct qlogicpti *qpti = dev_get_drvdata(&op->dev); 1407 1408 qpti_chain_del(qpti); 1409 1410 scsi_remove_host(qpti->qhost); 1411 1412 /* Shut up the card. */ 1413 sbus_writew(0, qpti->qregs + SBUS_CTRL); 1414 1415 /* Free IRQ handler and unmap Qlogic,ISP and PTI status regs. */ 1416 free_irq(qpti->irq, qpti); 1417 1418 #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) 1419 dma_free_coherent(&op->dev, 1420 QSIZE(RES_QUEUE_LEN), 1421 qpti->res_cpu, qpti->res_dvma); 1422 dma_free_coherent(&op->dev, 1423 QSIZE(QLOGICPTI_REQ_QUEUE_LEN), 1424 qpti->req_cpu, qpti->req_dvma); 1425 #undef QSIZE 1426 1427 of_iounmap(&op->resource[0], qpti->qregs, 1428 resource_size(&op->resource[0])); 1429 if (qpti->is_pti) 1430 of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char)); 1431 1432 scsi_host_put(qpti->qhost); 1433 1434 return 0; 1435 } 1436 1437 static const struct of_device_id qpti_match[] = { 1438 { 1439 .name = "ptisp", 1440 .data = &qpti_template, 1441 }, 1442 { 1443 .name = "PTI,ptisp", 1444 .data = &qpti_template, 1445 }, 1446 { 1447 .name = "QLGC,isp", 1448 .data = &qpti_template, 1449 }, 1450 { 1451 .name = "SUNW,isp", 1452 .data = &qpti_template, 1453 }, 1454 {}, 1455 }; 1456 MODULE_DEVICE_TABLE(of, qpti_match); 1457 1458 static struct of_platform_driver qpti_sbus_driver = { 1459 .name = "qpti", 1460 .match_table = qpti_match, 1461 .probe = qpti_sbus_probe, 1462 .remove = __devexit_p(qpti_sbus_remove), 1463 }; 1464 1465 static int __init qpti_init(void) 1466 { 1467 return of_register_driver(&qpti_sbus_driver, &of_bus_type); 1468 } 1469 1470 static void __exit qpti_exit(void) 1471 { 1472 of_unregister_driver(&qpti_sbus_driver); 1473 } 1474 1475 MODULE_DESCRIPTION("QlogicISP SBUS driver"); 1476 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 1477 MODULE_LICENSE("GPL"); 1478 MODULE_VERSION("2.1"); 1479 MODULE_FIRMWARE("qlogic/isp1000.bin"); 1480 1481 module_init(qpti_init); 1482 module_exit(qpti_exit); 1483