/*
 * Core routines and tables shareable across OS platforms.
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2000-2003 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
 */

#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
#include "aicasm/aicasm_insformat.h"

/***************************** Lookup Tables **********************************/
static const char *const ahd_chip_names[] =
{
	"NONE",
	"aic7901",
	"aic7902",
	"aic7901A"
};

/*
 * Hardware error codes.
58 */ 59 struct ahd_hard_error_entry { 60 uint8_t errno; 61 const char *errmesg; 62 }; 63 64 static const struct ahd_hard_error_entry ahd_hard_errors[] = { 65 { DSCTMOUT, "Discard Timer has timed out" }, 66 { ILLOPCODE, "Illegal Opcode in sequencer program" }, 67 { SQPARERR, "Sequencer Parity Error" }, 68 { DPARERR, "Data-path Parity Error" }, 69 { MPARERR, "Scratch or SCB Memory Parity Error" }, 70 { CIOPARERR, "CIOBUS Parity Error" }, 71 }; 72 static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors); 73 74 static const struct ahd_phase_table_entry ahd_phase_table[] = 75 { 76 { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, 77 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, 78 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, 79 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, 80 { P_COMMAND, MSG_NOOP, "in Command phase" }, 81 { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, 82 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, 83 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, 84 { P_BUSFREE, MSG_NOOP, "while idle" }, 85 { 0, MSG_NOOP, "in unknown phase" } 86 }; 87 88 /* 89 * In most cases we only wish to itterate over real phases, so 90 * exclude the last element from the count. 91 */ 92 static const u_int num_phases = ARRAY_SIZE(ahd_phase_table) - 1; 93 94 /* Our Sequencer Program */ 95 #include "aic79xx_seq.h" 96 97 /**************************** Function Declarations ***************************/ 98 static void ahd_handle_transmission_error(struct ahd_softc *ahd); 99 static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, 100 u_int lqistat1); 101 static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, 102 u_int busfreetime); 103 static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); 104 static void ahd_handle_proto_violation(struct ahd_softc *ahd); 105 static void ahd_force_renegotiation(struct ahd_softc *ahd, 106 struct ahd_devinfo *devinfo); 107 108 static struct ahd_tmode_tstate* 109 ahd_alloc_tstate(struct ahd_softc *ahd, 110 u_int scsi_id, char channel); 111 #ifdef AHD_TARGET_MODE 112 static void ahd_free_tstate(struct ahd_softc *ahd, 113 u_int scsi_id, char channel, int force); 114 #endif 115 static void ahd_devlimited_syncrate(struct ahd_softc *ahd, 116 struct ahd_initiator_tinfo *, 117 u_int *period, 118 u_int *ppr_options, 119 role_t role); 120 static void ahd_update_neg_table(struct ahd_softc *ahd, 121 struct ahd_devinfo *devinfo, 122 struct ahd_transinfo *tinfo); 123 static void ahd_update_pending_scbs(struct ahd_softc *ahd); 124 static void ahd_fetch_devinfo(struct ahd_softc *ahd, 125 struct ahd_devinfo *devinfo); 126 static void ahd_scb_devinfo(struct ahd_softc *ahd, 127 struct ahd_devinfo *devinfo, 128 struct scb *scb); 129 static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, 130 struct ahd_devinfo *devinfo, 131 struct scb *scb); 132 static void ahd_build_transfer_msg(struct ahd_softc *ahd, 133 struct ahd_devinfo *devinfo); 134 static void ahd_construct_sdtr(struct ahd_softc *ahd, 135 struct ahd_devinfo *devinfo, 136 u_int period, u_int offset); 137 static void ahd_construct_wdtr(struct ahd_softc *ahd, 138 struct ahd_devinfo *devinfo, 139 u_int bus_width); 140 static void ahd_construct_ppr(struct ahd_softc *ahd, 141 struct ahd_devinfo *devinfo, 142 u_int period, u_int offset, 143 u_int bus_width, u_int ppr_options); 144 static void ahd_clear_msg_state(struct ahd_softc *ahd); 145 static void ahd_handle_message_phase(struct ahd_softc *ahd); 146 typedef enum { 147 AHDMSG_1B, 148 AHDMSG_2B, 149 AHDMSG_EXT 150 } 

/* Our Sequencer Program */
#include "aic79xx_seq.h"

/**************************** Function Declarations ***************************/
static void		ahd_handle_transmission_error(struct ahd_softc *ahd);
static void		ahd_handle_lqiphase_error(struct ahd_softc *ahd,
						  u_int lqistat1);
static int		ahd_handle_pkt_busfree(struct ahd_softc *ahd,
					       u_int busfreetime);
static int		ahd_handle_nonpkt_busfree(struct ahd_softc *ahd);
static void		ahd_handle_proto_violation(struct ahd_softc *ahd);
static void		ahd_force_renegotiation(struct ahd_softc *ahd,
						struct ahd_devinfo *devinfo);

static struct ahd_tmode_tstate*
			ahd_alloc_tstate(struct ahd_softc *ahd,
					 u_int scsi_id, char channel);
#ifdef AHD_TARGET_MODE
static void		ahd_free_tstate(struct ahd_softc *ahd,
					u_int scsi_id, char channel, int force);
#endif
static void		ahd_devlimited_syncrate(struct ahd_softc *ahd,
						struct ahd_initiator_tinfo *,
						u_int *period,
						u_int *ppr_options,
						role_t role);
static void		ahd_update_neg_table(struct ahd_softc *ahd,
					     struct ahd_devinfo *devinfo,
					     struct ahd_transinfo *tinfo);
static void		ahd_update_pending_scbs(struct ahd_softc *ahd);
static void		ahd_fetch_devinfo(struct ahd_softc *ahd,
					  struct ahd_devinfo *devinfo);
static void		ahd_scb_devinfo(struct ahd_softc *ahd,
					struct ahd_devinfo *devinfo,
					struct scb *scb);
static void		ahd_setup_initiator_msgout(struct ahd_softc *ahd,
						   struct ahd_devinfo *devinfo,
						   struct scb *scb);
static void		ahd_build_transfer_msg(struct ahd_softc *ahd,
					       struct ahd_devinfo *devinfo);
static void		ahd_construct_sdtr(struct ahd_softc *ahd,
					   struct ahd_devinfo *devinfo,
					   u_int period, u_int offset);
static void		ahd_construct_wdtr(struct ahd_softc *ahd,
					   struct ahd_devinfo *devinfo,
					   u_int bus_width);
static void		ahd_construct_ppr(struct ahd_softc *ahd,
					  struct ahd_devinfo *devinfo,
					  u_int period, u_int offset,
					  u_int bus_width, u_int ppr_options);
static void		ahd_clear_msg_state(struct ahd_softc *ahd);
static void		ahd_handle_message_phase(struct ahd_softc *ahd);
typedef enum {
	AHDMSG_1B,
	AHDMSG_2B,
	AHDMSG_EXT
} ahd_msgtype;
static int		ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type,
				     u_int msgval, int full);
static int		ahd_parse_msg(struct ahd_softc *ahd,
				      struct ahd_devinfo *devinfo);
static int		ahd_handle_msg_reject(struct ahd_softc *ahd,
					      struct ahd_devinfo *devinfo);
static void		ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
						    struct ahd_devinfo *devinfo);
static void		ahd_reinitialize_dataptrs(struct ahd_softc *ahd);
static void		ahd_handle_devreset(struct ahd_softc *ahd,
					    struct ahd_devinfo *devinfo,
					    u_int lun, cam_status status,
					    char *message, int verbose_level);
#ifdef AHD_TARGET_MODE
static void		ahd_setup_target_msgin(struct ahd_softc *ahd,
					       struct ahd_devinfo *devinfo,
					       struct scb *scb);
#endif

static u_int		ahd_sglist_size(struct ahd_softc *ahd);
static u_int		ahd_sglist_allocsize(struct ahd_softc *ahd);
static bus_dmamap_callback_t
			ahd_dmamap_cb;
static void		ahd_initialize_hscbs(struct ahd_softc *ahd);
static int		ahd_init_scbdata(struct ahd_softc *ahd);
static void		ahd_fini_scbdata(struct ahd_softc *ahd);
static void		ahd_setup_iocell_workaround(struct ahd_softc *ahd);
static void		ahd_iocell_first_selection(struct ahd_softc *ahd);
static void		ahd_add_col_list(struct ahd_softc *ahd,
					 struct scb *scb, u_int col_idx);
static void		ahd_rem_col_list(struct ahd_softc *ahd,
					 struct scb *scb);
static void		ahd_chip_init(struct ahd_softc *ahd);
static void		ahd_qinfifo_requeue(struct ahd_softc *ahd,
					    struct scb *prev_scb,
					    struct scb *scb);
static int		ahd_qinfifo_count(struct ahd_softc *ahd);
static int		ahd_search_scb_list(struct ahd_softc *ahd, int target,
					    char channel, int lun, u_int tag,
					    role_t role, uint32_t status,
					    ahd_search_action action,
					    u_int *list_head, u_int *list_tail,
					    u_int tid);
static void		ahd_stitch_tid_list(struct ahd_softc *ahd,
					    u_int tid_prev, u_int tid_cur,
					    u_int tid_next);
static void		ahd_add_scb_to_free_list(struct ahd_softc *ahd,
						 u_int scbid);
static u_int		ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
				     u_int prev, u_int next, u_int tid);
static void		ahd_reset_current_bus(struct ahd_softc *ahd);
static void		ahd_stat_timer(struct timer_list *t);
#ifdef AHD_DUMP_SEQ
static void		ahd_dumpseq(struct ahd_softc *ahd);
#endif
static void		ahd_loadseq(struct ahd_softc *ahd);
static int		ahd_check_patch(struct ahd_softc *ahd,
					const struct patch **start_patch,
					u_int start_instr, u_int *skip_addr);
static u_int		ahd_resolve_seqaddr(struct ahd_softc *ahd,
					    u_int address);
static void		ahd_download_instr(struct ahd_softc *ahd,
					   u_int instrptr, uint8_t *dconsts);
static int		ahd_probe_stack_size(struct ahd_softc *ahd);
static int		ahd_scb_active_in_fifo(struct ahd_softc *ahd,
					       struct scb *scb);
static void		ahd_run_data_fifo(struct ahd_softc *ahd,
					  struct scb *scb);

#ifdef AHD_TARGET_MODE
static void		ahd_queue_lstate_event(struct ahd_softc *ahd,
					       struct ahd_tmode_lstate *lstate,
					       u_int initiator_id,
					       u_int event_type,
					       u_int event_arg);
static void		ahd_update_scsiid(struct ahd_softc *ahd,
					  u_int targid_mask);
static int		ahd_handle_target_cmd(struct ahd_softc *ahd,
					      struct target_cmd *cmd);
#endif

static int		ahd_abort_scbs(struct ahd_softc *ahd, int target,
				       char channel, int lun, u_int tag,
				       role_t role, uint32_t status);
static void		ahd_alloc_scbs(struct ahd_softc *ahd);
static void		ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl,
				     u_int scbid);
static void		ahd_calc_residual(struct ahd_softc *ahd,
					  struct scb *scb);
static void		ahd_clear_critical_section(struct ahd_softc *ahd);
static void		ahd_clear_intstat(struct ahd_softc *ahd);
static void		ahd_enable_coalescing(struct ahd_softc *ahd,
					      int enable);
static u_int		ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl);
static void		ahd_freeze_devq(struct ahd_softc *ahd,
					struct scb *scb);
static void		ahd_handle_scb_status(struct ahd_softc *ahd,
					      struct scb *scb);
static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
static void		ahd_shutdown(void *arg);
static void		ahd_update_coalescing_values(struct ahd_softc *ahd,
						     u_int timer,
						     u_int maxcmds,
						     u_int mincmds);
static int		ahd_verify_vpd_cksum(struct vpd_config *vpd);
static int		ahd_wait_seeprom(struct ahd_softc *ahd);
static int		ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
				      int target, char channel, int lun,
				      u_int tag, role_t role);

static void		ahd_reset_cmds_pending(struct ahd_softc *ahd);

/*************************** Interrupt Services *******************************/
static void		ahd_run_qoutfifo(struct ahd_softc *ahd);
#ifdef AHD_TARGET_MODE
static void		ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
#endif
static void		ahd_handle_hwerrint(struct ahd_softc *ahd);
static void		ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
static void		ahd_handle_scsiint(struct ahd_softc *ahd,
					   u_int intstat);

/************************ Sequencer Execution Control *************************/
void
ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	if (ahd->src_mode == src && ahd->dst_mode == dst)
		return;
#ifdef AHD_DEBUG
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		panic("Setting mode prior to saving it.\n");
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printk("%s: Setting mode 0x%x\n", ahd_name(ahd),
		       ahd_build_mode_state(ahd, src, dst));
#endif
	ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
	ahd->src_mode = src;
	ahd->dst_mode = dst;
}

static void
ahd_update_modes(struct ahd_softc *ahd)
{
	ahd_mode_state mode_ptr;
	ahd_mode src;
	ahd_mode dst;

	mode_ptr = ahd_inb(ahd, MODE_PTR);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printk("Reading mode 0x%x\n", mode_ptr);
#endif
	ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
	ahd_known_modes(ahd, src, dst);
}

static void
ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
		 ahd_mode dstmode, const char *file, int line)
{
#ifdef AHD_DEBUG
	if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
	 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
		panic("%s:%s:%d: Mode assertion failed.\n",
		      ahd_name(ahd), file, line);
	}
#endif
}

#define AHD_ASSERT_MODES(ahd, source, dest) \
	ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);

ahd_mode_state
ahd_save_modes(struct ahd_softc *ahd)
{
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		ahd_update_modes(ahd);

	return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
}
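
/*
 * Typical save/restore usage, as exercised by ahd_currently_packetized()
 * and ahd_clear_fifo() later in this file (a sketch of the existing
 * pattern, not a new interface):
 *
 *	saved_modes = ahd_save_modes(ahd);
 *	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
 *	... access mode-dependent registers ...
 *	ahd_restore_modes(ahd, saved_modes);
 */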
void
ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
{
	ahd_mode src;
	ahd_mode dst;

	ahd_extract_mode_state(ahd, state, &src, &dst);
	ahd_set_modes(ahd, src, dst);
}

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
int
ahd_is_paused(struct ahd_softc *ahd)
{
	return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
void
ahd_pause(struct ahd_softc *ahd)
{
	ahd_outb(ahd, HCNTRL, ahd->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahd_is_paused(ahd) == 0)
		;
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
void
ahd_unpause(struct ahd_softc *ahd)
{
	/*
	 * Automatically restore our modes to those saved
	 * prior to the first change of the mode.
	 */
	if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
	 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
		if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
			ahd_reset_cmds_pending(ahd);
		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
	}

	if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
		ahd_outb(ahd, HCNTRL, ahd->unpause);

	ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
}

/*********************** Scatter Gather List Handling *************************/
void *
ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
	     void *sgptr, dma_addr_t addr, bus_size_t len, int last)
{
	scb->sg_count++;
	if (sizeof(dma_addr_t) > 4
	 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)sgptr;
		sg->addr = ahd_htole64(addr);
		sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	} else {
		struct ahd_dma_seg *sg;

		sg = (struct ahd_dma_seg *)sgptr;
		sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
		sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
				    | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	}
}
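
/*
 * Worked example of the 32-bit descriptor encoding above (a sketch
 * assuming a 39-bit addressing configuration): a segment at bus
 * address 0x12_3456_7800 of length 0x1000 is stored as
 *
 *	sg->addr = 0x34567800	(low 32 address bits)
 *	sg->len  = 0x12001000	(address bits 38:32 in bits 30:24,
 *				 length in bits 23:0)
 *
 * i.e. (addr >> 8) & 0x7F000000 parks the high address bits in the
 * otherwise unused top bits of the length word.
 */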
static void
ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
{
	/* XXX Handle target mode SCBs. */
	scb->crc_retry_count = 0;
	if ((scb->flags & SCB_PACKETIZED) != 0) {
		/* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
		scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
	} else {
		if (ahd_get_transfer_length(scb) & 0x01)
			scb->hscb->task_attribute = SCB_XFERLEN_ODD;
		else
			scb->hscb->task_attribute = 0;
	}

	if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
	 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
		scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
		    ahd_htole32(scb->sense_busaddr);
}

static void
ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
{
	/*
	 * Copy the first SG into the "current" data pointer area.
	 */
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)scb->sg_list;
		scb->hscb->dataptr = sg->addr;
		scb->hscb->datacnt = sg->len;
	} else {
		struct ahd_dma_seg *sg;
		uint32_t *dataptr_words;

		sg = (struct ahd_dma_seg *)scb->sg_list;
		dataptr_words = (uint32_t*)&scb->hscb->dataptr;
		dataptr_words[0] = sg->addr;
		dataptr_words[1] = 0;
		if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
			uint64_t high_addr;

			high_addr = ahd_le32toh(sg->len) & 0x7F000000;
			scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
		}
		scb->hscb->datacnt = sg->len;
	}
	/*
	 * Note where to find the SG entries in bus space.
	 * We also set the full residual flag which the
	 * sequencer will clear as soon as a data transfer
	 * occurs.
	 */
	scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
}

static void
ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
{
	scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
	scb->hscb->dataptr = 0;
	scb->hscb->datacnt = 0;
}

/************************** Memory mapping routines ***************************/
static void *
ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
{
	dma_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
	return ((uint8_t *)scb->sg_list + sg_offset);
}

static uint32_t
ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
{
	dma_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
		  - ahd_sg_size(ahd);

	return (scb->sg_list_busaddr + sg_offset);
}
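
/*
 * Note on the "entry 1, not 0" bias above (a summary of the two
 * helpers): sg_list_busaddr is the bus address of the second list
 * element, so entry 0 lives at sg_list_busaddr - ahd_sg_size(ahd).
 * With the bias applied symmetrically in both directions,
 * ahd_sg_virt_to_bus(ahd, scb, ahd_sg_bus_to_virt(ahd, scb, x)) == x.
 */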
static void
ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
			scb->hscb_map->dmamap,
			/*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
			/*len*/sizeof(*scb->hscb), op);
}

void
ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
			scb->sg_map->dmamap,
			/*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
			/*len*/ahd_sg_size(ahd) * scb->sg_count, op);
}

static void
ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
			scb->sense_map->dmamap,
			/*offset*/scb->sense_busaddr,
			/*len*/AHD_SENSE_BUFSIZE, op);
}

#ifdef AHD_TARGET_MODE
static uint32_t
ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
{
	return (((uint8_t *)&ahd->targetcmds[index])
		- (uint8_t *)ahd->qoutfifo);
}
#endif

/*********************** Miscellaneous Support Functions ***********************/
/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
struct ahd_initiator_tinfo *
ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
		    u_int remote_id, struct ahd_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahd->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}
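
/*
 * Indexing example (a sketch of the lookup above, not new behavior):
 * an initiator at our_id 7 talking to remote_id 3 on channel A reads
 * the negotiation data from ahd->enabled_targets[7]->transinfo[3];
 * the same pair on channel 'B' would index enabled_targets[15].
 */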
uint16_t
ahd_inw(struct ahd_softc *ahd, u_int port)
{
	/*
	 * Read high byte first as some registers increment
	 * or have other side effects when the low byte is
	 * read.
	 */
	uint16_t r = ahd_inb(ahd, port+1) << 8;
	return r | ahd_inb(ahd, port);
}

void
ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
{
	/*
	 * Write low byte first to accommodate registers
	 * such as PRGMCNT where the order matters.
	 */
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
}

uint32_t
ahd_inl(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (ahd_inb(ahd, port+3) << 24));
}

void
ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
{
	ahd_outb(ahd, port, (value) & 0xFF);
	ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
	ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
	ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
}

uint64_t
ahd_inq(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (ahd_inb(ahd, port+3) << 24)
	      | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
	      | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
	      | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
	      | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
}

void
ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
	ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
	ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
	ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
	ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
	ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
	ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
}

u_int
ahd_get_scbptr(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
}

void
ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
	ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
}

#if 0 /* unused */
static u_int
ahd_get_hnscb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inw_atomic(ahd, HNSCB_QOFF));
}
#endif

static void
ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outw_atomic(ahd, HNSCB_QOFF, value);
}

#if 0 /* unused */
static u_int
ahd_get_hescb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inb(ahd, HESCB_QOFF));
}
#endif

static void
ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outb(ahd, HESCB_QOFF, value);
}

static u_int
ahd_get_snscb_qoff(struct ahd_softc *ahd)
{
	u_int oldvalue;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	oldvalue = ahd_inw(ahd, SNSCB_QOFF);
	ahd_outw(ahd, SNSCB_QOFF, oldvalue);
	return (oldvalue);
}

static void
ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outw(ahd, SNSCB_QOFF, value);
}

#if 0 /* unused */
static u_int
ahd_get_sescb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SESCB_QOFF));
}
#endif

static void
ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SESCB_QOFF, value);
}

#if 0 /* unused */
static u_int
ahd_get_sdscb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
}
#endif

static void
ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
	ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
}

u_int
ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
{
	u_int value;

	/*
	 * Workaround PCI-X Rev A. hardware bug.
	 * After a host read of SCB memory, the chip
	 * may become confused into thinking prefetch
	 * was required.  This starts the discard timer
	 * running and can cause an unexpected discard
	 * timer interrupt.  The workaround is to read
	 * a normal register prior to the exhaustion of
	 * the discard timer.  The mode pointer register
	 * has no side effects and so serves well for
	 * this purpose.
	 *
	 * Razor #528
	 */
	value = ahd_inb(ahd, offset);
	if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
		ahd_inb(ahd, MODE_PTR);
	return (value);
}

u_int
ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inb_scbram(ahd, offset)
	      | (ahd_inb_scbram(ahd, offset+1) << 8));
}

static uint32_t
ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inw_scbram(ahd, offset)
	      | (ahd_inw_scbram(ahd, offset+2) << 16));
}

static uint64_t
ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inl_scbram(ahd, offset)
	      | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
}

struct scb *
ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
{
	struct scb* scb;

	if (tag >= AHD_SCB_MAX)
		return (NULL);
	scb = ahd->scb_data.scbindex[tag];
	if (scb != NULL)
		ahd_sync_scb(ahd, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	return (scb);
}
static void
ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	struct map_node *q_hscb_map;
	uint32_t saved_hscb_busaddr;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB (by address) to download,
	 * and we can't disappoint it.  To achieve this, the next
	 * HSCB to download is saved off in ahd->next_queued_hscb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahd->next_queued_hscb;
	q_hscb_map = ahd->next_queued_hscb_map;
	saved_hscb_busaddr = q_hscb->hscb_busaddr;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	q_hscb->hscb_busaddr = saved_hscb_busaddr;
	q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;

	/* Now swap HSCB pointers. */
	ahd->next_queued_hscb = scb->hscb;
	ahd->next_queued_hscb_map = scb->hscb_map;
	scb->hscb = q_hscb;
	scb->hscb_map = q_hscb_map;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
}
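
/*
 * Concrete illustration of the swap (a sketch using hypothetical
 * buffers H0 and H1): before queuing, next_queued_hscb points at H0,
 * the address the sequencer already expects, while scb->hscb points
 * at H1.  After the copy and swap, H0 carries the new command and
 * names H1 as its successor, the SCB owns H0, and H1 becomes the
 * next_queued_hscb for the following submission.
 */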
/*
 * Tell the sequencer about a new transaction to execute.
 */
void
ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
{
	ahd_swap_with_next_hscb(ahd, scb);

	if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
		panic("Attempt to queue invalid SCB tag %x\n",
		      SCB_GET_TAG(scb));

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;

	if (scb->sg_count != 0)
		ahd_setup_data_scb(ahd, scb);
	else
		ahd_setup_noxfer_scb(ahd, scb);
	ahd_setup_scb_common(ahd, scb);

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
		uint64_t host_dataptr;

		host_dataptr = ahd_le64toh(scb->hscb->dataptr);
		printk("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
		       ahd_name(ahd),
		       SCB_GET_TAG(scb), scb->hscb->scsiid,
		       ahd_le32toh(scb->hscb->hscb_busaddr),
		       (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
		       (u_int)(host_dataptr & 0xFFFFFFFF),
		       ahd_le32toh(scb->hscb->datacnt));
	}
#endif
	/* Tell the adapter about the newly queued SCB */
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
}
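
/*
 * Submission summary (restating the flow above): once ahd_queue_scb()
 * returns, the tag is recorded in ahd->qinfifo[] and HNSCB_QOFF has
 * been advanced, so the sequencer fetches the HSCB by DMA on its own;
 * no further host action is required for this command.
 */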
/************************** Interrupt Processing ******************************/
static void
ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
{
	ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
			ahd->shared_data_map.dmamap,
			/*offset*/0,
			/*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
}

static void
ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
{
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, 0),
				sizeof(struct target_cmd) * AHD_TMODE_CMDS,
				op);
	}
#endif
}

/*
 * See if the firmware has posted any completed commands
 * into our in-core command complete fifos.
 */
#define AHD_RUN_QOUTFIFO 0x1
#define AHD_RUN_TQINFIFO 0x2
static u_int
ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
{
	u_int retval;

	retval = 0;
	ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
			ahd->shared_data_map.dmamap,
			/*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
			/*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
	if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
	  == ahd->qoutfifonext_valid_tag)
		retval |= AHD_RUN_QOUTFIFO;
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0
	 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
			retval |= AHD_RUN_TQINFIFO;
	}
#endif
	return (retval);
}

/*
 * Catch an interrupt from the adapter
 */
int
ahd_intr(struct ahd_softc *ahd)
{
	u_int intstat;

	if ((ahd->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}

	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
	 && (ahd_check_cmdcmpltqueues(ahd) != 0))
		intstat = CMDCMPLT;
	else
		intstat = ahd_inb(ahd, INTSTAT);

	if ((intstat & INT_PEND) == 0)
		return (0);

	if (intstat & CMDCMPLT) {
		ahd_outb(ahd, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
			if (ahd_is_paused(ahd)) {
				/*
				 * Potentially lost SEQINT.
				 * If SEQINTCODE is non-zero,
				 * simulate the SEQINT.
				 */
				if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
					intstat |= SEQINT;
			}
		} else {
			ahd_flush_device_writes(ahd);
		}
		ahd_run_qoutfifo(ahd);
		ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
		ahd->cmdcmplt_total++;
#ifdef AHD_TARGET_MODE
		if ((ahd->flags & AHD_TARGETROLE) != 0)
			ahd_run_tqinfifo(ahd, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & HWERRINT) {
		ahd_handle_hwerrint(ahd);
	} else if ((intstat & (PCIINT|SPLTINT)) != 0) {
		ahd->bus_intr(ahd);
	} else {

		if ((intstat & SEQINT) != 0)
			ahd_handle_seqint(ahd, intstat);

		if ((intstat & SCSIINT) != 0)
			ahd_handle_scsiint(ahd, intstat);
	}
	return (1);
}

/******************************** Private Inlines *****************************/
static inline void
ahd_assert_atn(struct ahd_softc *ahd)
{
	ahd_outb(ahd, SCSISIGO, ATNO);
}

/*
 * Determine if the current connection has a packetized
 * agreement.  This does not necessarily mean that we
 * are currently in a packetized transfer.  We could
 * just as easily be sending or receiving a message.
 */
static int
ahd_currently_packetized(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;
	int packetized;

	saved_modes = ahd_save_modes(ahd);
	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
		/*
		 * The packetized bit refers to the last
		 * connection, not the current one.  Check
		 * for non-zero LQISTATE instead.
		 */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		packetized = ahd_inb(ahd, LQISTATE) != 0;
	} else {
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
	}
	ahd_restore_modes(ahd, saved_modes);
	return (packetized);
}

static inline int
ahd_set_active_fifo(struct ahd_softc *ahd)
{
	u_int active_fifo;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	switch (active_fifo) {
	case 0:
	case 1:
		ahd_set_modes(ahd, active_fifo, active_fifo);
		return (1);
	default:
		return (0);
	}
}
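
/*
 * Note on the switch above (an inference from the 0/1 cases, stated
 * here as an assumption): the CURRFIFO encoding for the two data
 * FIFOs matches the ahd_mode numbering for those FIFOs, which is why
 * the raw FIFO number can be passed directly to ahd_set_modes().
 */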
static inline void
ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl)
{
	ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL);
}

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static inline void
ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_calc_residual(ahd, scb);
}

static inline void
ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_handle_scb_status(ahd, scb);
	else
		ahd_done(ahd, scb);
}


/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero
 */
static void
ahd_restart(struct ahd_softc *ahd)
{

	ahd_pause(ahd);

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* No more pending messages */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SCSISIGO, 0);		/* De-assert BSY */
	ahd_outb(ahd, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
	ahd_outb(ahd, SEQINTCTL, 0);
	ahd_outb(ahd, LASTPHASE, P_BUSFREE);
	ahd_outb(ahd, SEQ_FLAGS, 0);
	ahd_outb(ahd, SAVED_SCSIID, 0xFF);
	ahd_outb(ahd, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete, and a reset could
	 * occur before the increment, leaving the kernel believing
	 * the command arrived while the sequencer does not.
	 */
	ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);

	/* Always allow reselection */
	ahd_outb(ahd, SCSISEQ1,
		 ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Clear any pending sequencer interrupt.  It is no
	 * longer relevant since we're resetting the Program
	 * Counter.
	 */
	ahd_outb(ahd, CLRINT, CLRSEQINT);

	ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
	ahd_unpause(ahd);
}

static void
ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
{
	ahd_mode_state saved_modes;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
		printk("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
#endif
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, fifo, fifo);
	ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
		ahd_outb(ahd, CCSGCTL, CCSGRESET);
	ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
	ahd_outb(ahd, SG_STATE, 0);
	ahd_restore_modes(ahd, saved_modes);
}

/************************* Input/Output Queues ********************************/
/*
 * Flush any completed commands that are sitting in the command
 * complete queues down on the chip but have yet to be DMA'ed back up.
 */
static void
ahd_flush_qoutfifo(struct ahd_softc *ahd)
{
	struct scb *scb;
	ahd_mode_state saved_modes;
	u_int saved_scbptr;
	u_int ccscbctl;
	u_int scbid;
	u_int next_scbid;

	saved_modes = ahd_save_modes(ahd);

	/*
	 * Flush the good status FIFO for completed packetized commands.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	saved_scbptr = ahd_get_scbptr(ahd);
	while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
		u_int fifo_mode;
		u_int i;

		scbid = ahd_inw(ahd, GSFIFO);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: Warning - GSFIFO SCB %d invalid\n",
			       ahd_name(ahd), scbid);
			continue;
		}
		/*
		 * Determine if this transaction is still active in
		 * any FIFO.  If it is, we must flush that FIFO to
		 * the host before completing the command.
		 */
		fifo_mode = 0;
rescan_fifos:
		for (i = 0; i < 2; i++) {
			/* Toggle to the other mode. */
			fifo_mode ^= 1;
			ahd_set_modes(ahd, fifo_mode, fifo_mode);

			if (ahd_scb_active_in_fifo(ahd, scb) == 0)
				continue;

			ahd_run_data_fifo(ahd, scb);

			/*
			 * Running this FIFO may cause a CFG4DATA for
			 * this same transaction to assert in the other
			 * FIFO or a new snapshot SAVEPTRS interrupt
			 * in this FIFO.  Even running a FIFO may not
			 * clear the transaction if we are still waiting
			 * for data to drain to the host.  We must loop
			 * until the transaction is not active in either
			 * FIFO just to be sure.  Reset our loop counter
			 * so we will visit both FIFOs again before
			 * declaring this transaction finished.  We
			 * also delay a bit so that status has a chance
			 * to change before we look at this FIFO again.
			 */
			ahd_delay(200);
			goto rescan_fifos;
		}
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_set_scbptr(ahd, scbid);
		if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
		 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
		  || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
		      & SG_LIST_NULL) != 0)) {
			u_int comp_head;

			/*
			 * The transfer completed with a residual.
			 * Place this SCB on the complete DMA list
			 * so that we update our in-core copy of the
			 * SCB before completing the command.
			 */
			ahd_outb(ahd, SCB_SCSI_STATUS, 0);
			ahd_outb(ahd, SCB_SGPTR,
				 ahd_inb_scbram(ahd, SCB_SGPTR)
				 | SG_STATUS_VALID);
			ahd_outw(ahd, SCB_TAG, scbid);
			ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL);
			comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
			if (SCBID_IS_NULL(comp_head)) {
				ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid);
				ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
			} else {
				u_int tail;

				tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL);
				ahd_set_scbptr(ahd, tail);
				ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid);
				ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
				ahd_set_scbptr(ahd, scbid);
			}
		} else
			ahd_complete_scb(ahd, scb);
	}
	ahd_set_scbptr(ahd, saved_scbptr);

	/*
	 * Setup for command channel portion of flush.
	 */
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Wait for any in-progress DMA to complete and clear DMA state
	 * if this is for an SCB in the qinfifo.
	 */
	while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {

		if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
			if ((ccscbctl & ARRDONE) != 0)
				break;
		} else if ((ccscbctl & CCSCBDONE) != 0)
			break;
		ahd_delay(200);
	}
	/*
	 * We leave the sequencer to cleanup in the case of DMA's to
	 * update the qoutfifo.  In all other cases (DMA's to the
	 * chip or a push of an SCB from the COMPLETE_DMA_SCB list),
	 * we disable the DMA engine so that the sequencer will not
	 * attempt to handle the DMA completion.
	 */
	if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0)
		ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));

	/*
	 * Complete any SCBs that just finished
	 * being DMA'ed into the qoutfifo.
	 */
	ahd_run_qoutfifo(ahd);

	saved_scbptr = ahd_get_scbptr(ahd);
	/*
	 * Manually update/complete any completed SCBs that are waiting to be
	 * DMA'ed back up to the host.
	 */
	scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
	while (!SCBID_IS_NULL(scbid)) {
		uint8_t *hscb_ptr;
		u_int i;

		ahd_set_scbptr(ahd, scbid);
		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: Warning - DMA-up and complete "
			       "SCB %d invalid\n", ahd_name(ahd), scbid);
			continue;
		}
		hscb_ptr = (uint8_t *)scb->hscb;
		for (i = 0; i < sizeof(struct hardware_scb); i++)
			*hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);

		ahd_complete_scb(ahd, scb);
		scbid = next_scbid;
	}
	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);

	scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
	while (!SCBID_IS_NULL(scbid)) {

		ahd_set_scbptr(ahd, scbid);
		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: Warning - Complete Qfrz SCB %d invalid\n",
			       ahd_name(ahd), scbid);
			continue;
		}

		ahd_complete_scb(ahd, scb);
		scbid = next_scbid;
	}
	ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);

	scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
	while (!SCBID_IS_NULL(scbid)) {

		ahd_set_scbptr(ahd, scbid);
		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: Warning - Complete SCB %d invalid\n",
			       ahd_name(ahd), scbid);
			continue;
		}

		ahd_complete_scb(ahd, scb);
		scbid = next_scbid;
	}
	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);

	/*
	 * Restore state.
	 */
	ahd_set_scbptr(ahd, saved_scbptr);
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags |= AHD_UPDATE_PEND_CMDS;
}
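
/*
 * Flush ordering in ahd_flush_qoutfifo() (a summary, not new
 * behavior): GSFIFO entries drain first, then any in-flight command
 * channel DMA, then the qoutfifo itself, and finally the
 * COMPLETE_DMA_SCB, COMPLETE_ON_QFREEZE and COMPLETE_SCB lists; only
 * once all of these are empty is the saved SCBPTR/mode state restored.
 */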
/*
 * Determine if an SCB for a packetized transaction
 * is active in a FIFO.
 */
static int
ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
{

	/*
	 * The FIFO is only active for our transaction if
	 * the SCBPTR matches the SCB's ID and the firmware
	 * has installed a handler for the FIFO or we have
	 * a pending SAVEPTRS or CFG4DATA interrupt.
	 */
	if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
	 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
	  && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
		return (0);

	return (1);
}

/*
 * Run a data fifo to completion for a transaction we know
 * has completed across the SCSI bus (good status has been
 * received).  We are already set to the correct FIFO mode
 * on entry to this routine.
 *
 * This function attempts to operate exactly as the firmware
 * would when running this FIFO.  Care must be taken to update
 * this routine any time the firmware's FIFO algorithm is
 * changed.
 */
static void
ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
{
	u_int seqintsrc;

	seqintsrc = ahd_inb(ahd, SEQINTSRC);
	if ((seqintsrc & CFG4DATA) != 0) {
		uint32_t datacnt;
		uint32_t sgptr;

		/*
		 * Clear full residual flag.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
		ahd_outb(ahd, SCB_SGPTR, sgptr);

		/*
		 * Load datacnt and address.
		 */
		datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
		if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
			sgptr |= LAST_SEG;
			ahd_outb(ahd, SG_STATE, 0);
		} else
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
		ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
		ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
		ahd_outb(ahd, SG_CACHE_PRE, sgptr);
		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);

		/*
		 * Initialize Residual Fields.
		 */
		ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
		ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);

		/*
		 * Mark the SCB as having a FIFO in use.
		 */
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);

		/*
		 * Install a "fake" handler for this FIFO.
		 */
		ahd_outw(ahd, LONGJMP_ADDR, 0);

		/*
		 * Notify the hardware that we have satisfied
		 * this sequencer interrupt.
		 */
		ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
	} else if ((seqintsrc & SAVEPTRS) != 0) {
		uint32_t sgptr;
		uint32_t resid;

		if ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0) {
			/*
			 * Snapshot Save Pointers.  All that
			 * is necessary to clear the snapshot
			 * is a CLRCHN.
			 */
			goto clrchn;
		}

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
			ahd_outb(ahd, CCSGCTL, 0);
		ahd_outb(ahd, SG_STATE, 0);

		/*
		 * Flush the data FIFO.  Strictly only
		 * necessary for Rev A parts.
		 */
		ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);

		/*
		 * Calculate residual.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
		resid = ahd_inl(ahd, SHCNT);
		resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
		ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
		if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
			/*
			 * Must back up to the correct S/G element.
			 * Typically this just means resetting our
			 * low byte to the offset in the SG_CACHE,
			 * but if we wrapped, we have to correct
			 * the other bytes of the sgptr too.
			 */
			if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
			 && (sgptr & 0x80) == 0)
				sgptr -= 0x100;
			sgptr &= ~0xFF;
			sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
			       & SG_ADDR_MASK;
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
		} else if ((resid & AHD_SG_LEN_MASK) == 0) {
			ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
				 sgptr | SG_LIST_NULL);
		}
		/*
		 * Save Pointers.
		 */
		ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
		ahd_outl(ahd, SCB_DATACNT, resid);
		ahd_outl(ahd, SCB_SGPTR, sgptr);
		ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
		ahd_outb(ahd, SEQIMODE,
			 ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
		/*
		 * If the data is to the SCSI bus, we are
		 * done, otherwise wait for FIFOEMP.
		 */
		if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
			goto clrchn;
	} else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
		uint32_t sgptr;
		uint64_t data_addr;
		uint32_t data_len;
		u_int dfcntrl;

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users.  We won't
		 * be using the DMA engine to load segments.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
			ahd_outb(ahd, CCSGCTL, 0);
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
		}

		/*
		 * Wait for the DMA engine to notice that the
		 * host transfer is enabled and that there is
		 * space in the S/G FIFO for new segments before
		 * loading more segments.
		 */
		if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
		 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {

			/*
			 * Determine the offset of the next S/G
			 * element to load.
			 */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			} else {
				struct ahd_dma_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
				data_addr <<= 8;
				data_addr |= sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			}

			/*
			 * Update residual information.
			 */
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24);
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);

			/*
			 * Load the S/G.
			 */
			if (data_len & AHD_DMA_LAST_SEG) {
				sgptr |= LAST_SEG;
				ahd_outb(ahd, SG_STATE, 0);
			}
			ahd_outq(ahd, HADDR, data_addr);
			ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
			ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);

			/*
			 * Advertise the segment to the hardware.
			 */
			dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
			if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
				/*
				 * Use SCSIENWRDIS so that SCSIEN
				 * is never modified by this
				 * operation.
				 */
				dfcntrl |= SCSIENWRDIS;
			}
			ahd_outb(ahd, DFCNTRL, dfcntrl);
		}
	} else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {

		/*
		 * Transfer completed to the end of SG list
		 * and has flushed to the host.
		 */
		ahd_outb(ahd, SCB_SGPTR,
			 ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
		goto clrchn;
	} else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
clrchn:
		/*
		 * Clear any handler for this FIFO, decrement
		 * the FIFO use count for the SCB, and release
		 * the FIFO.
		 */
		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
		ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
	}
}
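
/*
 * Invariant worth noting (a summary of the exit paths above): every
 * path that releases the FIFO funnels through the clrchn label, so
 * the LONGJMP handler is invalidated and SCB_FIFO_USE_COUNT is
 * decremented exactly once per CLRCHN issued.
 */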
/*
 * Look for entries in the QoutFIFO that have completed.
 * The valid_tag completion field indicates the validity
 * of the entry - the valid value toggles each time through
 * the queue.  We use the sg_status field in the completion
 * entry to avoid referencing the hscb if the completion
 * occurred with no errors and no residual.  sg_status is
 * a copy of the first byte (little endian) of the sgptr
 * hscb field.
 */
static void
ahd_run_qoutfifo(struct ahd_softc *ahd)
{
	struct ahd_completion *completion;
	struct scb *scb;
	u_int scb_index;

	if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
		panic("ahd_run_qoutfifo recursion");
	ahd->flags |= AHD_RUNNING_QOUTFIFO;
	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
	for (;;) {
		completion = &ahd->qoutfifo[ahd->qoutfifonext];

		if (completion->valid_tag != ahd->qoutfifonext_valid_tag)
			break;

		scb_index = ahd_le16toh(completion->tag);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			printk("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahd_name(ahd), scb_index,
			       ahd->qoutfifonext);
			ahd_dump_card_state(ahd);
		} else if ((completion->sg_status & SG_STATUS_VALID) != 0) {
			ahd_handle_scb_status(ahd, scb);
		} else {
			ahd_done(ahd, scb);
		}

		ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
		if (ahd->qoutfifonext == 0)
			ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID;
	}
	ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
}
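
/*
 * Example of the valid_tag handshake (restating the scheme above):
 * during one pass through the ring the host expects a particular
 * valid_tag value in each entry; when qoutfifonext wraps back to 0
 * the expected value is XOR-toggled with QOUTFIFO_ENTRY_VALID, so
 * entries left over from the previous pass are never mistaken for
 * fresh completions.
 */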
1779 */ 1780 seqintcode = ahd_inb(ahd, SEQINTCODE); 1781 ahd_outb(ahd, CLRINT, CLRSEQINT); 1782 if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { 1783 /* 1784 * Unpause the sequencer and let it clear 1785 * SEQINT by writing NO_SEQINT to it. This 1786 * will cause the sequencer to be paused again, 1787 * which is the expected state of this routine. 1788 */ 1789 ahd_unpause(ahd); 1790 while (!ahd_is_paused(ahd)) 1791 ; 1792 ahd_outb(ahd, CLRINT, CLRSEQINT); 1793 } 1794 ahd_update_modes(ahd); 1795 #ifdef AHD_DEBUG 1796 if ((ahd_debug & AHD_SHOW_MISC) != 0) 1797 printk("%s: Handle Seqint Called for code %d\n", 1798 ahd_name(ahd), seqintcode); 1799 #endif 1800 switch (seqintcode) { 1801 case ENTERING_NONPACK: 1802 { 1803 struct scb *scb; 1804 u_int scbid; 1805 1806 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 1807 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 1808 scbid = ahd_get_scbptr(ahd); 1809 scb = ahd_lookup_scb(ahd, scbid); 1810 if (scb == NULL) { 1811 /* 1812 * Somehow need to know if this 1813 * is from a selection or reselection. 1814 * From that, we can determine target 1815 * ID so we at least have an I_T nexus. 1816 */ 1817 } else { 1818 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); 1819 ahd_outb(ahd, SAVED_LUN, scb->hscb->lun); 1820 ahd_outb(ahd, SEQ_FLAGS, 0x0); 1821 } 1822 if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0 1823 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { 1824 /* 1825 * Phase change after read stream with 1826 * CRC error with P0 asserted on last 1827 * packet. 1828 */ 1829 #ifdef AHD_DEBUG 1830 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) 1831 printk("%s: Assuming LQIPHASE_NLQ with " 1832 "P0 assertion\n", ahd_name(ahd)); 1833 #endif 1834 } 1835 #ifdef AHD_DEBUG 1836 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) 1837 printk("%s: Entering NONPACK\n", ahd_name(ahd)); 1838 #endif 1839 break; 1840 } 1841 case INVALID_SEQINT: 1842 printk("%s: Invalid Sequencer interrupt occurred, " 1843 "resetting channel.\n", 1844 ahd_name(ahd)); 1845 #ifdef AHD_DEBUG 1846 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) 1847 ahd_dump_card_state(ahd); 1848 #endif 1849 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 1850 break; 1851 case STATUS_OVERRUN: 1852 { 1853 struct scb *scb; 1854 u_int scbid; 1855 1856 scbid = ahd_get_scbptr(ahd); 1857 scb = ahd_lookup_scb(ahd, scbid); 1858 if (scb != NULL) 1859 ahd_print_path(ahd, scb); 1860 else 1861 printk("%s: ", ahd_name(ahd)); 1862 printk("SCB %d Packetized Status Overrun", scbid); 1863 ahd_dump_card_state(ahd); 1864 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 1865 break; 1866 } 1867 case CFG4ISTAT_INTR: 1868 { 1869 struct scb *scb; 1870 u_int scbid; 1871 1872 scbid = ahd_get_scbptr(ahd); 1873 scb = ahd_lookup_scb(ahd, scbid); 1874 if (scb == NULL) { 1875 ahd_dump_card_state(ahd); 1876 printk("CFG4ISTAT: Free SCB %d referenced", scbid); 1877 panic("For safety"); 1878 } 1879 ahd_outq(ahd, HADDR, scb->sense_busaddr); 1880 ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE); 1881 ahd_outb(ahd, HCNT + 2, 0); 1882 ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG); 1883 ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); 1884 break; 1885 } 1886 case ILLEGAL_PHASE: 1887 { 1888 u_int bus_phase; 1889 1890 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 1891 printk("%s: ILLEGAL_PHASE 0x%x\n", 1892 ahd_name(ahd), bus_phase); 1893 1894 switch (bus_phase) { 1895 case P_DATAOUT: 1896 case P_DATAIN: 1897 case P_DATAOUT_DT: 1898 case P_DATAIN_DT: 1899 case P_MESGOUT: 1900 case P_STATUS: 1901 case P_MESGIN: 1902 ahd_reset_channel(ahd, 'A', /*Initiate 
Reset*/TRUE); 1903 printk("%s: Issued Bus Reset.\n", ahd_name(ahd)); 1904 break; 1905 case P_COMMAND: 1906 { 1907 struct ahd_devinfo devinfo; 1908 struct scb *scb; 1909 struct ahd_initiator_tinfo *targ_info; 1910 struct ahd_tmode_tstate *tstate; 1911 struct ahd_transinfo *tinfo; 1912 u_int scbid; 1913 1914 /* 1915 * If a target takes us into the command phase 1916 * assume that it has been externally reset and 1917 * has thus lost our previous packetized negotiation 1918 * agreement. Since we have not sent an identify 1919 * message and may not have fully qualified the 1920 * connection, we change our command to TUR, assert 1921 * ATN and ABORT the task when we go to message in 1922 * phase. The OSM will see the REQUEUE_REQUEST 1923 * status and retry the command. 1924 */ 1925 scbid = ahd_get_scbptr(ahd); 1926 scb = ahd_lookup_scb(ahd, scbid); 1927 if (scb == NULL) { 1928 printk("Invalid phase with no valid SCB. " 1929 "Resetting bus.\n"); 1930 ahd_reset_channel(ahd, 'A', 1931 /*Initiate Reset*/TRUE); 1932 break; 1933 } 1934 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), 1935 SCB_GET_TARGET(ahd, scb), 1936 SCB_GET_LUN(scb), 1937 SCB_GET_CHANNEL(ahd, scb), 1938 ROLE_INITIATOR); 1939 targ_info = ahd_fetch_transinfo(ahd, 1940 devinfo.channel, 1941 devinfo.our_scsiid, 1942 devinfo.target, 1943 &tstate); 1944 tinfo = &targ_info->curr; 1945 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 1946 AHD_TRANS_ACTIVE, /*paused*/TRUE); 1947 ahd_set_syncrate(ahd, &devinfo, /*period*/0, 1948 /*offset*/0, /*ppr_options*/0, 1949 AHD_TRANS_ACTIVE, /*paused*/TRUE); 1950 /* Hand-craft TUR command */ 1951 ahd_outb(ahd, SCB_CDB_STORE, 0); 1952 ahd_outb(ahd, SCB_CDB_STORE+1, 0); 1953 ahd_outb(ahd, SCB_CDB_STORE+2, 0); 1954 ahd_outb(ahd, SCB_CDB_STORE+3, 0); 1955 ahd_outb(ahd, SCB_CDB_STORE+4, 0); 1956 ahd_outb(ahd, SCB_CDB_STORE+5, 0); 1957 ahd_outb(ahd, SCB_CDB_LEN, 6); 1958 scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE); 1959 scb->hscb->control |= MK_MESSAGE; 1960 ahd_outb(ahd, SCB_CONTROL, scb->hscb->control); 1961 ahd_outb(ahd, MSG_OUT, HOST_MSG); 1962 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); 1963 /* 1964 * The lun is 0, regardless of the SCB's lun 1965 * as we have not sent an identify message. 1966 */ 1967 ahd_outb(ahd, SAVED_LUN, 0); 1968 ahd_outb(ahd, SEQ_FLAGS, 0); 1969 ahd_assert_atn(ahd); 1970 scb->flags &= ~SCB_PACKETIZED; 1971 scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET; 1972 ahd_freeze_devq(ahd, scb); 1973 ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); 1974 ahd_freeze_scb(scb); 1975 1976 /* Notify XPT */ 1977 ahd_send_async(ahd, devinfo.channel, devinfo.target, 1978 CAM_LUN_WILDCARD, AC_SENT_BDR); 1979 1980 /* 1981 * Allow the sequencer to continue with 1982 * non-pack processing. 
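 * Clearing CLRLQOPHACHGINPKT below acknowledges the phase change the LQO manager saw inside the packet; controllers with the AHD_CLRLQO_AUTOCLR_BUG appear to need the additional zero write to CLRLQOINT1 for the clear to take effect.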
1983 */ 1984 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 1985 ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT); 1986 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { 1987 ahd_outb(ahd, CLRLQOINT1, 0); 1988 } 1989 #ifdef AHD_DEBUG 1990 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 1991 ahd_print_path(ahd, scb); 1992 printk("Unexpected command phase from " 1993 "packetized target\n"); 1994 } 1995 #endif 1996 break; 1997 } 1998 } 1999 break; 2000 } 2001 case CFG4OVERRUN: 2002 { 2003 struct scb *scb; 2004 u_int scb_index; 2005 2006 #ifdef AHD_DEBUG 2007 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2008 printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd), 2009 ahd_inb(ahd, MODE_PTR)); 2010 } 2011 #endif 2012 scb_index = ahd_get_scbptr(ahd); 2013 scb = ahd_lookup_scb(ahd, scb_index); 2014 if (scb == NULL) { 2015 /* 2016 * Attempt to transfer to an SCB that is 2017 * not outstanding. 2018 */ 2019 ahd_assert_atn(ahd); 2020 ahd_outb(ahd, MSG_OUT, HOST_MSG); 2021 ahd->msgout_buf[0] = MSG_ABORT_TASK; 2022 ahd->msgout_len = 1; 2023 ahd->msgout_index = 0; 2024 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2025 /* 2026 * Clear status received flag to prevent any 2027 * attempt to complete this bogus SCB. 2028 */ 2029 ahd_outb(ahd, SCB_CONTROL, 2030 ahd_inb_scbram(ahd, SCB_CONTROL) 2031 & ~STATUS_RCVD); 2032 } 2033 break; 2034 } 2035 case DUMP_CARD_STATE: 2036 { 2037 ahd_dump_card_state(ahd); 2038 break; 2039 } 2040 case PDATA_REINIT: 2041 { 2042 #ifdef AHD_DEBUG 2043 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2044 printk("%s: PDATA_REINIT - DFCNTRL = 0x%x " 2045 "SG_CACHE_SHADOW = 0x%x\n", 2046 ahd_name(ahd), ahd_inb(ahd, DFCNTRL), 2047 ahd_inb(ahd, SG_CACHE_SHADOW)); 2048 } 2049 #endif 2050 ahd_reinitialize_dataptrs(ahd); 2051 break; 2052 } 2053 case HOST_MSG_LOOP: 2054 { 2055 struct ahd_devinfo devinfo; 2056 2057 /* 2058 * The sequencer has encountered a message phase 2059 * that requires host assistance for completion. 2060 * While handling the message phase(s), we will be 2061 * notified by the sequencer after each byte is 2062 * transferred so we can track bus phase changes. 2063 * 2064 * If this is the first time we've seen a HOST_MSG_LOOP 2065 * interrupt, initialize the state of the host message 2066 * loop. 2067 */ 2068 ahd_fetch_devinfo(ahd, &devinfo); 2069 if (ahd->msg_type == MSG_TYPE_NONE) { 2070 struct scb *scb; 2071 u_int scb_index; 2072 u_int bus_phase; 2073 2074 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 2075 if (bus_phase != P_MESGIN 2076 && bus_phase != P_MESGOUT) { 2077 printk("ahd_intr: HOST_MSG_LOOP bad " 2078 "phase 0x%x\n", bus_phase); 2079 /* 2080 * Probably transitioned to bus free before 2081 * we got here. Just punt the message. 
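 * The card state dump below is purely diagnostic; clearing INTSTAT and restarting the sequencer abandons the half-started message exchange instead of running the host message loop in an unknown phase.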
2082 */ 2083 ahd_dump_card_state(ahd); 2084 ahd_clear_intstat(ahd); 2085 ahd_restart(ahd); 2086 return; 2087 } 2088 2089 scb_index = ahd_get_scbptr(ahd); 2090 scb = ahd_lookup_scb(ahd, scb_index); 2091 if (devinfo.role == ROLE_INITIATOR) { 2092 if (bus_phase == P_MESGOUT) 2093 ahd_setup_initiator_msgout(ahd, 2094 &devinfo, 2095 scb); 2096 else { 2097 ahd->msg_type = 2098 MSG_TYPE_INITIATOR_MSGIN; 2099 ahd->msgin_index = 0; 2100 } 2101 } 2102 #ifdef AHD_TARGET_MODE 2103 else { 2104 if (bus_phase == P_MESGOUT) { 2105 ahd->msg_type = 2106 MSG_TYPE_TARGET_MSGOUT; 2107 ahd->msgin_index = 0; 2108 } 2109 else 2110 ahd_setup_target_msgin(ahd, 2111 &devinfo, 2112 scb); 2113 } 2114 #endif 2115 } 2116 2117 ahd_handle_message_phase(ahd); 2118 break; 2119 } 2120 case NO_MATCH: 2121 { 2122 /* Ensure we don't leave the selection hardware on */ 2123 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 2124 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 2125 2126 printk("%s:%c:%d: no active SCB for reconnecting " 2127 "target - issuing BUS DEVICE RESET\n", 2128 ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4); 2129 printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 2130 "REG0 == 0x%x ACCUM = 0x%x\n", 2131 ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN), 2132 ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM)); 2133 printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 2134 "SINDEX == 0x%x\n", 2135 ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd), 2136 ahd_find_busy_tcl(ahd, 2137 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID), 2138 ahd_inb(ahd, SAVED_LUN))), 2139 ahd_inw(ahd, SINDEX)); 2140 printk("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 2141 "SCB_CONTROL == 0x%x\n", 2142 ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID), 2143 ahd_inb_scbram(ahd, SCB_LUN), 2144 ahd_inb_scbram(ahd, SCB_CONTROL)); 2145 printk("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n", 2146 ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI)); 2147 printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0)); 2148 printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0)); 2149 ahd_dump_card_state(ahd); 2150 ahd->msgout_buf[0] = MSG_BUS_DEV_RESET; 2151 ahd->msgout_len = 1; 2152 ahd->msgout_index = 0; 2153 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2154 ahd_outb(ahd, MSG_OUT, HOST_MSG); 2155 ahd_assert_atn(ahd); 2156 break; 2157 } 2158 case PROTO_VIOLATION: 2159 { 2160 ahd_handle_proto_violation(ahd); 2161 break; 2162 } 2163 case IGN_WIDE_RES: 2164 { 2165 struct ahd_devinfo devinfo; 2166 2167 ahd_fetch_devinfo(ahd, &devinfo); 2168 ahd_handle_ign_wide_residue(ahd, &devinfo); 2169 break; 2170 } 2171 case BAD_PHASE: 2172 { 2173 u_int lastphase; 2174 2175 lastphase = ahd_inb(ahd, LASTPHASE); 2176 printk("%s:%c:%d: unknown scsi bus phase. LASTPHASE = 0x%x, " 2177 "SCSISIGI = 0x%x. Attempting to continue\n", 2178 ahd_name(ahd), 'A', 2179 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), 2180 lastphase, ahd_inb(ahd, SCSISIGI)); 2181 break; 2182 } 2183 case MISSED_BUSFREE: 2184 { 2185 u_int lastphase; 2186 2187 lastphase = ahd_inb(ahd, LASTPHASE); 2188 printk("%s:%c:%d: Missed busfree. " 2189 "Lastphase = 0x%x, Curphase = 0x%x\n", 2190 ahd_name(ahd), 'A', 2191 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), 2192 lastphase, ahd_inb(ahd, SCSISIGI)); 2193 ahd_restart(ahd); 2194 return; 2195 } 2196 case DATA_OVERRUN: 2197 { 2198 /* 2199 * When the sequencer detects an overrun, it 2200 * places the controller in "BITBUCKET" mode 2201 * and allows the target to complete its transfer.
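 * (In BITBUCKET mode incoming data is ACKed and discarded rather than DMA'd to host memory.)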
2202 * Unfortunately, none of the counters get updated 2203 * when the controller is in this mode, so we have 2204 * no way of knowing how large the overrun was. 2205 */ 2206 struct scb *scb; 2207 u_int scbindex; 2208 #ifdef AHD_DEBUG 2209 u_int lastphase; 2210 #endif 2211 2212 scbindex = ahd_get_scbptr(ahd); 2213 scb = ahd_lookup_scb(ahd, scbindex); 2214 #ifdef AHD_DEBUG 2215 lastphase = ahd_inb(ahd, LASTPHASE); 2216 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2217 ahd_print_path(ahd, scb); 2218 printk("data overrun detected %s. Tag == 0x%x.\n", 2219 ahd_lookup_phase_entry(lastphase)->phasemsg, 2220 SCB_GET_TAG(scb)); 2221 ahd_print_path(ahd, scb); 2222 printk("%s seen Data Phase. Length = %ld. " 2223 "NumSGs = %d.\n", 2224 ahd_inb(ahd, SEQ_FLAGS) & DPHASE 2225 ? "Have" : "Haven't", 2226 ahd_get_transfer_length(scb), scb->sg_count); 2227 ahd_dump_sglist(scb); 2228 } 2229 #endif 2230 2231 /* 2232 * Set this and it will take effect when the 2233 * target does a command complete. 2234 */ 2235 ahd_freeze_devq(ahd, scb); 2236 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); 2237 ahd_freeze_scb(scb); 2238 break; 2239 } 2240 case MKMSG_FAILED: 2241 { 2242 struct ahd_devinfo devinfo; 2243 struct scb *scb; 2244 u_int scbid; 2245 2246 ahd_fetch_devinfo(ahd, &devinfo); 2247 printk("%s:%c:%d:%d: Attempt to issue message failed\n", 2248 ahd_name(ahd), devinfo.channel, devinfo.target, 2249 devinfo.lun); 2250 scbid = ahd_get_scbptr(ahd); 2251 scb = ahd_lookup_scb(ahd, scbid); 2252 if (scb != NULL 2253 && (scb->flags & SCB_RECOVERY_SCB) != 0) 2254 /* 2255 * Ensure that we didn't put a second instance of this 2256 * SCB into the QINFIFO. 2257 */ 2258 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), 2259 SCB_GET_CHANNEL(ahd, scb), 2260 SCB_GET_LUN(scb), SCB_GET_TAG(scb), 2261 ROLE_INITIATOR, /*status*/0, 2262 SEARCH_REMOVE); 2263 ahd_outb(ahd, SCB_CONTROL, 2264 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); 2265 break; 2266 } 2267 case TASKMGMT_FUNC_COMPLETE: 2268 { 2269 u_int scbid; 2270 struct scb *scb; 2271 2272 scbid = ahd_get_scbptr(ahd); 2273 scb = ahd_lookup_scb(ahd, scbid); 2274 if (scb != NULL) { 2275 u_int lun; 2276 u_int tag; 2277 cam_status error; 2278 2279 ahd_print_path(ahd, scb); 2280 printk("Task Management Func 0x%x Complete\n", 2281 scb->hscb->task_management); 2282 lun = CAM_LUN_WILDCARD; 2283 tag = SCB_LIST_NULL; 2284 2285 switch (scb->hscb->task_management) { 2286 case SIU_TASKMGMT_ABORT_TASK: 2287 tag = SCB_GET_TAG(scb); 2288 /* fall through */ 2289 case SIU_TASKMGMT_ABORT_TASK_SET: 2290 case SIU_TASKMGMT_CLEAR_TASK_SET: 2291 lun = scb->hscb->lun; 2292 error = CAM_REQ_ABORTED; 2293 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 2294 'A', lun, tag, ROLE_INITIATOR, 2295 error); 2296 break; 2297 case SIU_TASKMGMT_LUN_RESET: 2298 lun = scb->hscb->lun; 2299 /* fall through */ 2300 case SIU_TASKMGMT_TARGET_RESET: 2301 { 2302 struct ahd_devinfo devinfo; 2303 2304 ahd_scb_devinfo(ahd, &devinfo, scb); 2305 error = CAM_BDR_SENT; 2306 ahd_handle_devreset(ahd, &devinfo, lun, 2307 CAM_BDR_SENT, 2308 lun != CAM_LUN_WILDCARD 2309 ? "Lun Reset" 2310 : "Target Reset", 2311 /*verbose_level*/0); 2312 break; 2313 } 2314 default: 2315 panic("Unexpected TaskMgmt Func\n"); 2316 break; 2317 } 2318 } 2319 break; 2320 } 2321 case TASKMGMT_CMD_CMPLT_OKAY: 2322 { 2323 u_int scbid; 2324 struct scb *scb; 2325 2326 /* 2327 * An ABORT TASK TMF failed to be delivered before 2328 * the targeted command completed normally. 
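 * The command itself has already completed, so all that remains is to cancel the now stale TMF in the SCB and remove any duplicate entry from the QINFIFO, as done below.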
2329 */ 2330 scbid = ahd_get_scbptr(ahd); 2331 scb = ahd_lookup_scb(ahd, scbid); 2332 if (scb != NULL) { 2333 /* 2334 * Remove the second instance of this SCB from 2335 * the QINFIFO if it is still there. 2336 */ 2337 ahd_print_path(ahd, scb); 2338 printk("SCB completes before TMF\n"); 2339 /* 2340 * Handle losing the race. Wait until any 2341 * current selection completes. We will then 2342 * set the TMF back to zero in this SCB so that 2343 * the sequencer doesn't bother to issue another 2344 * sequencer interrupt for its completion. 2345 */ 2346 while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 2347 && (ahd_inb(ahd, SSTAT0) & SELDO) == 0 2348 && (ahd_inb(ahd, SSTAT1) & SELTO) == 0) 2349 ; 2350 ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0); 2351 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), 2352 SCB_GET_CHANNEL(ahd, scb), 2353 SCB_GET_LUN(scb), SCB_GET_TAG(scb), 2354 ROLE_INITIATOR, /*status*/0, 2355 SEARCH_REMOVE); 2356 } 2357 break; 2358 } 2359 case TRACEPOINT0: 2360 case TRACEPOINT1: 2361 case TRACEPOINT2: 2362 case TRACEPOINT3: 2363 printk("%s: Tracepoint %d\n", ahd_name(ahd), 2364 seqintcode - TRACEPOINT0); 2365 break; 2366 case NO_SEQINT: 2367 break; 2368 case SAW_HWERR: 2369 ahd_handle_hwerrint(ahd); 2370 break; 2371 default: 2372 printk("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd), 2373 seqintcode); 2374 break; 2375 } 2376 /* 2377 * The sequencer is paused immediately on 2378 * a SEQINT, so we should restart it when 2379 * we're done. 2380 */ 2381 ahd_unpause(ahd); 2382 } 2383 2384 static void 2385 ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) 2386 { 2387 struct scb *scb; 2388 u_int status0; 2389 u_int status3; 2390 u_int status; 2391 u_int lqistat1; 2392 u_int lqostat0; 2393 u_int scbid; 2394 u_int busfreetime; 2395 2396 ahd_update_modes(ahd); 2397 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2398 2399 status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR); 2400 status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO); 2401 status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 2402 lqistat1 = ahd_inb(ahd, LQISTAT1); 2403 lqostat0 = ahd_inb(ahd, LQOSTAT0); 2404 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; 2405 2406 /* 2407 * Ignore external resets after a bus reset. 2408 */ 2409 if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE)) { 2410 ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); 2411 return; 2412 } 2413 2414 /* 2415 * Clear bus reset flag 2416 */ 2417 ahd->flags &= ~AHD_BUS_RESET_ACTIVE; 2418 2419 if ((status0 & (SELDI|SELDO)) != 0) { 2420 u_int simode0; 2421 2422 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 2423 simode0 = ahd_inb(ahd, SIMODE0); 2424 status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO); 2425 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2426 } 2427 scbid = ahd_get_scbptr(ahd); 2428 scb = ahd_lookup_scb(ahd, scbid); 2429 if (scb != NULL 2430 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 2431 scb = NULL; 2432 2433 if ((status0 & IOERR) != 0) { 2434 u_int now_lvd; 2435 2436 now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40; 2437 printk("%s: Transceiver State Has Changed to %s mode\n", 2438 ahd_name(ahd), now_lvd ? "LVD" : "SE"); 2439 ahd_outb(ahd, CLRSINT0, CLRIOERR); 2440 /* 2441 * A change in I/O mode is equivalent to a bus reset. 2442 */ 2443 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2444 ahd_pause(ahd); 2445 ahd_setup_iocell_workaround(ahd); 2446 ahd_unpause(ahd); 2447 } else if ((status0 & OVERRUN) != 0) { 2448 2449 printk("%s: SCSI offset overrun detected. 
Resetting bus.\n", 2450 ahd_name(ahd)); 2451 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2452 } else if ((status & SCSIRSTI) != 0) { 2453 2454 printk("%s: Someone reset channel A\n", ahd_name(ahd)); 2455 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE); 2456 } else if ((status & SCSIPERR) != 0) { 2457 2458 /* Make sure the sequencer is in a safe location. */ 2459 ahd_clear_critical_section(ahd); 2460 2461 ahd_handle_transmission_error(ahd); 2462 } else if (lqostat0 != 0) { 2463 2464 printk("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0); 2465 ahd_outb(ahd, CLRLQOINT0, lqostat0); 2466 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) 2467 ahd_outb(ahd, CLRLQOINT1, 0); 2468 } else if ((status & SELTO) != 0) { 2469 /* Stop the selection */ 2470 ahd_outb(ahd, SCSISEQ0, 0); 2471 2472 /* Make sure the sequencer is in a safe location. */ 2473 ahd_clear_critical_section(ahd); 2474 2475 /* No more pending messages */ 2476 ahd_clear_msg_state(ahd); 2477 2478 /* Clear interrupt state */ 2479 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 2480 2481 /* 2482 * Although the driver does not care about the 2483 * 'Selection in Progress' status bit, the busy 2484 * LED does. SELINGO is only cleared by a successful 2485 * selection, so we must manually clear it to ensure 2486 * the LED turns off just in case no future successful 2487 * selections occur (e.g. no devices on the bus). 2488 */ 2489 ahd_outb(ahd, CLRSINT0, CLRSELINGO); 2490 2491 scbid = ahd_inw(ahd, WAITING_TID_HEAD); 2492 scb = ahd_lookup_scb(ahd, scbid); 2493 if (scb == NULL) { 2494 printk("%s: ahd_intr - referenced scb not " 2495 "valid during SELTO scb(0x%x)\n", 2496 ahd_name(ahd), scbid); 2497 ahd_dump_card_state(ahd); 2498 } else { 2499 struct ahd_devinfo devinfo; 2500 #ifdef AHD_DEBUG 2501 if ((ahd_debug & AHD_SHOW_SELTO) != 0) { 2502 ahd_print_path(ahd, scb); 2503 printk("Saw Selection Timeout for SCB 0x%x\n", 2504 scbid); 2505 } 2506 #endif 2507 ahd_scb_devinfo(ahd, &devinfo, scb); 2508 ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT); 2509 ahd_freeze_devq(ahd, scb); 2510 2511 /* 2512 * Cancel any pending transactions on the device 2513 * now that it seems to be missing. This will 2514 * also revert us to async/narrow transfers until 2515 * we can renegotiate with the device. 2516 */ 2517 ahd_handle_devreset(ahd, &devinfo, 2518 CAM_LUN_WILDCARD, 2519 CAM_SEL_TIMEOUT, 2520 "Selection Timeout", 2521 /*verbose_level*/1); 2522 } 2523 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2524 ahd_iocell_first_selection(ahd); 2525 ahd_unpause(ahd); 2526 } else if ((status0 & (SELDI|SELDO)) != 0) { 2527 2528 ahd_iocell_first_selection(ahd); 2529 ahd_unpause(ahd); 2530 } else if (status3 != 0) { 2531 printk("%s: SCSI Cell parity error SSTAT3 == 0x%x\n", 2532 ahd_name(ahd), status3); 2533 ahd_outb(ahd, CLRSINT3, status3); 2534 } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) { 2535 2536 /* Make sure the sequencer is in a safe location. */ 2537 ahd_clear_critical_section(ahd); 2538 2539 ahd_handle_lqiphase_error(ahd, lqistat1); 2540 } else if ((lqistat1 & LQICRCI_NLQ) != 0) { 2541 /* 2542 * This status can be delayed during some 2543 * streaming operations. The SCSIPHASE 2544 * handler has already dealt with this case 2545 * so just clear the error.
2546 */ 2547 ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ); 2548 } else if ((status & BUSFREE) != 0 2549 || (lqistat1 & LQOBUSFREE) != 0) { 2550 u_int lqostat1; 2551 int restart; 2552 int clear_fifo; 2553 int packetized; 2554 u_int mode; 2555 2556 /* 2557 * Clear our selection hardware as soon as possible. 2558 * We may have an entry in the waiting Q for this target, 2559 * that is affected by this busfree and we don't want to 2560 * go about selecting the target while we handle the event. 2561 */ 2562 ahd_outb(ahd, SCSISEQ0, 0); 2563 2564 /* Make sure the sequencer is in a safe location. */ 2565 ahd_clear_critical_section(ahd); 2566 2567 /* 2568 * Determine what we were up to at the time of 2569 * the busfree. 2570 */ 2571 mode = AHD_MODE_SCSI; 2572 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; 2573 lqostat1 = ahd_inb(ahd, LQOSTAT1); 2574 switch (busfreetime) { 2575 case BUSFREE_DFF0: 2576 case BUSFREE_DFF1: 2577 { 2578 mode = busfreetime == BUSFREE_DFF0 2579 ? AHD_MODE_DFF0 : AHD_MODE_DFF1; 2580 ahd_set_modes(ahd, mode, mode); 2581 scbid = ahd_get_scbptr(ahd); 2582 scb = ahd_lookup_scb(ahd, scbid); 2583 if (scb == NULL) { 2584 printk("%s: Invalid SCB %d in DFF%d " 2585 "during unexpected busfree\n", 2586 ahd_name(ahd), scbid, mode); 2587 packetized = 0; 2588 } else 2589 packetized = (scb->flags & SCB_PACKETIZED) != 0; 2590 clear_fifo = 1; 2591 break; 2592 } 2593 case BUSFREE_LQO: 2594 clear_fifo = 0; 2595 packetized = 1; 2596 break; 2597 default: 2598 clear_fifo = 0; 2599 packetized = (lqostat1 & LQOBUSFREE) != 0; 2600 if (!packetized 2601 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE 2602 && (ahd_inb(ahd, SSTAT0) & SELDI) == 0 2603 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0 2604 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0)) 2605 /* 2606 * Assume packetized if we are not 2607 * on the bus in a non-packetized 2608 * capacity and any pending selection 2609 * was a packetized selection. 2610 */ 2611 packetized = 1; 2612 break; 2613 } 2614 2615 #ifdef AHD_DEBUG 2616 if ((ahd_debug & AHD_SHOW_MISC) != 0) 2617 printk("Saw Busfree. Busfreetime = 0x%x.\n", 2618 busfreetime); 2619 #endif 2620 /* 2621 * Busfrees that occur in non-packetized phases are 2622 * handled by the nonpkt_busfree handler. 2623 */ 2624 if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) { 2625 restart = ahd_handle_pkt_busfree(ahd, busfreetime); 2626 } else { 2627 packetized = 0; 2628 restart = ahd_handle_nonpkt_busfree(ahd); 2629 } 2630 /* 2631 * Clear the busfree interrupt status. The setting of 2632 * the interrupt is a pulse, so in a perfect world, we 2633 * would not need to muck with the ENBUSFREE logic. This 2634 * would ensure that if the bus moves on to another 2635 * connection, busfree protection is still in force. If 2636 * BUSFREEREV is broken, however, we must manually clear 2637 * the ENBUSFREE if the busfree occurred during a non-pack 2638 * connection so that we don't get false positives during 2639 * future, packetized, connections. 2640 */ 2641 ahd_outb(ahd, CLRSINT1, CLRBUSFREE); 2642 if (packetized == 0 2643 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0) 2644 ahd_outb(ahd, SIMODE1, 2645 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE); 2646 2647 if (clear_fifo) 2648 ahd_clear_fifo(ahd, mode); 2649 2650 ahd_clear_msg_state(ahd); 2651 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2652 if (restart) { 2653 ahd_restart(ahd); 2654 } else { 2655 ahd_unpause(ahd); 2656 } 2657 } else { 2658 printk("%s: Missing case in ahd_handle_scsiint. 
status = %x\n", 2659 ahd_name(ahd), status); 2660 ahd_dump_card_state(ahd); 2661 ahd_clear_intstat(ahd); 2662 ahd_unpause(ahd); 2663 } 2664 } 2665 2666 static void 2667 ahd_handle_transmission_error(struct ahd_softc *ahd) 2668 { 2669 struct scb *scb; 2670 u_int scbid; 2671 u_int lqistat1; 2672 u_int lqistat2; 2673 u_int msg_out; 2674 u_int curphase; 2675 u_int lastphase; 2676 u_int perrdiag; 2677 u_int cur_col; 2678 int silent; 2679 2680 scb = NULL; 2681 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2682 lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ); 2683 lqistat2 = ahd_inb(ahd, LQISTAT2); 2684 if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0 2685 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) { 2686 u_int lqistate; 2687 2688 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 2689 lqistate = ahd_inb(ahd, LQISTATE); 2690 if ((lqistate >= 0x1E && lqistate <= 0x24) 2691 || (lqistate == 0x29)) { 2692 #ifdef AHD_DEBUG 2693 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2694 printk("%s: NLQCRC found via LQISTATE\n", 2695 ahd_name(ahd)); 2696 } 2697 #endif 2698 lqistat1 |= LQICRCI_NLQ; 2699 } 2700 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2701 } 2702 2703 ahd_outb(ahd, CLRLQIINT1, lqistat1); 2704 lastphase = ahd_inb(ahd, LASTPHASE); 2705 curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 2706 perrdiag = ahd_inb(ahd, PERRDIAG); 2707 msg_out = MSG_INITIATOR_DET_ERR; 2708 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR); 2709 2710 /* 2711 * Try to find the SCB associated with this error. 2712 */ 2713 silent = FALSE; 2714 if (lqistat1 == 0 2715 || (lqistat1 & LQICRCI_NLQ) != 0) { 2716 if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0) 2717 ahd_set_active_fifo(ahd); 2718 scbid = ahd_get_scbptr(ahd); 2719 scb = ahd_lookup_scb(ahd, scbid); 2720 if (scb != NULL && SCB_IS_SILENT(scb)) 2721 silent = TRUE; 2722 } 2723 2724 cur_col = 0; 2725 if (silent == FALSE) { 2726 printk("%s: Transmission error detected\n", ahd_name(ahd)); 2727 ahd_lqistat1_print(lqistat1, &cur_col, 50); 2728 ahd_lastphase_print(lastphase, &cur_col, 50); 2729 ahd_scsisigi_print(curphase, &cur_col, 50); 2730 ahd_perrdiag_print(perrdiag, &cur_col, 50); 2731 printk("\n"); 2732 ahd_dump_card_state(ahd); 2733 } 2734 2735 if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) { 2736 if (silent == FALSE) { 2737 printk("%s: Gross protocol error during incoming " 2738 "packet. lqistat1 == 0x%x. Resetting bus.\n", 2739 ahd_name(ahd), lqistat1); 2740 } 2741 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2742 return; 2743 } else if ((lqistat1 & LQICRCI_LQ) != 0) { 2744 /* 2745 * A CRC error has been detected on an incoming LQ. 2746 * The bus is currently hung on the last ACK. 2747 * Hit LQIRETRY to release the last ack, and 2748 * wait for the sequencer to determine that ATNO 2749 * is asserted while in message out to take us 2750 * to our host message loop. No NONPACKREQ or 2751 * LQIPHASE type errors will occur in this 2752 * scenario. After this first LQIRETRY, the LQI 2753 * manager will be in ISELO where it will 2754 * happily sit until another packet phase begins. 2755 * Unexpected bus free detection is enabled 2756 * through any phases that occur after we release 2757 * this last ack until the LQI manager sees a 2758 * packet phase. This implies we may have to 2759 * ignore a perfectly valid "unexpected busfree" 2760 * after our "initiator detected error" message is 2761 * sent. A busfree is the expected response after 2762 * we tell the target that its L_Q was corrupted.
(SPI4R09 10.7.3.3.3) 2764 */ 2765 ahd_outb(ahd, LQCTL2, LQIRETRY); 2766 printk("LQIRetry for LQICRCI_LQ to release ACK\n"); 2767 } else if ((lqistat1 & LQICRCI_NLQ) != 0) { 2768 /* 2769 * We detected a CRC error in a NON-LQ packet. 2770 * The hardware has varying behavior in this situation 2771 * depending on whether this packet was part of a 2772 * stream or not. 2773 * 2774 * PKT by PKT mode: 2775 * The hardware has already acked the complete packet. 2776 * If the target honors our outstanding ATN condition, 2777 * we should be (or soon will be) in MSGOUT phase. 2778 * This will trigger the LQIPHASE_LQ status bit as the 2779 * hardware was expecting another LQ. Unexpected 2780 * busfree detection is enabled. Once LQIPHASE_LQ is 2781 * true (first entry into host message loop is much 2782 * the same), we must clear LQIPHASE_LQ and hit 2783 * LQIRETRY so the hardware is ready to handle 2784 * a future LQ. NONPACKREQ will not be asserted again 2785 * once we hit LQIRETRY until another packet is 2786 * processed. The target may either go busfree 2787 * or start another packet in response to our message. 2788 * 2789 * Read Streaming P0 asserted: 2790 * If we raise ATN and the target completes the entire 2791 * stream (P0 asserted during the last packet), the 2792 * hardware will ack all data and return to the ISTART 2793 * state. When the target responds to our ATN condition, 2794 * LQIPHASE_LQ will be asserted. We should respond to 2795 * this with an LQIRETRY to prepare for any future 2796 * packets. NONPACKREQ will not be asserted again 2797 * once we hit LQIRETRY until another packet is 2798 * processed. The target may either go busfree or 2799 * start another packet in response to our message. 2800 * Busfree detection is enabled. 2801 * 2802 * Read Streaming P0 not asserted: 2803 * If we raise ATN and the target transitions to 2804 * MSGOUT in or after a packet where P0 is not 2805 * asserted, the hardware will assert LQIPHASE_NLQ. 2806 * We should respond to the LQIPHASE_NLQ with an 2807 * LQIRETRY. Should the target stay in a non-pkt 2808 * phase after we send our message, the hardware 2809 * will assert LQIPHASE_LQ. Recovery is then just as 2810 * listed above for the read streaming with P0 asserted. 2811 * Busfree detection is enabled. 2812 */ 2813 if (silent == FALSE) 2814 printk("LQICRCI_NLQ\n"); 2815 if (scb == NULL) { 2816 printk("%s: No SCB valid for LQICRCI_NLQ. " 2817 "Resetting bus\n", ahd_name(ahd)); 2818 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2819 return; 2820 } 2821 } else if ((lqistat1 & LQIBADLQI) != 0) { 2822 printk("Need to handle BADLQI!\n"); 2823 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2824 return; 2825 } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) { 2826 if ((curphase & ~P_DATAIN_DT) != 0) { 2827 /* Ack the byte so we can continue. */ 2828 if (silent == FALSE) 2829 printk("Acking %s to clear perror\n", 2830 ahd_lookup_phase_entry(curphase)->phasemsg); 2831 ahd_inb(ahd, SCSIDAT); 2832 } 2833 2834 if (curphase == P_MESGIN) 2835 msg_out = MSG_PARITY_ERROR; 2836 } 2837 2838 /* 2839 * We've set the hardware to assert ATN if we 2840 * get a parity error on "in" phases, so all we 2841 * need to do is stuff the message buffer with 2842 * the appropriate message. "In" phases have set 2843 * mesg_out to something other than MSG_NOOP.
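 * send_msg_perror is consumed by the host message loop, which emits it as a single byte message during the next message-out phase.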
2844 */ 2845 ahd->send_msg_perror = msg_out; 2846 if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR) 2847 scb->flags |= SCB_TRANSMISSION_ERROR; 2848 ahd_outb(ahd, MSG_OUT, HOST_MSG); 2849 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2850 ahd_unpause(ahd); 2851 } 2852 2853 static void 2854 ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1) 2855 { 2856 /* 2857 * Clear the sources of the interrupts. 2858 */ 2859 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2860 ahd_outb(ahd, CLRLQIINT1, lqistat1); 2861 2862 /* 2863 * If the "illegal" phase changes were in response 2864 * to our ATN to flag a CRC error, AND we ended up 2865 * on packet boundaries, clear the error, restart the 2866 * LQI manager as appropriate, and go on our merry 2867 * way toward sending the message. Otherwise, reset 2868 * the bus to clear the error. 2869 */ 2870 ahd_set_active_fifo(ahd); 2871 if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0 2872 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) { 2873 if ((lqistat1 & LQIPHASE_LQ) != 0) { 2874 printk("LQIRETRY for LQIPHASE_LQ\n"); 2875 ahd_outb(ahd, LQCTL2, LQIRETRY); 2876 } else if ((lqistat1 & LQIPHASE_NLQ) != 0) { 2877 printk("LQIRETRY for LQIPHASE_NLQ\n"); 2878 ahd_outb(ahd, LQCTL2, LQIRETRY); 2879 } else 2880 panic("ahd_handle_lqiphase_error: No phase errors\n"); 2881 ahd_dump_card_state(ahd); 2882 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2883 ahd_unpause(ahd); 2884 } else { 2885 printk("Resetting Channel for LQI Phase error\n"); 2886 ahd_dump_card_state(ahd); 2887 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2888 } 2889 } 2890 2891 /* 2892 * Packetized unexpected or expected busfree. 2893 * Entered in mode based on busfreetime. 2894 */ 2895 static int 2896 ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime) 2897 { 2898 u_int lqostat1; 2899 2900 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 2901 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 2902 lqostat1 = ahd_inb(ahd, LQOSTAT1); 2903 if ((lqostat1 & LQOBUSFREE) != 0) { 2904 struct scb *scb; 2905 u_int scbid; 2906 u_int saved_scbptr; 2907 u_int waiting_h; 2908 u_int waiting_t; 2909 u_int next; 2910 2911 /* 2912 * The LQO manager detected an unexpected busfree 2913 * either: 2914 * 2915 * 1) During an outgoing LQ. 2916 * 2) After an outgoing LQ but before the first 2917 * REQ of the command packet. 2918 * 3) During an outgoing command packet. 2919 * 2920 * In all cases, CURRSCB is pointing to the 2921 * SCB that encountered the failure. Clean 2922 * up the queue, clear SELDO and LQOBUSFREE, 2923 * and allow the sequencer to restart the select 2924 * out at its leisure. 2925 */ 2926 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2927 scbid = ahd_inw(ahd, CURRSCB); 2928 scb = ahd_lookup_scb(ahd, scbid); 2929 if (scb == NULL) 2930 panic("SCB not valid during LQOBUSFREE"); 2931 /* 2932 * Clear the status. 2933 */ 2934 ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE); 2935 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) 2936 ahd_outb(ahd, CLRLQOINT1, 0); 2937 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 2938 ahd_flush_device_writes(ahd); 2939 ahd_outb(ahd, CLRSINT0, CLRSELDO); 2940 2941 /* 2942 * Return the LQO manager to its idle loop. It will 2943 * not do this automatically if the busfree occurs 2944 * after the first REQ of either the LQ or command 2945 * packet or between the LQ and command packet. 2946 */ 2947 ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE); 2948 2949 /* 2950 * Update the waiting for selection queue so 2951 * we restart on the correct SCB.
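 * If CURRSCB is no longer at the head of the waiting-for-selection list, it is reinstalled there below, with the tail and SCB_NEXT2 links adjusted, so that the retried selection begins with the SCB that failed.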
2952 */ 2953 waiting_h = ahd_inw(ahd, WAITING_TID_HEAD); 2954 saved_scbptr = ahd_get_scbptr(ahd); 2955 if (waiting_h != scbid) { 2956 2957 ahd_outw(ahd, WAITING_TID_HEAD, scbid); 2958 waiting_t = ahd_inw(ahd, WAITING_TID_TAIL); 2959 if (waiting_t == waiting_h) { 2960 ahd_outw(ahd, WAITING_TID_TAIL, scbid); 2961 next = SCB_LIST_NULL; 2962 } else { 2963 ahd_set_scbptr(ahd, waiting_h); 2964 next = ahd_inw_scbram(ahd, SCB_NEXT2); 2965 } 2966 ahd_set_scbptr(ahd, scbid); 2967 ahd_outw(ahd, SCB_NEXT2, next); 2968 } 2969 ahd_set_scbptr(ahd, saved_scbptr); 2970 if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) { 2971 if (SCB_IS_SILENT(scb) == FALSE) { 2972 ahd_print_path(ahd, scb); 2973 printk("Probable outgoing LQ CRC error. " 2974 "Retrying command\n"); 2975 } 2976 scb->crc_retry_count++; 2977 } else { 2978 ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); 2979 ahd_freeze_scb(scb); 2980 ahd_freeze_devq(ahd, scb); 2981 } 2982 /* Return unpausing the sequencer. */ 2983 return (0); 2984 } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) { 2985 /* 2986 * Ignore what are really parity errors that 2987 * occur on the last REQ of a free-running 2988 * clock prior to going busfree. Some drives 2989 * do not properly perform active negation just before 2990 * going busfree, resulting in a parity glitch. 2991 */ 2992 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE); 2993 #ifdef AHD_DEBUG 2994 if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0) 2995 printk("%s: Parity on last REQ detected " 2996 "during busfree phase.\n", 2997 ahd_name(ahd)); 2998 #endif 2999 /* Return unpausing the sequencer. */ 3000 return (0); 3001 } 3002 if (ahd->src_mode != AHD_MODE_SCSI) { 3003 u_int scbid; 3004 struct scb *scb; 3005 3006 scbid = ahd_get_scbptr(ahd); 3007 scb = ahd_lookup_scb(ahd, scbid); 3008 ahd_print_path(ahd, scb); 3009 printk("Unexpected PKT busfree condition\n"); 3010 ahd_dump_card_state(ahd); 3011 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', 3012 SCB_GET_LUN(scb), SCB_GET_TAG(scb), 3013 ROLE_INITIATOR, CAM_UNEXP_BUSFREE); 3014 3015 /* Return restarting the sequencer. */ 3016 return (1); 3017 } 3018 printk("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); 3019 ahd_dump_card_state(ahd); 3020 /* Restart the sequencer. */ 3021 return (1); 3022 } 3023 3024 /* 3025 * Non-packetized unexpected or expected busfree. 3026 */ 3027 static int 3028 ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) 3029 { 3030 struct ahd_devinfo devinfo; 3031 struct scb *scb; 3032 u_int lastphase; 3033 u_int saved_scsiid; 3034 u_int saved_lun; 3035 u_int target; 3036 u_int initiator_role_id; 3037 u_int scbid; 3038 u_int ppr_busfree; 3039 int printerror; 3040 3041 /* 3042 * Look at what phase we were last in. If it's message out, 3043 * chances are pretty good that the busfree was in response 3044 * to one of our abort requests.
3045 */ 3046 lastphase = ahd_inb(ahd, LASTPHASE); 3047 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); 3048 saved_lun = ahd_inb(ahd, SAVED_LUN); 3049 target = SCSIID_TARGET(ahd, saved_scsiid); 3050 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 3051 ahd_compile_devinfo(&devinfo, initiator_role_id, 3052 target, saved_lun, 'A', ROLE_INITIATOR); 3053 printerror = 1; 3054 3055 scbid = ahd_get_scbptr(ahd); 3056 scb = ahd_lookup_scb(ahd, scbid); 3057 if (scb != NULL 3058 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 3059 scb = NULL; 3060 3061 ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0; 3062 if (lastphase == P_MESGOUT) { 3063 u_int tag; 3064 3065 tag = SCB_LIST_NULL; 3066 if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE) 3067 || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) { 3068 int found; 3069 int sent_msg; 3070 3071 if (scb == NULL) { 3072 ahd_print_devinfo(ahd, &devinfo); 3073 printk("Abort for unidentified " 3074 "connection completed.\n"); 3075 /* restart the sequencer. */ 3076 return (1); 3077 } 3078 sent_msg = ahd->msgout_buf[ahd->msgout_index - 1]; 3079 ahd_print_path(ahd, scb); 3080 printk("SCB %d - Abort%s Completed.\n", 3081 SCB_GET_TAG(scb), 3082 sent_msg == MSG_ABORT_TAG ? " Tag" : ""); 3083 3084 if (sent_msg == MSG_ABORT_TAG) 3085 tag = SCB_GET_TAG(scb); 3086 3087 if ((scb->flags & SCB_EXTERNAL_RESET) != 0) { 3088 /* 3089 * This abort is in response to an 3090 * unexpected switch to command phase 3091 * for a packetized connection. Since 3092 * the identify message was never sent, 3093 * "saved lun" is 0. We really want to 3094 * abort only the SCB that encountered 3095 * this error, which could have a different 3096 * lun. The SCB will be retried so the OS 3097 * will see the UA after renegotiating to 3098 * packetized. 3099 */ 3100 tag = SCB_GET_TAG(scb); 3101 saved_lun = scb->hscb->lun; 3102 } 3103 found = ahd_abort_scbs(ahd, target, 'A', saved_lun, 3104 tag, ROLE_INITIATOR, 3105 CAM_REQ_ABORTED); 3106 printk("found == 0x%x\n", found); 3107 printerror = 0; 3108 } else if (ahd_sent_msg(ahd, AHDMSG_1B, 3109 MSG_BUS_DEV_RESET, TRUE)) { 3110 ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, 3111 CAM_BDR_SENT, "Bus Device Reset", 3112 /*verbose_level*/0); 3113 printerror = 0; 3114 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE) 3115 && ppr_busfree == 0) { 3116 struct ahd_initiator_tinfo *tinfo; 3117 struct ahd_tmode_tstate *tstate; 3118 3119 /* 3120 * PPR Rejected. 3121 * 3122 * If the previous negotiation was packetized, 3123 * this could be because the device has been 3124 * reset without our knowledge. Force our 3125 * current negotiation to async and retry the 3126 * negotiation. Otherwise retry the command 3127 * with non-ppr negotiation. 3128 */ 3129 #ifdef AHD_DEBUG 3130 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3131 printk("PPR negotiation rejected busfree.\n"); 3132 #endif 3133 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, 3134 devinfo.our_scsiid, 3135 devinfo.target, &tstate); 3136 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) { 3137 ahd_set_width(ahd, &devinfo, 3138 MSG_EXT_WDTR_BUS_8_BIT, 3139 AHD_TRANS_CUR, 3140 /*paused*/TRUE); 3141 ahd_set_syncrate(ahd, &devinfo, 3142 /*period*/0, /*offset*/0, 3143 /*ppr_options*/0, 3144 AHD_TRANS_CUR, 3145 /*paused*/TRUE); 3146 /* 3147 * The EXPECT_PPR_BUSFREE handler below 3148 * will effect the retry and necessary 3149 * abort.
3150 */ 3151 } else { 3152 tinfo->curr.transport_version = 2; 3153 tinfo->goal.transport_version = 2; 3154 tinfo->goal.ppr_options = 0; 3155 if (scb != NULL) { 3156 /* 3157 * Remove any SCBs in the waiting 3158 * for selection queue that may 3159 * also be for this target so that 3160 * command ordering is preserved. 3161 */ 3162 ahd_freeze_devq(ahd, scb); 3163 ahd_qinfifo_requeue_tail(ahd, scb); 3164 } 3165 printerror = 0; 3166 } 3167 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) 3168 && ppr_busfree == 0) { 3169 /* 3170 * Negotiation Rejected. Go-narrow and 3171 * retry command. 3172 */ 3173 #ifdef AHD_DEBUG 3174 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3175 printk("WDTR negotiation rejected busfree.\n"); 3176 #endif 3177 ahd_set_width(ahd, &devinfo, 3178 MSG_EXT_WDTR_BUS_8_BIT, 3179 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3180 /*paused*/TRUE); 3181 if (scb != NULL) { 3182 /* 3183 * Remove any SCBs in the waiting for 3184 * selection queue that may also be for 3185 * this target so that command ordering 3186 * is preserved. 3187 */ 3188 ahd_freeze_devq(ahd, scb); 3189 ahd_qinfifo_requeue_tail(ahd, scb); 3190 } 3191 printerror = 0; 3192 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) 3193 && ppr_busfree == 0) { 3194 /* 3195 * Negotiation Rejected. Go-async and 3196 * retry command. 3197 */ 3198 #ifdef AHD_DEBUG 3199 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3200 printk("SDTR negotiation rejected busfree.\n"); 3201 #endif 3202 ahd_set_syncrate(ahd, &devinfo, 3203 /*period*/0, /*offset*/0, 3204 /*ppr_options*/0, 3205 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3206 /*paused*/TRUE); 3207 if (scb != NULL) { 3208 /* 3209 * Remove any SCBs in the waiting for 3210 * selection queue that may also be for 3211 * this target so that command ordering 3212 * is preserved. 3213 */ 3214 ahd_freeze_devq(ahd, scb); 3215 ahd_qinfifo_requeue_tail(ahd, scb); 3216 } 3217 printerror = 0; 3218 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 3219 && ahd_sent_msg(ahd, AHDMSG_1B, 3220 MSG_INITIATOR_DET_ERR, TRUE)) { 3221 3222 #ifdef AHD_DEBUG 3223 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3224 printk("Expected IDE Busfree\n"); 3225 #endif 3226 printerror = 0; 3227 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE) 3228 && ahd_sent_msg(ahd, AHDMSG_1B, 3229 MSG_MESSAGE_REJECT, TRUE)) { 3230 3231 #ifdef AHD_DEBUG 3232 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3233 printk("Expected QAS Reject Busfree\n"); 3234 #endif 3235 printerror = 0; 3236 } 3237 } 3238 3239 /* 3240 * The busfree required flag is honored at the end of 3241 * the message phases. We check it last in case we 3242 * had to send some other message that caused a busfree. 
3243 */ 3244 if (scb != NULL && printerror != 0 3245 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) 3246 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { 3247 3248 ahd_freeze_devq(ahd, scb); 3249 ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); 3250 ahd_freeze_scb(scb); 3251 if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) { 3252 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 3253 SCB_GET_CHANNEL(ahd, scb), 3254 SCB_GET_LUN(scb), SCB_LIST_NULL, 3255 ROLE_INITIATOR, CAM_REQ_ABORTED); 3256 } else { 3257 #ifdef AHD_DEBUG 3258 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3259 printk("PPR Negotiation Busfree.\n"); 3260 #endif 3261 ahd_done(ahd, scb); 3262 } 3263 printerror = 0; 3264 } 3265 if (printerror != 0) { 3266 int aborted; 3267 3268 aborted = 0; 3269 if (scb != NULL) { 3270 u_int tag; 3271 3272 if ((scb->hscb->control & TAG_ENB) != 0) 3273 tag = SCB_GET_TAG(scb); 3274 else 3275 tag = SCB_LIST_NULL; 3276 ahd_print_path(ahd, scb); 3277 aborted = ahd_abort_scbs(ahd, target, 'A', 3278 SCB_GET_LUN(scb), tag, 3279 ROLE_INITIATOR, 3280 CAM_UNEXP_BUSFREE); 3281 } else { 3282 /* 3283 * We had not fully identified this connection, 3284 * so we cannot abort anything. 3285 */ 3286 printk("%s: ", ahd_name(ahd)); 3287 } 3288 printk("Unexpected busfree %s, %d SCBs aborted, " 3289 "PRGMCNT == 0x%x\n", 3290 ahd_lookup_phase_entry(lastphase)->phasemsg, 3291 aborted, 3292 ahd_inw(ahd, PRGMCNT)); 3293 ahd_dump_card_state(ahd); 3294 if (lastphase != P_BUSFREE) 3295 ahd_force_renegotiation(ahd, &devinfo); 3296 } 3297 /* Always restart the sequencer. */ 3298 return (1); 3299 } 3300 3301 static void 3302 ahd_handle_proto_violation(struct ahd_softc *ahd) 3303 { 3304 struct ahd_devinfo devinfo; 3305 struct scb *scb; 3306 u_int scbid; 3307 u_int seq_flags; 3308 u_int curphase; 3309 u_int lastphase; 3310 int found; 3311 3312 ahd_fetch_devinfo(ahd, &devinfo); 3313 scbid = ahd_get_scbptr(ahd); 3314 scb = ahd_lookup_scb(ahd, scbid); 3315 seq_flags = ahd_inb(ahd, SEQ_FLAGS); 3316 curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 3317 lastphase = ahd_inb(ahd, LASTPHASE); 3318 if ((seq_flags & NOT_IDENTIFIED) != 0) { 3319 3320 /* 3321 * The reconnecting target either did not send an 3322 * identify message, or did, but we didn't find an SCB 3323 * to match. 3324 */ 3325 ahd_print_devinfo(ahd, &devinfo); 3326 printk("Target did not send an IDENTIFY message. " 3327 "LASTPHASE = 0x%x.\n", lastphase); 3328 scb = NULL; 3329 } else if (scb == NULL) { 3330 /* 3331 * We don't seem to have an SCB active for this 3332 * transaction. Print an error and reset the bus. 3333 */ 3334 ahd_print_devinfo(ahd, &devinfo); 3335 printk("No SCB found during protocol violation\n"); 3336 goto proto_violation_reset; 3337 } else { 3338 ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL); 3339 if ((seq_flags & NO_CDB_SENT) != 0) { 3340 ahd_print_path(ahd, scb); 3341 printk("No or incomplete CDB sent to device.\n"); 3342 } else if ((ahd_inb_scbram(ahd, SCB_CONTROL) 3343 & STATUS_RCVD) == 0) { 3344 /* 3345 * The target never bothered to provide status to 3346 * us prior to completing the command. Since we don't 3347 * know the disposition of this command, we must attempt 3348 * to abort it. Assert ATN and prepare to send an abort 3349 * message. 
3350 */ 3351 ahd_print_path(ahd, scb); 3352 printk("Completed command without status.\n"); 3353 } else { 3354 ahd_print_path(ahd, scb); 3355 printk("Unknown protocol violation.\n"); 3356 ahd_dump_card_state(ahd); 3357 } 3358 } 3359 if ((lastphase & ~P_DATAIN_DT) == 0 3360 || lastphase == P_COMMAND) { 3361 proto_violation_reset: 3362 /* 3363 * Target either went directly to data 3364 * phase or didn't respond to our ATN. 3365 * The only safe thing to do is to blow 3366 * it away with a bus reset. 3367 */ 3368 found = ahd_reset_channel(ahd, 'A', TRUE); 3369 printk("%s: Issued Channel %c Bus Reset. " 3370 "%d SCBs aborted\n", ahd_name(ahd), 'A', found); 3371 } else { 3372 /* 3373 * Leave the selection hardware off in case 3374 * this abort attempt will affect yet to 3375 * be sent commands. 3376 */ 3377 ahd_outb(ahd, SCSISEQ0, 3378 ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 3379 ahd_assert_atn(ahd); 3380 ahd_outb(ahd, MSG_OUT, HOST_MSG); 3381 if (scb == NULL) { 3382 ahd_print_devinfo(ahd, &devinfo); 3383 ahd->msgout_buf[0] = MSG_ABORT_TASK; 3384 ahd->msgout_len = 1; 3385 ahd->msgout_index = 0; 3386 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 3387 } else { 3388 ahd_print_path(ahd, scb); 3389 scb->flags |= SCB_ABORT; 3390 } 3391 printk("Protocol violation %s. Attempting to abort.\n", 3392 ahd_lookup_phase_entry(curphase)->phasemsg); 3393 } 3394 } 3395 3396 /* 3397 * Force renegotiation to occur the next time we initiate 3398 * a command to the current device. 3399 */ 3400 static void 3401 ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 3402 { 3403 struct ahd_initiator_tinfo *targ_info; 3404 struct ahd_tmode_tstate *tstate; 3405 3406 #ifdef AHD_DEBUG 3407 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 3408 ahd_print_devinfo(ahd, devinfo); 3409 printk("Forcing renegotiation\n"); 3410 } 3411 #endif 3412 targ_info = ahd_fetch_transinfo(ahd, 3413 devinfo->channel, 3414 devinfo->our_scsiid, 3415 devinfo->target, 3416 &tstate); 3417 ahd_update_neg_request(ahd, devinfo, tstate, 3418 targ_info, AHD_NEG_IF_NON_ASYNC); 3419 } 3420 3421 #define AHD_MAX_STEPS 2000 3422 static void 3423 ahd_clear_critical_section(struct ahd_softc *ahd) 3424 { 3425 ahd_mode_state saved_modes; 3426 int stepping; 3427 int steps; 3428 int first_instr; 3429 u_int simode0; 3430 u_int simode1; 3431 u_int simode3; 3432 u_int lqimode0; 3433 u_int lqimode1; 3434 u_int lqomode0; 3435 u_int lqomode1; 3436 3437 if (ahd->num_critical_sections == 0) 3438 return; 3439 3440 stepping = FALSE; 3441 steps = 0; 3442 first_instr = 0; 3443 simode0 = 0; 3444 simode1 = 0; 3445 simode3 = 0; 3446 lqimode0 = 0; 3447 lqimode1 = 0; 3448 lqomode0 = 0; 3449 lqomode1 = 0; 3450 saved_modes = ahd_save_modes(ahd); 3451 for (;;) { 3452 struct cs *cs; 3453 u_int seqaddr; 3454 u_int i; 3455 3456 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 3457 seqaddr = ahd_inw(ahd, CURADDR); 3458 3459 cs = ahd->critical_sections; 3460 for (i = 0; i < ahd->num_critical_sections; i++, cs++) { 3461 3462 if (cs->begin < seqaddr && cs->end >= seqaddr) 3463 break; 3464 } 3465 3466 if (i == ahd->num_critical_sections) 3467 break; 3468 3469 if (steps > AHD_MAX_STEPS) { 3470 printk("%s: Infinite loop in critical section\n" 3471 "%s: First Instruction 0x%x now 0x%x\n", 3472 ahd_name(ahd), ahd_name(ahd), first_instr, 3473 seqaddr); 3474 ahd_dump_card_state(ahd); 3475 panic("critical section loop"); 3476 } 3477 3478 steps++; 3479 #ifdef AHD_DEBUG 3480 if ((ahd_debug & AHD_SHOW_MISC) != 0) 3481 printk("%s: Single stepping at 0x%x\n", ahd_name(ahd), 3482 seqaddr); 3483 
#endif 3484 if (stepping == FALSE) { 3485 3486 first_instr = seqaddr; 3487 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 3488 simode0 = ahd_inb(ahd, SIMODE0); 3489 simode3 = ahd_inb(ahd, SIMODE3); 3490 lqimode0 = ahd_inb(ahd, LQIMODE0); 3491 lqimode1 = ahd_inb(ahd, LQIMODE1); 3492 lqomode0 = ahd_inb(ahd, LQOMODE0); 3493 lqomode1 = ahd_inb(ahd, LQOMODE1); 3494 ahd_outb(ahd, SIMODE0, 0); 3495 ahd_outb(ahd, SIMODE3, 0); 3496 ahd_outb(ahd, LQIMODE0, 0); 3497 ahd_outb(ahd, LQIMODE1, 0); 3498 ahd_outb(ahd, LQOMODE0, 0); 3499 ahd_outb(ahd, LQOMODE1, 0); 3500 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 3501 simode1 = ahd_inb(ahd, SIMODE1); 3502 /* 3503 * We don't clear ENBUSFREE. Unfortunately 3504 * we cannot re-enable busfree detection within 3505 * the current connection, so we must leave it 3506 * on while single stepping. 3507 */ 3508 ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE); 3509 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP); 3510 stepping = TRUE; 3511 } 3512 ahd_outb(ahd, CLRSINT1, CLRBUSFREE); 3513 ahd_outb(ahd, CLRINT, CLRSCSIINT); 3514 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); 3515 ahd_outb(ahd, HCNTRL, ahd->unpause); 3516 while (!ahd_is_paused(ahd)) 3517 ahd_delay(200); 3518 ahd_update_modes(ahd); 3519 } 3520 if (stepping) { 3521 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 3522 ahd_outb(ahd, SIMODE0, simode0); 3523 ahd_outb(ahd, SIMODE3, simode3); 3524 ahd_outb(ahd, LQIMODE0, lqimode0); 3525 ahd_outb(ahd, LQIMODE1, lqimode1); 3526 ahd_outb(ahd, LQOMODE0, lqomode0); 3527 ahd_outb(ahd, LQOMODE1, lqomode1); 3528 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 3529 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP); 3530 ahd_outb(ahd, SIMODE1, simode1); 3531 /* 3532 * SCSIINT seems to glitch occasionally when 3533 * the interrupt masks are restored. Clear SCSIINT 3534 * one more time so that only persistent errors 3535 * are seen as a real interrupt. 3536 */ 3537 ahd_outb(ahd, CLRINT, CLRSCSIINT); 3538 } 3539 ahd_restore_modes(ahd, saved_modes); 3540 } 3541 3542 /* 3543 * Clear any pending interrupt status. 
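 * The packetized LQI/LQO status registers are cleared first, then the SSTAT latches, with CLRSCSIINT written last so that no still-latched condition immediately re-posts the interrupt.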
3544 */ 3545 static void 3546 ahd_clear_intstat(struct ahd_softc *ahd) 3547 { 3548 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 3549 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 3550 /* Clear any interrupt conditions this may have caused */ 3551 ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2 3552 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD); 3553 ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT 3554 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI 3555 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ); 3556 ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ 3557 |CLRLQOATNPKT|CLRLQOTCRC); 3558 ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS 3559 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT); 3560 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { 3561 ahd_outb(ahd, CLRLQOINT0, 0); 3562 ahd_outb(ahd, CLRLQOINT1, 0); 3563 } 3564 ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR); 3565 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 3566 |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT); 3567 ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO 3568 |CLRIOERR|CLROVERRUN); 3569 ahd_outb(ahd, CLRINT, CLRSCSIINT); 3570 } 3571 3572 /**************************** Debugging Routines ******************************/ 3573 #ifdef AHD_DEBUG 3574 uint32_t ahd_debug = AHD_DEBUG_OPTS; 3575 #endif 3576 3577 #if 0 3578 void 3579 ahd_print_scb(struct scb *scb) 3580 { 3581 struct hardware_scb *hscb; 3582 int i; 3583 3584 hscb = scb->hscb; 3585 printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 3586 (void *)scb, 3587 hscb->control, 3588 hscb->scsiid, 3589 hscb->lun, 3590 hscb->cdb_len); 3591 printk("Shared Data: "); 3592 for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) 3593 printk("%#02x", hscb->shared_data.idata.cdb[i]); 3594 printk(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n", 3595 (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), 3596 (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF), 3597 ahd_le32toh(hscb->datacnt), 3598 ahd_le32toh(hscb->sgptr), 3599 SCB_GET_TAG(scb)); 3600 ahd_dump_sglist(scb); 3601 } 3602 #endif /* 0 */ 3603 3604 /************************* Transfer Negotiation *******************************/ 3605 /* 3606 * Allocate per target mode instance (ID we respond to as a target) 3607 * transfer negotiation data structures. 3608 */ 3609 static struct ahd_tmode_tstate * 3610 ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) 3611 { 3612 struct ahd_tmode_tstate *master_tstate; 3613 struct ahd_tmode_tstate *tstate; 3614 int i; 3615 3616 master_tstate = ahd->enabled_targets[ahd->our_id]; 3617 if (ahd->enabled_targets[scsi_id] != NULL 3618 && ahd->enabled_targets[scsi_id] != master_tstate) 3619 panic("%s: ahd_alloc_tstate - Target already allocated", 3620 ahd_name(ahd)); 3621 tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); 3622 if (tstate == NULL) 3623 return (NULL); 3624 3625 /* 3626 * If we have allocated a master tstate, copy user settings from 3627 * the master tstate (taken from SRAM or the EEPROM) for this 3628 * channel, but reset our current and goal settings to async/narrow 3629 * until an initiator talks to us. 
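 * Zero-filled transinfo entries give exactly that: a period and offset of 0 mean async, and a width of 0 is MSG_EXT_WDTR_BUS_8_BIT.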
3630 */ 3631 if (master_tstate != NULL) { 3632 memcpy(tstate, master_tstate, sizeof(*tstate)); 3633 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 3634 for (i = 0; i < 16; i++) { 3635 memset(&tstate->transinfo[i].curr, 0, 3636 sizeof(tstate->transinfo[i].curr)); 3637 memset(&tstate->transinfo[i].goal, 0, 3638 sizeof(tstate->transinfo[i].goal)); 3639 } 3640 } else 3641 memset(tstate, 0, sizeof(*tstate)); 3642 ahd->enabled_targets[scsi_id] = tstate; 3643 return (tstate); 3644 } 3645 3646 #ifdef AHD_TARGET_MODE 3647 /* 3648 * Free per target mode instance (ID we respond to as a target) 3649 * transfer negotiation data structures. 3650 */ 3651 static void 3652 ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) 3653 { 3654 struct ahd_tmode_tstate *tstate; 3655 3656 /* 3657 * Don't clean up our "master" tstate. 3658 * It has our default user settings. 3659 */ 3660 if (scsi_id == ahd->our_id 3661 && force == FALSE) 3662 return; 3663 3664 tstate = ahd->enabled_targets[scsi_id]; 3665 kfree(tstate); 3666 ahd->enabled_targets[scsi_id] = NULL; 3667 } 3668 #endif 3669 3670 /* 3671 * Called when we have an active connection to a target on the bus, 3672 * this function finds the nearest period to the input period limited 3673 * by the capabilities of the bus connectivity and the sync settings for 3674 * the target. 3675 */ 3676 static void 3677 ahd_devlimited_syncrate(struct ahd_softc *ahd, 3678 struct ahd_initiator_tinfo *tinfo, 3679 u_int *period, u_int *ppr_options, role_t role) 3680 { 3681 struct ahd_transinfo *transinfo; 3682 u_int maxsync; 3683 3684 if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0 3685 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) { 3686 maxsync = AHD_SYNCRATE_PACED; 3687 } else { 3688 maxsync = AHD_SYNCRATE_ULTRA; 3689 /* Can't do DT related options on an SE bus */ 3690 *ppr_options &= MSG_EXT_PPR_QAS_REQ; 3691 } 3692 /* 3693 * Never allow a value higher than our current goal 3694 * period, otherwise we may allow a target initiated 3695 * negotiation to go above the limit as set by the 3696 * user. In the case of an initiator initiated 3697 * sync negotiation, we limit based on the user 3698 * setting. This allows the system to still accept 3699 * incoming negotiations even if target initiated 3700 * negotiation is not performed. 3701 */ 3702 if (role == ROLE_TARGET) 3703 transinfo = &tinfo->user; 3704 else 3705 transinfo = &tinfo->goal; 3706 *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN); 3707 if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { 3708 maxsync = max(maxsync, (u_int)AHD_SYNCRATE_ULTRA2); 3709 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 3710 } 3711 if (transinfo->period == 0) { 3712 *period = 0; 3713 *ppr_options = 0; 3714 } else { 3715 *period = max(*period, (u_int)transinfo->period); 3716 ahd_find_syncrate(ahd, period, ppr_options, maxsync); 3717 } 3718 } 3719 3720 /* 3721 * Look up the valid period to SCSIRATE conversion in our table. 3722 * Return the period and offset that should be sent to the target 3723 * if this was the beginning of an SDTR. 3724 */ 3725 void 3726 ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, 3727 u_int *ppr_options, u_int maxsync) 3728 { 3729 if (*period < maxsync) 3730 *period = maxsync; 3731 3732 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0 3733 && *period > AHD_SYNCRATE_MIN_DT) 3734 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 3735 3736 if (*period > AHD_SYNCRATE_MIN) 3737 *period = 0; 3738 3739 /* Honor PPR option conformance rules.
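 * RTI is only legal at paced rates, and without IU_REQ only the DT and QAS options may remain; without DT_REQ only QAS survives. The period is likewise bumped past the paced-only and DT-only table entries when the corresponding option is unavailable.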
*/ 3740 if (*period > AHD_SYNCRATE_PACED) 3741 *ppr_options &= ~MSG_EXT_PPR_RTI; 3742 3743 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0) 3744 *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ); 3745 3746 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0) 3747 *ppr_options &= MSG_EXT_PPR_QAS_REQ; 3748 3749 /* Skip all PACED only entries if IU is not available */ 3750 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0 3751 && *period < AHD_SYNCRATE_DT) 3752 *period = AHD_SYNCRATE_DT; 3753 3754 /* Skip all DT only entries if DT is not available */ 3755 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 3756 && *period < AHD_SYNCRATE_ULTRA2) 3757 *period = AHD_SYNCRATE_ULTRA2; 3758 } 3759 3760 /* 3761 * Truncate the given synchronous offset to a value the 3762 * current adapter type and syncrate are capable of. 3763 */ 3764 static void 3765 ahd_validate_offset(struct ahd_softc *ahd, 3766 struct ahd_initiator_tinfo *tinfo, 3767 u_int period, u_int *offset, int wide, 3768 role_t role) 3769 { 3770 u_int maxoffset; 3771 3772 /* Limit offset to what we can do */ 3773 if (period == 0) 3774 maxoffset = 0; 3775 else if (period <= AHD_SYNCRATE_PACED) { 3776 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) 3777 maxoffset = MAX_OFFSET_PACED_BUG; 3778 else 3779 maxoffset = MAX_OFFSET_PACED; 3780 } else 3781 maxoffset = MAX_OFFSET_NON_PACED; 3782 *offset = min(*offset, maxoffset); 3783 if (tinfo != NULL) { 3784 if (role == ROLE_TARGET) 3785 *offset = min(*offset, (u_int)tinfo->user.offset); 3786 else 3787 *offset = min(*offset, (u_int)tinfo->goal.offset); 3788 } 3789 } 3790 3791 /* 3792 * Truncate the given transfer width parameter to a value the 3793 * current adapter type is capable of. 3794 */ 3795 static void 3796 ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, 3797 u_int *bus_width, role_t role) 3798 { 3799 switch (*bus_width) { 3800 default: 3801 if (ahd->features & AHD_WIDE) { 3802 /* Respond Wide */ 3803 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3804 break; 3805 } 3806 /* FALLTHROUGH */ 3807 case MSG_EXT_WDTR_BUS_8_BIT: 3808 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3809 break; 3810 } 3811 if (tinfo != NULL) { 3812 if (role == ROLE_TARGET) 3813 *bus_width = min((u_int)tinfo->user.width, *bus_width); 3814 else 3815 *bus_width = min((u_int)tinfo->goal.width, *bus_width); 3816 } 3817 } 3818 3819 /* 3820 * Update the bitmask of targets with which the controller should 3821 * negotiate at the next convenient opportunity. This currently 3822 * means the next time we send the initial identify messages for 3823 * a new transaction. 3824 */ 3825 int 3826 ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 3827 struct ahd_tmode_tstate *tstate, 3828 struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type) 3829 { 3830 u_int auto_negotiate_orig; 3831 3832 auto_negotiate_orig = tstate->auto_negotiate; 3833 if (neg_type == AHD_NEG_ALWAYS) { 3834 /* 3835 * Force our "current" settings to be 3836 * unknown so that unless a bus reset 3837 * occurs the need to renegotiate is 3838 * recorded persistently.
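 *
 * AHD_WIDTH_UNKNOWN, AHD_PERIOD_UNKNOWN, and AHD_OFFSET_UNKNOWN are
 * sentinels chosen so that they do not match any negotiated goal
 * value; in sketch form, the curr-vs-goal comparison below is then
 * guaranteed to flag this target:
 *
 *	tinfo->curr.width = AHD_WIDTH_UNKNOWN;	// never a legal width
 *	...
 *	if (tinfo->curr.width != tinfo->goal.width)
 *		tstate->auto_negotiate |= devinfo->target_mask;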
3839 */ 3840 if ((ahd->features & AHD_WIDE) != 0) 3841 tinfo->curr.width = AHD_WIDTH_UNKNOWN; 3842 tinfo->curr.period = AHD_PERIOD_UNKNOWN; 3843 tinfo->curr.offset = AHD_OFFSET_UNKNOWN; 3844 } 3845 if (tinfo->curr.period != tinfo->goal.period 3846 || tinfo->curr.width != tinfo->goal.width 3847 || tinfo->curr.offset != tinfo->goal.offset 3848 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 3849 || (neg_type == AHD_NEG_IF_NON_ASYNC 3850 && (tinfo->goal.offset != 0 3851 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 3852 || tinfo->goal.ppr_options != 0))) 3853 tstate->auto_negotiate |= devinfo->target_mask; 3854 else 3855 tstate->auto_negotiate &= ~devinfo->target_mask; 3856 3857 return (auto_negotiate_orig != tstate->auto_negotiate); 3858 } 3859 3860 /* 3861 * Update the user/goal/curr tables of synchronous negotiation 3862 * parameters as well as, in the case of a current or active update, 3863 * any data structures on the host controller. In the case of an 3864 * active update, the specified target is currently talking to us on 3865 * the bus, so the transfer parameter update must take effect 3866 * immediately. 3867 */ 3868 void 3869 ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 3870 u_int period, u_int offset, u_int ppr_options, 3871 u_int type, int paused) 3872 { 3873 struct ahd_initiator_tinfo *tinfo; 3874 struct ahd_tmode_tstate *tstate; 3875 u_int old_period; 3876 u_int old_offset; 3877 u_int old_ppr; 3878 int active; 3879 int update_needed; 3880 3881 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; 3882 update_needed = 0; 3883 3884 if (period == 0 || offset == 0) { 3885 period = 0; 3886 offset = 0; 3887 } 3888 3889 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 3890 devinfo->target, &tstate); 3891 3892 if ((type & AHD_TRANS_USER) != 0) { 3893 tinfo->user.period = period; 3894 tinfo->user.offset = offset; 3895 tinfo->user.ppr_options = ppr_options; 3896 } 3897 3898 if ((type & AHD_TRANS_GOAL) != 0) { 3899 tinfo->goal.period = period; 3900 tinfo->goal.offset = offset; 3901 tinfo->goal.ppr_options = ppr_options; 3902 } 3903 3904 old_period = tinfo->curr.period; 3905 old_offset = tinfo->curr.offset; 3906 old_ppr = tinfo->curr.ppr_options; 3907 3908 if ((type & AHD_TRANS_CUR) != 0 3909 && (old_period != period 3910 || old_offset != offset 3911 || old_ppr != ppr_options)) { 3912 3913 update_needed++; 3914 3915 tinfo->curr.period = period; 3916 tinfo->curr.offset = offset; 3917 tinfo->curr.ppr_options = ppr_options; 3918 3919 ahd_send_async(ahd, devinfo->channel, devinfo->target, 3920 CAM_LUN_WILDCARD, AC_TRANSFER_NEG); 3921 if (bootverbose) { 3922 if (offset != 0) { 3923 int options; 3924 3925 printk("%s: target %d synchronous with " 3926 "period = 0x%x, offset = 0x%x", 3927 ahd_name(ahd), devinfo->target, 3928 period, offset); 3929 options = 0; 3930 if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { 3931 printk("(RDSTRM"); 3932 options++; 3933 } 3934 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { 3935 printk("%s", options ? "|DT" : "(DT"); 3936 options++; 3937 } 3938 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { 3939 printk("%s", options ? "|IU" : "(IU"); 3940 options++; 3941 } 3942 if ((ppr_options & MSG_EXT_PPR_RTI) != 0) { 3943 printk("%s", options ? "|RTI" : "(RTI"); 3944 options++; 3945 } 3946 if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { 3947 printk("%s", options ? 
"|QAS" : "(QAS"); 3948 options++; 3949 } 3950 if (options != 0) 3951 printk(")\n"); 3952 else 3953 printk("\n"); 3954 } else { 3955 printk("%s: target %d using " 3956 "asynchronous transfers%s\n", 3957 ahd_name(ahd), devinfo->target, 3958 (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0 3959 ? "(QAS)" : ""); 3960 } 3961 } 3962 } 3963 /* 3964 * Always refresh the neg-table to handle the case of the 3965 * sequencer setting the ENATNO bit for a MK_MESSAGE request. 3966 * We will always renegotiate in that case if this is a 3967 * packetized request. Also manage the busfree expected flag 3968 * from this common routine so that we catch changes due to 3969 * WDTR or SDTR messages. 3970 */ 3971 if ((type & AHD_TRANS_CUR) != 0) { 3972 if (!paused) 3973 ahd_pause(ahd); 3974 ahd_update_neg_table(ahd, devinfo, &tinfo->curr); 3975 if (!paused) 3976 ahd_unpause(ahd); 3977 if (ahd->msg_type != MSG_TYPE_NONE) { 3978 if ((old_ppr & MSG_EXT_PPR_IU_REQ) 3979 != (ppr_options & MSG_EXT_PPR_IU_REQ)) { 3980 #ifdef AHD_DEBUG 3981 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 3982 ahd_print_devinfo(ahd, devinfo); 3983 printk("Expecting IU Change busfree\n"); 3984 } 3985 #endif 3986 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE 3987 | MSG_FLAG_IU_REQ_CHANGED; 3988 } 3989 if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) { 3990 #ifdef AHD_DEBUG 3991 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3992 printk("PPR with IU_REQ outstanding\n"); 3993 #endif 3994 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE; 3995 } 3996 } 3997 } 3998 3999 update_needed += ahd_update_neg_request(ahd, devinfo, tstate, 4000 tinfo, AHD_NEG_TO_GOAL); 4001 4002 if (update_needed && active) 4003 ahd_update_pending_scbs(ahd); 4004 } 4005 4006 /* 4007 * Update the user/goal/curr tables of wide negotiation 4008 * parameters as well as, in the case of a current or active update, 4009 * any data structures on the host controller. In the case of an 4010 * active update, the specified target is currently talking to us on 4011 * the bus, so the transfer parameter update must take effect 4012 * immediately. 
4013 */ 4014 void 4015 ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4016 u_int width, u_int type, int paused) 4017 { 4018 struct ahd_initiator_tinfo *tinfo; 4019 struct ahd_tmode_tstate *tstate; 4020 u_int oldwidth; 4021 int active; 4022 int update_needed; 4023 4024 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; 4025 update_needed = 0; 4026 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 4027 devinfo->target, &tstate); 4028 4029 if ((type & AHD_TRANS_USER) != 0) 4030 tinfo->user.width = width; 4031 4032 if ((type & AHD_TRANS_GOAL) != 0) 4033 tinfo->goal.width = width; 4034 4035 oldwidth = tinfo->curr.width; 4036 if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) { 4037 4038 update_needed++; 4039 4040 tinfo->curr.width = width; 4041 ahd_send_async(ahd, devinfo->channel, devinfo->target, 4042 CAM_LUN_WILDCARD, AC_TRANSFER_NEG); 4043 if (bootverbose) { 4044 printk("%s: target %d using %dbit transfers\n", 4045 ahd_name(ahd), devinfo->target, 4046 8 * (0x01 << width)); 4047 } 4048 } 4049 4050 if ((type & AHD_TRANS_CUR) != 0) { 4051 if (!paused) 4052 ahd_pause(ahd); 4053 ahd_update_neg_table(ahd, devinfo, &tinfo->curr); 4054 if (!paused) 4055 ahd_unpause(ahd); 4056 } 4057 4058 update_needed += ahd_update_neg_request(ahd, devinfo, tstate, 4059 tinfo, AHD_NEG_TO_GOAL); 4060 if (update_needed && active) 4061 ahd_update_pending_scbs(ahd); 4062 4063 } 4064 4065 /* 4066 * Update the current state of tagged queuing for a given target. 4067 */ 4068 static void 4069 ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd, 4070 struct ahd_devinfo *devinfo, ahd_queue_alg alg) 4071 { 4072 struct scsi_device *sdev = cmd->device; 4073 4074 ahd_platform_set_tags(ahd, sdev, devinfo, alg); 4075 ahd_send_async(ahd, devinfo->channel, devinfo->target, 4076 devinfo->lun, AC_TRANSFER_NEG); 4077 } 4078 4079 static void 4080 ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4081 struct ahd_transinfo *tinfo) 4082 { 4083 ahd_mode_state saved_modes; 4084 u_int period; 4085 u_int ppr_opts; 4086 u_int con_opts; 4087 u_int offset; 4088 u_int saved_negoaddr; 4089 uint8_t iocell_opts[sizeof(ahd->iocell_opts)]; 4090 4091 saved_modes = ahd_save_modes(ahd); 4092 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 4093 4094 saved_negoaddr = ahd_inb(ahd, NEGOADDR); 4095 ahd_outb(ahd, NEGOADDR, devinfo->target); 4096 period = tinfo->period; 4097 offset = tinfo->offset; 4098 memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts)); 4099 ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ 4100 |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI); 4101 con_opts = 0; 4102 if (period == 0) 4103 period = AHD_SYNCRATE_ASYNC; 4104 if (period == AHD_SYNCRATE_160) { 4105 4106 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { 4107 /* 4108 * When the SPI4 spec was finalized, PACE transfers 4109 * were not made a configurable option in the PPR 4110 * message. Instead it is assumed to be enabled for 4111 * any syncrate faster than 80MHz. Nevertheless, 4112 * Harpoon2A4 allows this to be configurable. 4113 * 4114 * Harpoon2A4 also assumes at most 2 data bytes per 4115 * negotiated REQ/ACK offset. Paced transfers take 4116 * 4, so we must adjust our offset. 4117 */ 4118 ppr_opts |= PPROPT_PACE; 4119 offset *= 2; 4120 4121 /* 4122 * Harpoon2A assumed that there would be a 4123 * fallback rate between 160MHz and 80MHz, 4124 * so 7 is used as the period factor rather 4125 * than 8 for 160MHz.
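 *
 * The net effect of this workaround, in sketch form (the values
 * follow directly from the code below):
 *
 *	ppr_opts |= PPROPT_PACE;	// make pacing explicit for H2A4
 *	offset *= 2;			// 4 bytes per REQ/ACK, chip assumes 2
 *	period = AHD_SYNCRATE_REVA_160;	// factor 7 stands in for 8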
4126 */ 4127 period = AHD_SYNCRATE_REVA_160; 4128 } 4129 if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0) 4130 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= 4131 ~AHD_PRECOMP_MASK; 4132 } else { 4133 /* 4134 * Precomp should be disabled for non-paced transfers. 4135 */ 4136 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; 4137 4138 if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0 4139 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0 4140 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) { 4141 /* 4142 * Slow down our CRC interval to be 4143 * compatible with non-packetized 4144 * U160 devices that can't handle a 4145 * CRC at full speed. 4146 */ 4147 con_opts |= ENSLOWCRC; 4148 } 4149 4150 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { 4151 /* 4152 * On H2A4, revert to a slower slewrate 4153 * on non-paced transfers. 4154 */ 4155 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= 4156 ~AHD_SLEWRATE_MASK; 4157 } 4158 } 4159 4160 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW); 4161 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]); 4162 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE); 4163 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]); 4164 4165 ahd_outb(ahd, NEGPERIOD, period); 4166 ahd_outb(ahd, NEGPPROPTS, ppr_opts); 4167 ahd_outb(ahd, NEGOFFSET, offset); 4168 4169 if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT) 4170 con_opts |= WIDEXFER; 4171 4172 /* 4173 * Slow down our CRC interval to be 4174 * compatible with packetized U320 devices 4175 * that can't handle a CRC at full speed 4176 */ 4177 if (ahd->features & AHD_AIC79XXB_SLOWCRC) { 4178 con_opts |= ENSLOWCRC; 4179 } 4180 4181 /* 4182 * During packetized transfers, the target will 4183 * give us the opportunity to send command packets 4184 * without us asserting attention. 4185 */ 4186 if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0) 4187 con_opts |= ENAUTOATNO; 4188 ahd_outb(ahd, NEGCONOPTS, con_opts); 4189 ahd_outb(ahd, NEGOADDR, saved_negoaddr); 4190 ahd_restore_modes(ahd, saved_modes); 4191 } 4192 4193 /* 4194 * When the transfer settings for a connection change, setup for 4195 * negotiation in pending SCBs to effect the change as quickly as 4196 * possible. We also cancel any negotiations that are scheduled 4197 * for inflight SCBs that have not been started yet. 4198 */ 4199 static void 4200 ahd_update_pending_scbs(struct ahd_softc *ahd) 4201 { 4202 struct scb *pending_scb; 4203 int pending_scb_count; 4204 int paused; 4205 u_int saved_scbptr; 4206 ahd_mode_state saved_modes; 4207 4208 /* 4209 * Traverse the pending SCB list and ensure that all of the 4210 * SCBs there have the proper settings. We can only safely 4211 * clear the negotiation required flag (setting requires the 4212 * execution queue to be modified) and this is only possible 4213 * if we are not already attempting to select out for this 4214 * SCB. For this reason, all callers only call this routine 4215 * if we are changing the negotiation settings for the currently 4216 * active transaction on the bus. 
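 *
 * The fixup below is therefore strictly clear-only; in sketch form,
 * the only transition the traversal ever makes is:
 *
 *	if ((tstate->auto_negotiate & devinfo.target_mask) == 0
 *	 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
 *		pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
 *		pending_scb->hscb->control &= ~MK_MESSAGE;
 *	}
 *
 * Setting these bits would require modifying the execution queue,
 * which is not safe from this context.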
4217 */ 4218 pending_scb_count = 0; 4219 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { 4220 struct ahd_devinfo devinfo; 4221 struct ahd_initiator_tinfo *tinfo; 4222 struct ahd_tmode_tstate *tstate; 4223 4224 ahd_scb_devinfo(ahd, &devinfo, pending_scb); 4225 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, 4226 devinfo.our_scsiid, 4227 devinfo.target, &tstate); 4228 if ((tstate->auto_negotiate & devinfo.target_mask) == 0 4229 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { 4230 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; 4231 pending_scb->hscb->control &= ~MK_MESSAGE; 4232 } 4233 ahd_sync_scb(ahd, pending_scb, 4234 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 4235 pending_scb_count++; 4236 } 4237 4238 if (pending_scb_count == 0) 4239 return; 4240 4241 if (ahd_is_paused(ahd)) { 4242 paused = 1; 4243 } else { 4244 paused = 0; 4245 ahd_pause(ahd); 4246 } 4247 4248 /* 4249 * Force the sequencer to reinitialize the selection for 4250 * the command at the head of the execution queue if it 4251 * has already been setup. The negotiation changes may 4252 * affect whether we select-out with ATN. It is only 4253 * safe to clear ENSELO when the bus is not free and no 4254 * selection is in progress or has completed. 4255 */ 4256 saved_modes = ahd_save_modes(ahd); 4257 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 4258 if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0 4259 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0) 4260 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 4261 saved_scbptr = ahd_get_scbptr(ahd); 4262 /* Ensure that the hscbs down on the card match the new information */ 4263 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { 4264 u_int scb_tag; 4265 u_int control; 4266 4267 scb_tag = SCB_GET_TAG(pending_scb); 4268 ahd_set_scbptr(ahd, scb_tag); 4269 control = ahd_inb_scbram(ahd, SCB_CONTROL); 4270 control &= ~MK_MESSAGE; 4271 control |= pending_scb->hscb->control & MK_MESSAGE; 4272 ahd_outb(ahd, SCB_CONTROL, control); 4273 } 4274 ahd_set_scbptr(ahd, saved_scbptr); 4275 ahd_restore_modes(ahd, saved_modes); 4276 4277 if (paused == 0) 4278 ahd_unpause(ahd); 4279 } 4280 4281 /**************************** Pathing Information *****************************/ 4282 static void 4283 ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 4284 { 4285 ahd_mode_state saved_modes; 4286 u_int saved_scsiid; 4287 role_t role; 4288 int our_id; 4289 4290 saved_modes = ahd_save_modes(ahd); 4291 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 4292 4293 if (ahd_inb(ahd, SSTAT0) & TARGET) 4294 role = ROLE_TARGET; 4295 else 4296 role = ROLE_INITIATOR; 4297 4298 if (role == ROLE_TARGET 4299 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { 4300 /* We were selected, so pull our id from TARGIDIN */ 4301 our_id = ahd_inb(ahd, TARGIDIN) & OID; 4302 } else if (role == ROLE_TARGET) 4303 our_id = ahd_inb(ahd, TOWNID); 4304 else 4305 our_id = ahd_inb(ahd, IOWNID); 4306 4307 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); 4308 ahd_compile_devinfo(devinfo, 4309 our_id, 4310 SCSIID_TARGET(ahd, saved_scsiid), 4311 ahd_inb(ahd, SAVED_LUN), 4312 SCSIID_CHANNEL(ahd, saved_scsiid), 4313 role); 4314 ahd_restore_modes(ahd, saved_modes); 4315 } 4316 4317 void 4318 ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 4319 { 4320 printk("%s:%c:%d:%d: ", ahd_name(ahd), 'A', 4321 devinfo->target, devinfo->lun); 4322 } 4323 4324 static const struct ahd_phase_table_entry* 4325 ahd_lookup_phase_entry(int phase) 4326 { 4327 const struct ahd_phase_table_entry *entry; 4328
const struct ahd_phase_table_entry *last_entry; 4329 4330 /* 4331 * num_phases doesn't include the default entry which 4332 * will be returned if the phase doesn't match. 4333 */ 4334 last_entry = &ahd_phase_table[num_phases]; 4335 for (entry = ahd_phase_table; entry < last_entry; entry++) { 4336 if (phase == entry->phase) 4337 break; 4338 } 4339 return (entry); 4340 } 4341 4342 void 4343 ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target, 4344 u_int lun, char channel, role_t role) 4345 { 4346 devinfo->our_scsiid = our_id; 4347 devinfo->target = target; 4348 devinfo->lun = lun; 4349 devinfo->target_offset = target; 4350 devinfo->channel = channel; 4351 devinfo->role = role; 4352 if (channel == 'B') 4353 devinfo->target_offset += 8; 4354 devinfo->target_mask = (0x01 << devinfo->target_offset); 4355 } 4356 4357 static void 4358 ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4359 struct scb *scb) 4360 { 4361 role_t role; 4362 int our_id; 4363 4364 our_id = SCSIID_OUR_ID(scb->hscb->scsiid); 4365 role = ROLE_INITIATOR; 4366 if ((scb->hscb->control & TARGET_SCB) != 0) 4367 role = ROLE_TARGET; 4368 ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb), 4369 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role); 4370 } 4371 4372 4373 /************************ Message Phase Processing ****************************/ 4374 /* 4375 * When an initiator transaction with the MK_MESSAGE flag either reconnects 4376 * or enters the initial message out phase, we are interrupted. Fill our 4377 * outgoing message buffer with the appropriate message and begin handling 4378 * the message phase(s) manually. 4379 */ 4380 static void 4381 ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4382 struct scb *scb) 4383 { 4384 /* 4385 * To facilitate adding multiple messages together, 4386 * each routine should increment the index and len 4387 * variables instead of setting them explicitly. 4388 */ 4389 ahd->msgout_index = 0; 4390 ahd->msgout_len = 0; 4391 4392 if (ahd_currently_packetized(ahd)) 4393 ahd->msg_flags |= MSG_FLAG_PACKETIZED; 4394 4395 if (ahd->send_msg_perror 4396 && ahd_inb(ahd, MSG_OUT) == HOST_MSG) { 4397 ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror; 4398 ahd->msgout_len++; 4399 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 4400 #ifdef AHD_DEBUG 4401 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4402 printk("Setting up for Parity Error delivery\n"); 4403 #endif 4404 return; 4405 } else if (scb == NULL) { 4406 printk("%s: WARNING. No pending message for " 4407 "I_T msgin. 
Issuing NO-OP\n", ahd_name(ahd)); 4408 ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP; 4409 ahd->msgout_len++; 4410 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 4411 return; 4412 } 4413 4414 if ((scb->flags & SCB_DEVICE_RESET) == 0 4415 && (scb->flags & SCB_PACKETIZED) == 0 4416 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) { 4417 u_int identify_msg; 4418 4419 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); 4420 if ((scb->hscb->control & DISCENB) != 0) 4421 identify_msg |= MSG_IDENTIFY_DISCFLAG; 4422 ahd->msgout_buf[ahd->msgout_index++] = identify_msg; 4423 ahd->msgout_len++; 4424 4425 if ((scb->hscb->control & TAG_ENB) != 0) { 4426 ahd->msgout_buf[ahd->msgout_index++] = 4427 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); 4428 ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb); 4429 ahd->msgout_len += 2; 4430 } 4431 } 4432 4433 if (scb->flags & SCB_DEVICE_RESET) { 4434 ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET; 4435 ahd->msgout_len++; 4436 ahd_print_path(ahd, scb); 4437 printk("Bus Device Reset Message Sent\n"); 4438 /* 4439 * Clear our selection hardware in advance of 4440 * the busfree. We may have an entry in the waiting 4441 * Q for this target, and we don't want to go about 4442 * selecting while we handle the busfree and blow it 4443 * away. 4444 */ 4445 ahd_outb(ahd, SCSISEQ0, 0); 4446 } else if ((scb->flags & SCB_ABORT) != 0) { 4447 4448 if ((scb->hscb->control & TAG_ENB) != 0) { 4449 ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG; 4450 } else { 4451 ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT; 4452 } 4453 ahd->msgout_len++; 4454 ahd_print_path(ahd, scb); 4455 printk("Abort%s Message Sent\n", 4456 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); 4457 /* 4458 * Clear our selection hardware in advance of 4459 * the busfree. We may have an entry in the waiting 4460 * Q for this target, and we don't want to go about 4461 * selecting while we handle the busfree and blow it 4462 * away. 4463 */ 4464 ahd_outb(ahd, SCSISEQ0, 0); 4465 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { 4466 ahd_build_transfer_msg(ahd, devinfo); 4467 /* 4468 * Clear our selection hardware in advance of potential 4469 * PPR IU status change busfree. We may have an entry in 4470 * the waiting Q for this target, and we don't want to go 4471 * about selecting while we handle the busfree and blow 4472 * it away. 4473 */ 4474 ahd_outb(ahd, SCSISEQ0, 0); 4475 } else { 4476 printk("ahd_intr: AWAITING_MSG for an SCB that " 4477 "does not have a waiting message\n"); 4478 printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, 4479 devinfo->target_mask); 4480 panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x " 4481 "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control, 4482 ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT), 4483 scb->flags); 4484 } 4485 4486 /* 4487 * Clear the MK_MESSAGE flag from the SCB so we aren't 4488 * asked to send this message again. 4489 */ 4490 ahd_outb(ahd, SCB_CONTROL, 4491 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); 4492 scb->hscb->control &= ~MK_MESSAGE; 4493 ahd->msgout_index = 0; 4494 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 4495 } 4496 4497 /* 4498 * Build an appropriate transfer negotiation message for the 4499 * currently active target. 4500 */ 4501 static void 4502 ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 4503 { 4504 /* 4505 * We need to initiate transfer negotiations. 4506 * If our current and goal settings are identical, 4507 * we want to renegotiate due to a check condition. 
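 *
 * In sketch form, the decision made below (WDTR before SDTR is
 * required when both are needed, and PPR carries all parameters at
 * once):
 *
 *	doppr                       -> PPR(period, offset, width, options)
 *	!doppr && dosync && !dowide -> SDTR(period, offset)
 *	!doppr && dowide            -> WDTR(width); SDTR follows on the
 *	                               next negotiation pass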
4508 */ 4509 struct ahd_initiator_tinfo *tinfo; 4510 struct ahd_tmode_tstate *tstate; 4511 int dowide; 4512 int dosync; 4513 int doppr; 4514 u_int period; 4515 u_int ppr_options; 4516 u_int offset; 4517 4518 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 4519 devinfo->target, &tstate); 4520 /* 4521 * Filter our period based on the current connection. 4522 * If we can't perform DT transfers on this segment (not in LVD 4523 * mode for instance), then our decision to issue a PPR message 4524 * may change. 4525 */ 4526 period = tinfo->goal.period; 4527 offset = tinfo->goal.offset; 4528 ppr_options = tinfo->goal.ppr_options; 4529 /* Target initiated PPR is not allowed in the SCSI spec */ 4530 if (devinfo->role == ROLE_TARGET) 4531 ppr_options = 0; 4532 ahd_devlimited_syncrate(ahd, tinfo, &period, 4533 &ppr_options, devinfo->role); 4534 dowide = tinfo->curr.width != tinfo->goal.width; 4535 dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; 4536 /* 4537 * Only use PPR if we have options that need it, even if the device 4538 * claims to support it. There might be an expander in the way 4539 * that doesn't. 4540 */ 4541 doppr = ppr_options != 0; 4542 4543 if (!dowide && !dosync && !doppr) { 4544 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; 4545 dosync = tinfo->goal.offset != 0; 4546 } 4547 4548 if (!dowide && !dosync && !doppr) { 4549 /* 4550 * Force async with a WDTR message if we have a wide bus, 4551 * or just issue an SDTR with a 0 offset. 4552 */ 4553 if ((ahd->features & AHD_WIDE) != 0) 4554 dowide = 1; 4555 else 4556 dosync = 1; 4557 4558 if (bootverbose) { 4559 ahd_print_devinfo(ahd, devinfo); 4560 printk("Ensuring async\n"); 4561 } 4562 } 4563 /* Target initiated PPR is not allowed in the SCSI spec */ 4564 if (devinfo->role == ROLE_TARGET) 4565 doppr = 0; 4566 4567 /* 4568 * Both the PPR message and SDTR message require the 4569 * goal syncrate to be limited to what the target device 4570 * is capable of handling (based on whether an LVD->SE 4571 * expander is on the bus), so combine these two cases. 4572 * Regardless, guarantee that if we are using WDTR and SDTR 4573 * messages that WDTR comes first. 4574 */ 4575 if (doppr || (dosync && !dowide)) { 4576 4577 offset = tinfo->goal.offset; 4578 ahd_validate_offset(ahd, tinfo, period, &offset, 4579 doppr ? tinfo->goal.width 4580 : tinfo->curr.width, 4581 devinfo->role); 4582 if (doppr) { 4583 ahd_construct_ppr(ahd, devinfo, period, offset, 4584 tinfo->goal.width, ppr_options); 4585 } else { 4586 ahd_construct_sdtr(ahd, devinfo, period, offset); 4587 } 4588 } else { 4589 ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width); 4590 } 4591 } 4592 4593 /* 4594 * Build a synchronous negotiation message in our message 4595 * buffer based on the input parameters. 4596 */ 4597 static void 4598 ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4599 u_int period, u_int offset) 4600 { 4601 if (offset == 0) 4602 period = AHD_ASYNC_XFER_PERIOD; 4603 ahd->msgout_index += spi_populate_sync_msg( 4604 ahd->msgout_buf + ahd->msgout_index, period, offset); 4605 ahd->msgout_len += 5; 4606 if (bootverbose) { 4607 printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 4608 ahd_name(ahd), devinfo->channel, devinfo->target, 4609 devinfo->lun, period, offset); 4610 } 4611 } 4612 4613 /* 4614 * Build a wide negotiation message in our message 4615 * buffer based on the input parameters.
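 *
 * For reference, the construct routines here emit standard SPI
 * extended messages; their on-the-wire layouts (byte values are the
 * standard SPI message codes) explain the msgout_len increments of
 * 5, 4, and 8 above and below:
 *
 *	SDTR: 0x01, 0x03, 0x01, period, offset
 *	WDTR: 0x01, 0x02, 0x03, bus_width
 *	PPR:  0x01, 0x06, 0x04, period, 0x00, offset, bus_width, options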
4616 */ 4617 static void 4618 ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4619 u_int bus_width) 4620 { 4621 ahd->msgout_index += spi_populate_width_msg( 4622 ahd->msgout_buf + ahd->msgout_index, bus_width); 4623 ahd->msgout_len += 4; 4624 if (bootverbose) { 4625 printk("(%s:%c:%d:%d): Sending WDTR %x\n", 4626 ahd_name(ahd), devinfo->channel, devinfo->target, 4627 devinfo->lun, bus_width); 4628 } 4629 } 4630 4631 /* 4632 * Build a parallel protocol request message in our message 4633 * buffer based on the input parameters. 4634 */ 4635 static void 4636 ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4637 u_int period, u_int offset, u_int bus_width, 4638 u_int ppr_options) 4639 { 4640 /* 4641 * Always request precompensation from 4642 * the other target if we are running 4643 * at paced syncrates. 4644 */ 4645 if (period <= AHD_SYNCRATE_PACED) 4646 ppr_options |= MSG_EXT_PPR_PCOMP_EN; 4647 if (offset == 0) 4648 period = AHD_ASYNC_XFER_PERIOD; 4649 ahd->msgout_index += spi_populate_ppr_msg( 4650 ahd->msgout_buf + ahd->msgout_index, period, offset, 4651 bus_width, ppr_options); 4652 ahd->msgout_len += 8; 4653 if (bootverbose) { 4654 printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 4655 "offset %x, ppr_options %x\n", ahd_name(ahd), 4656 devinfo->channel, devinfo->target, devinfo->lun, 4657 bus_width, period, offset, ppr_options); 4658 } 4659 } 4660 4661 /* 4662 * Clear any active message state. 4663 */ 4664 static void 4665 ahd_clear_msg_state(struct ahd_softc *ahd) 4666 { 4667 ahd_mode_state saved_modes; 4668 4669 saved_modes = ahd_save_modes(ahd); 4670 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 4671 ahd->send_msg_perror = 0; 4672 ahd->msg_flags = MSG_FLAG_NONE; 4673 ahd->msgout_len = 0; 4674 ahd->msgin_index = 0; 4675 ahd->msg_type = MSG_TYPE_NONE; 4676 if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { 4677 /* 4678 * The target didn't care to respond to our 4679 * message request, so clear ATN. 4680 */ 4681 ahd_outb(ahd, CLRSINT1, CLRATNO); 4682 } 4683 ahd_outb(ahd, MSG_OUT, MSG_NOOP); 4684 ahd_outb(ahd, SEQ_FLAGS2, 4685 ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); 4686 ahd_restore_modes(ahd, saved_modes); 4687 } 4688 4689 /* 4690 * Manual message loop handler. 4691 */ 4692 static void 4693 ahd_handle_message_phase(struct ahd_softc *ahd) 4694 { 4695 struct ahd_devinfo devinfo; 4696 u_int bus_phase; 4697 int end_session; 4698 4699 ahd_fetch_devinfo(ahd, &devinfo); 4700 end_session = FALSE; 4701 bus_phase = ahd_inb(ahd, LASTPHASE); 4702 4703 if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) { 4704 printk("LQIRETRY for LQIPHASE_OUTPKT\n"); 4705 ahd_outb(ahd, LQCTL2, LQIRETRY); 4706 } 4707 reswitch: 4708 switch (ahd->msg_type) { 4709 case MSG_TYPE_INITIATOR_MSGOUT: 4710 { 4711 int lastbyte; 4712 int phasemis; 4713 int msgdone; 4714 4715 if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0) 4716 panic("HOST_MSG_LOOP interrupt with no active message"); 4717 4718 #ifdef AHD_DEBUG 4719 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4720 ahd_print_devinfo(ahd, &devinfo); 4721 printk("INITIATOR_MSG_OUT"); 4722 } 4723 #endif 4724 phasemis = bus_phase != P_MESGOUT; 4725 if (phasemis) { 4726 #ifdef AHD_DEBUG 4727 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4728 printk(" PHASEMIS %s\n", 4729 ahd_lookup_phase_entry(bus_phase) 4730 ->phasemsg); 4731 } 4732 #endif 4733 if (bus_phase == P_MESGIN) { 4734 /* 4735 * Change gears and see if 4736 * this message is of interest to 4737 * us or should be passed back to 4738 * the sequencer.
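 *
 * Whichever direction is chosen, each byte moved by this manual loop
 * is handed to the sequencer through a small mailbox handshake; in
 * sketch form, for the outgoing direction:
 *
 *	ahd_outb(ahd, CLRSINT1, CLRREQINIT);	// ack the current REQ
 *	ahd_outb(ahd, RETURN_2, byte);		// byte to place on the bus
 *	ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);	// sequencer verb
 *
 * where byte stands for the next msgout_buf entry.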
4739 */ 4740 ahd_outb(ahd, CLRSINT1, CLRATNO); 4741 ahd->send_msg_perror = 0; 4742 ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; 4743 ahd->msgin_index = 0; 4744 goto reswitch; 4745 } 4746 end_session = TRUE; 4747 break; 4748 } 4749 4750 if (ahd->send_msg_perror) { 4751 ahd_outb(ahd, CLRSINT1, CLRATNO); 4752 ahd_outb(ahd, CLRSINT1, CLRREQINIT); 4753 #ifdef AHD_DEBUG 4754 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4755 printk(" byte 0x%x\n", ahd->send_msg_perror); 4756 #endif 4757 /* 4758 * If we are notifying the target of a CRC error 4759 * during packetized operations, the target is 4760 * within its rights to acknowledge our message 4761 * with a busfree. 4762 */ 4763 if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0 4764 && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR) 4765 ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE; 4766 4767 ahd_outb(ahd, RETURN_2, ahd->send_msg_perror); 4768 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); 4769 break; 4770 } 4771 4772 msgdone = ahd->msgout_index == ahd->msgout_len; 4773 if (msgdone) { 4774 /* 4775 * The target has requested a retry. 4776 * Re-assert ATN, reset our message index to 4777 * 0, and try again. 4778 */ 4779 ahd->msgout_index = 0; 4780 ahd_assert_atn(ahd); 4781 } 4782 4783 lastbyte = ahd->msgout_index == (ahd->msgout_len - 1); 4784 if (lastbyte) { 4785 /* Last byte is signified by dropping ATN */ 4786 ahd_outb(ahd, CLRSINT1, CLRATNO); 4787 } 4788 4789 /* 4790 * Clear our interrupt status and present 4791 * the next byte on the bus. 4792 */ 4793 ahd_outb(ahd, CLRSINT1, CLRREQINIT); 4794 #ifdef AHD_DEBUG 4795 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4796 printk(" byte 0x%x\n", 4797 ahd->msgout_buf[ahd->msgout_index]); 4798 #endif 4799 ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]); 4800 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); 4801 break; 4802 } 4803 case MSG_TYPE_INITIATOR_MSGIN: 4804 { 4805 int phasemis; 4806 int message_done; 4807 4808 #ifdef AHD_DEBUG 4809 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4810 ahd_print_devinfo(ahd, &devinfo); 4811 printk("INITIATOR_MSG_IN"); 4812 } 4813 #endif 4814 phasemis = bus_phase != P_MESGIN; 4815 if (phasemis) { 4816 #ifdef AHD_DEBUG 4817 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4818 printk(" PHASEMIS %s\n", 4819 ahd_lookup_phase_entry(bus_phase) 4820 ->phasemsg); 4821 } 4822 #endif 4823 ahd->msgin_index = 0; 4824 if (bus_phase == P_MESGOUT 4825 && (ahd->send_msg_perror != 0 4826 || (ahd->msgout_len != 0 4827 && ahd->msgout_index == 0))) { 4828 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 4829 goto reswitch; 4830 } 4831 end_session = TRUE; 4832 break; 4833 } 4834 4835 /* Pull the byte in without acking it */ 4836 ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS); 4837 #ifdef AHD_DEBUG 4838 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4839 printk(" byte 0x%x\n", 4840 ahd->msgin_buf[ahd->msgin_index]); 4841 #endif 4842 4843 message_done = ahd_parse_msg(ahd, &devinfo); 4844 4845 if (message_done) { 4846 /* 4847 * Clear our incoming message buffer in case there 4848 * is another message following this one. 4849 */ 4850 ahd->msgin_index = 0; 4851 4852 /* 4853 * If this message elicited a response, 4854 * assert ATN so the target takes us to the 4855 * message out phase.
4856 */ 4857 if (ahd->msgout_len != 0) { 4858 #ifdef AHD_DEBUG 4859 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4860 ahd_print_devinfo(ahd, &devinfo); 4861 printk("Asserting ATN for response\n"); 4862 } 4863 #endif 4864 ahd_assert_atn(ahd); 4865 } 4866 } else 4867 ahd->msgin_index++; 4868 4869 if (message_done == MSGLOOP_TERMINATED) { 4870 end_session = TRUE; 4871 } else { 4872 /* Ack the byte */ 4873 ahd_outb(ahd, CLRSINT1, CLRREQINIT); 4874 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ); 4875 } 4876 break; 4877 } 4878 case MSG_TYPE_TARGET_MSGIN: 4879 { 4880 int msgdone; 4881 int msgout_request; 4882 4883 /* 4884 * By default, the message loop will continue. 4885 */ 4886 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); 4887 4888 if (ahd->msgout_len == 0) 4889 panic("Target MSGIN with no active message"); 4890 4891 /* 4892 * If we interrupted a mesgout session, the initiator 4893 * will not know this until our first REQ. So, we 4894 * only honor mesgout requests after we've sent our 4895 * first byte. 4896 */ 4897 if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0 4898 && ahd->msgout_index > 0) 4899 msgout_request = TRUE; 4900 else 4901 msgout_request = FALSE; 4902 4903 if (msgout_request) { 4904 4905 /* 4906 * Change gears and see if 4907 * this message is of interest to 4908 * us or should be passed back to 4909 * the sequencer. 4910 */ 4911 ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; 4912 ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO); 4913 ahd->msgin_index = 0; 4914 /* Dummy read to REQ for first byte */ 4915 ahd_inb(ahd, SCSIDAT); 4916 ahd_outb(ahd, SXFRCTL0, 4917 ahd_inb(ahd, SXFRCTL0) | SPIOEN); 4918 break; 4919 } 4920 4921 msgdone = ahd->msgout_index == ahd->msgout_len; 4922 if (msgdone) { 4923 ahd_outb(ahd, SXFRCTL0, 4924 ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); 4925 end_session = TRUE; 4926 break; 4927 } 4928 4929 /* 4930 * Present the next byte on the bus. 4931 */ 4932 ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); 4933 ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]); 4934 break; 4935 } 4936 case MSG_TYPE_TARGET_MSGOUT: 4937 { 4938 int lastbyte; 4939 int msgdone; 4940 4941 /* 4942 * By default, the message loop will continue. 4943 */ 4944 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); 4945 4946 /* 4947 * The initiator signals that this is 4948 * the last byte by dropping ATN. 4949 */ 4950 lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0; 4951 4952 /* 4953 * Read the latched byte, but turn off SPIOEN first 4954 * so that we don't inadvertently cause a REQ for the 4955 * next byte. 4956 */ 4957 ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); 4958 ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT); 4959 msgdone = ahd_parse_msg(ahd, &devinfo); 4960 if (msgdone == MSGLOOP_TERMINATED) { 4961 /* 4962 * The message is *really* done in that it caused 4963 * us to go to bus free. The sequencer has already 4964 * been reset at this point, so pull the ejection 4965 * handle. 4966 */ 4967 return; 4968 } 4969 4970 ahd->msgin_index++; 4971 4972 /* 4973 * XXX Read spec about initiator dropping ATN too soon 4974 * and use msgdone to detect it. 4975 */ 4976 if (msgdone == MSGLOOP_MSGCOMPLETE) { 4977 ahd->msgin_index = 0; 4978 4979 /* 4980 * If this message elicited a response, transition 4981 * to the Message in phase and send it.
4982 */ 4983 if (ahd->msgout_len != 0) { 4984 ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO); 4985 ahd_outb(ahd, SXFRCTL0, 4986 ahd_inb(ahd, SXFRCTL0) | SPIOEN); 4987 ahd->msg_type = MSG_TYPE_TARGET_MSGIN; 4988 ahd->msgin_index = 0; 4989 break; 4990 } 4991 } 4992 4993 if (lastbyte) 4994 end_session = TRUE; 4995 else { 4996 /* Ask for the next byte. */ 4997 ahd_outb(ahd, SXFRCTL0, 4998 ahd_inb(ahd, SXFRCTL0) | SPIOEN); 4999 } 5000 5001 break; 5002 } 5003 default: 5004 panic("Unknown REQINIT message type"); 5005 } 5006 5007 if (end_session) { 5008 if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) { 5009 printk("%s: Returning to Idle Loop\n", 5010 ahd_name(ahd)); 5011 ahd_clear_msg_state(ahd); 5012 5013 /* 5014 * Perform the equivalent of a clear_target_state. 5015 */ 5016 ahd_outb(ahd, LASTPHASE, P_BUSFREE); 5017 ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT); 5018 ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); 5019 } else { 5020 ahd_clear_msg_state(ahd); 5021 ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP); 5022 } 5023 } 5024 } 5025 5026 /* 5027 * See if we sent a particular extended message to the target. 5028 * If "full" is true, return true only if the target saw the full 5029 * message. If "full" is false, return true if the target saw at 5030 * least the first byte of the message. 5031 */ 5032 static int 5033 ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full) 5034 { 5035 int found; 5036 u_int index; 5037 5038 found = FALSE; 5039 index = 0; 5040 5041 while (index < ahd->msgout_len) { 5042 if (ahd->msgout_buf[index] == MSG_EXTENDED) { 5043 u_int end_index; 5044 5045 end_index = index + 1 + ahd->msgout_buf[index + 1]; 5046 if (ahd->msgout_buf[index+2] == msgval 5047 && type == AHDMSG_EXT) { 5048 5049 if (full) { 5050 if (ahd->msgout_index > end_index) 5051 found = TRUE; 5052 } else if (ahd->msgout_index > index) 5053 found = TRUE; 5054 } 5055 index = end_index; 5056 } else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK 5057 && ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { 5058 5059 /* Skip tag type and tag id or residue param*/ 5060 index += 2; 5061 } else { 5062 /* Single byte message */ 5063 if (type == AHDMSG_1B 5064 && ahd->msgout_index > index 5065 && (ahd->msgout_buf[index] == msgval 5066 || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 5067 && msgval == MSG_IDENTIFYFLAG))) 5068 found = TRUE; 5069 index++; 5070 } 5071 5072 if (found) 5073 break; 5074 } 5075 return (found); 5076 } 5077 5078 /* 5079 * Wait for a complete incoming message, parse it, and respond accordingly. 5080 */ 5081 static int 5082 ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 5083 { 5084 struct ahd_initiator_tinfo *tinfo; 5085 struct ahd_tmode_tstate *tstate; 5086 int reject; 5087 int done; 5088 int response; 5089 5090 done = MSGLOOP_IN_PROG; 5091 response = FALSE; 5092 reject = FALSE; 5093 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 5094 devinfo->target, &tstate); 5095 5096 /* 5097 * Parse as much of the message as is available, 5098 * rejecting it if we don't support it. When 5099 * the entire message is available and has been 5100 * handled, return MSGLOOP_MSGCOMPLETE, indicating 5101 * that we have parsed an entire message. 5102 * 5103 * In the case of extended messages, we accept the length 5104 * byte outright and perform more checking once we know the 5105 * extended message type. 
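 *
 * The msgin_buf indices used below follow the standard extended
 * message preamble, in sketch form:
 *
 *	msgin_buf[0]	0x01 (MSG_EXTENDED)
 *	msgin_buf[1]	length of the bytes that follow byte 1
 *	msgin_buf[2]	extended message code (SDTR, WDTR, or PPR)
 *	msgin_buf[3..]	the message's arguments
 *
 * which is why each arm gates on msgin_index reaching the advertised
 * length plus one before it validates the arguments.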
5106 */ 5107 switch (ahd->msgin_buf[0]) { 5108 case MSG_DISCONNECT: 5109 case MSG_SAVEDATAPOINTER: 5110 case MSG_CMDCOMPLETE: 5111 case MSG_RESTOREPOINTERS: 5112 case MSG_IGN_WIDE_RESIDUE: 5113 /* 5114 * End our message loop as these are messages 5115 * the sequencer handles on its own. 5116 */ 5117 done = MSGLOOP_TERMINATED; 5118 break; 5119 case MSG_MESSAGE_REJECT: 5120 response = ahd_handle_msg_reject(ahd, devinfo); 5121 /* FALLTHROUGH */ 5122 case MSG_NOOP: 5123 done = MSGLOOP_MSGCOMPLETE; 5124 break; 5125 case MSG_EXTENDED: 5126 { 5127 /* Wait for enough of the message to begin validation */ 5128 if (ahd->msgin_index < 2) 5129 break; 5130 switch (ahd->msgin_buf[2]) { 5131 case MSG_EXT_SDTR: 5132 { 5133 u_int period; 5134 u_int ppr_options; 5135 u_int offset; 5136 u_int saved_offset; 5137 5138 if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 5139 reject = TRUE; 5140 break; 5141 } 5142 5143 /* 5144 * Wait until we have both args before validating 5145 * and acting on this message. 5146 * 5147 * Add one to MSG_EXT_SDTR_LEN to account for 5148 * the extended message preamble. 5149 */ 5150 if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 5151 break; 5152 5153 period = ahd->msgin_buf[3]; 5154 ppr_options = 0; 5155 saved_offset = offset = ahd->msgin_buf[4]; 5156 ahd_devlimited_syncrate(ahd, tinfo, &period, 5157 &ppr_options, devinfo->role); 5158 ahd_validate_offset(ahd, tinfo, period, &offset, 5159 tinfo->curr.width, devinfo->role); 5160 if (bootverbose) { 5161 printk("(%s:%c:%d:%d): Received " 5162 "SDTR period %x, offset %x\n\t" 5163 "Filtered to period %x, offset %x\n", 5164 ahd_name(ahd), devinfo->channel, 5165 devinfo->target, devinfo->lun, 5166 ahd->msgin_buf[3], saved_offset, 5167 period, offset); 5168 } 5169 ahd_set_syncrate(ahd, devinfo, period, 5170 offset, ppr_options, 5171 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5172 /*paused*/TRUE); 5173 5174 /* 5175 * See if we initiated Sync Negotiation 5176 * and didn't have to fall down to async 5177 * transfers. 5178 */ 5179 if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) { 5180 /* We started it */ 5181 if (saved_offset != offset) { 5182 /* Went too low - force async */ 5183 reject = TRUE; 5184 } 5185 } else { 5186 /* 5187 * Send our own SDTR in reply 5188 */ 5189 if (bootverbose 5190 && devinfo->role == ROLE_INITIATOR) { 5191 printk("(%s:%c:%d:%d): Target " 5192 "Initiated SDTR\n", 5193 ahd_name(ahd), devinfo->channel, 5194 devinfo->target, devinfo->lun); 5195 } 5196 ahd->msgout_index = 0; 5197 ahd->msgout_len = 0; 5198 ahd_construct_sdtr(ahd, devinfo, 5199 period, offset); 5200 ahd->msgout_index = 0; 5201 response = TRUE; 5202 } 5203 done = MSGLOOP_MSGCOMPLETE; 5204 break; 5205 } 5206 case MSG_EXT_WDTR: 5207 { 5208 u_int bus_width; 5209 u_int saved_width; 5210 u_int sending_reply; 5211 5212 sending_reply = FALSE; 5213 if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 5214 reject = TRUE; 5215 break; 5216 } 5217 5218 /* 5219 * Wait until we have our arg before validating 5220 * and acting on this message. 5221 * 5222 * Add one to MSG_EXT_WDTR_LEN to account for 5223 * the extended message preamble. 
5224 */ 5225 if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 5226 break; 5227 5228 bus_width = ahd->msgin_buf[3]; 5229 saved_width = bus_width; 5230 ahd_validate_width(ahd, tinfo, &bus_width, 5231 devinfo->role); 5232 if (bootverbose) { 5233 printk("(%s:%c:%d:%d): Received WDTR " 5234 "%x filtered to %x\n", 5235 ahd_name(ahd), devinfo->channel, 5236 devinfo->target, devinfo->lun, 5237 saved_width, bus_width); 5238 } 5239 5240 if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) { 5241 /* 5242 * Don't send a WDTR back to the 5243 * target, since we asked first. 5244 * If the width went higher than our 5245 * request, reject it. 5246 */ 5247 if (saved_width > bus_width) { 5248 reject = TRUE; 5249 printk("(%s:%c:%d:%d): requested %dBit " 5250 "transfers. Rejecting...\n", 5251 ahd_name(ahd), devinfo->channel, 5252 devinfo->target, devinfo->lun, 5253 8 * (0x01 << bus_width)); 5254 bus_width = 0; 5255 } 5256 } else { 5257 /* 5258 * Send our own WDTR in reply 5259 */ 5260 if (bootverbose 5261 && devinfo->role == ROLE_INITIATOR) { 5262 printk("(%s:%c:%d:%d): Target " 5263 "Initiated WDTR\n", 5264 ahd_name(ahd), devinfo->channel, 5265 devinfo->target, devinfo->lun); 5266 } 5267 ahd->msgout_index = 0; 5268 ahd->msgout_len = 0; 5269 ahd_construct_wdtr(ahd, devinfo, bus_width); 5270 ahd->msgout_index = 0; 5271 response = TRUE; 5272 sending_reply = TRUE; 5273 } 5274 /* 5275 * After a wide message, we are async, but 5276 * some devices don't seem to honor this portion 5277 * of the spec. Force a renegotiation of the 5278 * sync component of our transfer agreement even 5279 * if our goal is async. By updating our width 5280 * after forcing the negotiation, we avoid 5281 * renegotiating for width. 5282 */ 5283 ahd_update_neg_request(ahd, devinfo, tstate, 5284 tinfo, AHD_NEG_ALWAYS); 5285 ahd_set_width(ahd, devinfo, bus_width, 5286 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5287 /*paused*/TRUE); 5288 if (sending_reply == FALSE && reject == FALSE) { 5289 5290 /* 5291 * We will always have an SDTR to send. 5292 */ 5293 ahd->msgout_index = 0; 5294 ahd->msgout_len = 0; 5295 ahd_build_transfer_msg(ahd, devinfo); 5296 ahd->msgout_index = 0; 5297 response = TRUE; 5298 } 5299 done = MSGLOOP_MSGCOMPLETE; 5300 break; 5301 } 5302 case MSG_EXT_PPR: 5303 { 5304 u_int period; 5305 u_int offset; 5306 u_int bus_width; 5307 u_int ppr_options; 5308 u_int saved_width; 5309 u_int saved_offset; 5310 u_int saved_ppr_options; 5311 5312 if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) { 5313 reject = TRUE; 5314 break; 5315 } 5316 5317 /* 5318 * Wait until we have all args before validating 5319 * and acting on this message. 5320 * 5321 * Add one to MSG_EXT_PPR_LEN to account for 5322 * the extended message preamble. 5323 */ 5324 if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1)) 5325 break; 5326 5327 period = ahd->msgin_buf[3]; 5328 offset = ahd->msgin_buf[5]; 5329 bus_width = ahd->msgin_buf[6]; 5330 saved_width = bus_width; 5331 ppr_options = ahd->msgin_buf[7]; 5332 /* 5333 * According to the spec, a DT only 5334 * period factor with no DT option 5335 * set implies async. 5336 */ 5337 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 5338 && period <= 9) 5339 offset = 0; 5340 saved_ppr_options = ppr_options; 5341 saved_offset = offset; 5342 5343 /* 5344 * Transfer options are only available if we 5345 * are negotiating wide. 
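 *
 * In sketch form, the two filters applied before the usual
 * width/period/offset validation (period factors of 9 and faster
 * exist only in DT mode, per the comment above):
 *
 *	if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period <= 9)
 *		offset = 0;		// DT-only factor without DT: async
 *	if (bus_width == 0)
 *		ppr_options &= MSG_EXT_PPR_QAS_REQ;	// narrow keeps QAS at most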
*/ 5346 5347 if (bus_width == 0) 5348 ppr_options &= MSG_EXT_PPR_QAS_REQ; 5349 5350 ahd_validate_width(ahd, tinfo, &bus_width, 5351 devinfo->role); 5352 ahd_devlimited_syncrate(ahd, tinfo, &period, 5353 &ppr_options, devinfo->role); 5354 ahd_validate_offset(ahd, tinfo, period, &offset, 5355 bus_width, devinfo->role); 5356 5357 if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) { 5358 /* 5359 * If we are unable to do any of the 5360 * requested options (we went too low), 5361 * then we'll have to reject the message. 5362 */ 5363 if (saved_width > bus_width 5364 || saved_offset != offset 5365 || saved_ppr_options != ppr_options) { 5366 reject = TRUE; 5367 period = 0; 5368 offset = 0; 5369 bus_width = 0; 5370 ppr_options = 0; 5371 } 5372 } else { 5373 if (devinfo->role != ROLE_TARGET) 5374 printk("(%s:%c:%d:%d): Target " 5375 "Initiated PPR\n", 5376 ahd_name(ahd), devinfo->channel, 5377 devinfo->target, devinfo->lun); 5378 else 5379 printk("(%s:%c:%d:%d): Initiator " 5380 "Initiated PPR\n", 5381 ahd_name(ahd), devinfo->channel, 5382 devinfo->target, devinfo->lun); 5383 ahd->msgout_index = 0; 5384 ahd->msgout_len = 0; 5385 ahd_construct_ppr(ahd, devinfo, period, offset, 5386 bus_width, ppr_options); 5387 ahd->msgout_index = 0; 5388 response = TRUE; 5389 } 5390 if (bootverbose) { 5391 printk("(%s:%c:%d:%d): Received PPR width %x, " 5392 "period %x, offset %x, options %x\n" 5393 "\tFiltered to width %x, period %x, " 5394 "offset %x, options %x\n", 5395 ahd_name(ahd), devinfo->channel, 5396 devinfo->target, devinfo->lun, 5397 saved_width, ahd->msgin_buf[3], 5398 saved_offset, saved_ppr_options, 5399 bus_width, period, offset, ppr_options); 5400 } 5401 ahd_set_width(ahd, devinfo, bus_width, 5402 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5403 /*paused*/TRUE); 5404 ahd_set_syncrate(ahd, devinfo, period, 5405 offset, ppr_options, 5406 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5407 /*paused*/TRUE); 5408 5409 done = MSGLOOP_MSGCOMPLETE; 5410 break; 5411 } 5412 default: 5413 /* Unknown extended message. Reject it. */ 5414 reject = TRUE; 5415 break; 5416 } 5417 break; 5418 } 5419 #ifdef AHD_TARGET_MODE 5420 case MSG_BUS_DEV_RESET: 5421 ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD, 5422 CAM_BDR_SENT, 5423 "Bus Device Reset Received", 5424 /*verbose_level*/0); 5425 ahd_restart(ahd); 5426 done = MSGLOOP_TERMINATED; 5427 break; 5428 case MSG_ABORT_TAG: 5429 case MSG_ABORT: 5430 case MSG_CLEAR_QUEUE: 5431 { 5432 int tag; 5433 5434 /* Target mode messages */ 5435 if (devinfo->role != ROLE_TARGET) { 5436 reject = TRUE; 5437 break; 5438 } 5439 tag = SCB_LIST_NULL; 5440 if (ahd->msgin_buf[0] == MSG_ABORT_TAG) 5441 tag = ahd_inb(ahd, INITIATOR_TAG); 5442 ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, 5443 devinfo->lun, tag, ROLE_TARGET, 5444 CAM_REQ_ABORTED); 5445 5446 tstate = ahd->enabled_targets[devinfo->our_scsiid]; 5447 if (tstate != NULL) { 5448 struct ahd_tmode_lstate* lstate; 5449 5450 lstate = tstate->enabled_luns[devinfo->lun]; 5451 if (lstate != NULL) { 5452 ahd_queue_lstate_event(ahd, lstate, 5453 devinfo->our_scsiid, 5454 ahd->msgin_buf[0], 5455 /*arg*/tag); 5456 ahd_send_lstate_events(ahd, lstate); 5457 } 5458 } 5459 ahd_restart(ahd); 5460 done = MSGLOOP_TERMINATED; 5461 break; 5462 } 5463 #endif 5464 case MSG_QAS_REQUEST: 5465 #ifdef AHD_DEBUG 5466 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 5467 printk("%s: QAS request. 
SCSISIGI == 0x%x\n", 5468 ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); 5469 #endif 5470 ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; 5471 /* FALLTHROUGH */ 5472 case MSG_TERM_IO_PROC: 5473 default: 5474 reject = TRUE; 5475 break; 5476 } 5477 5478 if (reject) { 5479 /* 5480 * Setup to reject the message. 5481 */ 5482 ahd->msgout_index = 0; 5483 ahd->msgout_len = 1; 5484 ahd->msgout_buf[0] = MSG_MESSAGE_REJECT; 5485 done = MSGLOOP_MSGCOMPLETE; 5486 response = TRUE; 5487 } 5488 5489 if (done != MSGLOOP_IN_PROG && !response) 5490 /* Clear the outgoing message buffer */ 5491 ahd->msgout_len = 0; 5492 5493 return (done); 5494 } 5495 5496 /* 5497 * Process a message reject message. 5498 */ 5499 static int 5500 ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 5501 { 5502 /* 5503 * What we care about here is if we had an 5504 * outstanding SDTR or WDTR message for this 5505 * target. If we did, this is a signal that 5506 * the target is refusing negotiation. 5507 */ 5508 struct scb *scb; 5509 struct ahd_initiator_tinfo *tinfo; 5510 struct ahd_tmode_tstate *tstate; 5511 u_int scb_index; 5512 u_int last_msg; 5513 int response = 0; 5514 5515 scb_index = ahd_get_scbptr(ahd); 5516 scb = ahd_lookup_scb(ahd, scb_index); 5517 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, 5518 devinfo->our_scsiid, 5519 devinfo->target, &tstate); 5520 /* Might be necessary */ 5521 last_msg = ahd_inb(ahd, LAST_MSG); 5522 5523 if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { 5524 if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE) 5525 && tinfo->goal.period <= AHD_SYNCRATE_PACED) { 5526 /* 5527 * Target may not like our SPI-4 PPR Options. 5528 * Attempt to negotiate 80MHz which will turn 5529 * off these options. 5530 */ 5531 if (bootverbose) { 5532 printk("(%s:%c:%d:%d): PPR Rejected. " 5533 "Trying simple U160 PPR\n", 5534 ahd_name(ahd), devinfo->channel, 5535 devinfo->target, devinfo->lun); 5536 } 5537 tinfo->goal.period = AHD_SYNCRATE_DT; 5538 tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ 5539 | MSG_EXT_PPR_QAS_REQ 5540 | MSG_EXT_PPR_DT_REQ; 5541 } else { 5542 /* 5543 * Target does not support the PPR message. 5544 * Attempt to negotiate SPI-2 style. 5545 */ 5546 if (bootverbose) { 5547 printk("(%s:%c:%d:%d): PPR Rejected. " 5548 "Trying WDTR/SDTR\n", 5549 ahd_name(ahd), devinfo->channel, 5550 devinfo->target, devinfo->lun); 5551 } 5552 tinfo->goal.ppr_options = 0; 5553 tinfo->curr.transport_version = 2; 5554 tinfo->goal.transport_version = 2; 5555 } 5556 ahd->msgout_index = 0; 5557 ahd->msgout_len = 0; 5558 ahd_build_transfer_msg(ahd, devinfo); 5559 ahd->msgout_index = 0; 5560 response = 1; 5561 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { 5562 5563 /* note 8bit xfers */ 5564 printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 5565 "8bit transfers\n", ahd_name(ahd), 5566 devinfo->channel, devinfo->target, devinfo->lun); 5567 ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 5568 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5569 /*paused*/TRUE); 5570 /* 5571 * No need to clear the sync rate. If the target 5572 * did not accept the command, our syncrate is 5573 * unaffected. If the target started the negotiation, 5574 * but rejected our response, we already cleared the 5575 * sync rate before sending our WDTR. 
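 *
 * In sketch form, the overall fallback ladder implemented by this
 * routine is:
 *
 *	PPR with SPI-4 options rejected -> retry a simple U160 PPR
 *	    (goal.period = AHD_SYNCRATE_DT, options masked to
 *	     IU_REQ|QAS_REQ|DT_REQ)
 *	PPR rejected outright -> drop to SPI-2 WDTR/SDTR
 *	    (ppr_options = 0, transport_version = 2)
 *	WDTR rejected -> force 8-bit, then SDTR if a sync goal remains
 *	SDTR rejected -> force async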
5576 */ 5577 if (tinfo->goal.offset != tinfo->curr.offset) { 5578 5579 /* Start the sync negotiation */ 5580 ahd->msgout_index = 0; 5581 ahd->msgout_len = 0; 5582 ahd_build_transfer_msg(ahd, devinfo); 5583 ahd->msgout_index = 0; 5584 response = 1; 5585 } 5586 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { 5587 /* note asynch xfers and clear flag */ 5588 ahd_set_syncrate(ahd, devinfo, /*period*/0, 5589 /*offset*/0, /*ppr_options*/0, 5590 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5591 /*paused*/TRUE); 5592 printk("(%s:%c:%d:%d): refuses synchronous negotiation. " 5593 "Using asynchronous transfers\n", 5594 ahd_name(ahd), devinfo->channel, 5595 devinfo->target, devinfo->lun); 5596 } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { 5597 int tag_type; 5598 int mask; 5599 5600 tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); 5601 5602 if (tag_type == MSG_SIMPLE_TASK) { 5603 printk("(%s:%c:%d:%d): refuses tagged commands. " 5604 "Performing non-tagged I/O\n", ahd_name(ahd), 5605 devinfo->channel, devinfo->target, devinfo->lun); 5606 ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE); 5607 mask = ~0x23; 5608 } else { 5609 printk("(%s:%c:%d:%d): refuses %s tagged commands. " 5610 "Performing simple queue tagged I/O only\n", 5611 ahd_name(ahd), devinfo->channel, devinfo->target, 5612 devinfo->lun, tag_type == MSG_ORDERED_TASK 5613 ? "ordered" : "head of queue"); 5614 ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC); 5615 mask = ~0x03; 5616 } 5617 5618 /* 5619 * Resend the identify for this CCB as the target 5620 * may believe that the selection is invalid otherwise. 5621 */ 5622 ahd_outb(ahd, SCB_CONTROL, 5623 ahd_inb_scbram(ahd, SCB_CONTROL) & mask); 5624 scb->hscb->control &= mask; 5625 ahd_set_transaction_tag(scb, /*enabled*/FALSE, 5626 /*type*/MSG_SIMPLE_TASK); 5627 ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG); 5628 ahd_assert_atn(ahd); 5629 ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), 5630 SCB_GET_TAG(scb)); 5631 5632 /* 5633 * Requeue all tagged commands for this target 5634 * currently in our possession so they can be 5635 * converted to untagged commands. 5636 */ 5637 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), 5638 SCB_GET_CHANNEL(ahd, scb), 5639 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, 5640 ROLE_INITIATOR, CAM_REQUEUE_REQ, 5641 SEARCH_COMPLETE); 5642 } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) { 5643 /* 5644 * Most likely the device believes that we had 5645 * previously negotiated packetized. 5646 */ 5647 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE 5648 | MSG_FLAG_IU_REQ_CHANGED; 5649 5650 ahd_force_renegotiation(ahd, devinfo); 5651 ahd->msgout_index = 0; 5652 ahd->msgout_len = 0; 5653 ahd_build_transfer_msg(ahd, devinfo); 5654 ahd->msgout_index = 0; 5655 response = 1; 5656 } else { 5657 /* 5658 * Otherwise, we ignore it. 5659 */ 5660 printk("%s:%c:%d: Message reject for %x -- ignored\n", 5661 ahd_name(ahd), devinfo->channel, devinfo->target, 5662 last_msg); 5663 } 5664 return (response); 5665 } 5666 5667 /* 5668 * Process an ignore wide residue message. 5669 */ 5670 static void 5671 ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 5672 { 5673 u_int scb_index; 5674 struct scb *scb; 5675 5676 scb_index = ahd_get_scbptr(ahd); 5677 scb = ahd_lookup_scb(ahd, scb_index); 5678 /* 5679 * XXX Actually check data direction in the sequencer? 5680 * Perhaps add datadir to some spare bits in the hscb? 
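 *
 * In sketch form, the correction below "un-transfers" one byte:
 *
 *	data_cnt += 1;		// one more byte is still owed
 *	data_addr -= 1;		// it lives one byte before SHADDR
 *	SCB_TASK_ATTRIBUTE ^= SCB_XFERLEN_ODD;	// flip the running parity
 *
 * with the wrinkle that when the adjusted count no longer fits the
 * current S/G segment, the ignored byte was the final byte of the
 * previous segment, so the residual S/G pointer is walked back one
 * element and the address recomputed from that segment's end.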
5681 */ 5682 if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0 5683 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) { 5684 /* 5685 * Ignore the message if we haven't 5686 * seen an appropriate data phase yet. 5687 */ 5688 } else { 5689 /* 5690 * If the residual occurred on the last 5691 * transfer and the transfer request was 5692 * expected to end on an odd count, do 5693 * nothing. Otherwise, subtract a byte 5694 * and update the residual count accordingly. 5695 */ 5696 uint32_t sgptr; 5697 5698 sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR); 5699 if ((sgptr & SG_LIST_NULL) != 0 5700 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) 5701 & SCB_XFERLEN_ODD) != 0) { 5702 /* 5703 * If the residual occurred on the last 5704 * transfer and the transfer request was 5705 * expected to end on an odd count, do 5706 * nothing. 5707 */ 5708 } else { 5709 uint32_t data_cnt; 5710 uint64_t data_addr; 5711 uint32_t sglen; 5712 5713 /* Pull in the rest of the sgptr */ 5714 sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); 5715 data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT); 5716 if ((sgptr & SG_LIST_NULL) != 0) { 5717 /* 5718 * The residual data count is not updated 5719 * for the command run to completion case. 5720 * Explicitly zero the count. 5721 */ 5722 data_cnt &= ~AHD_SG_LEN_MASK; 5723 } 5724 data_addr = ahd_inq(ahd, SHADDR); 5725 data_cnt += 1; 5726 data_addr -= 1; 5727 sgptr &= SG_PTR_MASK; 5728 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { 5729 struct ahd_dma64_seg *sg; 5730 5731 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 5732 5733 /* 5734 * The residual sg ptr points to the next S/G 5735 * to load so we must go back one. 5736 */ 5737 sg--; 5738 sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; 5739 if (sg != scb->sg_list 5740 && sglen < (data_cnt & AHD_SG_LEN_MASK)) { 5741 5742 sg--; 5743 sglen = ahd_le32toh(sg->len); 5744 /* 5745 * Preserve High Address and SG_LIST 5746 * bits while setting the count to 1. 5747 */ 5748 data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); 5749 data_addr = ahd_le64toh(sg->addr) 5750 + (sglen & AHD_SG_LEN_MASK) 5751 - 1; 5752 5753 /* 5754 * Increment sg so it points to the 5755 * "next" sg. 5756 */ 5757 sg++; 5758 sgptr = ahd_sg_virt_to_bus(ahd, scb, 5759 sg); 5760 } 5761 } else { 5762 struct ahd_dma_seg *sg; 5763 5764 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 5765 5766 /* 5767 * The residual sg ptr points to the next S/G 5768 * to load so we must go back one. 5769 */ 5770 sg--; 5771 sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; 5772 if (sg != scb->sg_list 5773 && sglen < (data_cnt & AHD_SG_LEN_MASK)) { 5774 5775 sg--; 5776 sglen = ahd_le32toh(sg->len); 5777 /* 5778 * Preserve High Address and SG_LIST 5779 * bits while setting the count to 1. 5780 */ 5781 data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); 5782 data_addr = ahd_le32toh(sg->addr) 5783 + (sglen & AHD_SG_LEN_MASK) 5784 - 1; 5785 5786 /* 5787 * Increment sg so it points to the 5788 * "next" sg. 5789 */ 5790 sg++; 5791 sgptr = ahd_sg_virt_to_bus(ahd, scb, 5792 sg); 5793 } 5794 } 5795 /* 5796 * Toggle the "oddness" of the transfer length 5797 * to handle this mid-transfer ignore wide 5798 * residue. This ensures that the oddness is 5799 * correct for subsequent data transfers. 5800 */ 5801 ahd_outb(ahd, SCB_TASK_ATTRIBUTE, 5802 ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) 5803 ^ SCB_XFERLEN_ODD); 5804 5805 ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); 5806 ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt); 5807 /* 5808 * The FIFO's pointers will be updated if/when the 5809 * sequencer re-enters a data phase. 
5810 */ 5811 } 5812 } 5813 } 5814 5815 5816 /* 5817 * Reinitialize the data pointers for the active transfer 5818 * based on its current residual. 5819 */ 5820 static void 5821 ahd_reinitialize_dataptrs(struct ahd_softc *ahd) 5822 { 5823 struct scb *scb; 5824 ahd_mode_state saved_modes; 5825 u_int scb_index; 5826 u_int wait; 5827 uint32_t sgptr; 5828 uint32_t resid; 5829 uint64_t dataptr; 5830 5831 AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK, 5832 AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK); 5833 5834 scb_index = ahd_get_scbptr(ahd); 5835 scb = ahd_lookup_scb(ahd, scb_index); 5836 5837 /* 5838 * Release and reacquire the FIFO so we 5839 * have a clean slate. 5840 */ 5841 ahd_outb(ahd, DFFSXFRCTL, CLRCHN); 5842 wait = 1000; 5843 while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE)) 5844 ahd_delay(100); 5845 if (wait == 0) { 5846 ahd_print_path(ahd, scb); 5847 printk("ahd_reinitialize_dataptrs: Forcing FIFO free.\n"); 5848 ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); 5849 } 5850 saved_modes = ahd_save_modes(ahd); 5851 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 5852 ahd_outb(ahd, DFFSTAT, 5853 ahd_inb(ahd, DFFSTAT) 5854 | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0)); 5855 5856 /* 5857 * Determine initial values for data_addr and data_cnt 5858 * for resuming the data phase. 5859 */ 5860 sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); 5861 sgptr &= SG_PTR_MASK; 5862 5863 resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16) 5864 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8) 5865 | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT); 5866 5867 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { 5868 struct ahd_dma64_seg *sg; 5869 5870 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 5871 5872 /* The residual sg_ptr always points to the next sg */ 5873 sg--; 5874 5875 dataptr = ahd_le64toh(sg->addr) 5876 + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) 5877 - resid; 5878 ahd_outl(ahd, HADDR + 4, dataptr >> 32); 5879 } else { 5880 struct ahd_dma_seg *sg; 5881 5882 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 5883 5884 /* The residual sg_ptr always points to the next sg */ 5885 sg--; 5886 5887 dataptr = ahd_le32toh(sg->addr) 5888 + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) 5889 - resid; 5890 ahd_outb(ahd, HADDR + 4, 5891 (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24); 5892 } 5893 ahd_outl(ahd, HADDR, dataptr); 5894 ahd_outb(ahd, HCNT + 2, resid >> 16); 5895 ahd_outb(ahd, HCNT + 1, resid >> 8); 5896 ahd_outb(ahd, HCNT, resid); 5897 } 5898 5899 /* 5900 * Handle the effects of issuing a bus device reset message. 5901 */ 5902 static void 5903 ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 5904 u_int lun, cam_status status, char *message, 5905 int verbose_level) 5906 { 5907 #ifdef AHD_TARGET_MODE 5908 struct ahd_tmode_tstate* tstate; 5909 #endif 5910 int found; 5911 5912 found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, 5913 lun, SCB_LIST_NULL, devinfo->role, 5914 status); 5915 5916 #ifdef AHD_TARGET_MODE 5917 /* 5918 * Send an immediate notify ccb to all target mord peripheral 5919 * drivers affected by this action. 
5920 */ 5921 tstate = ahd->enabled_targets[devinfo->our_scsiid]; 5922 if (tstate != NULL) { 5923 u_int cur_lun; 5924 u_int max_lun; 5925 5926 if (lun != CAM_LUN_WILDCARD) { 5927 cur_lun = 0; 5928 max_lun = AHD_NUM_LUNS - 1; 5929 } else { 5930 cur_lun = lun; 5931 max_lun = lun; 5932 } 5933 for (;cur_lun <= max_lun; cur_lun++) { 5934 struct ahd_tmode_lstate* lstate; 5935 5936 lstate = tstate->enabled_luns[cur_lun]; 5937 if (lstate == NULL) 5938 continue; 5939 5940 ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, 5941 MSG_BUS_DEV_RESET, /*arg*/0); 5942 ahd_send_lstate_events(ahd, lstate); 5943 } 5944 } 5945 #endif 5946 5947 /* 5948 * Go back to async/narrow transfers and renegotiate. 5949 */ 5950 ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 5951 AHD_TRANS_CUR, /*paused*/TRUE); 5952 ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, 5953 /*ppr_options*/0, AHD_TRANS_CUR, 5954 /*paused*/TRUE); 5955 5956 if (status != CAM_SEL_TIMEOUT) 5957 ahd_send_async(ahd, devinfo->channel, devinfo->target, 5958 CAM_LUN_WILDCARD, AC_SENT_BDR); 5959 5960 if (message != NULL && bootverbose) 5961 printk("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd), 5962 message, devinfo->channel, devinfo->target, found); 5963 } 5964 5965 #ifdef AHD_TARGET_MODE 5966 static void 5967 ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 5968 struct scb *scb) 5969 { 5970 5971 /* 5972 * To facilitate adding multiple messages together, 5973 * each routine should increment the index and len 5974 * variables instead of setting them explicitly. 5975 */ 5976 ahd->msgout_index = 0; 5977 ahd->msgout_len = 0; 5978 5979 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 5980 ahd_build_transfer_msg(ahd, devinfo); 5981 else 5982 panic("ahd_intr: AWAITING target message with no message"); 5983 5984 ahd->msgout_index = 0; 5985 ahd->msg_type = MSG_TYPE_TARGET_MSGIN; 5986 } 5987 #endif 5988 /**************************** Initialization **********************************/ 5989 static u_int 5990 ahd_sglist_size(struct ahd_softc *ahd) 5991 { 5992 bus_size_t list_size; 5993 5994 list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; 5995 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) 5996 list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; 5997 return (list_size); 5998 } 5999 6000 /* 6001 * Calculate the optimum S/G List allocation size. S/G elements used 6002 * for a given transaction must be physically contiguous. Assume the 6003 * OS will allocate full pages to us, so it doesn't make sense to request 6004 * less than a page. 6005 */ 6006 static u_int 6007 ahd_sglist_allocsize(struct ahd_softc *ahd) 6008 { 6009 bus_size_t sg_list_increment; 6010 bus_size_t sg_list_size; 6011 bus_size_t max_list_size; 6012 bus_size_t best_list_size; 6013 6014 /* Start out with the minimum required for AHD_NSEG. */ 6015 sg_list_increment = ahd_sglist_size(ahd); 6016 sg_list_size = sg_list_increment; 6017 6018 /* Get us as close as possible to a page in size. */ 6019 while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) 6020 sg_list_size += sg_list_increment; 6021 6022 /* 6023 * Try to reduce the amount of wastage by allocating 6024 * multiple pages. 
6025 */ 6026 best_list_size = sg_list_size; 6027 max_list_size = roundup(sg_list_increment, PAGE_SIZE); 6028 if (max_list_size < 4 * PAGE_SIZE) 6029 max_list_size = 4 * PAGE_SIZE; 6030 if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) 6031 max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); 6032 while ((sg_list_size + sg_list_increment) <= max_list_size 6033 && (sg_list_size % PAGE_SIZE) != 0) { 6034 bus_size_t new_mod; 6035 bus_size_t best_mod; 6036 6037 sg_list_size += sg_list_increment; 6038 new_mod = sg_list_size % PAGE_SIZE; 6039 best_mod = best_list_size % PAGE_SIZE; 6040 if (new_mod > best_mod || new_mod == 0) { 6041 best_list_size = sg_list_size; 6042 } 6043 } 6044 return (best_list_size); 6045 } 6046 6047 /* 6048 * Allocate a controller structure for a new device 6049 * and perform initial initializion. 6050 */ 6051 struct ahd_softc * 6052 ahd_alloc(void *platform_arg, char *name) 6053 { 6054 struct ahd_softc *ahd; 6055 6056 ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC); 6057 if (!ahd) { 6058 printk("aic7xxx: cannot malloc softc!\n"); 6059 kfree(name); 6060 return NULL; 6061 } 6062 6063 ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC); 6064 if (ahd->seep_config == NULL) { 6065 kfree(ahd); 6066 kfree(name); 6067 return (NULL); 6068 } 6069 LIST_INIT(&ahd->pending_scbs); 6070 /* We don't know our unit number until the OSM sets it */ 6071 ahd->name = name; 6072 ahd->unit = -1; 6073 ahd->description = NULL; 6074 ahd->bus_description = NULL; 6075 ahd->channel = 'A'; 6076 ahd->chip = AHD_NONE; 6077 ahd->features = AHD_FENONE; 6078 ahd->bugs = AHD_BUGNONE; 6079 ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A 6080 | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; 6081 timer_setup(&ahd->stat_timer, ahd_stat_timer, 0); 6082 ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; 6083 ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; 6084 ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; 6085 ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT; 6086 ahd->int_coalescing_stop_threshold = 6087 AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT; 6088 6089 #ifdef AHD_DEBUG 6090 if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { 6091 printk("%s: scb size = 0x%x, hscb size = 0x%x\n", 6092 ahd_name(ahd), (u_int)sizeof(struct scb), 6093 (u_int)sizeof(struct hardware_scb)); 6094 } 6095 #endif 6096 if (ahd_platform_alloc(ahd, platform_arg) != 0) { 6097 ahd_free(ahd); 6098 ahd = NULL; 6099 } 6100 return (ahd); 6101 } 6102 6103 int 6104 ahd_softc_init(struct ahd_softc *ahd) 6105 { 6106 6107 ahd->unpause = 0; 6108 ahd->pause = PAUSE; 6109 return (0); 6110 } 6111 6112 void 6113 ahd_set_unit(struct ahd_softc *ahd, int unit) 6114 { 6115 ahd->unit = unit; 6116 } 6117 6118 void 6119 ahd_set_name(struct ahd_softc *ahd, char *name) 6120 { 6121 kfree(ahd->name); 6122 ahd->name = name; 6123 } 6124 6125 void 6126 ahd_free(struct ahd_softc *ahd) 6127 { 6128 int i; 6129 6130 switch (ahd->init_level) { 6131 default: 6132 case 5: 6133 ahd_shutdown(ahd); 6134 /* FALLTHROUGH */ 6135 case 4: 6136 ahd_dmamap_unload(ahd, ahd->shared_data_dmat, 6137 ahd->shared_data_map.dmamap); 6138 /* FALLTHROUGH */ 6139 case 3: 6140 ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, 6141 ahd->shared_data_map.dmamap); 6142 ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, 6143 ahd->shared_data_map.dmamap); 6144 /* FALLTHROUGH */ 6145 case 2: 6146 ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); 6147 case 1: 6148 break; 6149 case 0: 6150 break; 6151 } 6152 6153 
ahd_platform_free(ahd); 6154 ahd_fini_scbdata(ahd); 6155 for (i = 0; i < AHD_NUM_TARGETS; i++) { 6156 struct ahd_tmode_tstate *tstate; 6157 6158 tstate = ahd->enabled_targets[i]; 6159 if (tstate != NULL) { 6160 #ifdef AHD_TARGET_MODE 6161 int j; 6162 6163 for (j = 0; j < AHD_NUM_LUNS; j++) { 6164 struct ahd_tmode_lstate *lstate; 6165 6166 lstate = tstate->enabled_luns[j]; 6167 if (lstate != NULL) { 6168 xpt_free_path(lstate->path); 6169 kfree(lstate); 6170 } 6171 } 6172 #endif 6173 kfree(tstate); 6174 } 6175 } 6176 #ifdef AHD_TARGET_MODE 6177 if (ahd->black_hole != NULL) { 6178 xpt_free_path(ahd->black_hole->path); 6179 kfree(ahd->black_hole); 6180 } 6181 #endif 6182 kfree(ahd->name); 6183 kfree(ahd->seep_config); 6184 kfree(ahd->saved_stack); 6185 kfree(ahd); 6186 return; 6187 } 6188 6189 static void 6190 ahd_shutdown(void *arg) 6191 { 6192 struct ahd_softc *ahd; 6193 6194 ahd = (struct ahd_softc *)arg; 6195 6196 /* 6197 * Stop periodic timer callbacks. 6198 */ 6199 del_timer_sync(&ahd->stat_timer); 6200 6201 /* This will reset most registers to 0, but not all */ 6202 ahd_reset(ahd, /*reinit*/FALSE); 6203 } 6204 6205 /* 6206 * Reset the controller and record some information about it 6207 * that is only available just after a reset. If "reinit" is 6208 * non-zero, this reset occurred after initial configuration 6209 * and the caller requests that the chip be fully reinitialized 6210 * to a runnable state. Chip interrupts are *not* enabled after 6211 * a reinitialization. The caller must enable interrupts via 6212 * ahd_intr_enable(). 6213 */ 6214 int 6215 ahd_reset(struct ahd_softc *ahd, int reinit) 6216 { 6217 u_int sxfrctl1; 6218 int wait; 6219 uint32_t cmd; 6220 6221 /* 6222 * Preserve the value of the SXFRCTL1 register for all channels. 6223 * It contains settings that affect termination and we don't want 6224 * to disturb the integrity of the bus. 6225 */ 6226 ahd_pause(ahd); 6227 ahd_update_modes(ahd); 6228 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 6229 sxfrctl1 = ahd_inb(ahd, SXFRCTL1); 6230 6231 cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); 6232 if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { 6233 uint32_t mod_cmd; 6234 6235 /* 6236 * A4 Razor #632 6237 * During the assertion of CHIPRST, the chip 6238 * does not disable its parity logic prior to 6239 * the start of the reset. This may cause a 6240 * parity error to be detected and thus a 6241 * spurious SERR or PERR assertion. Disable 6242 * PERR and SERR responses during the CHIPRST. 6243 */ 6244 mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); 6245 ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, 6246 mod_cmd, /*bytes*/2); 6247 } 6248 ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause); 6249 6250 /* 6251 * Ensure that the reset has finished. We delay 1000us 6252 * prior to reading the register to make sure the chip 6253 * has sufficiently completed its reset to handle register 6254 * accesses. 6255 */ 6256 wait = 1000; 6257 do { 6258 ahd_delay(1000); 6259 } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK)); 6260 6261 if (wait == 0) { 6262 printk("%s: WARNING - Failed chip reset! " 6263 "Trying to initialize anyway.\n", ahd_name(ahd)); 6264 } 6265 ahd_outb(ahd, HCNTRL, ahd->pause); 6266 6267 if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { 6268 /* 6269 * Clear any latched PCI error status and restore 6270 * previous SERR and PERR response enables.
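 * (The PCI status register's error bits are write-one-to-clear,
 * which is why 0xFF is written to its high byte below.)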
6271 */ 6272 ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, 6273 0xFF, /*bytes*/1); 6274 ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, 6275 cmd, /*bytes*/2); 6276 } 6277 6278 /* 6279 * Mode should be SCSI after a chip reset, but lets 6280 * set it just to be safe. We touch the MODE_PTR 6281 * register directly so as to bypass the lazy update 6282 * code in ahd_set_modes(). 6283 */ 6284 ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 6285 ahd_outb(ahd, MODE_PTR, 6286 ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI)); 6287 6288 /* 6289 * Restore SXFRCTL1. 6290 * 6291 * We must always initialize STPWEN to 1 before we 6292 * restore the saved values. STPWEN is initialized 6293 * to a tri-state condition which can only be cleared 6294 * by turning it on. 6295 */ 6296 ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); 6297 ahd_outb(ahd, SXFRCTL1, sxfrctl1); 6298 6299 /* Determine chip configuration */ 6300 ahd->features &= ~AHD_WIDE; 6301 if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0) 6302 ahd->features |= AHD_WIDE; 6303 6304 /* 6305 * If a recovery action has forced a chip reset, 6306 * re-initialize the chip to our liking. 6307 */ 6308 if (reinit != 0) 6309 ahd_chip_init(ahd); 6310 6311 return (0); 6312 } 6313 6314 /* 6315 * Determine the number of SCBs available on the controller 6316 */ 6317 static int 6318 ahd_probe_scbs(struct ahd_softc *ahd) { 6319 int i; 6320 6321 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 6322 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 6323 for (i = 0; i < AHD_SCB_MAX; i++) { 6324 int j; 6325 6326 ahd_set_scbptr(ahd, i); 6327 ahd_outw(ahd, SCB_BASE, i); 6328 for (j = 2; j < 64; j++) 6329 ahd_outb(ahd, SCB_BASE+j, 0); 6330 /* Start out life as unallocated (needing an abort) */ 6331 ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE); 6332 if (ahd_inw_scbram(ahd, SCB_BASE) != i) 6333 break; 6334 ahd_set_scbptr(ahd, 0); 6335 if (ahd_inw_scbram(ahd, SCB_BASE) != 0) 6336 break; 6337 } 6338 return (i); 6339 } 6340 6341 static void 6342 ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 6343 { 6344 dma_addr_t *baddr; 6345 6346 baddr = (dma_addr_t *)arg; 6347 *baddr = segs->ds_addr; 6348 } 6349 6350 static void 6351 ahd_initialize_hscbs(struct ahd_softc *ahd) 6352 { 6353 int i; 6354 6355 for (i = 0; i < ahd->scb_data.maxhscbs; i++) { 6356 ahd_set_scbptr(ahd, i); 6357 6358 /* Clear the control byte. */ 6359 ahd_outb(ahd, SCB_CONTROL, 0); 6360 6361 /* Set the next pointer */ 6362 ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL); 6363 } 6364 } 6365 6366 static int 6367 ahd_init_scbdata(struct ahd_softc *ahd) 6368 { 6369 struct scb_data *scb_data; 6370 int i; 6371 6372 scb_data = &ahd->scb_data; 6373 TAILQ_INIT(&scb_data->free_scbs); 6374 for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++) 6375 LIST_INIT(&scb_data->free_scb_lists[i]); 6376 LIST_INIT(&scb_data->any_dev_free_scb_list); 6377 SLIST_INIT(&scb_data->hscb_maps); 6378 SLIST_INIT(&scb_data->sg_maps); 6379 SLIST_INIT(&scb_data->sense_maps); 6380 6381 /* Determine the number of hardware SCBs and initialize them */ 6382 scb_data->maxhscbs = ahd_probe_scbs(ahd); 6383 if (scb_data->maxhscbs == 0) { 6384 printk("%s: No SCB space found\n", ahd_name(ahd)); 6385 return (ENXIO); 6386 } 6387 6388 ahd_initialize_hscbs(ahd); 6389 6390 /* 6391 * Create our DMA tags. These tags define the kinds of device 6392 * accessible memory allocations and memory mappings we will 6393 * need to perform during normal operation. 
6394 * 6395 * Unless we need to further restrict the allocation, we rely 6396 * on the restrictions of the parent dmat, hence the common 6397 * use of MAXADDR and MAXSIZE. 6398 */ 6399 6400 /* DMA tag for our hardware scb structures */ 6401 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, 6402 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 6403 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 6404 /*highaddr*/BUS_SPACE_MAXADDR, 6405 /*filter*/NULL, /*filterarg*/NULL, 6406 PAGE_SIZE, /*nsegments*/1, 6407 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 6408 /*flags*/0, &scb_data->hscb_dmat) != 0) { 6409 goto error_exit; 6410 } 6411 6412 scb_data->init_level++; 6413 6414 /* DMA tag for our S/G structures. */ 6415 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8, 6416 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 6417 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 6418 /*highaddr*/BUS_SPACE_MAXADDR, 6419 /*filter*/NULL, /*filterarg*/NULL, 6420 ahd_sglist_allocsize(ahd), /*nsegments*/1, 6421 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 6422 /*flags*/0, &scb_data->sg_dmat) != 0) { 6423 goto error_exit; 6424 } 6425 #ifdef AHD_DEBUG 6426 if ((ahd_debug & AHD_SHOW_MEMORY) != 0) 6427 printk("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd), 6428 ahd_sglist_allocsize(ahd)); 6429 #endif 6430 6431 scb_data->init_level++; 6432 6433 /* DMA tag for our sense buffers. We allocate in page sized chunks */ 6434 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, 6435 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 6436 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 6437 /*highaddr*/BUS_SPACE_MAXADDR, 6438 /*filter*/NULL, /*filterarg*/NULL, 6439 PAGE_SIZE, /*nsegments*/1, 6440 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 6441 /*flags*/0, &scb_data->sense_dmat) != 0) { 6442 goto error_exit; 6443 } 6444 6445 scb_data->init_level++; 6446 6447 /* Perform initial CCB allocation */ 6448 ahd_alloc_scbs(ahd); 6449 6450 if (scb_data->numscbs == 0) { 6451 printk("%s: ahd_init_scbdata - " 6452 "Unable to allocate initial scbs\n", 6453 ahd_name(ahd)); 6454 goto error_exit; 6455 } 6456 6457 /* 6458 * Note that we were successful 6459 */ 6460 return (0); 6461 6462 error_exit: 6463 6464 return (ENOMEM); 6465 } 6466 6467 static struct scb * 6468 ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag) 6469 { 6470 struct scb *scb; 6471 6472 /* 6473 * Look on the pending list. 6474 */ 6475 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 6476 if (SCB_GET_TAG(scb) == tag) 6477 return (scb); 6478 } 6479 6480 /* 6481 * Then on all of the collision free lists. 6482 */ 6483 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { 6484 struct scb *list_scb; 6485 6486 list_scb = scb; 6487 do { 6488 if (SCB_GET_TAG(list_scb) == tag) 6489 return (list_scb); 6490 list_scb = LIST_NEXT(list_scb, collision_links); 6491 } while (list_scb); 6492 } 6493 6494 /* 6495 * And finally on the generic free list. 
6496 */ 6497 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { 6498 if (SCB_GET_TAG(scb) == tag) 6499 return (scb); 6500 } 6501 6502 return (NULL); 6503 } 6504 6505 static void 6506 ahd_fini_scbdata(struct ahd_softc *ahd) 6507 { 6508 struct scb_data *scb_data; 6509 6510 scb_data = &ahd->scb_data; 6511 if (scb_data == NULL) 6512 return; 6513 6514 switch (scb_data->init_level) { 6515 default: 6516 case 7: 6517 { 6518 struct map_node *sns_map; 6519 6520 while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) { 6521 SLIST_REMOVE_HEAD(&scb_data->sense_maps, links); 6522 ahd_dmamap_unload(ahd, scb_data->sense_dmat, 6523 sns_map->dmamap); 6524 ahd_dmamem_free(ahd, scb_data->sense_dmat, 6525 sns_map->vaddr, sns_map->dmamap); 6526 kfree(sns_map); 6527 } 6528 ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); 6529 } 6530 /* fall through */ 6531 case 6: 6532 { 6533 struct map_node *sg_map; 6534 6535 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) { 6536 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 6537 ahd_dmamap_unload(ahd, scb_data->sg_dmat, 6538 sg_map->dmamap); 6539 ahd_dmamem_free(ahd, scb_data->sg_dmat, 6540 sg_map->vaddr, sg_map->dmamap); 6541 kfree(sg_map); 6542 } 6543 ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); 6544 } 6545 /* fall through */ 6546 case 5: 6547 { 6548 struct map_node *hscb_map; 6549 6550 while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) { 6551 SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links); 6552 ahd_dmamap_unload(ahd, scb_data->hscb_dmat, 6553 hscb_map->dmamap); 6554 ahd_dmamem_free(ahd, scb_data->hscb_dmat, 6555 hscb_map->vaddr, hscb_map->dmamap); 6556 kfree(hscb_map); 6557 } 6558 ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat); 6559 /* FALLTHROUGH */ 6560 } 6561 case 4: 6562 case 3: 6563 case 2: 6564 case 1: 6565 case 0: 6566 break; 6567 } 6568 } 6569 6570 /* 6571 * DSP filter Bypass must be enabled until the first selection 6572 * after a change in bus mode (Razor #491 and #493). 
6573 */ 6574 static void 6575 ahd_setup_iocell_workaround(struct ahd_softc *ahd) 6576 { 6577 ahd_mode_state saved_modes; 6578 6579 saved_modes = ahd_save_modes(ahd); 6580 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 6581 ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) 6582 | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS); 6583 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); 6584 #ifdef AHD_DEBUG 6585 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6586 printk("%s: Setting up iocell workaround\n", ahd_name(ahd)); 6587 #endif 6588 ahd_restore_modes(ahd, saved_modes); 6589 ahd->flags &= ~AHD_HAD_FIRST_SEL; 6590 } 6591 6592 static void 6593 ahd_iocell_first_selection(struct ahd_softc *ahd) 6594 { 6595 ahd_mode_state saved_modes; 6596 u_int sblkctl; 6597 6598 if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0) 6599 return; 6600 saved_modes = ahd_save_modes(ahd); 6601 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 6602 sblkctl = ahd_inb(ahd, SBLKCTL); 6603 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 6604 #ifdef AHD_DEBUG 6605 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6606 printk("%s: iocell first selection\n", ahd_name(ahd)); 6607 #endif 6608 if ((sblkctl & ENAB40) != 0) { 6609 ahd_outb(ahd, DSPDATACTL, 6610 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); 6611 #ifdef AHD_DEBUG 6612 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6613 printk("%s: BYPASS now disabled\n", ahd_name(ahd)); 6614 #endif 6615 } 6616 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); 6617 ahd_outb(ahd, CLRINT, CLRSCSIINT); 6618 ahd_restore_modes(ahd, saved_modes); 6619 ahd->flags |= AHD_HAD_FIRST_SEL; 6620 } 6621 6622 /*************************** SCB Management ***********************************/ 6623 static void 6624 ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx) 6625 { 6626 struct scb_list *free_list; 6627 struct scb_tailq *free_tailq; 6628 struct scb *first_scb; 6629 6630 scb->flags |= SCB_ON_COL_LIST; 6631 AHD_SET_SCB_COL_IDX(scb, col_idx); 6632 free_list = &ahd->scb_data.free_scb_lists[col_idx]; 6633 free_tailq = &ahd->scb_data.free_scbs; 6634 first_scb = LIST_FIRST(free_list); 6635 if (first_scb != NULL) { 6636 LIST_INSERT_AFTER(first_scb, scb, collision_links); 6637 } else { 6638 LIST_INSERT_HEAD(free_list, scb, collision_links); 6639 TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe); 6640 } 6641 } 6642 6643 static void 6644 ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb) 6645 { 6646 struct scb_list *free_list; 6647 struct scb_tailq *free_tailq; 6648 struct scb *first_scb; 6649 u_int col_idx; 6650 6651 scb->flags &= ~SCB_ON_COL_LIST; 6652 col_idx = AHD_GET_SCB_COL_IDX(ahd, scb); 6653 free_list = &ahd->scb_data.free_scb_lists[col_idx]; 6654 free_tailq = &ahd->scb_data.free_scbs; 6655 first_scb = LIST_FIRST(free_list); 6656 if (first_scb == scb) { 6657 struct scb *next_scb; 6658 6659 /* 6660 * Maintain order in the collision free 6661 * lists for fairness if this device has 6662 * other colliding tags active. 6663 */ 6664 next_scb = LIST_NEXT(scb, collision_links); 6665 if (next_scb != NULL) { 6666 TAILQ_INSERT_AFTER(free_tailq, scb, 6667 next_scb, links.tqe); 6668 } 6669 TAILQ_REMOVE(free_tailq, scb, links.tqe); 6670 } 6671 LIST_REMOVE(scb, collision_links); 6672 } 6673 6674 /* 6675 * Get a free scb. If there are none, see if we can allocate a new SCB. 
6676 */ 6677 struct scb * 6678 ahd_get_scb(struct ahd_softc *ahd, u_int col_idx) 6679 { 6680 struct scb *scb; 6681 int tries; 6682 6683 tries = 0; 6684 look_again: 6685 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { 6686 if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) { 6687 ahd_rem_col_list(ahd, scb); 6688 goto found; 6689 } 6690 } 6691 if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) { 6692 6693 if (tries++ != 0) 6694 return (NULL); 6695 ahd_alloc_scbs(ahd); 6696 goto look_again; 6697 } 6698 LIST_REMOVE(scb, links.le); 6699 if (col_idx != AHD_NEVER_COL_IDX 6700 && (scb->col_scb != NULL) 6701 && (scb->col_scb->flags & SCB_ACTIVE) == 0) { 6702 LIST_REMOVE(scb->col_scb, links.le); 6703 ahd_add_col_list(ahd, scb->col_scb, col_idx); 6704 } 6705 found: 6706 scb->flags |= SCB_ACTIVE; 6707 return (scb); 6708 } 6709 6710 /* 6711 * Return an SCB resource to the free list. 6712 */ 6713 void 6714 ahd_free_scb(struct ahd_softc *ahd, struct scb *scb) 6715 { 6716 /* Clean up for the next user */ 6717 scb->flags = SCB_FLAG_NONE; 6718 scb->hscb->control = 0; 6719 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL; 6720 6721 if (scb->col_scb == NULL) { 6722 6723 /* 6724 * No collision possible. Just free normally. 6725 */ 6726 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 6727 scb, links.le); 6728 } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) { 6729 6730 /* 6731 * The SCB we might have collided with is on 6732 * a free collision list. Put both SCBs on 6733 * the generic list. 6734 */ 6735 ahd_rem_col_list(ahd, scb->col_scb); 6736 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 6737 scb, links.le); 6738 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 6739 scb->col_scb, links.le); 6740 } else if ((scb->col_scb->flags 6741 & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE 6742 && (scb->col_scb->hscb->control & TAG_ENB) != 0) { 6743 6744 /* 6745 * The SCB we might collide with on the next allocation 6746 * is still active in a non-packetized, tagged, context. 6747 * Put us on the SCB collision list. 6748 */ 6749 ahd_add_col_list(ahd, scb, 6750 AHD_GET_SCB_COL_IDX(ahd, scb->col_scb)); 6751 } else { 6752 /* 6753 * The SCB we might collide with on the next allocation 6754 * is either active in a packetized context, or free. 6755 * Since we can't collide, put this SCB on the generic 6756 * free list. 
6757 */ 6758 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 6759 scb, links.le); 6760 } 6761 6762 ahd_platform_scb_free(ahd, scb); 6763 } 6764 6765 static void 6766 ahd_alloc_scbs(struct ahd_softc *ahd) 6767 { 6768 struct scb_data *scb_data; 6769 struct scb *next_scb; 6770 struct hardware_scb *hscb; 6771 struct map_node *hscb_map; 6772 struct map_node *sg_map; 6773 struct map_node *sense_map; 6774 uint8_t *segs; 6775 uint8_t *sense_data; 6776 dma_addr_t hscb_busaddr; 6777 dma_addr_t sg_busaddr; 6778 dma_addr_t sense_busaddr; 6779 int newcount; 6780 int i; 6781 6782 scb_data = &ahd->scb_data; 6783 if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC) 6784 /* Can't allocate any more */ 6785 return; 6786 6787 if (scb_data->scbs_left != 0) { 6788 int offset; 6789 6790 offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left; 6791 hscb_map = SLIST_FIRST(&scb_data->hscb_maps); 6792 hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; 6793 hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb)); 6794 } else { 6795 hscb_map = kmalloc(sizeof(*hscb_map), GFP_ATOMIC); 6796 6797 if (hscb_map == NULL) 6798 return; 6799 6800 /* Allocate the next batch of hardware SCBs */ 6801 if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, 6802 (void **)&hscb_map->vaddr, 6803 BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) { 6804 kfree(hscb_map); 6805 return; 6806 } 6807 6808 SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links); 6809 6810 ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, 6811 hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, 6812 &hscb_map->physaddr, /*flags*/0); 6813 6814 hscb = (struct hardware_scb *)hscb_map->vaddr; 6815 hscb_busaddr = hscb_map->physaddr; 6816 scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb); 6817 } 6818 6819 if (scb_data->sgs_left != 0) { 6820 int offset; 6821 6822 offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd)) 6823 - scb_data->sgs_left) * ahd_sglist_size(ahd); 6824 sg_map = SLIST_FIRST(&scb_data->sg_maps); 6825 segs = sg_map->vaddr + offset; 6826 sg_busaddr = sg_map->physaddr + offset; 6827 } else { 6828 sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC); 6829 6830 if (sg_map == NULL) 6831 return; 6832 6833 /* Allocate the next batch of S/G lists */ 6834 if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat, 6835 (void **)&sg_map->vaddr, 6836 BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) { 6837 kfree(sg_map); 6838 return; 6839 } 6840 6841 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 6842 6843 ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, 6844 sg_map->vaddr, ahd_sglist_allocsize(ahd), 6845 ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0); 6846 6847 segs = sg_map->vaddr; 6848 sg_busaddr = sg_map->physaddr; 6849 scb_data->sgs_left = 6850 ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); 6851 #ifdef AHD_DEBUG 6852 if (ahd_debug & AHD_SHOW_MEMORY) 6853 printk("Mapped SG data\n"); 6854 #endif 6855 } 6856 6857 if (scb_data->sense_left != 0) { 6858 int offset; 6859 6860 offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left); 6861 sense_map = SLIST_FIRST(&scb_data->sense_maps); 6862 sense_data = sense_map->vaddr + offset; 6863 sense_busaddr = sense_map->physaddr + offset; 6864 } else { 6865 sense_map = kmalloc(sizeof(*sense_map), GFP_ATOMIC); 6866 6867 if (sense_map == NULL) 6868 return; 6869 6870 /* Allocate the next batch of sense buffers */ 6871 if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat, 6872 (void **)&sense_map->vaddr, 6873 BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { 6874 kfree(sense_map); 6875 return; 6876 } 6877 6878 
SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links); 6879 6880 ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap, 6881 sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, 6882 &sense_map->physaddr, /*flags*/0); 6883 6884 sense_data = sense_map->vaddr; 6885 sense_busaddr = sense_map->physaddr; 6886 scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; 6887 #ifdef AHD_DEBUG 6888 if (ahd_debug & AHD_SHOW_MEMORY) 6889 printk("Mapped sense data\n"); 6890 #endif 6891 } 6892 6893 newcount = min(scb_data->sense_left, scb_data->scbs_left); 6894 newcount = min(newcount, scb_data->sgs_left); 6895 newcount = min(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs)); 6896 for (i = 0; i < newcount; i++) { 6897 struct scb_platform_data *pdata; 6898 u_int col_tag; 6899 6900 next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC); 6901 if (next_scb == NULL) 6902 break; 6903 6904 pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); 6905 if (pdata == NULL) { 6906 kfree(next_scb); 6907 break; 6908 } 6909 next_scb->platform_data = pdata; 6910 next_scb->hscb_map = hscb_map; 6911 next_scb->sg_map = sg_map; 6912 next_scb->sense_map = sense_map; 6913 next_scb->sg_list = segs; 6914 next_scb->sense_data = sense_data; 6915 next_scb->sense_busaddr = sense_busaddr; 6916 memset(hscb, 0, sizeof(*hscb)); 6917 next_scb->hscb = hscb; 6918 hscb->hscb_busaddr = ahd_htole32(hscb_busaddr); 6919 6920 /* 6921 * The sequencer always starts with the second entry. 6922 * The first entry is embedded in the scb. 6923 */ 6924 next_scb->sg_list_busaddr = sg_busaddr; 6925 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) 6926 next_scb->sg_list_busaddr 6927 += sizeof(struct ahd_dma64_seg); 6928 else 6929 next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg); 6930 next_scb->ahd_softc = ahd; 6931 next_scb->flags = SCB_FLAG_NONE; 6932 next_scb->hscb->tag = ahd_htole16(scb_data->numscbs); 6933 col_tag = scb_data->numscbs ^ 0x100; 6934 next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag); 6935 if (next_scb->col_scb != NULL) 6936 next_scb->col_scb->col_scb = next_scb; 6937 ahd_free_scb(ahd, next_scb); 6938 hscb++; 6939 hscb_busaddr += sizeof(*hscb); 6940 segs += ahd_sglist_size(ahd); 6941 sg_busaddr += ahd_sglist_size(ahd); 6942 sense_data += AHD_SENSE_BUFSIZE; 6943 sense_busaddr += AHD_SENSE_BUFSIZE; 6944 scb_data->numscbs++; 6945 scb_data->sense_left--; 6946 scb_data->scbs_left--; 6947 scb_data->sgs_left--; 6948 } 6949 } 6950 6951 void 6952 ahd_controller_info(struct ahd_softc *ahd, char *buf) 6953 { 6954 const char *speed; 6955 const char *type; 6956 int len; 6957 6958 len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]); 6959 buf += len; 6960 6961 speed = "Ultra320 "; 6962 if ((ahd->features & AHD_WIDE) != 0) { 6963 type = "Wide "; 6964 } else { 6965 type = "Single "; 6966 } 6967 len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ", 6968 speed, type, ahd->channel, ahd->our_id); 6969 buf += len; 6970 6971 sprintf(buf, "%s, %d SCBs", ahd->bus_description, 6972 ahd->scb_data.maxhscbs); 6973 } 6974 6975 static const char *channel_strings[] = { 6976 "Primary Low", 6977 "Primary High", 6978 "Secondary Low", 6979 "Secondary High" 6980 }; 6981 6982 static const char *termstat_strings[] = { 6983 "Terminated Correctly", 6984 "Over Terminated", 6985 "Under Terminated", 6986 "Not Configured" 6987 }; 6988 6989 /***************************** Timer Facilities *******************************/ 6990 static void 6991 ahd_timer_reset(struct timer_list *timer, int usec) 6992 { 6993 del_timer(timer); 6994 timer->expires = jiffies + (usec * HZ)/1000000; 
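	/*
	 * usec is converted to kernel ticks: usec * HZ / 10^6. As an
	 * illustration, with HZ == 1000 a 250,000us interval expires
	 * 250 jiffies from now.
	 */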
6995 add_timer(timer); 6996 } 6997 6998 /* 6999 * Start the board, ready for normal operation 7000 */ 7001 int 7002 ahd_init(struct ahd_softc *ahd) 7003 { 7004 uint8_t *next_vaddr; 7005 dma_addr_t next_baddr; 7006 size_t driver_data_size; 7007 int i; 7008 int error; 7009 u_int warn_user; 7010 uint8_t current_sensing; 7011 uint8_t fstat; 7012 7013 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 7014 7015 ahd->stack_size = ahd_probe_stack_size(ahd); 7016 ahd->saved_stack = kmalloc_array(ahd->stack_size, sizeof(uint16_t), 7017 GFP_ATOMIC); 7018 if (ahd->saved_stack == NULL) 7019 return (ENOMEM); 7020 7021 /* 7022 * Verify that the compiler hasn't over-aggressively 7023 * padded important structures. 7024 */ 7025 if (sizeof(struct hardware_scb) != 64) 7026 panic("Hardware SCB size is incorrect"); 7027 7028 #ifdef AHD_DEBUG 7029 if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0) 7030 ahd->flags |= AHD_SEQUENCER_DEBUG; 7031 #endif 7032 7033 /* 7034 * Default to allowing initiator operations. 7035 */ 7036 ahd->flags |= AHD_INITIATORROLE; 7037 7038 /* 7039 * Only allow target mode features if this unit has them enabled. 7040 */ 7041 if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0) 7042 ahd->features &= ~AHD_TARGETMODE; 7043 7044 ahd->init_level++; 7045 7046 /* 7047 * DMA tag for our command fifos and other data in system memory 7048 * the card's sequencer must be able to access. For initiator 7049 * roles, we need to allocate space for the qoutfifo. When providing 7050 * for the target mode role, we must additionally provide space for 7051 * the incoming target command fifo. 7052 */ 7053 driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo) 7054 + sizeof(struct hardware_scb); 7055 if ((ahd->features & AHD_TARGETMODE) != 0) 7056 driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd); 7057 if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) 7058 driver_data_size += PKT_OVERRUN_BUFSIZE; 7059 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, 7060 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 7061 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 7062 /*highaddr*/BUS_SPACE_MAXADDR, 7063 /*filter*/NULL, /*filterarg*/NULL, 7064 driver_data_size, 7065 /*nsegments*/1, 7066 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 7067 /*flags*/0, &ahd->shared_data_dmat) != 0) { 7068 return (ENOMEM); 7069 } 7070 7071 ahd->init_level++; 7072 7073 /* Allocation of driver data */ 7074 if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat, 7075 (void **)&ahd->shared_data_map.vaddr, 7076 BUS_DMA_NOWAIT, 7077 &ahd->shared_data_map.dmamap) != 0) { 7078 return (ENOMEM); 7079 } 7080 7081 ahd->init_level++; 7082 7083 /* And permanently map it in */ 7084 ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, 7085 ahd->shared_data_map.vaddr, driver_data_size, 7086 ahd_dmamap_cb, &ahd->shared_data_map.physaddr, 7087 /*flags*/0); 7088 ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr; 7089 next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE]; 7090 next_baddr = ahd->shared_data_map.physaddr 7091 + AHD_QOUT_SIZE*sizeof(struct ahd_completion); 7092 if ((ahd->features & AHD_TARGETMODE) != 0) { 7093 ahd->targetcmds = (struct target_cmd *)next_vaddr; 7094 next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); 7095 next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); 7096 } 7097 7098 if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { 7099 ahd->overrun_buf = next_vaddr; 7100 next_vaddr += PKT_OVERRUN_BUFSIZE; 7101 next_baddr += PKT_OVERRUN_BUFSIZE; 7102 } 7103 7104 /* 7105 * We need one SCB to serve as the 
"next SCB". Since the 7106 * tag identifier in this SCB will never be used, there is 7107 * no point in using a valid HSCB tag from an SCB pulled from 7108 * the standard free pool. So, we allocate this "sentinel" 7109 * specially from the DMA safe memory chunk used for the QOUTFIFO. 7110 */ 7111 ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr; 7112 ahd->next_queued_hscb_map = &ahd->shared_data_map; 7113 ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr); 7114 7115 ahd->init_level++; 7116 7117 /* Allocate SCB data now that buffer_dmat is initialized */ 7118 if (ahd_init_scbdata(ahd) != 0) 7119 return (ENOMEM); 7120 7121 if ((ahd->flags & AHD_INITIATORROLE) == 0) 7122 ahd->flags &= ~AHD_RESET_BUS_A; 7123 7124 /* 7125 * Before committing these settings to the chip, give 7126 * the OSM one last chance to modify our configuration. 7127 */ 7128 ahd_platform_init(ahd); 7129 7130 /* Bring up the chip. */ 7131 ahd_chip_init(ahd); 7132 7133 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 7134 7135 if ((ahd->flags & AHD_CURRENT_SENSING) == 0) 7136 goto init_done; 7137 7138 /* 7139 * Verify termination based on current draw and 7140 * warn user if the bus is over/under terminated. 7141 */ 7142 error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 7143 CURSENSE_ENB); 7144 if (error != 0) { 7145 printk("%s: current sensing timeout 1\n", ahd_name(ahd)); 7146 goto init_done; 7147 } 7148 for (i = 20, fstat = FLX_FSTAT_BUSY; 7149 (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) { 7150 error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat); 7151 if (error != 0) { 7152 printk("%s: current sensing timeout 2\n", 7153 ahd_name(ahd)); 7154 goto init_done; 7155 } 7156 } 7157 if (i == 0) { 7158 printk("%s: Timedout during current-sensing test\n", 7159 ahd_name(ahd)); 7160 goto init_done; 7161 } 7162 7163 /* Latch Current Sensing status. */ 7164 error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, ¤t_sensing); 7165 if (error != 0) { 7166 printk("%s: current sensing timeout 3\n", ahd_name(ahd)); 7167 goto init_done; 7168 } 7169 7170 /* Diable current sensing. */ 7171 ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); 7172 7173 #ifdef AHD_DEBUG 7174 if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) { 7175 printk("%s: current_sensing == 0x%x\n", 7176 ahd_name(ahd), current_sensing); 7177 } 7178 #endif 7179 warn_user = 0; 7180 for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) { 7181 u_int term_stat; 7182 7183 term_stat = (current_sensing & FLX_CSTAT_MASK); 7184 switch (term_stat) { 7185 case FLX_CSTAT_OVER: 7186 case FLX_CSTAT_UNDER: 7187 warn_user++; 7188 /* fall through */ 7189 case FLX_CSTAT_INVALID: 7190 case FLX_CSTAT_OKAY: 7191 if (warn_user == 0 && bootverbose == 0) 7192 break; 7193 printk("%s: %s Channel %s\n", ahd_name(ahd), 7194 channel_strings[i], termstat_strings[term_stat]); 7195 break; 7196 } 7197 } 7198 if (warn_user) { 7199 printk("%s: WARNING. Termination is not configured correctly.\n" 7200 "%s: WARNING. SCSI bus operations may FAIL.\n", 7201 ahd_name(ahd), ahd_name(ahd)); 7202 } 7203 init_done: 7204 ahd_restart(ahd); 7205 ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); 7206 return (0); 7207 } 7208 7209 /* 7210 * (Re)initialize chip state after a chip reset. 
7211 */ 7212 static void 7213 ahd_chip_init(struct ahd_softc *ahd) 7214 { 7215 uint32_t busaddr; 7216 u_int sxfrctl1; 7217 u_int scsiseq_template; 7218 u_int wait; 7219 u_int i; 7220 u_int target; 7221 7222 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 7223 /* 7224 * Take the LED out of diagnostic mode 7225 */ 7226 ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON)); 7227 7228 /* 7229 * Return HS_MAILBOX to its default value. 7230 */ 7231 ahd->hs_mailbox = 0; 7232 ahd_outb(ahd, HS_MAILBOX, 0); 7233 7234 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */ 7235 ahd_outb(ahd, IOWNID, ahd->our_id); 7236 ahd_outb(ahd, TOWNID, ahd->our_id); 7237 sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0; 7238 sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0; 7239 if ((ahd->bugs & AHD_LONG_SETIMO_BUG) 7240 && (ahd->seltime != STIMESEL_MIN)) { 7241 /* 7242 * The selection timer duration is twice as long 7243 * as it should be. Halve it by adding "1" to 7244 * the user specified setting. 7245 */ 7246 sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ; 7247 } else { 7248 sxfrctl1 |= ahd->seltime; 7249 } 7250 7251 ahd_outb(ahd, SXFRCTL0, DFON); 7252 ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN); 7253 ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 7254 7255 /* 7256 * Now that termination is set, wait for up 7257 * to 500ms for our transceivers to settle. If 7258 * the adapter does not have a cable attached, 7259 * the transceivers may never settle, so don't 7260 * complain if we fail here. 7261 */ 7262 for (wait = 10000; 7263 (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 7264 wait--) 7265 ahd_delay(100); 7266 7267 /* Clear any false bus resets due to the transceivers settling */ 7268 ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); 7269 ahd_outb(ahd, CLRINT, CLRSCSIINT); 7270 7271 /* Initialize mode specific S/G state. */ 7272 for (i = 0; i < 2; i++) { 7273 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); 7274 ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); 7275 ahd_outb(ahd, SG_STATE, 0); 7276 ahd_outb(ahd, CLRSEQINTSRC, 0xFF); 7277 ahd_outb(ahd, SEQIMODE, 7278 ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT 7279 |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD); 7280 } 7281 7282 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 7283 ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN); 7284 ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75); 7285 ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN); 7286 ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR); 7287 if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { 7288 ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE); 7289 } else { 7290 ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE); 7291 } 7292 ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS); 7293 if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX) 7294 /* 7295 * Do not issue a target abort when a split completion 7296 * error occurs. Let our PCIX interrupt handler deal 7297 * with it instead. H2A4 Razor #625 7298 */ 7299 ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS); 7300 7301 if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0) 7302 ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER); 7303 7304 /* 7305 * Tweak IOCELL settings. 
7306 */ 7307 if ((ahd->flags & AHD_HP_BOARD) != 0) { 7308 for (i = 0; i < NUMDSPS; i++) { 7309 ahd_outb(ahd, DSPSELECT, i); 7310 ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT); 7311 } 7312 #ifdef AHD_DEBUG 7313 if ((ahd_debug & AHD_SHOW_MISC) != 0) 7314 printk("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd), 7315 WRTBIASCTL_HP_DEFAULT); 7316 #endif 7317 } 7318 ahd_setup_iocell_workaround(ahd); 7319 7320 /* 7321 * Enable LQI Manager interrupts. 7322 */ 7323 ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT 7324 | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI 7325 | ENLQIOVERI_LQ|ENLQIOVERI_NLQ); 7326 ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC); 7327 /* 7328 * We choose to have the sequencer catch LQOPHCHGINPKT errors 7329 * manually for the command phase at the start of a packetized 7330 * selection case. ENLQOBUSFREE should be made redundant by 7331 * the BUSFREE interrupt, but it seems that some LQOBUSFREE 7332 * events fail to assert the BUSFREE interrupt so we must 7333 * also enable LQOBUSFREE interrupts. 7334 */ 7335 ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE); 7336 7337 /* 7338 * Setup sequencer interrupt handlers. 7339 */ 7340 ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr)); 7341 ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr)); 7342 7343 /* 7344 * Setup SCB Offset registers. 7345 */ 7346 if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { 7347 ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, 7348 pkt_long_lun)); 7349 } else { 7350 ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun)); 7351 } 7352 ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len)); 7353 ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute)); 7354 ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management)); 7355 ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb, 7356 shared_data.idata.cdb)); 7357 ahd_outb(ahd, QNEXTPTR, 7358 offsetof(struct hardware_scb, next_hscb_busaddr)); 7359 ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET); 7360 ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control)); 7361 if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { 7362 ahd_outb(ahd, LUNLEN, 7363 sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1); 7364 } else { 7365 ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN); 7366 } 7367 ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1); 7368 ahd_outb(ahd, MAXCMD, 0xFF); 7369 ahd_outb(ahd, SCBAUTOPTR, 7370 AUSCBPTR_EN | offsetof(struct hardware_scb, tag)); 7371 7372 /* We haven't been enabled for target mode yet. */ 7373 ahd_outb(ahd, MULTARGID, 0); 7374 ahd_outb(ahd, MULTARGID + 1, 0); 7375 7376 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 7377 /* Initialize the negotiation table. */ 7378 if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) { 7379 /* 7380 * Clear the spare bytes in the neg table to avoid 7381 * spurious parity errors. 
7382 */ 7383 for (target = 0; target < AHD_NUM_TARGETS; target++) { 7384 ahd_outb(ahd, NEGOADDR, target); 7385 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0); 7386 for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++) 7387 ahd_outb(ahd, ANNEXDAT, 0); 7388 } 7389 } 7390 for (target = 0; target < AHD_NUM_TARGETS; target++) { 7391 struct ahd_devinfo devinfo; 7392 struct ahd_initiator_tinfo *tinfo; 7393 struct ahd_tmode_tstate *tstate; 7394 7395 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, 7396 target, &tstate); 7397 ahd_compile_devinfo(&devinfo, ahd->our_id, 7398 target, CAM_LUN_WILDCARD, 7399 'A', ROLE_INITIATOR); 7400 ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); 7401 } 7402 7403 ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR); 7404 ahd_outb(ahd, CLRINT, CLRSCSIINT); 7405 7406 #ifdef NEEDS_MORE_TESTING 7407 /* 7408 * Always enable abort on incoming L_Qs if this feature is 7409 * supported. We use this to catch invalid SCB references. 7410 */ 7411 if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) 7412 ahd_outb(ahd, LQCTL1, ABORTPENDING); 7413 else 7414 #endif 7415 ahd_outb(ahd, LQCTL1, 0); 7416 7417 /* All of our queues are empty */ 7418 ahd->qoutfifonext = 0; 7419 ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID; 7420 ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID); 7421 for (i = 0; i < AHD_QOUT_SIZE; i++) 7422 ahd->qoutfifo[i].valid_tag = 0; 7423 ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD); 7424 7425 ahd->qinfifonext = 0; 7426 for (i = 0; i < AHD_QIN_SIZE; i++) 7427 ahd->qinfifo[i] = SCB_LIST_NULL; 7428 7429 if ((ahd->features & AHD_TARGETMODE) != 0) { 7430 /* All target command blocks start out invalid. */ 7431 for (i = 0; i < AHD_TMODE_CMDS; i++) 7432 ahd->targetcmds[i].cmd_valid = 0; 7433 ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD); 7434 ahd->tqinfifonext = 1; 7435 ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1); 7436 ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); 7437 } 7438 7439 /* Initialize Scratch Ram. */ 7440 ahd_outb(ahd, SEQ_FLAGS, 0); 7441 ahd_outb(ahd, SEQ_FLAGS2, 0); 7442 7443 /* We don't have any waiting selections */ 7444 ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL); 7445 ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL); 7446 ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL); 7447 ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF); 7448 for (i = 0; i < AHD_NUM_TARGETS; i++) 7449 ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL); 7450 7451 /* 7452 * Nobody is waiting to be DMAed into the QOUTFIFO. 7453 */ 7454 ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); 7455 ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL); 7456 ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); 7457 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); 7458 ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); 7459 7460 /* 7461 * The Freeze Count is 0. 7462 */ 7463 ahd->qfreeze_cnt = 0; 7464 ahd_outw(ahd, QFREEZE_COUNT, 0); 7465 ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0); 7466 7467 /* 7468 * Tell the sequencer where it can find our arrays in memory. 7469 */ 7470 busaddr = ahd->shared_data_map.physaddr; 7471 ahd_outl(ahd, SHARED_DATA_ADDR, busaddr); 7472 ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr); 7473 7474 /* 7475 * Setup the allowed SCSI Sequences based on operational mode. 7476 * If we are a target, we'll enable select in operations once 7477 * we've had a lun enabled. 7478 */ 7479 scsiseq_template = ENAUTOATNP; 7480 if ((ahd->flags & AHD_INITIATORROLE) != 0) 7481 scsiseq_template |= ENRSELI; 7482 ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template); 7483 7484 /* There are no busy SCBs yet. 
*/ 7485 for (target = 0; target < AHD_NUM_TARGETS; target++) { 7486 int lun; 7487 7488 for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++) 7489 ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun)); 7490 } 7491 7492 /* 7493 * Initialize the group code to command length table. 7494 * Vendor Unique codes are set to 0 so we only capture 7495 * the first byte of the cdb. These can be overridden 7496 * when target mode is enabled. 7497 */ 7498 ahd_outb(ahd, CMDSIZE_TABLE, 5); 7499 ahd_outb(ahd, CMDSIZE_TABLE + 1, 9); 7500 ahd_outb(ahd, CMDSIZE_TABLE + 2, 9); 7501 ahd_outb(ahd, CMDSIZE_TABLE + 3, 0); 7502 ahd_outb(ahd, CMDSIZE_TABLE + 4, 15); 7503 ahd_outb(ahd, CMDSIZE_TABLE + 5, 11); 7504 ahd_outb(ahd, CMDSIZE_TABLE + 6, 0); 7505 ahd_outb(ahd, CMDSIZE_TABLE + 7, 0); 7506 7507 /* Tell the sequencer of our initial queue positions */ 7508 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 7509 ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512); 7510 ahd->qinfifonext = 0; 7511 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); 7512 ahd_set_hescb_qoff(ahd, 0); 7513 ahd_set_snscb_qoff(ahd, 0); 7514 ahd_set_sescb_qoff(ahd, 0); 7515 ahd_set_sdscb_qoff(ahd, 0); 7516 7517 /* 7518 * Tell the sequencer which SCB will be the next one it receives. 7519 */ 7520 busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); 7521 ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); 7522 7523 /* 7524 * Default to coalescing disabled. 7525 */ 7526 ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0); 7527 ahd_outw(ahd, CMDS_PENDING, 0); 7528 ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer, 7529 ahd->int_coalescing_maxcmds, 7530 ahd->int_coalescing_mincmds); 7531 ahd_enable_coalescing(ahd, FALSE); 7532 7533 ahd_loadseq(ahd); 7534 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 7535 7536 if (ahd->features & AHD_AIC79XXB_SLOWCRC) { 7537 u_int negodat3 = ahd_inb(ahd, NEGCONOPTS); 7538 7539 negodat3 |= ENSLOWCRC; 7540 ahd_outb(ahd, NEGCONOPTS, negodat3); 7541 negodat3 = ahd_inb(ahd, NEGCONOPTS); 7542 if (!(negodat3 & ENSLOWCRC)) 7543 printk("aic79xx: failed to set the SLOWCRC bit\n"); 7544 else 7545 printk("aic79xx: SLOWCRC bit set\n"); 7546 } 7547 } 7548 7549 /* 7550 * Setup default device and controller settings. 7551 * This should only be called if our probe has 7552 * determined that no configuration data is available. 7553 */ 7554 int 7555 ahd_default_config(struct ahd_softc *ahd) 7556 { 7557 int targ; 7558 7559 ahd->our_id = 7; 7560 7561 /* 7562 * Allocate a tstate to house information for our 7563 * initiator presence on the bus as well as the user 7564 * data for any target mode initiator. 7565 */ 7566 if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { 7567 printk("%s: unable to allocate ahd_tmode_tstate. " 7568 "Failing attach\n", ahd_name(ahd)); 7569 return (ENOMEM); 7570 } 7571 7572 for (targ = 0; targ < AHD_NUM_TARGETS; targ++) { 7573 struct ahd_devinfo devinfo; 7574 struct ahd_initiator_tinfo *tinfo; 7575 struct ahd_tmode_tstate *tstate; 7576 uint16_t target_mask; 7577 7578 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, 7579 targ, &tstate); 7580 /* 7581 * We support SPC2 and SPI4. 
7582 */ 7583 tinfo->user.protocol_version = 4; 7584 tinfo->user.transport_version = 4; 7585 7586 target_mask = 0x01 << targ; 7587 ahd->user_discenable |= target_mask; 7588 tstate->discenable |= target_mask; 7589 ahd->user_tagenable |= target_mask; 7590 #ifdef AHD_FORCE_160 7591 tinfo->user.period = AHD_SYNCRATE_DT; 7592 #else 7593 tinfo->user.period = AHD_SYNCRATE_160; 7594 #endif 7595 tinfo->user.offset = MAX_OFFSET; 7596 tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM 7597 | MSG_EXT_PPR_WR_FLOW 7598 | MSG_EXT_PPR_HOLD_MCS 7599 | MSG_EXT_PPR_IU_REQ 7600 | MSG_EXT_PPR_QAS_REQ 7601 | MSG_EXT_PPR_DT_REQ; 7602 if ((ahd->features & AHD_RTI) != 0) 7603 tinfo->user.ppr_options |= MSG_EXT_PPR_RTI; 7604 7605 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 7606 7607 /* 7608 * Start out Async/Narrow/Untagged and with 7609 * conservative protocol support. 7610 */ 7611 tinfo->goal.protocol_version = 2; 7612 tinfo->goal.transport_version = 2; 7613 tinfo->curr.protocol_version = 2; 7614 tinfo->curr.transport_version = 2; 7615 ahd_compile_devinfo(&devinfo, ahd->our_id, 7616 targ, CAM_LUN_WILDCARD, 7617 'A', ROLE_INITIATOR); 7618 tstate->tagenable &= ~target_mask; 7619 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 7620 AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); 7621 ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, 7622 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, 7623 /*paused*/TRUE); 7624 } 7625 return (0); 7626 } 7627 7628 /* 7629 * Parse device configuration information. 7630 */ 7631 int 7632 ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc) 7633 { 7634 int targ; 7635 int max_targ; 7636 7637 max_targ = sc->max_targets & CFMAXTARG; 7638 ahd->our_id = sc->brtime_id & CFSCSIID; 7639 7640 /* 7641 * Allocate a tstate to house information for our 7642 * initiator presence on the bus as well as the user 7643 * data for any target mode initiator. 7644 */ 7645 if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { 7646 printk("%s: unable to allocate ahd_tmode_tstate. " 7647 "Failing attach\n", ahd_name(ahd)); 7648 return (ENOMEM); 7649 } 7650 7651 for (targ = 0; targ < max_targ; targ++) { 7652 struct ahd_devinfo devinfo; 7653 struct ahd_initiator_tinfo *tinfo; 7654 struct ahd_transinfo *user_tinfo; 7655 struct ahd_tmode_tstate *tstate; 7656 uint16_t target_mask; 7657 7658 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, 7659 targ, &tstate); 7660 user_tinfo = &tinfo->user; 7661 7662 /* 7663 * We support SPC2 and SPI4. 7664 */ 7665 tinfo->user.protocol_version = 4; 7666 tinfo->user.transport_version = 4; 7667 7668 target_mask = 0x01 << targ; 7669 ahd->user_discenable &= ~target_mask; 7670 tstate->discenable &= ~target_mask; 7671 ahd->user_tagenable &= ~target_mask; 7672 if (sc->device_flags[targ] & CFDISC) { 7673 tstate->discenable |= target_mask; 7674 ahd->user_discenable |= target_mask; 7675 ahd->user_tagenable |= target_mask; 7676 } else { 7677 /* 7678 * Cannot be packetized without disconnection. 
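 * Packetized (information unit) transfers require the target to
 * disconnect, so CFPACKETIZED is forced off below whenever CFDISC
 * is not set for the target.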
7679 */ 7680 sc->device_flags[targ] &= ~CFPACKETIZED; 7681 } 7682 7683 user_tinfo->ppr_options = 0; 7684 user_tinfo->period = (sc->device_flags[targ] & CFXFER); 7685 if (user_tinfo->period < CFXFER_ASYNC) { 7686 if (user_tinfo->period <= AHD_PERIOD_10MHz) 7687 user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ; 7688 user_tinfo->offset = MAX_OFFSET; 7689 } else { 7690 user_tinfo->offset = 0; 7691 user_tinfo->period = AHD_ASYNC_XFER_PERIOD; 7692 } 7693 #ifdef AHD_FORCE_160 7694 if (user_tinfo->period <= AHD_SYNCRATE_160) 7695 user_tinfo->period = AHD_SYNCRATE_DT; 7696 #endif 7697 7698 if ((sc->device_flags[targ] & CFPACKETIZED) != 0) { 7699 user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM 7700 | MSG_EXT_PPR_WR_FLOW 7701 | MSG_EXT_PPR_HOLD_MCS 7702 | MSG_EXT_PPR_IU_REQ; 7703 if ((ahd->features & AHD_RTI) != 0) 7704 user_tinfo->ppr_options |= MSG_EXT_PPR_RTI; 7705 } 7706 7707 if ((sc->device_flags[targ] & CFQAS) != 0) 7708 user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ; 7709 7710 if ((sc->device_flags[targ] & CFWIDEB) != 0) 7711 user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT; 7712 else 7713 user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT; 7714 #ifdef AHD_DEBUG 7715 if ((ahd_debug & AHD_SHOW_MISC) != 0) 7716 printk("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width, 7717 user_tinfo->period, user_tinfo->offset, 7718 user_tinfo->ppr_options); 7719 #endif 7720 /* 7721 * Start out Async/Narrow/Untagged and with 7722 * conservative protocol support. 7723 */ 7724 tstate->tagenable &= ~target_mask; 7725 tinfo->goal.protocol_version = 2; 7726 tinfo->goal.transport_version = 2; 7727 tinfo->curr.protocol_version = 2; 7728 tinfo->curr.transport_version = 2; 7729 ahd_compile_devinfo(&devinfo, ahd->our_id, 7730 targ, CAM_LUN_WILDCARD, 7731 'A', ROLE_INITIATOR); 7732 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 7733 AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); 7734 ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, 7735 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, 7736 /*paused*/TRUE); 7737 } 7738 7739 ahd->flags &= ~AHD_SPCHK_ENB_A; 7740 if (sc->bios_control & CFSPARITY) 7741 ahd->flags |= AHD_SPCHK_ENB_A; 7742 7743 ahd->flags &= ~AHD_RESET_BUS_A; 7744 if (sc->bios_control & CFRESETB) 7745 ahd->flags |= AHD_RESET_BUS_A; 7746 7747 ahd->flags &= ~AHD_EXTENDED_TRANS_A; 7748 if (sc->bios_control & CFEXTEND) 7749 ahd->flags |= AHD_EXTENDED_TRANS_A; 7750 7751 ahd->flags &= ~AHD_BIOS_ENABLED; 7752 if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED) 7753 ahd->flags |= AHD_BIOS_ENABLED; 7754 7755 ahd->flags &= ~AHD_STPWLEVEL_A; 7756 if ((sc->adapter_control & CFSTPWLEVEL) != 0) 7757 ahd->flags |= AHD_STPWLEVEL_A; 7758 7759 return (0); 7760 } 7761 7762 /* 7763 * Parse device configuration information. 
7764 */ 7765 int 7766 ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd) 7767 { 7768 int error; 7769 7770 error = ahd_verify_vpd_cksum(vpd); 7771 if (error == 0) 7772 return (EINVAL); 7773 if ((vpd->bios_flags & VPDBOOTHOST) != 0) 7774 ahd->flags |= AHD_BOOT_CHANNEL; 7775 return (0); 7776 } 7777 7778 void 7779 ahd_intr_enable(struct ahd_softc *ahd, int enable) 7780 { 7781 u_int hcntrl; 7782 7783 hcntrl = ahd_inb(ahd, HCNTRL); 7784 hcntrl &= ~INTEN; 7785 ahd->pause &= ~INTEN; 7786 ahd->unpause &= ~INTEN; 7787 if (enable) { 7788 hcntrl |= INTEN; 7789 ahd->pause |= INTEN; 7790 ahd->unpause |= INTEN; 7791 } 7792 ahd_outb(ahd, HCNTRL, hcntrl); 7793 } 7794 7795 static void 7796 ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, 7797 u_int mincmds) 7798 { 7799 if (timer > AHD_TIMER_MAX_US) 7800 timer = AHD_TIMER_MAX_US; 7801 ahd->int_coalescing_timer = timer; 7802 7803 if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX) 7804 maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX; 7805 if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX) 7806 mincmds = AHD_INT_COALESCING_MINCMDS_MAX; 7807 ahd->int_coalescing_maxcmds = maxcmds; 7808 ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK); 7809 ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds); 7810 ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds); 7811 } 7812 7813 static void 7814 ahd_enable_coalescing(struct ahd_softc *ahd, int enable) 7815 { 7816 7817 ahd->hs_mailbox &= ~ENINT_COALESCE; 7818 if (enable) 7819 ahd->hs_mailbox |= ENINT_COALESCE; 7820 ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox); 7821 ahd_flush_device_writes(ahd); 7822 ahd_run_qoutfifo(ahd); 7823 } 7824 7825 /* 7826 * Ensure that the card is paused in a location 7827 * outside of all critical sections and that all 7828 * pending work is completed prior to returning. 7829 * This routine should only be called from outside 7830 * an interrupt context. 7831 */ 7832 void 7833 ahd_pause_and_flushwork(struct ahd_softc *ahd) 7834 { 7835 u_int intstat; 7836 u_int maxloops; 7837 7838 maxloops = 1000; 7839 ahd->flags |= AHD_ALL_INTERRUPTS; 7840 ahd_pause(ahd); 7841 /* 7842 * Freeze the outgoing selections. We do this only 7843 * until we are safely paused without further selections 7844 * pending. 7845 */ 7846 ahd->qfreeze_cnt--; 7847 ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); 7848 ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN); 7849 do { 7850 7851 ahd_unpause(ahd); 7852 /* 7853 * Give the sequencer some time to service 7854 * any active selections. 
7855 */ 7856 ahd_delay(500); 7857 7858 ahd_intr(ahd); 7859 ahd_pause(ahd); 7860 intstat = ahd_inb(ahd, INTSTAT); 7861 if ((intstat & INT_PEND) == 0) { 7862 ahd_clear_critical_section(ahd); 7863 intstat = ahd_inb(ahd, INTSTAT); 7864 } 7865 } while (--maxloops 7866 && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0) 7867 && ((intstat & INT_PEND) != 0 7868 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 7869 || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)); 7870 7871 if (maxloops == 0) { 7872 printk("Infinite interrupt loop, INTSTAT = %x", 7873 ahd_inb(ahd, INTSTAT)); 7874 } 7875 ahd->qfreeze_cnt++; 7876 ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); 7877 7878 ahd_flush_qoutfifo(ahd); 7879 7880 ahd->flags &= ~AHD_ALL_INTERRUPTS; 7881 } 7882 7883 #ifdef CONFIG_PM 7884 int 7885 ahd_suspend(struct ahd_softc *ahd) 7886 { 7887 7888 ahd_pause_and_flushwork(ahd); 7889 7890 if (LIST_FIRST(&ahd->pending_scbs) != NULL) { 7891 ahd_unpause(ahd); 7892 return (EBUSY); 7893 } 7894 ahd_shutdown(ahd); 7895 return (0); 7896 } 7897 7898 void 7899 ahd_resume(struct ahd_softc *ahd) 7900 { 7901 7902 ahd_reset(ahd, /*reinit*/TRUE); 7903 ahd_intr_enable(ahd, TRUE); 7904 ahd_restart(ahd); 7905 } 7906 #endif 7907 7908 /************************** Busy Target Table *********************************/ 7909 /* 7910 * Set SCBPTR to the SCB that contains the busy 7911 * table entry for TCL. Return the offset into 7912 * the SCB that contains the entry for TCL. 7913 * saved_scbid is dereferenced and set to the 7914 * scbid that should be restored once manipualtion 7915 * of the TCL entry is complete. 7916 */ 7917 static inline u_int 7918 ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl) 7919 { 7920 /* 7921 * Index to the SCB that contains the busy entry. 7922 */ 7923 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 7924 *saved_scbid = ahd_get_scbptr(ahd); 7925 ahd_set_scbptr(ahd, TCL_LUN(tcl) 7926 | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4)); 7927 7928 /* 7929 * And now calculate the SCB offset to the entry. 7930 * Each entry is 2 bytes wide, hence the 7931 * multiplication by 2. 7932 */ 7933 return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS); 7934 } 7935 7936 /* 7937 * Return the untagged transaction id for a given target/channel lun. 
7938 */ 7939 static u_int 7940 ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl) 7941 { 7942 u_int scbid; 7943 u_int scb_offset; 7944 u_int saved_scbptr; 7945 7946 scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); 7947 scbid = ahd_inw_scbram(ahd, scb_offset); 7948 ahd_set_scbptr(ahd, saved_scbptr); 7949 return (scbid); 7950 } 7951 7952 static void 7953 ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid) 7954 { 7955 u_int scb_offset; 7956 u_int saved_scbptr; 7957 7958 scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); 7959 ahd_outw(ahd, scb_offset, scbid); 7960 ahd_set_scbptr(ahd, saved_scbptr); 7961 } 7962 7963 /************************** SCB and SCB queue management **********************/ 7964 static int 7965 ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, 7966 char channel, int lun, u_int tag, role_t role) 7967 { 7968 int targ = SCB_GET_TARGET(ahd, scb); 7969 char chan = SCB_GET_CHANNEL(ahd, scb); 7970 int slun = SCB_GET_LUN(scb); 7971 int match; 7972 7973 match = ((chan == channel) || (channel == ALL_CHANNELS)); 7974 if (match != 0) 7975 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 7976 if (match != 0) 7977 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 7978 if (match != 0) { 7979 #ifdef AHD_TARGET_MODE 7980 int group; 7981 7982 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); 7983 if (role == ROLE_INITIATOR) { 7984 match = (group != XPT_FC_GROUP_TMODE) 7985 && ((tag == SCB_GET_TAG(scb)) 7986 || (tag == SCB_LIST_NULL)); 7987 } else if (role == ROLE_TARGET) { 7988 match = (group == XPT_FC_GROUP_TMODE) 7989 && ((tag == scb->io_ctx->csio.tag_id) 7990 || (tag == SCB_LIST_NULL)); 7991 } 7992 #else /* !AHD_TARGET_MODE */ 7993 match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); 7994 #endif /* AHD_TARGET_MODE */ 7995 } 7996 7997 return match; 7998 } 7999 8000 static void 8001 ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb) 8002 { 8003 int target; 8004 char channel; 8005 int lun; 8006 8007 target = SCB_GET_TARGET(ahd, scb); 8008 lun = SCB_GET_LUN(scb); 8009 channel = SCB_GET_CHANNEL(ahd, scb); 8010 8011 ahd_search_qinfifo(ahd, target, channel, lun, 8012 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 8013 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 8014 8015 ahd_platform_freeze_devq(ahd, scb); 8016 } 8017 8018 void 8019 ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb) 8020 { 8021 struct scb *prev_scb; 8022 ahd_mode_state saved_modes; 8023 8024 saved_modes = ahd_save_modes(ahd); 8025 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 8026 prev_scb = NULL; 8027 if (ahd_qinfifo_count(ahd) != 0) { 8028 u_int prev_tag; 8029 u_int prev_pos; 8030 8031 prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1); 8032 prev_tag = ahd->qinfifo[prev_pos]; 8033 prev_scb = ahd_lookup_scb(ahd, prev_tag); 8034 } 8035 ahd_qinfifo_requeue(ahd, prev_scb, scb); 8036 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); 8037 ahd_restore_modes(ahd, saved_modes); 8038 } 8039 8040 static void 8041 ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, 8042 struct scb *scb) 8043 { 8044 if (prev_scb == NULL) { 8045 uint32_t busaddr; 8046 8047 busaddr = ahd_le32toh(scb->hscb->hscb_busaddr); 8048 ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); 8049 } else { 8050 prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; 8051 ahd_sync_scb(ahd, prev_scb, 8052 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 8053 } 8054 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); 8055 ahd->qinfifonext++; 8056 scb->hscb->next_hscb_busaddr = 
ahd->next_queued_hscb->hscb_busaddr; 8057 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 8058 } 8059 8060 static int 8061 ahd_qinfifo_count(struct ahd_softc *ahd) 8062 { 8063 u_int qinpos; 8064 u_int wrap_qinpos; 8065 u_int wrap_qinfifonext; 8066 8067 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); 8068 qinpos = ahd_get_snscb_qoff(ahd); 8069 wrap_qinpos = AHD_QIN_WRAP(qinpos); 8070 wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext); 8071 if (wrap_qinfifonext >= wrap_qinpos) 8072 return (wrap_qinfifonext - wrap_qinpos); 8073 else 8074 return (wrap_qinfifonext 8075 + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos); 8076 } 8077 8078 static void 8079 ahd_reset_cmds_pending(struct ahd_softc *ahd) 8080 { 8081 struct scb *scb; 8082 ahd_mode_state saved_modes; 8083 u_int pending_cmds; 8084 8085 saved_modes = ahd_save_modes(ahd); 8086 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 8087 8088 /* 8089 * Don't count any commands as outstanding that the 8090 * sequencer has already marked for completion. 8091 */ 8092 ahd_flush_qoutfifo(ahd); 8093 8094 pending_cmds = 0; 8095 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 8096 pending_cmds++; 8097 } 8098 ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd)); 8099 ahd_restore_modes(ahd, saved_modes); 8100 ahd->flags &= ~AHD_UPDATE_PEND_CMDS; 8101 } 8102 8103 static void 8104 ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) 8105 { 8106 cam_status ostat; 8107 cam_status cstat; 8108 8109 ostat = ahd_get_transaction_status(scb); 8110 if (ostat == CAM_REQ_INPROG) 8111 ahd_set_transaction_status(scb, status); 8112 cstat = ahd_get_transaction_status(scb); 8113 if (cstat != CAM_REQ_CMP) 8114 ahd_freeze_scb(scb); 8115 ahd_done(ahd, scb); 8116 } 8117 8118 int 8119 ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, 8120 int lun, u_int tag, role_t role, uint32_t status, 8121 ahd_search_action action) 8122 { 8123 struct scb *scb; 8124 struct scb *mk_msg_scb; 8125 struct scb *prev_scb; 8126 ahd_mode_state saved_modes; 8127 u_int qinstart; 8128 u_int qinpos; 8129 u_int qintail; 8130 u_int tid_next; 8131 u_int tid_prev; 8132 u_int scbid; 8133 u_int seq_flags2; 8134 u_int savedscbptr; 8135 uint32_t busaddr; 8136 int found; 8137 int targets; 8138 8139 /* Must be in CCHAN mode */ 8140 saved_modes = ahd_save_modes(ahd); 8141 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 8142 8143 /* 8144 * Halt any pending SCB DMA. The sequencer will reinitiate 8145 * this dma if the qinfifo is not empty once we unpause. 8146 */ 8147 if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR)) 8148 == (CCARREN|CCSCBEN|CCSCBDIR)) { 8149 ahd_outb(ahd, CCSCBCTL, 8150 ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN)); 8151 while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0) 8152 ; 8153 } 8154 /* Determine sequencer's position in the qinfifo. */ 8155 qintail = AHD_QIN_WRAP(ahd->qinfifonext); 8156 qinstart = ahd_get_snscb_qoff(ahd); 8157 qinpos = AHD_QIN_WRAP(qinstart); 8158 found = 0; 8159 prev_scb = NULL; 8160 8161 if (action == SEARCH_PRINT) { 8162 printk("qinstart = %d qinfifonext = %d\nQINFIFO:", 8163 qinstart, ahd->qinfifonext); 8164 } 8165 8166 /* 8167 * Start with an empty queue. Entries that are not chosen 8168 * for removal will be re-added to the queue as we go. 
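 * Rewinding qinfifonext to the sequencer's read position and
 * pointing NEXT_QUEUED_SCB_ADDR back at the generic next-SCB
 * buffer empties the queue; ahd_qinfifo_requeue() then relinks
 * each surviving entry in its original order.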
8169 */ 8170 ahd->qinfifonext = qinstart; 8171 busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); 8172 ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); 8173 8174 while (qinpos != qintail) { 8175 scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]); 8176 if (scb == NULL) { 8177 printk("qinpos = %d, SCB index = %d\n", 8178 qinpos, ahd->qinfifo[qinpos]); 8179 panic("Loop 1\n"); 8180 } 8181 8182 if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) { 8183 /* 8184 * We found an scb that needs to be acted on. 8185 */ 8186 found++; 8187 switch (action) { 8188 case SEARCH_COMPLETE: 8189 if ((scb->flags & SCB_ACTIVE) == 0) 8190 printk("Inactive SCB in qinfifo\n"); 8191 ahd_done_with_status(ahd, scb, status); 8192 /* FALLTHROUGH */ 8193 case SEARCH_REMOVE: 8194 break; 8195 case SEARCH_PRINT: 8196 printk(" 0x%x", ahd->qinfifo[qinpos]); 8197 /* FALLTHROUGH */ 8198 case SEARCH_COUNT: 8199 ahd_qinfifo_requeue(ahd, prev_scb, scb); 8200 prev_scb = scb; 8201 break; 8202 } 8203 } else { 8204 ahd_qinfifo_requeue(ahd, prev_scb, scb); 8205 prev_scb = scb; 8206 } 8207 qinpos = AHD_QIN_WRAP(qinpos+1); 8208 } 8209 8210 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); 8211 8212 if (action == SEARCH_PRINT) 8213 printk("\nWAITING_TID_QUEUES:\n"); 8214 8215 /* 8216 * Search waiting for selection lists. We traverse the 8217 * list of "their ids" waiting for selection and, if 8218 * appropriate, traverse the SCBs of each "their id" 8219 * looking for matches. 8220 */ 8221 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 8222 seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2); 8223 if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) { 8224 scbid = ahd_inw(ahd, MK_MESSAGE_SCB); 8225 mk_msg_scb = ahd_lookup_scb(ahd, scbid); 8226 } else 8227 mk_msg_scb = NULL; 8228 savedscbptr = ahd_get_scbptr(ahd); 8229 tid_next = ahd_inw(ahd, WAITING_TID_HEAD); 8230 tid_prev = SCB_LIST_NULL; 8231 targets = 0; 8232 for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) { 8233 u_int tid_head; 8234 u_int tid_tail; 8235 8236 targets++; 8237 if (targets > AHD_NUM_TARGETS) 8238 panic("TID LIST LOOP"); 8239 8240 if (scbid >= ahd->scb_data.numscbs) { 8241 printk("%s: Waiting TID List inconsistency. " 8242 "SCB index == 0x%x, yet numscbs == 0x%x.", 8243 ahd_name(ahd), scbid, ahd->scb_data.numscbs); 8244 ahd_dump_card_state(ahd); 8245 panic("for safety"); 8246 } 8247 scb = ahd_lookup_scb(ahd, scbid); 8248 if (scb == NULL) { 8249 printk("%s: SCB = 0x%x Not Active!\n", 8250 ahd_name(ahd), scbid); 8251 panic("Waiting TID List traversal\n"); 8252 } 8253 ahd_set_scbptr(ahd, scbid); 8254 tid_next = ahd_inw_scbram(ahd, SCB_NEXT2); 8255 if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, 8256 SCB_LIST_NULL, ROLE_UNKNOWN) == 0) { 8257 tid_prev = scbid; 8258 continue; 8259 } 8260 8261 /* 8262 * We found a list of scbs that needs to be searched. 8263 */ 8264 if (action == SEARCH_PRINT) 8265 printk(" %d ( ", SCB_GET_TARGET(ahd, scb)); 8266 tid_head = scbid; 8267 found += ahd_search_scb_list(ahd, target, channel, 8268 lun, tag, role, status, 8269 action, &tid_head, &tid_tail, 8270 SCB_GET_TARGET(ahd, scb)); 8271 /* 8272 * Check any MK_MESSAGE SCB that is still waiting to 8273 * enter this target's waiting for selection queue. 8274 */ 8275 if (mk_msg_scb != NULL 8276 && ahd_match_scb(ahd, mk_msg_scb, target, channel, 8277 lun, tag, role)) { 8278 8279 /* 8280 * We found an scb that needs to be acted on. 
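 * The pending MK_MESSAGE SCB matched the search criteria, so it
 * is completed or unlinked below just as a queued SCB would be.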
8281 */ 8282 found++; 8283 switch (action) { 8284 case SEARCH_COMPLETE: 8285 if ((mk_msg_scb->flags & SCB_ACTIVE) == 0) 8286 printk("Inactive SCB pending MK_MSG\n"); 8287 ahd_done_with_status(ahd, mk_msg_scb, status); 8288 /* FALLTHROUGH */ 8289 case SEARCH_REMOVE: 8290 { 8291 u_int tail_offset; 8292 8293 printk("Removing MK_MSG scb\n"); 8294 8295 /* 8296 * Reset our tail to the tail of the 8297 * main per-target list. 8298 */ 8299 tail_offset = WAITING_SCB_TAILS 8300 + (2 * SCB_GET_TARGET(ahd, mk_msg_scb)); 8301 ahd_outw(ahd, tail_offset, tid_tail); 8302 8303 seq_flags2 &= ~PENDING_MK_MESSAGE; 8304 ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); 8305 ahd_outw(ahd, CMDS_PENDING, 8306 ahd_inw(ahd, CMDS_PENDING)-1); 8307 mk_msg_scb = NULL; 8308 break; 8309 } 8310 case SEARCH_PRINT: 8311 printk(" 0x%x", SCB_GET_TAG(scb)); 8312 /* FALLTHROUGH */ 8313 case SEARCH_COUNT: 8314 break; 8315 } 8316 } 8317 8318 if (mk_msg_scb != NULL 8319 && SCBID_IS_NULL(tid_head) 8320 && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, 8321 SCB_LIST_NULL, ROLE_UNKNOWN)) { 8322 8323 /* 8324 * When removing the last SCB for a target 8325 * queue with a pending MK_MESSAGE scb, we 8326 * must queue the MK_MESSAGE scb. 8327 */ 8328 printk("Queueing mk_msg_scb\n"); 8329 tid_head = ahd_inw(ahd, MK_MESSAGE_SCB); 8330 seq_flags2 &= ~PENDING_MK_MESSAGE; 8331 ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); 8332 mk_msg_scb = NULL; 8333 } 8334 if (tid_head != scbid) 8335 ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next); 8336 if (!SCBID_IS_NULL(tid_head)) 8337 tid_prev = tid_head; 8338 if (action == SEARCH_PRINT) 8339 printk(")\n"); 8340 } 8341 8342 /* Restore saved state. */ 8343 ahd_set_scbptr(ahd, savedscbptr); 8344 ahd_restore_modes(ahd, saved_modes); 8345 return (found); 8346 } 8347 8348 static int 8349 ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, 8350 int lun, u_int tag, role_t role, uint32_t status, 8351 ahd_search_action action, u_int *list_head, 8352 u_int *list_tail, u_int tid) 8353 { 8354 struct scb *scb; 8355 u_int scbid; 8356 u_int next; 8357 u_int prev; 8358 int found; 8359 8360 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8361 found = 0; 8362 prev = SCB_LIST_NULL; 8363 next = *list_head; 8364 *list_tail = SCB_LIST_NULL; 8365 for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) { 8366 if (scbid >= ahd->scb_data.numscbs) { 8367 printk("%s:SCB List inconsistency. 
" 8368 "SCB == 0x%x, yet numscbs == 0x%x.", 8369 ahd_name(ahd), scbid, ahd->scb_data.numscbs); 8370 ahd_dump_card_state(ahd); 8371 panic("for safety"); 8372 } 8373 scb = ahd_lookup_scb(ahd, scbid); 8374 if (scb == NULL) { 8375 printk("%s: SCB = %d Not Active!\n", 8376 ahd_name(ahd), scbid); 8377 panic("Waiting List traversal\n"); 8378 } 8379 ahd_set_scbptr(ahd, scbid); 8380 *list_tail = scbid; 8381 next = ahd_inw_scbram(ahd, SCB_NEXT); 8382 if (ahd_match_scb(ahd, scb, target, channel, 8383 lun, SCB_LIST_NULL, role) == 0) { 8384 prev = scbid; 8385 continue; 8386 } 8387 found++; 8388 switch (action) { 8389 case SEARCH_COMPLETE: 8390 if ((scb->flags & SCB_ACTIVE) == 0) 8391 printk("Inactive SCB in Waiting List\n"); 8392 ahd_done_with_status(ahd, scb, status); 8393 /* fall through */ 8394 case SEARCH_REMOVE: 8395 ahd_rem_wscb(ahd, scbid, prev, next, tid); 8396 *list_tail = prev; 8397 if (SCBID_IS_NULL(prev)) 8398 *list_head = next; 8399 break; 8400 case SEARCH_PRINT: 8401 printk("0x%x ", scbid); 8402 /* fall through */ 8403 case SEARCH_COUNT: 8404 prev = scbid; 8405 break; 8406 } 8407 if (found > AHD_SCB_MAX) 8408 panic("SCB LIST LOOP"); 8409 } 8410 if (action == SEARCH_COMPLETE 8411 || action == SEARCH_REMOVE) 8412 ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found); 8413 return (found); 8414 } 8415 8416 static void 8417 ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, 8418 u_int tid_cur, u_int tid_next) 8419 { 8420 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8421 8422 if (SCBID_IS_NULL(tid_cur)) { 8423 8424 /* Bypass current TID list */ 8425 if (SCBID_IS_NULL(tid_prev)) { 8426 ahd_outw(ahd, WAITING_TID_HEAD, tid_next); 8427 } else { 8428 ahd_set_scbptr(ahd, tid_prev); 8429 ahd_outw(ahd, SCB_NEXT2, tid_next); 8430 } 8431 if (SCBID_IS_NULL(tid_next)) 8432 ahd_outw(ahd, WAITING_TID_TAIL, tid_prev); 8433 } else { 8434 8435 /* Stitch through tid_cur */ 8436 if (SCBID_IS_NULL(tid_prev)) { 8437 ahd_outw(ahd, WAITING_TID_HEAD, tid_cur); 8438 } else { 8439 ahd_set_scbptr(ahd, tid_prev); 8440 ahd_outw(ahd, SCB_NEXT2, tid_cur); 8441 } 8442 ahd_set_scbptr(ahd, tid_cur); 8443 ahd_outw(ahd, SCB_NEXT2, tid_next); 8444 8445 if (SCBID_IS_NULL(tid_next)) 8446 ahd_outw(ahd, WAITING_TID_TAIL, tid_cur); 8447 } 8448 } 8449 8450 /* 8451 * Manipulate the waiting for selection list and return the 8452 * scb that follows the one that we remove. 8453 */ 8454 static u_int 8455 ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, 8456 u_int prev, u_int next, u_int tid) 8457 { 8458 u_int tail_offset; 8459 8460 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8461 if (!SCBID_IS_NULL(prev)) { 8462 ahd_set_scbptr(ahd, prev); 8463 ahd_outw(ahd, SCB_NEXT, next); 8464 } 8465 8466 /* 8467 * SCBs that have MK_MESSAGE set in them may 8468 * cause the tail pointer to be updated without 8469 * setting the next pointer of the previous tail. 8470 * Only clear the tail if the removed SCB was 8471 * the tail. 8472 */ 8473 tail_offset = WAITING_SCB_TAILS + (2 * tid); 8474 if (SCBID_IS_NULL(next) 8475 && ahd_inw(ahd, tail_offset) == scbid) 8476 ahd_outw(ahd, tail_offset, prev); 8477 8478 ahd_add_scb_to_free_list(ahd, scbid); 8479 return (next); 8480 } 8481 8482 /* 8483 * Add the SCB as selected by SCBPTR onto the on chip list of 8484 * free hardware SCBs. This list is empty/unused if we are not 8485 * performing SCB paging. 8486 */ 8487 static void 8488 ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid) 8489 { 8490 /* XXX Need some other mechanism to designate "free". 
*/ 8491 /* 8492 * Invalidate the tag so that our abort 8493 * routines don't think it's active. 8494 ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL); 8495 */ 8496 } 8497 8498 /******************************** Error Handling ******************************/ 8499 /* 8500 * Abort all SCBs that match the given description (target/channel/lun/tag), 8501 * setting their status to the passed in status if the status has not already 8502 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 8503 * is paused before it is called. 8504 */ 8505 static int 8506 ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, 8507 int lun, u_int tag, role_t role, uint32_t status) 8508 { 8509 struct scb *scbp; 8510 struct scb *scbp_next; 8511 u_int i, j; 8512 u_int maxtarget; 8513 u_int minlun; 8514 u_int maxlun; 8515 int found; 8516 ahd_mode_state saved_modes; 8517 8518 /* restore this when we're done */ 8519 saved_modes = ahd_save_modes(ahd); 8520 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 8521 8522 found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL, 8523 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 8524 8525 /* 8526 * Clean out the busy target table for any untagged commands. 8527 */ 8528 i = 0; 8529 maxtarget = 16; 8530 if (target != CAM_TARGET_WILDCARD) { 8531 i = target; 8532 if (channel == 'B') 8533 i += 8; 8534 maxtarget = i + 1; 8535 } 8536 8537 if (lun == CAM_LUN_WILDCARD) { 8538 minlun = 0; 8539 maxlun = AHD_NUM_LUNS_NONPKT; 8540 } else if (lun >= AHD_NUM_LUNS_NONPKT) { 8541 minlun = maxlun = 0; 8542 } else { 8543 minlun = lun; 8544 maxlun = lun + 1; 8545 } 8546 8547 if (role != ROLE_TARGET) { 8548 for (;i < maxtarget; i++) { 8549 for (j = minlun;j < maxlun; j++) { 8550 u_int scbid; 8551 u_int tcl; 8552 8553 tcl = BUILD_TCL_RAW(i, 'A', j); 8554 scbid = ahd_find_busy_tcl(ahd, tcl); 8555 scbp = ahd_lookup_scb(ahd, scbid); 8556 if (scbp == NULL 8557 || ahd_match_scb(ahd, scbp, target, channel, 8558 lun, tag, role) == 0) 8559 continue; 8560 ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j)); 8561 } 8562 } 8563 } 8564 8565 /* 8566 * Don't abort commands that have already completed, 8567 * but haven't quite made it up to the host yet. 8568 */ 8569 ahd_flush_qoutfifo(ahd); 8570 8571 /* 8572 * Go through the pending CCB list and look for 8573 * commands for this target that are still active. 8574 * These are other tagged commands that were 8575 * disconnected when the reset occurred. 
8576 */ 8577 scbp_next = LIST_FIRST(&ahd->pending_scbs); 8578 while (scbp_next != NULL) { 8579 scbp = scbp_next; 8580 scbp_next = LIST_NEXT(scbp, pending_links); 8581 if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) { 8582 cam_status ostat; 8583 8584 ostat = ahd_get_transaction_status(scbp); 8585 if (ostat == CAM_REQ_INPROG) 8586 ahd_set_transaction_status(scbp, status); 8587 if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP) 8588 ahd_freeze_scb(scbp); 8589 if ((scbp->flags & SCB_ACTIVE) == 0) 8590 printk("Inactive SCB on pending list\n"); 8591 ahd_done(ahd, scbp); 8592 found++; 8593 } 8594 } 8595 ahd_restore_modes(ahd, saved_modes); 8596 ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status); 8597 ahd->flags |= AHD_UPDATE_PEND_CMDS; 8598 return found; 8599 } 8600 8601 static void 8602 ahd_reset_current_bus(struct ahd_softc *ahd) 8603 { 8604 uint8_t scsiseq; 8605 8606 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8607 ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST); 8608 scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO); 8609 ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO); 8610 ahd_flush_device_writes(ahd); 8611 ahd_delay(AHD_BUSRESET_DELAY); 8612 /* Turn off the bus reset */ 8613 ahd_outb(ahd, SCSISEQ0, scsiseq); 8614 ahd_flush_device_writes(ahd); 8615 ahd_delay(AHD_BUSRESET_DELAY); 8616 if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) { 8617 /* 8618 * 2A Razor #474 8619 * Certain chip state is not cleared for 8620 * SCSI bus resets that we initiate, so 8621 * we must reset the chip. 8622 */ 8623 ahd_reset(ahd, /*reinit*/TRUE); 8624 ahd_intr_enable(ahd, /*enable*/TRUE); 8625 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8626 } 8627 8628 ahd_clear_intstat(ahd); 8629 } 8630 8631 int 8632 ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) 8633 { 8634 struct ahd_devinfo caminfo; 8635 u_int initiator; 8636 u_int target; 8637 u_int max_scsiid; 8638 int found; 8639 u_int fifo; 8640 u_int next_fifo; 8641 uint8_t scsiseq; 8642 8643 /* 8644 * Check if the last bus reset is cleared 8645 */ 8646 if (ahd->flags & AHD_BUS_RESET_ACTIVE) { 8647 printk("%s: bus reset still active\n", 8648 ahd_name(ahd)); 8649 return 0; 8650 } 8651 ahd->flags |= AHD_BUS_RESET_ACTIVE; 8652 8653 ahd->pending_device = NULL; 8654 8655 ahd_compile_devinfo(&caminfo, 8656 CAM_TARGET_WILDCARD, 8657 CAM_TARGET_WILDCARD, 8658 CAM_LUN_WILDCARD, 8659 channel, ROLE_UNKNOWN); 8660 ahd_pause(ahd); 8661 8662 /* Make sure the sequencer is in a safe location. */ 8663 ahd_clear_critical_section(ahd); 8664 8665 /* 8666 * Run our command complete fifos to ensure that we perform 8667 * completion processing on any commands that 'completed' 8668 * before the reset occurred. 8669 */ 8670 ahd_run_qoutfifo(ahd); 8671 #ifdef AHD_TARGET_MODE 8672 if ((ahd->flags & AHD_TARGETROLE) != 0) { 8673 ahd_run_tqinfifo(ahd, /*paused*/TRUE); 8674 } 8675 #endif 8676 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 8677 8678 /* 8679 * Disable selections so no automatic hardware 8680 * functions will modify chip state. 8681 */ 8682 ahd_outb(ahd, SCSISEQ0, 0); 8683 ahd_outb(ahd, SCSISEQ1, 0); 8684 8685 /* 8686 * Safely shut down our DMA engines. Always start with 8687 * the FIFO that is not currently active (if any are 8688 * actively connected). 8689 */ 8690 next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; 8691 if (next_fifo > CURRFIFO_1) 8692 /* If disconneced, arbitrarily start with FIFO1. 
		 */
		next_fifo = fifo = 0;
	do {
		next_fifo ^= CURRFIFO_1;
		ahd_set_modes(ahd, next_fifo, next_fifo);
		ahd_outb(ahd, DFCNTRL,
			 ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN));
		while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0)
			ahd_delay(10);
		/*
		 * Set CURRFIFO to the now inactive channel.
		 */
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, DFFSTAT, next_fifo);
	} while (next_fifo != fifo);

	/*
	 * Reset the bus if we are initiating this reset
	 */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SIMODE1,
		 ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));

	if (initiate_reset)
		ahd_reset_current_bus(ahd);

	ahd_clear_intstat(ahd);

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	/*
	 * Cleanup anything left in the FIFOs.
	 */
	ahd_clear_fifo(ahd, 0);
	ahd_clear_fifo(ahd, 1);

	/*
	 * Clear SCSI interrupt status
	 */
	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);

	/*
	 * Reenable selections
	 */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE);
	ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));

	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
#ifdef AHD_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahd_tmode_tstate* tstate;
		u_int lun;

		tstate = ahd->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahd_send_lstate_events(ahd, lstate);
		}
	}
#endif
	/*
	 * Revert to async/narrow transfers until we renegotiate.
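	 * The nested loops below clear only the current transfer
	 * parameters for each initiator/target pair; the goal
	 * settings survive, so the next command to each device
	 * triggers a fresh negotiation.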
8774 */ 8775 for (target = 0; target <= max_scsiid; target++) { 8776 8777 if (ahd->enabled_targets[target] == NULL) 8778 continue; 8779 for (initiator = 0; initiator <= max_scsiid; initiator++) { 8780 struct ahd_devinfo devinfo; 8781 8782 ahd_compile_devinfo(&devinfo, target, initiator, 8783 CAM_LUN_WILDCARD, 8784 'A', ROLE_UNKNOWN); 8785 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 8786 AHD_TRANS_CUR, /*paused*/TRUE); 8787 ahd_set_syncrate(ahd, &devinfo, /*period*/0, 8788 /*offset*/0, /*ppr_options*/0, 8789 AHD_TRANS_CUR, /*paused*/TRUE); 8790 } 8791 } 8792 8793 /* Notify the XPT that a bus reset occurred */ 8794 ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD, 8795 CAM_LUN_WILDCARD, AC_BUS_RESET); 8796 8797 ahd_restart(ahd); 8798 8799 return (found); 8800 } 8801 8802 /**************************** Statistics Processing ***************************/ 8803 static void 8804 ahd_stat_timer(struct timer_list *t) 8805 { 8806 struct ahd_softc *ahd = from_timer(ahd, t, stat_timer); 8807 u_long s; 8808 int enint_coal; 8809 8810 ahd_lock(ahd, &s); 8811 8812 enint_coal = ahd->hs_mailbox & ENINT_COALESCE; 8813 if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold) 8814 enint_coal |= ENINT_COALESCE; 8815 else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold) 8816 enint_coal &= ~ENINT_COALESCE; 8817 8818 if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) { 8819 ahd_enable_coalescing(ahd, enint_coal); 8820 #ifdef AHD_DEBUG 8821 if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0) 8822 printk("%s: Interrupt coalescing " 8823 "now %sabled. Cmds %d\n", 8824 ahd_name(ahd), 8825 (enint_coal & ENINT_COALESCE) ? "en" : "dis", 8826 ahd->cmdcmplt_total); 8827 #endif 8828 } 8829 8830 ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1); 8831 ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]; 8832 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0; 8833 ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); 8834 ahd_unlock(ahd, &s); 8835 } 8836 8837 /****************************** Status Processing *****************************/ 8838 8839 static void 8840 ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) 8841 { 8842 struct hardware_scb *hscb; 8843 int paused; 8844 8845 /* 8846 * The sequencer freezes its select-out queue 8847 * anytime a SCSI status error occurs. We must 8848 * handle the error and increment our qfreeze count 8849 * to allow the sequencer to continue. We don't 8850 * bother clearing critical sections here since all 8851 * operations are on data structures that the sequencer 8852 * is not touching once the queue is frozen. 8853 */ 8854 hscb = scb->hscb; 8855 8856 if (ahd_is_paused(ahd)) { 8857 paused = 1; 8858 } else { 8859 paused = 0; 8860 ahd_pause(ahd); 8861 } 8862 8863 /* Freeze the queue until the client sees the error. */ 8864 ahd_freeze_devq(ahd, scb); 8865 ahd_freeze_scb(scb); 8866 ahd->qfreeze_cnt++; 8867 ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); 8868 8869 if (paused == 0) 8870 ahd_unpause(ahd); 8871 8872 /* Don't want to clobber the original sense code */ 8873 if ((scb->flags & SCB_SENSE) != 0) { 8874 /* 8875 * Clear the SCB_SENSE Flag and perform 8876 * a normal command completion. 
8877 */ 8878 scb->flags &= ~SCB_SENSE; 8879 ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 8880 ahd_done(ahd, scb); 8881 return; 8882 } 8883 ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); 8884 ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status); 8885 switch (hscb->shared_data.istatus.scsi_status) { 8886 case STATUS_PKT_SENSE: 8887 { 8888 struct scsi_status_iu_header *siu; 8889 8890 ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD); 8891 siu = (struct scsi_status_iu_header *)scb->sense_data; 8892 ahd_set_scsi_status(scb, siu->status); 8893 #ifdef AHD_DEBUG 8894 if ((ahd_debug & AHD_SHOW_SENSE) != 0) { 8895 ahd_print_path(ahd, scb); 8896 printk("SCB 0x%x Received PKT Status of 0x%x\n", 8897 SCB_GET_TAG(scb), siu->status); 8898 printk("\tflags = 0x%x, sense len = 0x%x, " 8899 "pktfail = 0x%x\n", 8900 siu->flags, scsi_4btoul(siu->sense_length), 8901 scsi_4btoul(siu->pkt_failures_length)); 8902 } 8903 #endif 8904 if ((siu->flags & SIU_RSPVALID) != 0) { 8905 ahd_print_path(ahd, scb); 8906 if (scsi_4btoul(siu->pkt_failures_length) < 4) { 8907 printk("Unable to parse pkt_failures\n"); 8908 } else { 8909 8910 switch (SIU_PKTFAIL_CODE(siu)) { 8911 case SIU_PFC_NONE: 8912 printk("No packet failure found\n"); 8913 break; 8914 case SIU_PFC_CIU_FIELDS_INVALID: 8915 printk("Invalid Command IU Field\n"); 8916 break; 8917 case SIU_PFC_TMF_NOT_SUPPORTED: 8918 printk("TMF not supported\n"); 8919 break; 8920 case SIU_PFC_TMF_FAILED: 8921 printk("TMF failed\n"); 8922 break; 8923 case SIU_PFC_INVALID_TYPE_CODE: 8924 printk("Invalid L_Q Type code\n"); 8925 break; 8926 case SIU_PFC_ILLEGAL_REQUEST: 8927 printk("Illegal request\n"); 8928 default: 8929 break; 8930 } 8931 } 8932 if (siu->status == SCSI_STATUS_OK) 8933 ahd_set_transaction_status(scb, 8934 CAM_REQ_CMP_ERR); 8935 } 8936 if ((siu->flags & SIU_SNSVALID) != 0) { 8937 scb->flags |= SCB_PKT_SENSE; 8938 #ifdef AHD_DEBUG 8939 if ((ahd_debug & AHD_SHOW_SENSE) != 0) 8940 printk("Sense data available\n"); 8941 #endif 8942 } 8943 ahd_done(ahd, scb); 8944 break; 8945 } 8946 case SCSI_STATUS_CMD_TERMINATED: 8947 case SCSI_STATUS_CHECK_COND: 8948 { 8949 struct ahd_devinfo devinfo; 8950 struct ahd_dma_seg *sg; 8951 struct scsi_sense *sc; 8952 struct ahd_initiator_tinfo *targ_info; 8953 struct ahd_tmode_tstate *tstate; 8954 struct ahd_transinfo *tinfo; 8955 #ifdef AHD_DEBUG 8956 if (ahd_debug & AHD_SHOW_SENSE) { 8957 ahd_print_path(ahd, scb); 8958 printk("SCB %d: requests Check Status\n", 8959 SCB_GET_TAG(scb)); 8960 } 8961 #endif 8962 8963 if (ahd_perform_autosense(scb) == 0) 8964 break; 8965 8966 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), 8967 SCB_GET_TARGET(ahd, scb), 8968 SCB_GET_LUN(scb), 8969 SCB_GET_CHANNEL(ahd, scb), 8970 ROLE_INITIATOR); 8971 targ_info = ahd_fetch_transinfo(ahd, 8972 devinfo.channel, 8973 devinfo.our_scsiid, 8974 devinfo.target, 8975 &tstate); 8976 tinfo = &targ_info->curr; 8977 sg = scb->sg_list; 8978 sc = (struct scsi_sense *)hscb->shared_data.idata.cdb; 8979 /* 8980 * Save off the residual if there is one. 
8981 */ 8982 ahd_update_residual(ahd, scb); 8983 #ifdef AHD_DEBUG 8984 if (ahd_debug & AHD_SHOW_SENSE) { 8985 ahd_print_path(ahd, scb); 8986 printk("Sending Sense\n"); 8987 } 8988 #endif 8989 scb->sg_count = 0; 8990 sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb), 8991 ahd_get_sense_bufsize(ahd, scb), 8992 /*last*/TRUE); 8993 sc->opcode = REQUEST_SENSE; 8994 sc->byte2 = 0; 8995 if (tinfo->protocol_version <= SCSI_REV_2 8996 && SCB_GET_LUN(scb) < 8) 8997 sc->byte2 = SCB_GET_LUN(scb) << 5; 8998 sc->unused[0] = 0; 8999 sc->unused[1] = 0; 9000 sc->length = ahd_get_sense_bufsize(ahd, scb); 9001 sc->control = 0; 9002 9003 /* 9004 * We can't allow the target to disconnect. 9005 * This will be an untagged transaction and 9006 * having the target disconnect will make this 9007 * transaction indestinguishable from outstanding 9008 * tagged transactions. 9009 */ 9010 hscb->control = 0; 9011 9012 /* 9013 * This request sense could be because the 9014 * the device lost power or in some other 9015 * way has lost our transfer negotiations. 9016 * Renegotiate if appropriate. Unit attention 9017 * errors will be reported before any data 9018 * phases occur. 9019 */ 9020 if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) { 9021 ahd_update_neg_request(ahd, &devinfo, 9022 tstate, targ_info, 9023 AHD_NEG_IF_NON_ASYNC); 9024 } 9025 if (tstate->auto_negotiate & devinfo.target_mask) { 9026 hscb->control |= MK_MESSAGE; 9027 scb->flags &= 9028 ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET); 9029 scb->flags |= SCB_AUTO_NEGOTIATE; 9030 } 9031 hscb->cdb_len = sizeof(*sc); 9032 ahd_setup_data_scb(ahd, scb); 9033 scb->flags |= SCB_SENSE; 9034 ahd_queue_scb(ahd, scb); 9035 break; 9036 } 9037 case SCSI_STATUS_OK: 9038 printk("%s: Interrupted for status of 0???\n", 9039 ahd_name(ahd)); 9040 /* FALLTHROUGH */ 9041 default: 9042 ahd_done(ahd, scb); 9043 break; 9044 } 9045 } 9046 9047 static void 9048 ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb) 9049 { 9050 if (scb->hscb->shared_data.istatus.scsi_status != 0) { 9051 ahd_handle_scsi_status(ahd, scb); 9052 } else { 9053 ahd_calc_residual(ahd, scb); 9054 ahd_done(ahd, scb); 9055 } 9056 } 9057 9058 /* 9059 * Calculate the residual for a just completed SCB. 9060 */ 9061 static void 9062 ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb) 9063 { 9064 struct hardware_scb *hscb; 9065 struct initiator_status *spkt; 9066 uint32_t sgptr; 9067 uint32_t resid_sgptr; 9068 uint32_t resid; 9069 9070 /* 9071 * 5 cases. 9072 * 1) No residual. 9073 * SG_STATUS_VALID clear in sgptr. 9074 * 2) Transferless command 9075 * 3) Never performed any transfers. 9076 * sgptr has SG_FULL_RESID set. 9077 * 4) No residual but target did not 9078 * save data pointers after the 9079 * last transfer, so sgptr was 9080 * never updated. 9081 * 5) We have a partial residual. 9082 * Use residual_sgptr to determine 9083 * where we are. 9084 */ 9085 9086 hscb = scb->hscb; 9087 sgptr = ahd_le32toh(hscb->sgptr); 9088 if ((sgptr & SG_STATUS_VALID) == 0) 9089 /* Case 1 */ 9090 return; 9091 sgptr &= ~SG_STATUS_VALID; 9092 9093 if ((sgptr & SG_LIST_NULL) != 0) 9094 /* Case 2 */ 9095 return; 9096 9097 /* 9098 * Residual fields are the same in both 9099 * target and initiator status packets, 9100 * so we can always use the initiator fields 9101 * regardless of the role for this SCB. 
9102 */ 9103 spkt = &hscb->shared_data.istatus; 9104 resid_sgptr = ahd_le32toh(spkt->residual_sgptr); 9105 if ((sgptr & SG_FULL_RESID) != 0) { 9106 /* Case 3 */ 9107 resid = ahd_get_transfer_length(scb); 9108 } else if ((resid_sgptr & SG_LIST_NULL) != 0) { 9109 /* Case 4 */ 9110 return; 9111 } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { 9112 ahd_print_path(ahd, scb); 9113 printk("data overrun detected Tag == 0x%x.\n", 9114 SCB_GET_TAG(scb)); 9115 ahd_freeze_devq(ahd, scb); 9116 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); 9117 ahd_freeze_scb(scb); 9118 return; 9119 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { 9120 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); 9121 /* NOTREACHED */ 9122 } else { 9123 struct ahd_dma_seg *sg; 9124 9125 /* 9126 * Remainder of the SG where the transfer 9127 * stopped. 9128 */ 9129 resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; 9130 sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK); 9131 9132 /* The residual sg_ptr always points to the next sg */ 9133 sg--; 9134 9135 /* 9136 * Add up the contents of all residual 9137 * SG segments that are after the SG where 9138 * the transfer stopped. 9139 */ 9140 while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { 9141 sg++; 9142 resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; 9143 } 9144 } 9145 if ((scb->flags & SCB_SENSE) == 0) 9146 ahd_set_residual(scb, resid); 9147 else 9148 ahd_set_sense_residual(scb, resid); 9149 9150 #ifdef AHD_DEBUG 9151 if ((ahd_debug & AHD_SHOW_MISC) != 0) { 9152 ahd_print_path(ahd, scb); 9153 printk("Handled %sResidual of %d bytes\n", 9154 (scb->flags & SCB_SENSE) ? "Sense " : "", resid); 9155 } 9156 #endif 9157 } 9158 9159 /******************************* Target Mode **********************************/ 9160 #ifdef AHD_TARGET_MODE 9161 /* 9162 * Add a target mode event to this lun's queue 9163 */ 9164 static void 9165 ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, 9166 u_int initiator_id, u_int event_type, u_int event_arg) 9167 { 9168 struct ahd_tmode_event *event; 9169 int pending; 9170 9171 xpt_freeze_devq(lstate->path, /*count*/1); 9172 if (lstate->event_w_idx >= lstate->event_r_idx) 9173 pending = lstate->event_w_idx - lstate->event_r_idx; 9174 else 9175 pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 9176 - (lstate->event_r_idx - lstate->event_w_idx); 9177 9178 if (event_type == EVENT_TYPE_BUS_RESET 9179 || event_type == MSG_BUS_DEV_RESET) { 9180 /* 9181 * Any earlier events are irrelevant, so reset our buffer. 9182 * This has the effect of allowing us to deal with reset 9183 * floods (an external device holding down the reset line) 9184 * without losing the event that is really interesting. 
9185 */ 9186 lstate->event_r_idx = 0; 9187 lstate->event_w_idx = 0; 9188 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 9189 } 9190 9191 if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { 9192 xpt_print_path(lstate->path); 9193 printk("immediate event %x:%x lost\n", 9194 lstate->event_buffer[lstate->event_r_idx].event_type, 9195 lstate->event_buffer[lstate->event_r_idx].event_arg); 9196 lstate->event_r_idx++; 9197 if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) 9198 lstate->event_r_idx = 0; 9199 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 9200 } 9201 9202 event = &lstate->event_buffer[lstate->event_w_idx]; 9203 event->initiator_id = initiator_id; 9204 event->event_type = event_type; 9205 event->event_arg = event_arg; 9206 lstate->event_w_idx++; 9207 if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) 9208 lstate->event_w_idx = 0; 9209 } 9210 9211 /* 9212 * Send any target mode events queued up waiting 9213 * for immediate notify resources. 9214 */ 9215 void 9216 ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate) 9217 { 9218 struct ccb_hdr *ccbh; 9219 struct ccb_immed_notify *inot; 9220 9221 while (lstate->event_r_idx != lstate->event_w_idx 9222 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 9223 struct ahd_tmode_event *event; 9224 9225 event = &lstate->event_buffer[lstate->event_r_idx]; 9226 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 9227 inot = (struct ccb_immed_notify *)ccbh; 9228 switch (event->event_type) { 9229 case EVENT_TYPE_BUS_RESET: 9230 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 9231 break; 9232 default: 9233 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 9234 inot->message_args[0] = event->event_type; 9235 inot->message_args[1] = event->event_arg; 9236 break; 9237 } 9238 inot->initiator_id = event->initiator_id; 9239 inot->sense_len = 0; 9240 xpt_done((union ccb *)inot); 9241 lstate->event_r_idx++; 9242 if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) 9243 lstate->event_r_idx = 0; 9244 } 9245 } 9246 #endif 9247 9248 /******************** Sequencer Program Patching/Download *********************/ 9249 9250 #ifdef AHD_DUMP_SEQ 9251 void 9252 ahd_dumpseq(struct ahd_softc* ahd) 9253 { 9254 int i; 9255 int max_prog; 9256 9257 max_prog = 2048; 9258 9259 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 9260 ahd_outw(ahd, PRGMCNT, 0); 9261 for (i = 0; i < max_prog; i++) { 9262 uint8_t ins_bytes[4]; 9263 9264 ahd_insb(ahd, SEQRAM, ins_bytes, 4); 9265 printk("0x%08x\n", ins_bytes[0] << 24 9266 | ins_bytes[1] << 16 9267 | ins_bytes[2] << 8 9268 | ins_bytes[3]); 9269 } 9270 } 9271 #endif 9272 9273 static void 9274 ahd_loadseq(struct ahd_softc *ahd) 9275 { 9276 struct cs cs_table[NUM_CRITICAL_SECTIONS]; 9277 u_int begin_set[NUM_CRITICAL_SECTIONS]; 9278 u_int end_set[NUM_CRITICAL_SECTIONS]; 9279 const struct patch *cur_patch; 9280 u_int cs_count; 9281 u_int cur_cs; 9282 u_int i; 9283 int downloaded; 9284 u_int skip_addr; 9285 u_int sg_prefetch_cnt; 9286 u_int sg_prefetch_cnt_limit; 9287 u_int sg_prefetch_align; 9288 u_int sg_size; 9289 u_int cacheline_mask; 9290 uint8_t download_consts[DOWNLOAD_CONST_COUNT]; 9291 9292 if (bootverbose) 9293 printk("%s: Downloading Sequencer Program...", 9294 ahd_name(ahd)); 9295 9296 #if DOWNLOAD_CONST_COUNT != 8 9297 #error "Download Const Mismatch" 9298 #endif 9299 /* 9300 * Start out with 0 critical sections 9301 * that apply to this firmware load. 
9302 */ 9303 cs_count = 0; 9304 cur_cs = 0; 9305 memset(begin_set, 0, sizeof(begin_set)); 9306 memset(end_set, 0, sizeof(end_set)); 9307 9308 /* 9309 * Setup downloadable constant table. 9310 * 9311 * The computation for the S/G prefetch variables is 9312 * a bit complicated. We would like to always fetch 9313 * in terms of cachelined sized increments. However, 9314 * if the cacheline is not an even multiple of the 9315 * SG element size or is larger than our SG RAM, using 9316 * just the cache size might leave us with only a portion 9317 * of an SG element at the tail of a prefetch. If the 9318 * cacheline is larger than our S/G prefetch buffer less 9319 * the size of an SG element, we may round down to a cacheline 9320 * that doesn't contain any or all of the S/G of interest 9321 * within the bounds of our S/G ram. Provide variables to 9322 * the sequencer that will allow it to handle these edge 9323 * cases. 9324 */ 9325 /* Start by aligning to the nearest cacheline. */ 9326 sg_prefetch_align = ahd->pci_cachesize; 9327 if (sg_prefetch_align == 0) 9328 sg_prefetch_align = 8; 9329 /* Round down to the nearest power of 2. */ 9330 while (powerof2(sg_prefetch_align) == 0) 9331 sg_prefetch_align--; 9332 9333 cacheline_mask = sg_prefetch_align - 1; 9334 9335 /* 9336 * If the cacheline boundary is greater than half our prefetch RAM 9337 * we risk not being able to fetch even a single complete S/G 9338 * segment if we align to that boundary. 9339 */ 9340 if (sg_prefetch_align > CCSGADDR_MAX/2) 9341 sg_prefetch_align = CCSGADDR_MAX/2; 9342 /* Start by fetching a single cacheline. */ 9343 sg_prefetch_cnt = sg_prefetch_align; 9344 /* 9345 * Increment the prefetch count by cachelines until 9346 * at least one S/G element will fit. 9347 */ 9348 sg_size = sizeof(struct ahd_dma_seg); 9349 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) 9350 sg_size = sizeof(struct ahd_dma64_seg); 9351 while (sg_prefetch_cnt < sg_size) 9352 sg_prefetch_cnt += sg_prefetch_align; 9353 /* 9354 * If the cacheline is not an even multiple of 9355 * the S/G size, we may only get a partial S/G when 9356 * we align. Add a cacheline if this is the case. 9357 */ 9358 if ((sg_prefetch_align % sg_size) != 0 9359 && (sg_prefetch_cnt < CCSGADDR_MAX)) 9360 sg_prefetch_cnt += sg_prefetch_align; 9361 /* 9362 * Lastly, compute a value that the sequencer can use 9363 * to determine if the remainder of the CCSGRAM buffer 9364 * has a full S/G element in it. 9365 */ 9366 sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1); 9367 download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; 9368 download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit; 9369 download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1); 9370 download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1); 9371 download_consts[SG_SIZEOF] = sg_size; 9372 download_consts[PKT_OVERRUN_BUFOFFSET] = 9373 (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256; 9374 download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN; 9375 download_consts[CACHELINE_MASK] = cacheline_mask; 9376 cur_patch = patches; 9377 downloaded = 0; 9378 skip_addr = 0; 9379 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 9380 ahd_outw(ahd, PRGMCNT, 0); 9381 9382 for (i = 0; i < sizeof(seqprog)/4; i++) { 9383 if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) { 9384 /* 9385 * Don't download this instruction as it 9386 * is in a patch that was removed. 
9387 */ 9388 continue; 9389 } 9390 /* 9391 * Move through the CS table until we find a CS 9392 * that might apply to this instruction. 9393 */ 9394 for (; cur_cs < NUM_CRITICAL_SECTIONS; cur_cs++) { 9395 if (critical_sections[cur_cs].end <= i) { 9396 if (begin_set[cs_count] == TRUE 9397 && end_set[cs_count] == FALSE) { 9398 cs_table[cs_count].end = downloaded; 9399 end_set[cs_count] = TRUE; 9400 cs_count++; 9401 } 9402 continue; 9403 } 9404 if (critical_sections[cur_cs].begin <= i 9405 && begin_set[cs_count] == FALSE) { 9406 cs_table[cs_count].begin = downloaded; 9407 begin_set[cs_count] = TRUE; 9408 } 9409 break; 9410 } 9411 ahd_download_instr(ahd, i, download_consts); 9412 downloaded++; 9413 } 9414 9415 ahd->num_critical_sections = cs_count; 9416 if (cs_count != 0) { 9417 9418 cs_count *= sizeof(struct cs); 9419 ahd->critical_sections = kmalloc(cs_count, GFP_ATOMIC); 9420 if (ahd->critical_sections == NULL) 9421 panic("ahd_loadseq: Could not malloc"); 9422 memcpy(ahd->critical_sections, cs_table, cs_count); 9423 } 9424 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE); 9425 9426 if (bootverbose) { 9427 printk(" %d instructions downloaded\n", downloaded); 9428 printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", 9429 ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags); 9430 } 9431 } 9432 9433 static int 9434 ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch, 9435 u_int start_instr, u_int *skip_addr) 9436 { 9437 const struct patch *cur_patch; 9438 const struct patch *last_patch; 9439 u_int num_patches; 9440 9441 num_patches = ARRAY_SIZE(patches); 9442 last_patch = &patches[num_patches]; 9443 cur_patch = *start_patch; 9444 9445 while (cur_patch < last_patch && start_instr == cur_patch->begin) { 9446 9447 if (cur_patch->patch_func(ahd) == 0) { 9448 9449 /* Start rejecting code */ 9450 *skip_addr = start_instr + cur_patch->skip_instr; 9451 cur_patch += cur_patch->skip_patch; 9452 } else { 9453 /* Accepted this patch. Advance to the next 9454 * one and wait for our intruction pointer to 9455 * hit this point. 9456 */ 9457 cur_patch++; 9458 } 9459 } 9460 9461 *start_patch = cur_patch; 9462 if (start_instr < *skip_addr) 9463 /* Still skipping */ 9464 return (0); 9465 9466 return (1); 9467 } 9468 9469 static u_int 9470 ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address) 9471 { 9472 const struct patch *cur_patch; 9473 int address_offset; 9474 u_int skip_addr; 9475 u_int i; 9476 9477 address_offset = 0; 9478 cur_patch = patches; 9479 skip_addr = 0; 9480 9481 for (i = 0; i < address;) { 9482 9483 ahd_check_patch(ahd, &cur_patch, i, &skip_addr); 9484 9485 if (skip_addr > i) { 9486 int end_addr; 9487 9488 end_addr = min(address, skip_addr); 9489 address_offset += end_addr - i; 9490 i = skip_addr; 9491 } else { 9492 i++; 9493 } 9494 } 9495 return (address - address_offset); 9496 } 9497 9498 static void 9499 ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) 9500 { 9501 union ins_formats instr; 9502 struct ins_format1 *fmt1_ins; 9503 struct ins_format3 *fmt3_ins; 9504 u_int opcode; 9505 9506 /* 9507 * The firmware is always compiled into a little endian format. 
9508 */ 9509 instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); 9510 9511 fmt1_ins = &instr.format1; 9512 fmt3_ins = NULL; 9513 9514 /* Pull the opcode */ 9515 opcode = instr.format1.opcode; 9516 switch (opcode) { 9517 case AIC_OP_JMP: 9518 case AIC_OP_JC: 9519 case AIC_OP_JNC: 9520 case AIC_OP_CALL: 9521 case AIC_OP_JNE: 9522 case AIC_OP_JNZ: 9523 case AIC_OP_JE: 9524 case AIC_OP_JZ: 9525 { 9526 fmt3_ins = &instr.format3; 9527 fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); 9528 } 9529 /* fall through */ 9530 case AIC_OP_OR: 9531 case AIC_OP_AND: 9532 case AIC_OP_XOR: 9533 case AIC_OP_ADD: 9534 case AIC_OP_ADC: 9535 case AIC_OP_BMOV: 9536 if (fmt1_ins->parity != 0) { 9537 fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; 9538 } 9539 fmt1_ins->parity = 0; 9540 /* fall through */ 9541 case AIC_OP_ROL: 9542 { 9543 int i, count; 9544 9545 /* Calculate odd parity for the instruction */ 9546 for (i = 0, count = 0; i < 31; i++) { 9547 uint32_t mask; 9548 9549 mask = 0x01 << i; 9550 if ((instr.integer & mask) != 0) 9551 count++; 9552 } 9553 if ((count & 0x01) == 0) 9554 instr.format1.parity = 1; 9555 9556 /* The sequencer is a little-endian CPU */ 9557 instr.integer = ahd_htole32(instr.integer); 9558 ahd_outsb(ahd, SEQRAM, instr.bytes, 4); 9559 break; 9560 } 9561 default: 9562 panic("Unknown opcode encountered in seq program"); 9563 break; 9564 } 9565 } 9566 9567 static int 9568 ahd_probe_stack_size(struct ahd_softc *ahd) 9569 { 9570 int last_probe; 9571 9572 last_probe = 0; 9573 while (1) { 9574 int i; 9575 9576 /* 9577 * We avoid using 0 as a pattern to avoid 9578 * confusion if the stack implementation 9579 * "back-fills" with zeros when "popping" 9580 * entries. 9581 */ 9582 for (i = 1; i <= last_probe+1; i++) { 9583 ahd_outb(ahd, STACK, i & 0xFF); 9584 ahd_outb(ahd, STACK, (i >> 8) & 0xFF); 9585 } 9586 9587 /* Verify */ 9588 for (i = last_probe+1; i > 0; i--) { 9589 u_int stack_entry; 9590 9591 stack_entry = ahd_inb(ahd, STACK) 9592 |(ahd_inb(ahd, STACK) << 8); 9593 if (stack_entry != i) 9594 goto sized; 9595 } 9596 last_probe++; 9597 } 9598 sized: 9599 return (last_probe); 9600 } 9601 9602 int 9603 ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries, 9604 const char *name, u_int address, u_int value, 9605 u_int *cur_column, u_int wrap_point) 9606 { 9607 int printed; 9608 u_int printed_mask; 9609 9610 if (cur_column != NULL && *cur_column >= wrap_point) { 9611 printk("\n"); 9612 *cur_column = 0; 9613 } 9614 printed = printk("%s[0x%x]", name, value); 9615 if (table == NULL) { 9616 printed += printk(" "); 9617 if (cur_column != NULL) *cur_column += printed; 9618 return (printed); 9619 } 9620 printed_mask = 0; 9621 while (printed_mask != 0xFF) { 9622 int entry; 9623 9624 for (entry = 0; entry < num_entries; entry++) { 9625 if (((value & table[entry].mask) 9626 != table[entry].value) 9627 || ((printed_mask & table[entry].mask) 9628 == table[entry].mask)) 9629 continue; 9630 9631 printed += printk("%s%s", 9632 printed_mask == 0 ? 
":(" : "|", 9633 table[entry].name); 9634 printed_mask |= table[entry].mask; 9635 9636 break; 9637 } 9638 if (entry >= num_entries) 9639 break; 9640 } 9641 if (printed_mask != 0) 9642 printed += printk(") "); 9643 else 9644 printed += printk(" "); 9645 if (cur_column != NULL) 9646 *cur_column += printed; 9647 return (printed); 9648 } 9649 9650 void 9651 ahd_dump_card_state(struct ahd_softc *ahd) 9652 { 9653 struct scb *scb; 9654 ahd_mode_state saved_modes; 9655 u_int dffstat; 9656 int paused; 9657 u_int scb_index; 9658 u_int saved_scb_index; 9659 u_int cur_col; 9660 int i; 9661 9662 if (ahd_is_paused(ahd)) { 9663 paused = 1; 9664 } else { 9665 paused = 0; 9666 ahd_pause(ahd); 9667 } 9668 saved_modes = ahd_save_modes(ahd); 9669 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 9670 printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 9671 "%s: Dumping Card State at program address 0x%x Mode 0x%x\n", 9672 ahd_name(ahd), 9673 ahd_inw(ahd, CURADDR), 9674 ahd_build_mode_state(ahd, ahd->saved_src_mode, 9675 ahd->saved_dst_mode)); 9676 if (paused) 9677 printk("Card was paused\n"); 9678 9679 if (ahd_check_cmdcmpltqueues(ahd)) 9680 printk("Completions are pending\n"); 9681 9682 /* 9683 * Mode independent registers. 9684 */ 9685 cur_col = 0; 9686 ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50); 9687 ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50); 9688 ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50); 9689 ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50); 9690 ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50); 9691 ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50); 9692 ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50); 9693 ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50); 9694 ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50); 9695 ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50); 9696 ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50); 9697 ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50); 9698 ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50); 9699 ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50); 9700 ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50); 9701 ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50); 9702 ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50); 9703 ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50); 9704 ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50); 9705 ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT), 9706 &cur_col, 50); 9707 ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50); 9708 ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID), 9709 &cur_col, 50); 9710 ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50); 9711 ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50); 9712 ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50); 9713 ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50); 9714 ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50); 9715 ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50); 9716 ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50); 9717 ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50); 9718 ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50); 9719 ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50); 9720 ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50); 9721 ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50); 9722 printk("\n"); 9723 printk("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 
0x%x " 9724 "CURRSCB 0x%x NEXTSCB 0x%x\n", 9725 ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING), 9726 ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB), 9727 ahd_inw(ahd, NEXTSCB)); 9728 cur_col = 0; 9729 /* QINFIFO */ 9730 ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, 9731 CAM_LUN_WILDCARD, SCB_LIST_NULL, 9732 ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT); 9733 saved_scb_index = ahd_get_scbptr(ahd); 9734 printk("Pending list:"); 9735 i = 0; 9736 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 9737 if (i++ > AHD_SCB_MAX) 9738 break; 9739 cur_col = printk("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb), 9740 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT)); 9741 ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); 9742 ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL), 9743 &cur_col, 60); 9744 ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID), 9745 &cur_col, 60); 9746 } 9747 printk("\nTotal %d\n", i); 9748 9749 printk("Kernel Free SCB list: "); 9750 i = 0; 9751 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { 9752 struct scb *list_scb; 9753 9754 list_scb = scb; 9755 do { 9756 printk("%d ", SCB_GET_TAG(list_scb)); 9757 list_scb = LIST_NEXT(list_scb, collision_links); 9758 } while (list_scb && i++ < AHD_SCB_MAX); 9759 } 9760 9761 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { 9762 if (i++ > AHD_SCB_MAX) 9763 break; 9764 printk("%d ", SCB_GET_TAG(scb)); 9765 } 9766 printk("\n"); 9767 9768 printk("Sequencer Complete DMA-inprog list: "); 9769 scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD); 9770 i = 0; 9771 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9772 ahd_set_scbptr(ahd, scb_index); 9773 printk("%d ", scb_index); 9774 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9775 } 9776 printk("\n"); 9777 9778 printk("Sequencer Complete list: "); 9779 scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD); 9780 i = 0; 9781 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9782 ahd_set_scbptr(ahd, scb_index); 9783 printk("%d ", scb_index); 9784 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9785 } 9786 printk("\n"); 9787 9788 9789 printk("Sequencer DMA-Up and Complete list: "); 9790 scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); 9791 i = 0; 9792 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9793 ahd_set_scbptr(ahd, scb_index); 9794 printk("%d ", scb_index); 9795 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9796 } 9797 printk("\n"); 9798 printk("Sequencer On QFreeze and Complete list: "); 9799 scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); 9800 i = 0; 9801 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9802 ahd_set_scbptr(ahd, scb_index); 9803 printk("%d ", scb_index); 9804 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9805 } 9806 printk("\n"); 9807 ahd_set_scbptr(ahd, saved_scb_index); 9808 dffstat = ahd_inb(ahd, DFFSTAT); 9809 for (i = 0; i < 2; i++) { 9810 #ifdef AHD_DEBUG 9811 struct scb *fifo_scb; 9812 #endif 9813 u_int fifo_scbptr; 9814 9815 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); 9816 fifo_scbptr = ahd_get_scbptr(ahd); 9817 printk("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n", 9818 ahd_name(ahd), i, 9819 (dffstat & (FIFO0FREE << i)) ? 
"Free" : "Active", 9820 ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr); 9821 cur_col = 0; 9822 ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50); 9823 ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50); 9824 ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50); 9825 ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50); 9826 ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW), 9827 &cur_col, 50); 9828 ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50); 9829 ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50); 9830 ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50); 9831 ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50); 9832 if (cur_col > 50) { 9833 printk("\n"); 9834 cur_col = 0; 9835 } 9836 cur_col += printk("SHADDR = 0x%x%x, SHCNT = 0x%x ", 9837 ahd_inl(ahd, SHADDR+4), 9838 ahd_inl(ahd, SHADDR), 9839 (ahd_inb(ahd, SHCNT) 9840 | (ahd_inb(ahd, SHCNT + 1) << 8) 9841 | (ahd_inb(ahd, SHCNT + 2) << 16))); 9842 if (cur_col > 50) { 9843 printk("\n"); 9844 cur_col = 0; 9845 } 9846 cur_col += printk("HADDR = 0x%x%x, HCNT = 0x%x ", 9847 ahd_inl(ahd, HADDR+4), 9848 ahd_inl(ahd, HADDR), 9849 (ahd_inb(ahd, HCNT) 9850 | (ahd_inb(ahd, HCNT + 1) << 8) 9851 | (ahd_inb(ahd, HCNT + 2) << 16))); 9852 ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50); 9853 #ifdef AHD_DEBUG 9854 if ((ahd_debug & AHD_SHOW_SG) != 0) { 9855 fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr); 9856 if (fifo_scb != NULL) 9857 ahd_dump_sglist(fifo_scb); 9858 } 9859 #endif 9860 } 9861 printk("\nLQIN: "); 9862 for (i = 0; i < 20; i++) 9863 printk("0x%x ", ahd_inb(ahd, LQIN + i)); 9864 printk("\n"); 9865 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 9866 printk("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n", 9867 ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE), 9868 ahd_inb(ahd, OPTIONMODE)); 9869 printk("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n", 9870 ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT), 9871 ahd_inb(ahd, MAXCMDCNT)); 9872 printk("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n", 9873 ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID), 9874 ahd_inb(ahd, SAVED_LUN)); 9875 ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50); 9876 printk("\n"); 9877 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 9878 cur_col = 0; 9879 ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50); 9880 printk("\n"); 9881 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); 9882 printk("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n", 9883 ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX), 9884 ahd_inw(ahd, DINDEX)); 9885 printk("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n", 9886 ahd_name(ahd), ahd_get_scbptr(ahd), 9887 ahd_inw_scbram(ahd, SCB_NEXT), 9888 ahd_inw_scbram(ahd, SCB_NEXT2)); 9889 printk("CDB %x %x %x %x %x %x\n", 9890 ahd_inb_scbram(ahd, SCB_CDB_STORE), 9891 ahd_inb_scbram(ahd, SCB_CDB_STORE+1), 9892 ahd_inb_scbram(ahd, SCB_CDB_STORE+2), 9893 ahd_inb_scbram(ahd, SCB_CDB_STORE+3), 9894 ahd_inb_scbram(ahd, SCB_CDB_STORE+4), 9895 ahd_inb_scbram(ahd, SCB_CDB_STORE+5)); 9896 printk("STACK:"); 9897 for (i = 0; i < ahd->stack_size; i++) { 9898 ahd->saved_stack[i] = 9899 ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8); 9900 printk(" 0x%x", ahd->saved_stack[i]); 9901 } 9902 for (i = ahd->stack_size-1; i >= 0; i--) { 9903 ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF); 9904 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); 9905 } 9906 printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 9907 ahd_restore_modes(ahd, saved_modes); 
9908 if (paused == 0) 9909 ahd_unpause(ahd); 9910 } 9911 9912 #if 0 9913 void 9914 ahd_dump_scbs(struct ahd_softc *ahd) 9915 { 9916 ahd_mode_state saved_modes; 9917 u_int saved_scb_index; 9918 int i; 9919 9920 saved_modes = ahd_save_modes(ahd); 9921 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 9922 saved_scb_index = ahd_get_scbptr(ahd); 9923 for (i = 0; i < AHD_SCB_MAX; i++) { 9924 ahd_set_scbptr(ahd, i); 9925 printk("%3d", i); 9926 printk("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n", 9927 ahd_inb_scbram(ahd, SCB_CONTROL), 9928 ahd_inb_scbram(ahd, SCB_SCSIID), 9929 ahd_inw_scbram(ahd, SCB_NEXT), 9930 ahd_inw_scbram(ahd, SCB_NEXT2), 9931 ahd_inl_scbram(ahd, SCB_SGPTR), 9932 ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR)); 9933 } 9934 printk("\n"); 9935 ahd_set_scbptr(ahd, saved_scb_index); 9936 ahd_restore_modes(ahd, saved_modes); 9937 } 9938 #endif /* 0 */ 9939 9940 /**************************** Flexport Logic **********************************/ 9941 /* 9942 * Read count 16-bit words from 16-bit word address start_addr from the 9943 * SEEPROM attached to the controller, into buf, using the controller's 9944 * SEEPROM reading state machine. Optionally treat the data as a byte 9945 * stream in terms of byte order. 9946 */ 9947 int 9948 ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, 9949 u_int start_addr, u_int count, int bytestream) 9950 { 9951 u_int cur_addr; 9952 u_int end_addr; 9953 int error; 9954 9955 /* 9956 * If we never make it through the loop even once, 9957 * we were passed invalid arguments. 9958 */ 9959 error = EINVAL; 9960 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 9961 end_addr = start_addr + count; 9962 for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { 9963 9964 ahd_outb(ahd, SEEADR, cur_addr); 9965 ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART); 9966 9967 error = ahd_wait_seeprom(ahd); 9968 if (error) 9969 break; 9970 if (bytestream != 0) { 9971 uint8_t *bytestream_ptr; 9972 9973 bytestream_ptr = (uint8_t *)buf; 9974 *bytestream_ptr++ = ahd_inb(ahd, SEEDAT); 9975 *bytestream_ptr = ahd_inb(ahd, SEEDAT+1); 9976 } else { 9977 /* 9978 * ahd_inw() already handles machine byte order. 9979 */ 9980 *buf = ahd_inw(ahd, SEEDAT); 9981 } 9982 buf++; 9983 } 9984 return (error); 9985 } 9986 9987 /* 9988 * Write count 16-bit words from buf into the SEEPROM attached to the 9989 * controller, starting at 16-bit word address start_addr, using the 9990 * controller's SEEPROM writing state machine. 9991 */ 9992 int 9993 ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, 9994 u_int start_addr, u_int count) 9995 { 9996 u_int cur_addr; 9997 u_int end_addr; 9998 int error; 9999 int retval; 10000 10001 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 10002 error = ENOENT; 10003 10004 /* Place the chip into write-enable mode */ 10005 ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR); 10006 ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART); 10007 error = ahd_wait_seeprom(ahd); 10008 if (error) 10009 return (error); 10010 10011 /* 10012 * Write the data. If we don't get through the loop at 10013 * least once, the arguments were invalid. 10014 */ 10015 retval = EINVAL; 10016 end_addr = start_addr + count; 10017 for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { 10018 ahd_outw(ahd, SEEDAT, *buf++); 10019 ahd_outb(ahd, SEEADR, cur_addr); 10020 ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART); 10021 10022 retval = ahd_wait_seeprom(ahd); 10023 if (retval) 10024 break; 10025 } 10026 10027 /* 10028 * Disable writes. 
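* Issuing the EWDS (erase/write-disable) opcode ensures that any stray SEEPROM cycles after this point are rejected by the part. 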
10029 */ 10030 ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR); 10031 ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART); 10032 error = ahd_wait_seeprom(ahd); 10033 if (error) 10034 return (error); 10035 return (retval); 10036 } 10037 10038 /* 10039 * Wait up to ~25ms (5000 polls of 5us each) for the serial EEPROM to satisfy our request. 10040 */ 10041 static int 10042 ahd_wait_seeprom(struct ahd_softc *ahd) 10043 { 10044 int cnt; 10045 10046 cnt = 5000; 10047 while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt) 10048 ahd_delay(5); 10049 10050 if (cnt == 0) 10051 return (ETIMEDOUT); 10052 return (0); 10053 } 10054 10055 /* 10056 * Validate the two checksums in the per_channel 10057 * vital product data struct. 10058 */ 10059 static int 10060 ahd_verify_vpd_cksum(struct vpd_config *vpd) 10061 { 10062 int i; 10063 int maxaddr; 10064 uint32_t checksum; 10065 uint8_t *vpdarray; 10066 10067 vpdarray = (uint8_t *)vpd; 10068 maxaddr = offsetof(struct vpd_config, vpd_checksum); 10069 checksum = 0; 10070 for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++) 10071 checksum = checksum + vpdarray[i]; 10072 if (checksum == 0 10073 || (-checksum & 0xFF) != vpd->vpd_checksum) 10074 return (0); 10075 10076 checksum = 0; 10077 maxaddr = offsetof(struct vpd_config, checksum); 10078 for (i = offsetof(struct vpd_config, default_target_flags); 10079 i < maxaddr; i++) 10080 checksum = checksum + vpdarray[i]; 10081 if (checksum == 0 10082 || (-checksum & 0xFF) != vpd->checksum) 10083 return (0); 10084 return (1); 10085 } 10086 10087 int 10088 ahd_verify_cksum(struct seeprom_config *sc) 10089 { 10090 int i; 10091 int maxaddr; 10092 uint32_t checksum; 10093 uint16_t *scarray; 10094 10095 maxaddr = (sizeof(*sc)/2) - 1; 10096 checksum = 0; 10097 scarray = (uint16_t *)sc; 10098 10099 for (i = 0; i < maxaddr; i++) 10100 checksum = checksum + scarray[i]; 10101 if (checksum == 0 10102 || (checksum & 0xFFFF) != sc->checksum) { 10103 return (0); 10104 } else { 10105 return (1); 10106 } 10107 } 10108 10109 int 10110 ahd_acquire_seeprom(struct ahd_softc *ahd) 10111 { 10112 /* 10113 * We should be able to determine the SEEPROM type 10114 * from the flexport logic, but unfortunately not 10115 * all implementations have this logic and there is 10116 * no programmatic method for determining if the logic 10117 * is present. 10118 */ 10119 return (1); 10120 #if 0 10121 uint8_t seetype; 10122 int error; 10123 10124 error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype); 10125 if (error != 0 10126 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE)) 10127 return (0); 10128 return (1); 10129 #endif 10130 } 10131 10132 void 10133 ahd_release_seeprom(struct ahd_softc *ahd) 10134 { 10135 /* Currently a no-op */ 10136 } 10137 10138 /* 10139 * Wait at most 2 seconds for flexport arbitration to succeed. 
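* (2s = 400000 polls of 5us each; cf. the cnt computation below.) 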
10140 */ 10141 static int 10142 ahd_wait_flexport(struct ahd_softc *ahd) 10143 { 10144 int cnt; 10145 10146 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 10147 cnt = 1000000 * 2 / 5; 10148 while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt) 10149 ahd_delay(5); 10150 10151 if (cnt == 0) 10152 return (ETIMEDOUT); 10153 return (0); 10154 } 10155 10156 int 10157 ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value) 10158 { 10159 int error; 10160 10161 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 10162 if (addr > 7) 10163 panic("ahd_write_flexport: address out of range"); 10164 ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); 10165 error = ahd_wait_flexport(ahd); 10166 if (error != 0) 10167 return (error); 10168 ahd_outb(ahd, BRDDAT, value); 10169 ahd_flush_device_writes(ahd); 10170 ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3)); 10171 ahd_flush_device_writes(ahd); 10172 ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); 10173 ahd_flush_device_writes(ahd); 10174 ahd_outb(ahd, BRDCTL, 0); 10175 ahd_flush_device_writes(ahd); 10176 return (0); 10177 } 10178 10179 int 10180 ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value) 10181 { 10182 int error; 10183 10184 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 10185 if (addr > 7) 10186 panic("ahd_read_flexport: address out of range"); 10187 ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3)); 10188 error = ahd_wait_flexport(ahd); 10189 if (error != 0) 10190 return (error); 10191 *value = ahd_inb(ahd, BRDDAT); 10192 ahd_outb(ahd, BRDCTL, 0); 10193 ahd_flush_device_writes(ahd); 10194 return (0); 10195 } 10196 10197 /************************* Target Mode ****************************************/ 10198 #ifdef AHD_TARGET_MODE 10199 cam_status 10200 ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb, 10201 struct ahd_tmode_tstate **tstate, 10202 struct ahd_tmode_lstate **lstate, 10203 int notfound_failure) 10204 { 10205 10206 if ((ahd->features & AHD_TARGETMODE) == 0) 10207 return (CAM_REQ_INVALID); 10208 10209 /* 10210 * Handle the 'black hole' device that sucks up 10211 * requests to unattached luns on enabled targets. 10212 */ 10213 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 10214 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 10215 *tstate = NULL; 10216 *lstate = ahd->black_hole; 10217 } else { 10218 u_int max_id; 10219 10220 max_id = (ahd->features & AHD_WIDE) ? 
16 : 8; 10221 if (ccb->ccb_h.target_id >= max_id) 10222 return (CAM_TID_INVALID); 10223 10224 if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS) 10225 return (CAM_LUN_INVALID); 10226 10227 *tstate = ahd->enabled_targets[ccb->ccb_h.target_id]; 10228 *lstate = NULL; 10229 if (*tstate != NULL) 10230 *lstate = 10231 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 10232 } 10233 10234 if (notfound_failure != 0 && *lstate == NULL) 10235 return (CAM_PATH_INVALID); 10236 10237 return (CAM_REQ_CMP); 10238 } 10239 10240 void 10241 ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) 10242 { 10243 #if NOT_YET 10244 struct ahd_tmode_tstate *tstate; 10245 struct ahd_tmode_lstate *lstate; 10246 struct ccb_en_lun *cel; 10247 cam_status status; 10248 u_int target; 10249 u_int lun; 10250 u_int target_mask; 10251 u_long s; 10252 char channel; 10253 10254 status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate, 10255 /*notfound_failure*/FALSE); 10256 10257 if (status != CAM_REQ_CMP) { 10258 ccb->ccb_h.status = status; 10259 return; 10260 } 10261 10262 if ((ahd->features & AHD_MULTIROLE) != 0) { 10263 u_int our_id; 10264 10265 our_id = ahd->our_id; 10266 if (ccb->ccb_h.target_id != our_id) { 10267 if ((ahd->features & AHD_MULTI_TID) != 0 10268 && (ahd->flags & AHD_INITIATORROLE) != 0) { 10269 /* 10270 * Only allow additional targets if 10271 * the initiator role is disabled. 10272 * The hardware cannot handle a re-select-in 10273 * on the initiator id during a re-select-out 10274 * on a different target id. 10275 */ 10276 status = CAM_TID_INVALID; 10277 } else if ((ahd->flags & AHD_INITIATORROLE) != 0 10278 || ahd->enabled_luns > 0) { 10279 /* 10280 * Only allow our target id to change 10281 * if the initiator role is not configured 10282 * and there are no enabled luns which 10283 * are attached to the currently registered 10284 * scsi id. 10285 */ 10286 status = CAM_TID_INVALID; 10287 } 10288 } 10289 } 10290 10291 if (status != CAM_REQ_CMP) { 10292 ccb->ccb_h.status = status; 10293 return; 10294 } 10295 10296 /* 10297 * We now have an id that is valid. 10298 * If we aren't in target mode, switch modes. 10299 */ 10300 if ((ahd->flags & AHD_TARGETROLE) == 0 10301 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 10302 u_long s; 10303 10304 printk("Configuring Target Mode\n"); 10305 ahd_lock(ahd, &s); 10306 if (LIST_FIRST(&ahd->pending_scbs) != NULL) { 10307 ccb->ccb_h.status = CAM_BUSY; 10308 ahd_unlock(ahd, &s); 10309 return; 10310 } 10311 ahd->flags |= AHD_TARGETROLE; 10312 if ((ahd->features & AHD_MULTIROLE) == 0) 10313 ahd->flags &= ~AHD_INITIATORROLE; 10314 ahd_pause(ahd); 10315 ahd_loadseq(ahd); 10316 ahd_restart(ahd); 10317 ahd_unlock(ahd, &s); 10318 } 10319 cel = &ccb->cel; 10320 target = ccb->ccb_h.target_id; 10321 lun = ccb->ccb_h.target_lun; 10322 channel = SIM_CHANNEL(ahd, sim); 10323 target_mask = 0x01 << target; 10324 if (channel == 'B') 10325 target_mask <<= 8; 10326 10327 if (cel->enable != 0) { 10328 u_int scsiseq1; 10329 10330 /* Are we already enabled? */ 10331 if (lstate != NULL) { 10332 xpt_print_path(ccb->ccb_h.path); 10333 printk("Lun already enabled\n"); 10334 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 10335 return; 10336 } 10337 10338 if (cel->grp6_len != 0 10339 || cel->grp7_len != 0) { 10340 /* 10341 * Don't (yet?) support vendor-specific 10342 * commands. 10343 */ 10344 ccb->ccb_h.status = CAM_REQ_INVALID; 10345 printk("Non-zero Group Codes\n"); 10346 return; 10347 } 10348 10349 /* 10350 * Seems to be okay. 10351 * Set up our data structures. 
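* (A tstate exists per enabled target id, with an lstate per enabled lun beneath it; the wildcard target instead uses the 'black hole' lstate.) 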
10352 */ 10353 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 10354 tstate = ahd_alloc_tstate(ahd, target, channel); 10355 if (tstate == NULL) { 10356 xpt_print_path(ccb->ccb_h.path); 10357 printk("Couldn't allocate tstate\n"); 10358 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 10359 return; 10360 } 10361 } 10362 lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC); 10363 if (lstate == NULL) { 10364 xpt_print_path(ccb->ccb_h.path); 10365 printk("Couldn't allocate lstate\n"); 10366 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 10367 return; 10368 } 10369 status = xpt_create_path(&lstate->path, /*periph*/NULL, 10370 xpt_path_path_id(ccb->ccb_h.path), 10371 xpt_path_target_id(ccb->ccb_h.path), 10372 xpt_path_lun_id(ccb->ccb_h.path)); 10373 if (status != CAM_REQ_CMP) { 10374 kfree(lstate); 10375 xpt_print_path(ccb->ccb_h.path); 10376 printk("Couldn't allocate path\n"); 10377 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 10378 return; 10379 } 10380 SLIST_INIT(&lstate->accept_tios); 10381 SLIST_INIT(&lstate->immed_notifies); 10382 ahd_lock(ahd, &s); 10383 ahd_pause(ahd); 10384 if (target != CAM_TARGET_WILDCARD) { 10385 tstate->enabled_luns[lun] = lstate; 10386 ahd->enabled_luns++; 10387 10388 if ((ahd->features & AHD_MULTI_TID) != 0) { 10389 u_int targid_mask; 10390 10391 targid_mask = ahd_inw(ahd, TARGID); 10392 targid_mask |= target_mask; 10393 ahd_outw(ahd, TARGID, targid_mask); 10394 ahd_update_scsiid(ahd, targid_mask); 10395 } else { 10396 u_int our_id; 10397 char channel; 10398 10399 channel = SIM_CHANNEL(ahd, sim); 10400 our_id = SIM_SCSI_ID(ahd, sim); 10401 10402 /* 10403 * This can only happen if selections 10404 * are not enabled 10405 */ 10406 if (target != our_id) { 10407 u_int sblkctl; 10408 char cur_channel; 10409 int swap; 10410 10411 sblkctl = ahd_inb(ahd, SBLKCTL); 10412 cur_channel = (sblkctl & SELBUSB) 10413 ? 
'B' : 'A'; 10414 if ((ahd->features & AHD_TWIN) == 0) 10415 cur_channel = 'A'; 10416 swap = cur_channel != channel; 10417 ahd->our_id = target; 10418 10419 if (swap) 10420 ahd_outb(ahd, SBLKCTL, 10421 sblkctl ^ SELBUSB); 10422 10423 ahd_outb(ahd, SCSIID, target); 10424 10425 if (swap) 10426 ahd_outb(ahd, SBLKCTL, sblkctl); 10427 } 10428 } 10429 } else 10430 ahd->black_hole = lstate; 10431 /* Allow select-in operations */ 10432 if (ahd->black_hole != NULL && ahd->enabled_luns > 0) { 10433 scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); 10434 scsiseq1 |= ENSELI; 10435 ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); 10436 scsiseq1 = ahd_inb(ahd, SCSISEQ1); 10437 scsiseq1 |= ENSELI; 10438 ahd_outb(ahd, SCSISEQ1, scsiseq1); 10439 } 10440 ahd_unpause(ahd); 10441 ahd_unlock(ahd, &s); 10442 ccb->ccb_h.status = CAM_REQ_CMP; 10443 xpt_print_path(ccb->ccb_h.path); 10444 printk("Lun now enabled for target mode\n"); 10445 } else { 10446 struct scb *scb; 10447 int i, empty; 10448 10449 if (lstate == NULL) { 10450 ccb->ccb_h.status = CAM_LUN_INVALID; 10451 return; 10452 } 10453 10454 ahd_lock(ahd, &s); 10455 10456 ccb->ccb_h.status = CAM_REQ_CMP; 10457 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 10458 struct ccb_hdr *ccbh; 10459 10460 ccbh = &scb->io_ctx->ccb_h; 10461 if (ccbh->func_code == XPT_CONT_TARGET_IO 10462 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 10463 printk("CTIO pending\n"); 10464 ccb->ccb_h.status = CAM_REQ_INVALID; 10465 ahd_unlock(ahd, &s); 10466 return; 10467 } 10468 } 10469 10470 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 10471 printk("ATIOs pending\n"); 10472 ccb->ccb_h.status = CAM_REQ_INVALID; 10473 } 10474 10475 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 10476 printk("INOTs pending\n"); 10477 ccb->ccb_h.status = CAM_REQ_INVALID; 10478 } 10479 10480 if (ccb->ccb_h.status != CAM_REQ_CMP) { 10481 ahd_unlock(ahd, &s); 10482 return; 10483 } 10484 10485 xpt_print_path(ccb->ccb_h.path); 10486 printk("Target mode disabled\n"); 10487 xpt_free_path(lstate->path); 10488 kfree(lstate); 10489 10490 ahd_pause(ahd); 10491 /* Can we clean up the target too? */ 10492 if (target != CAM_TARGET_WILDCARD) { 10493 tstate->enabled_luns[lun] = NULL; 10494 ahd->enabled_luns--; 10495 for (empty = 1, i = 0; i < 8; i++) 10496 if (tstate->enabled_luns[i] != NULL) { 10497 empty = 0; 10498 break; 10499 } 10500 10501 if (empty) { 10502 ahd_free_tstate(ahd, target, channel, 10503 /*force*/FALSE); 10504 if (ahd->features & AHD_MULTI_TID) { 10505 u_int targid_mask; 10506 10507 targid_mask = ahd_inw(ahd, TARGID); 10508 targid_mask &= ~target_mask; 10509 ahd_outw(ahd, TARGID, targid_mask); 10510 ahd_update_scsiid(ahd, targid_mask); 10511 } 10512 } 10513 } else { 10514 10515 ahd->black_hole = NULL; 10516 10517 /* 10518 * We can't allow selections without 10519 * our black hole device. 10520 */ 10521 empty = TRUE; 10522 } 10523 if (ahd->enabled_luns == 0) { 10524 /* Disallow select-in */ 10525 u_int scsiseq1; 10526 10527 scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); 10528 scsiseq1 &= ~ENSELI; 10529 ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); 10530 scsiseq1 = ahd_inb(ahd, SCSISEQ1); 10531 scsiseq1 &= ~ENSELI; 10532 ahd_outb(ahd, SCSISEQ1, scsiseq1); 10533 10534 if ((ahd->features & AHD_MULTIROLE) == 0) { 10535 printk("Configuring Initiator Mode\n"); 10536 ahd->flags &= ~AHD_TARGETROLE; 10537 ahd->flags |= AHD_INITIATORROLE; 10538 ahd_pause(ahd); 10539 ahd_loadseq(ahd); 10540 ahd_restart(ahd); 10541 /* 10542 * Unpaused. The extra unpause 10543 * that follows is harmless. 
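* ahd_restart() leaves the sequencer running, so the ahd_unpause() on the common exit path below is effectively a no-op in this case. 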
10544 */ 10545 } 10546 } 10547 ahd_unpause(ahd); 10548 ahd_unlock(ahd, &s); 10549 } 10550 #endif 10551 } 10552 10553 static void 10554 ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask) 10555 { 10556 #if NOT_YET 10557 u_int scsiid_mask; 10558 u_int scsiid; 10559 10560 if ((ahd->features & AHD_MULTI_TID) == 0) 10561 panic("ahd_update_scsiid called on non-multitid unit\n"); 10562 10563 /* 10564 * Since we will rely on the TARGID mask 10565 * for selection enables, ensure that OID 10566 * in SCSIID is not set to some other ID 10567 * that we don't want to allow selections on. 10568 */ 10569 if ((ahd->features & AHD_ULTRA2) != 0) 10570 scsiid = ahd_inb(ahd, SCSIID_ULTRA2); 10571 else 10572 scsiid = ahd_inb(ahd, SCSIID); 10573 scsiid_mask = 0x1 << (scsiid & OID); 10574 if ((targid_mask & scsiid_mask) == 0) { 10575 u_int our_id; 10576 10577 /* ffs counts from 1 */ 10578 our_id = ffs(targid_mask); 10579 if (our_id == 0) 10580 our_id = ahd->our_id; 10581 else 10582 our_id--; 10583 scsiid &= TID; 10584 scsiid |= our_id; 10585 } 10586 if ((ahd->features & AHD_ULTRA2) != 0) 10587 ahd_outb(ahd, SCSIID_ULTRA2, scsiid); 10588 else 10589 ahd_outb(ahd, SCSIID, scsiid); 10590 #endif 10591 } 10592 10593 static void 10594 ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) 10595 { 10596 struct target_cmd *cmd; 10597 10598 ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD); 10599 while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) { 10600 10601 /* 10602 * Only advance through the queue if we 10603 * have the resources to process the command. 10604 */ 10605 if (ahd_handle_target_cmd(ahd, cmd) != 0) 10606 break; 10607 10608 cmd->cmd_valid = 0; 10609 ahd_dmamap_sync(ahd, ahd->shared_data_dmat, 10610 ahd->shared_data_map.dmamap, 10611 ahd_targetcmd_offset(ahd, ahd->tqinfifonext), 10612 sizeof(struct target_cmd), 10613 BUS_DMASYNC_PREREAD); 10614 ahd->tqinfifonext++; 10615 10616 /* 10617 * Lazily update our position in the target mode incoming 10618 * command queue as seen by the sequencer. 10619 */ 10620 if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { 10621 u_int hs_mailbox; 10622 10623 hs_mailbox = ahd_inb(ahd, HS_MAILBOX); 10624 hs_mailbox &= ~HOST_TQINPOS; 10625 hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS; 10626 ahd_outb(ahd, HS_MAILBOX, hs_mailbox); 10627 } 10628 } 10629 } 10630 10631 static int 10632 ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd) 10633 { 10634 struct ahd_tmode_tstate *tstate; 10635 struct ahd_tmode_lstate *lstate; 10636 struct ccb_accept_tio *atio; 10637 uint8_t *byte; 10638 int initiator; 10639 int target; 10640 int lun; 10641 10642 initiator = SCSIID_TARGET(ahd, cmd->scsiid); 10643 target = SCSIID_OUR_ID(cmd->scsiid); 10644 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); 10645 10646 byte = cmd->bytes; 10647 tstate = ahd->enabled_targets[target]; 10648 lstate = NULL; 10649 if (tstate != NULL) 10650 lstate = tstate->enabled_luns[lun]; 10651 10652 /* 10653 * Commands for disabled luns go to the black hole driver. 10654 */ 10655 if (lstate == NULL) 10656 lstate = ahd->black_hole; 10657 10658 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); 10659 if (atio == NULL) { 10660 ahd->flags |= AHD_TQINFIFO_BLOCKED; 10661 /* 10662 * Wait for more ATIOs from the peripheral driver for this lun. 
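* Returning non-zero leaves cmd_valid set, so this command will be presented again on the next pass through ahd_run_tqinfifo(). 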
10663 */ 10664 return (1); 10665 } else 10666 ahd->flags &= ~AHD_TQINFIFO_BLOCKED; 10667 #ifdef AHD_DEBUG 10668 if ((ahd_debug & AHD_SHOW_TQIN) != 0) 10669 printk("Incoming command from %d for %d:%d%s\n", 10670 initiator, target, lun, 10671 lstate == ahd->black_hole ? "(Black Holed)" : ""); 10672 #endif 10673 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 10674 10675 if (lstate == ahd->black_hole) { 10676 /* Fill in the wildcards */ 10677 atio->ccb_h.target_id = target; 10678 atio->ccb_h.target_lun = lun; 10679 } 10680 10681 /* 10682 * Package it up and send it off to 10683 * whoever has this lun enabled. 10684 */ 10685 atio->sense_len = 0; 10686 atio->init_id = initiator; 10687 if (byte[0] != 0xFF) { 10688 /* Tag was included */ 10689 atio->tag_action = *byte++; 10690 atio->tag_id = *byte++; 10691 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 10692 } else { 10693 atio->ccb_h.flags = 0; 10694 } 10695 byte++; 10696 10697 /* Okay. Now determine the cdb size based on the command code */ 10698 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 10699 case 0: 10700 atio->cdb_len = 6; 10701 break; 10702 case 1: 10703 case 2: 10704 atio->cdb_len = 10; 10705 break; 10706 case 4: 10707 atio->cdb_len = 16; 10708 break; 10709 case 5: 10710 atio->cdb_len = 12; 10711 break; 10712 case 3: 10713 default: 10714 /* Only copy the opcode. */ 10715 atio->cdb_len = 1; 10716 printk("Reserved or VU command code type encountered\n"); 10717 break; 10718 } 10719 10720 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 10721 10722 atio->ccb_h.status |= CAM_CDB_RECVD; 10723 10724 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 10725 /* 10726 * We weren't allowed to disconnect. 10727 * We're hanging on the bus until a 10728 * continue target I/O comes in response 10729 * to this accept tio. 10730 */ 10731 #ifdef AHD_DEBUG 10732 if ((ahd_debug & AHD_SHOW_TQIN) != 0) 10733 printk("Received Immediate Command %d:%d:%d - %p\n", 10734 initiator, target, lun, ahd->pending_device); 10735 #endif 10736 ahd->pending_device = lstate; 10737 ahd_freeze_ccb((union ccb *)atio); 10738 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 10739 } 10740 xpt_done((union ccb*)atio); 10741 return (0); 10742 } 10743 10744 #endif 10745
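#if 0
/*
 * Illustrative sketch only, not part of the driver: a hypothetical
 * helper showing how the SEEPROM and flexport routines above compose.
 * The function name is invented for illustration, and the caller is
 * assumed to already satisfy the AHD_MODE_SCSI mode assertions made
 * inside these helpers.
 */
static void
ahd_flexport_example(struct ahd_softc *ahd)
{
	uint16_t word;
	uint8_t byte;

	if (ahd_acquire_seeprom(ahd) != 0) {
		/* Read one 16-bit word from SEEPROM word address 0. */
		if (ahd_read_seeprom(ahd, &word, /*start_addr*/0,
				     /*count*/1, /*bytestream*/FALSE) == 0)
			printk("%s: SEEPROM word 0 = 0x%x\n",
			       ahd_name(ahd), word);
		ahd_release_seeprom(ahd);
	}

	/* Read flexport address 0 and write the value back unchanged. */
	if (ahd_read_flexport(ahd, /*addr*/0, &byte) == 0)
		ahd_write_flexport(ahd, /*addr*/0, byte);
}
#endif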