/*
 * Core routines and tables shareable across OS platforms.
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2000-2003 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
 */

#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
#include "aicasm/aicasm_insformat.h"

/***************************** Lookup Tables **********************************/
static const char *const ahd_chip_names[] =
{
	"NONE",
	"aic7901",
	"aic7902",
	"aic7901A"
};

/*
 * Hardware error codes.
 */
struct ahd_hard_error_entry {
	uint8_t errno;
	const char *errmesg;
};

static const struct ahd_hard_error_entry ahd_hard_errors[] = {
	{ DSCTMOUT,	"Discard Timer has timed out" },
	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
	{ SQPARERR,	"Sequencer Parity Error" },
	{ DPARERR,	"Data-path Parity Error" },
	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
	{ CIOPARERR,	"CIOBUS Parity Error" },
};
static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors);

static const struct ahd_phase_table_entry ahd_phase_table[] =
{
	{ P_DATAOUT,	NOP,			"in Data-out phase"	},
	{ P_DATAIN,	INITIATOR_ERROR,	"in Data-in phase"	},
	{ P_DATAOUT_DT,	NOP,			"in DT Data-out phase"	},
	{ P_DATAIN_DT,	INITIATOR_ERROR,	"in DT Data-in phase"	},
	{ P_COMMAND,	NOP,			"in Command phase"	},
	{ P_MESGOUT,	NOP,			"in Message-out phase"	},
	{ P_STATUS,	INITIATOR_ERROR,	"in Status phase"	},
	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
	{ P_BUSFREE,	NOP,			"while idle"		},
	{ 0,		NOP,			"in unknown phase"	}
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last element from the count.
 */
static const u_int num_phases = ARRAY_SIZE(ahd_phase_table) - 1;
/* Our Sequencer Program */
#include "aic79xx_seq.h"

/**************************** Function Declarations ***************************/
static void		ahd_handle_transmission_error(struct ahd_softc *ahd);
static void		ahd_handle_lqiphase_error(struct ahd_softc *ahd,
						  u_int lqistat1);
static int		ahd_handle_pkt_busfree(struct ahd_softc *ahd,
					       u_int busfreetime);
static int		ahd_handle_nonpkt_busfree(struct ahd_softc *ahd);
static void		ahd_handle_proto_violation(struct ahd_softc *ahd);
static void		ahd_force_renegotiation(struct ahd_softc *ahd,
						struct ahd_devinfo *devinfo);

static struct ahd_tmode_tstate*
			ahd_alloc_tstate(struct ahd_softc *ahd,
					 u_int scsi_id, char channel);
#ifdef AHD_TARGET_MODE
static void		ahd_free_tstate(struct ahd_softc *ahd,
					u_int scsi_id, char channel, int force);
#endif
static void		ahd_devlimited_syncrate(struct ahd_softc *ahd,
						struct ahd_initiator_tinfo *,
						u_int *period,
						u_int *ppr_options,
						role_t role);
static void		ahd_update_neg_table(struct ahd_softc *ahd,
					     struct ahd_devinfo *devinfo,
					     struct ahd_transinfo *tinfo);
static void		ahd_update_pending_scbs(struct ahd_softc *ahd);
static void		ahd_fetch_devinfo(struct ahd_softc *ahd,
					  struct ahd_devinfo *devinfo);
static void		ahd_scb_devinfo(struct ahd_softc *ahd,
					struct ahd_devinfo *devinfo,
					struct scb *scb);
static void		ahd_setup_initiator_msgout(struct ahd_softc *ahd,
						   struct ahd_devinfo *devinfo,
						   struct scb *scb);
static void		ahd_build_transfer_msg(struct ahd_softc *ahd,
					       struct ahd_devinfo *devinfo);
static void		ahd_construct_sdtr(struct ahd_softc *ahd,
					   struct ahd_devinfo *devinfo,
					   u_int period, u_int offset);
static void		ahd_construct_wdtr(struct ahd_softc *ahd,
					   struct ahd_devinfo *devinfo,
					   u_int bus_width);
static void		ahd_construct_ppr(struct ahd_softc *ahd,
					  struct ahd_devinfo *devinfo,
					  u_int period, u_int offset,
					  u_int bus_width, u_int ppr_options);
static void		ahd_clear_msg_state(struct ahd_softc *ahd);
static void		ahd_handle_message_phase(struct ahd_softc *ahd);
typedef enum {
	AHDMSG_1B,
	AHDMSG_2B,
	AHDMSG_EXT
} ahd_msgtype;
static int		ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type,
				     u_int msgval, int full);
static int		ahd_parse_msg(struct ahd_softc *ahd,
				      struct ahd_devinfo *devinfo);
static int		ahd_handle_msg_reject(struct ahd_softc *ahd,
					      struct ahd_devinfo *devinfo);
static void		ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
						    struct ahd_devinfo *devinfo);
static void		ahd_reinitialize_dataptrs(struct ahd_softc *ahd);
static void		ahd_handle_devreset(struct ahd_softc *ahd,
					    struct ahd_devinfo *devinfo,
					    u_int lun, cam_status status,
					    char *message, int verbose_level);
#ifdef AHD_TARGET_MODE
static void		ahd_setup_target_msgin(struct ahd_softc *ahd,
					       struct ahd_devinfo *devinfo,
					       struct scb *scb);
#endif

static u_int		ahd_sglist_size(struct ahd_softc *ahd);
static u_int		ahd_sglist_allocsize(struct ahd_softc *ahd);
static bus_dmamap_callback_t
			ahd_dmamap_cb;
static void		ahd_initialize_hscbs(struct ahd_softc *ahd);
static int		ahd_init_scbdata(struct ahd_softc *ahd);
static void		ahd_fini_scbdata(struct ahd_softc *ahd);
static void		ahd_setup_iocell_workaround(struct ahd_softc *ahd);
static void		ahd_iocell_first_selection(struct ahd_softc *ahd);
static void		ahd_add_col_list(struct ahd_softc *ahd,
					 struct scb *scb, u_int col_idx);
static void		ahd_rem_col_list(struct ahd_softc *ahd,
					 struct scb *scb);
static void		ahd_chip_init(struct ahd_softc *ahd);
static void		ahd_qinfifo_requeue(struct ahd_softc *ahd,
					    struct scb *prev_scb,
					    struct scb *scb);
static int		ahd_qinfifo_count(struct ahd_softc *ahd);
static int		ahd_search_scb_list(struct ahd_softc *ahd, int target,
					    char channel, int lun, u_int tag,
					    role_t role, uint32_t status,
					    ahd_search_action action,
					    u_int *list_head, u_int *list_tail,
					    u_int tid);
static void		ahd_stitch_tid_list(struct ahd_softc *ahd,
					    u_int tid_prev, u_int tid_cur,
					    u_int tid_next);
static void		ahd_add_scb_to_free_list(struct ahd_softc *ahd,
						 u_int scbid);
static u_int		ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
				     u_int prev, u_int next, u_int tid);
static void		ahd_reset_current_bus(struct ahd_softc *ahd);
static void		ahd_stat_timer(struct timer_list *t);
#ifdef AHD_DUMP_SEQ
static void		ahd_dumpseq(struct ahd_softc *ahd);
#endif
static void		ahd_loadseq(struct ahd_softc *ahd);
static int		ahd_check_patch(struct ahd_softc *ahd,
					const struct patch **start_patch,
					u_int start_instr, u_int *skip_addr);
static u_int		ahd_resolve_seqaddr(struct ahd_softc *ahd,
					    u_int address);
static void		ahd_download_instr(struct ahd_softc *ahd,
					   u_int instrptr, uint8_t *dconsts);
static int		ahd_probe_stack_size(struct ahd_softc *ahd);
static int		ahd_scb_active_in_fifo(struct ahd_softc *ahd,
					       struct scb *scb);
static void		ahd_run_data_fifo(struct ahd_softc *ahd,
					  struct scb *scb);

#ifdef AHD_TARGET_MODE
static void		ahd_queue_lstate_event(struct ahd_softc *ahd,
					       struct ahd_tmode_lstate *lstate,
					       u_int initiator_id,
					       u_int event_type,
					       u_int event_arg);
static void		ahd_update_scsiid(struct ahd_softc *ahd,
					  u_int targid_mask);
static int		ahd_handle_target_cmd(struct ahd_softc *ahd,
					      struct target_cmd *cmd);
#endif

static int		ahd_abort_scbs(struct ahd_softc *ahd, int target,
				       char channel, int lun, u_int tag,
				       role_t role, uint32_t status);
static void		ahd_alloc_scbs(struct ahd_softc *ahd);
static void		ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl,
				     u_int scbid);
static void		ahd_calc_residual(struct ahd_softc *ahd,
					  struct scb *scb);
static void		ahd_clear_critical_section(struct ahd_softc *ahd);
static void		ahd_clear_intstat(struct ahd_softc *ahd);
static void		ahd_enable_coalescing(struct ahd_softc *ahd,
					      int enable);
static u_int		ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl);
static void		ahd_freeze_devq(struct ahd_softc *ahd,
					struct scb *scb);
static void		ahd_handle_scb_status(struct ahd_softc *ahd,
					      struct scb *scb);
static const struct ahd_phase_table_entry*
			ahd_lookup_phase_entry(int phase);
static void		ahd_shutdown(void *arg);
static void		ahd_update_coalescing_values(struct ahd_softc *ahd,
						     u_int timer,
						     u_int maxcmds,
						     u_int mincmds);
static int		ahd_verify_vpd_cksum(struct vpd_config *vpd);
static int		ahd_wait_seeprom(struct ahd_softc *ahd);
static int		ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
				      int target, char channel, int lun,
				      u_int tag, role_t role);

static void		ahd_reset_cmds_pending(struct ahd_softc *ahd);

/*************************** Interrupt Services *******************************/
static void		ahd_run_qoutfifo(struct ahd_softc *ahd);
#ifdef AHD_TARGET_MODE
static void		ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
#endif
static void		ahd_handle_hwerrint(struct ahd_softc *ahd);
static void		ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
static void		ahd_handle_scsiint(struct ahd_softc *ahd,
					   u_int intstat);

/************************ Sequencer Execution Control *************************/
void
ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
{
	if (ahd->src_mode == src && ahd->dst_mode == dst)
		return;
#ifdef AHD_DEBUG
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		panic("Setting mode prior to saving it.\n");
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printk("%s: Setting mode 0x%x\n", ahd_name(ahd),
		       ahd_build_mode_state(ahd, src, dst));
#endif
	ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
	ahd->src_mode = src;
	ahd->dst_mode = dst;
}

static void
ahd_update_modes(struct ahd_softc *ahd)
{
	ahd_mode_state mode_ptr;
	ahd_mode src;
	ahd_mode dst;

	mode_ptr = ahd_inb(ahd, MODE_PTR);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
		printk("Reading mode 0x%x\n", mode_ptr);
#endif
	ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
	ahd_known_modes(ahd, src, dst);
}

static void
ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
		 ahd_mode dstmode, const char *file, int line)
{
#ifdef AHD_DEBUG
	if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
	 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
		panic("%s:%s:%d: Mode assertion failed.\n",
		      ahd_name(ahd), file, line);
	}
#endif
}

#define AHD_ASSERT_MODES(ahd, source, dest) \
	ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);

ahd_mode_state
ahd_save_modes(struct ahd_softc *ahd)
{
	if (ahd->src_mode == AHD_MODE_UNKNOWN
	 || ahd->dst_mode == AHD_MODE_UNKNOWN)
		ahd_update_modes(ahd);

	return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
}
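/*
 * Illustrative sketch (not compiled into the driver): the usual
 * pairing of ahd_save_modes()/ahd_restore_modes() around a temporary
 * mode change.  ahd_clear_fifo() and ahd_currently_packetized() below
 * follow exactly this pattern.
 */
#if 0 /* example only */
static void
example_peek_cfg_mode(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	/* ... access AHD_MODE_CFG mode-specific registers here ... */
	ahd_restore_modes(ahd, saved_modes);
}
#endif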
void
ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
{
	ahd_mode src;
	ahd_mode dst;

	ahd_extract_mode_state(ahd, state, &src, &dst);
	ahd_set_modes(ahd, src, dst);
}

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
int
ahd_is_paused(struct ahd_softc *ahd)
{
	return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
void
ahd_pause(struct ahd_softc *ahd)
{
	ahd_outb(ahd, HCNTRL, ahd->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahd_is_paused(ahd) == 0)
		;
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
void
ahd_unpause(struct ahd_softc *ahd)
{
	/*
	 * Automatically restore our modes to those saved
	 * prior to the first change of the mode.
	 */
	if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
	 && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
		if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
			ahd_reset_cmds_pending(ahd);
		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
	}

	if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
		ahd_outb(ahd, HCNTRL, ahd->unpause);

	ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
}

/*********************** Scatter Gather List Handling *************************/
void *
ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
	     void *sgptr, dma_addr_t addr, bus_size_t len, int last)
{
	scb->sg_count++;
	if (sizeof(dma_addr_t) > 4
	 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)sgptr;
		sg->addr = ahd_htole64(addr);
		sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	} else {
		struct ahd_dma_seg *sg;

		sg = (struct ahd_dma_seg *)sgptr;
		sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
		sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
				    | (last ? AHD_DMA_LAST_SEG : 0));
		return (sg + 1);
	}
}

static void
ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
{
	/* XXX Handle target mode SCBs. */
	scb->crc_retry_count = 0;
	if ((scb->flags & SCB_PACKETIZED) != 0) {
		/* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
		scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
	} else {
		if (ahd_get_transfer_length(scb) & 0x01)
			scb->hscb->task_attribute = SCB_XFERLEN_ODD;
		else
			scb->hscb->task_attribute = 0;
	}

	if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
	 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
		scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
		    ahd_htole32(scb->sense_busaddr);
}

static void
ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
{
	/*
	 * Copy the first SG into the "current" data pointer area.
	 */
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = (struct ahd_dma64_seg *)scb->sg_list;
		scb->hscb->dataptr = sg->addr;
		scb->hscb->datacnt = sg->len;
	} else {
		struct ahd_dma_seg *sg;
		uint32_t *dataptr_words;

		sg = (struct ahd_dma_seg *)scb->sg_list;
		dataptr_words = (uint32_t*)&scb->hscb->dataptr;
		dataptr_words[0] = sg->addr;
		dataptr_words[1] = 0;
		if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
			uint64_t high_addr;

			high_addr = ahd_le32toh(sg->len) & 0x7F000000;
			scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
		}
		scb->hscb->datacnt = sg->len;
	}
	/*
	 * Note where to find the SG entries in bus space.
	 * We also set the full residual flag which the
	 * sequencer will clear as soon as a data transfer
	 * occurs.
	 */
	scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
}

static void
ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
{
	scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
	scb->hscb->dataptr = 0;
	scb->hscb->datacnt = 0;
}

/************************** Memory mapping routines ***************************/
static void *
ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
{
	dma_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
	return ((uint8_t *)scb->sg_list + sg_offset);
}

static uint32_t
ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
{
	dma_addr_t sg_offset;

	/* sg_list_phys points to entry 1, not 0 */
	sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
		  - ahd_sg_size(ahd);

	return (scb->sg_list_busaddr + sg_offset);
}

static void
ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
			scb->hscb_map->dmamap,
			/*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
			/*len*/sizeof(*scb->hscb), op);
}

void
ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
			scb->sg_map->dmamap,
			/*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
			/*len*/ahd_sg_size(ahd) * scb->sg_count, op);
}

static void
ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
{
	ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
			scb->sense_map->dmamap,
			/*offset*/scb->sense_busaddr,
			/*len*/AHD_SENSE_BUFSIZE, op);
}

#ifdef AHD_TARGET_MODE
static uint32_t
ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
{
	return (((uint8_t *)&ahd->targetcmds[index])
	       - (uint8_t *)ahd->qoutfifo);
}
#endif
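/*
 * Worked examples for the S/G handling above (illustrative numbers,
 * not taken from the original sources):
 *
 * 1) 39-bit descriptor packing in ahd_sg_setup(): for
 *    addr = 0x12_3456_7800 and len = 0x1000, sg->addr holds the low
 *    32 bits (0x34567800) while address bits 32-38 (0x12) land in
 *    bits 24-30 of sg->len via ((addr >> 8) & 0x7F000000), giving
 *    len = 0x12001000 before AHD_DMA_LAST_SEG is OR'ed in.
 *    ahd_setup_data_scb() reverses this when it rebuilds the 64-bit
 *    dataptr from sg->len.
 *
 * 2) The entry-1 bias in ahd_sg_bus_to_virt()/ahd_sg_virt_to_bus():
 *    since scb->sg_list_busaddr points at entry 1, the bus address of
 *    entry 0 is sg_list_busaddr - ahd_sg_size(ahd), which translates
 *    to offset 0 from scb->sg_list; entry N translates to offset
 *    N * ahd_sg_size(ahd).
 */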
/*********************** Miscellaneous Support Functions ***********************/
/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
struct ahd_initiator_tinfo *
ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
		    u_int remote_id, struct ahd_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahd->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}

uint16_t
ahd_inw(struct ahd_softc *ahd, u_int port)
{
	/*
	 * Read high byte first as some registers increment
	 * or have other side effects when the low byte is
	 * read.
	 */
	uint16_t r = ahd_inb(ahd, port+1) << 8;
	return r | ahd_inb(ahd, port);
}

void
ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
{
	/*
	 * Write low byte first to accommodate registers
	 * such as PRGMCNT where the order matters.
	 */
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
}
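/*
 * Worked example (illustrative): ahd_outw(ahd, PRGMCNT, 0x1234)
 * issues ahd_outb(ahd, PRGMCNT, 0x34) before
 * ahd_outb(ahd, PRGMCNT+1, 0x12), so the low byte is latched first,
 * while ahd_inw() reads the high byte first to avoid triggering the
 * read side effects described above.
 */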
uint32_t
ahd_inl(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (ahd_inb(ahd, port+3) << 24));
}

void
ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
{
	ahd_outb(ahd, port, (value) & 0xFF);
	ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
	ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
	ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
}

uint64_t
ahd_inq(struct ahd_softc *ahd, u_int port)
{
	return ((ahd_inb(ahd, port))
	      | (ahd_inb(ahd, port+1) << 8)
	      | (ahd_inb(ahd, port+2) << 16)
	      | (ahd_inb(ahd, port+3) << 24)
	      | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
	      | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
	      | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
	      | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
}

void
ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
{
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
	ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
	ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
	ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
	ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
	ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
	ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
}

u_int
ahd_get_scbptr(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
}

void
ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
{
	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
	ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
}

#if 0 /* unused */
static u_int
ahd_get_hnscb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inw_atomic(ahd, HNSCB_QOFF));
}
#endif

static void
ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outw_atomic(ahd, HNSCB_QOFF, value);
}

#if 0 /* unused */
static u_int
ahd_get_hescb_qoff(struct ahd_softc *ahd)
{
	return (ahd_inb(ahd, HESCB_QOFF));
}
#endif

static void
ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
{
	ahd_outb(ahd, HESCB_QOFF, value);
}

static u_int
ahd_get_snscb_qoff(struct ahd_softc *ahd)
{
	u_int oldvalue;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	oldvalue = ahd_inw(ahd, SNSCB_QOFF);
	ahd_outw(ahd, SNSCB_QOFF, oldvalue);
	return (oldvalue);
}

static void
ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outw(ahd, SNSCB_QOFF, value);
}

#if 0 /* unused */
static u_int
ahd_get_sescb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SESCB_QOFF));
}
#endif

static void
ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SESCB_QOFF, value);
}

#if 0 /* unused */
static u_int
ahd_get_sdscb_qoff(struct ahd_softc *ahd)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
}
#endif

static void
ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
	ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
}

u_int
ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
{
	u_int value;

	/*
	 * Workaround PCI-X Rev A. hardware bug.
	 * After a host read of SCB memory, the chip
	 * may become confused into thinking prefetch
	 * was required.  This starts the discard timer
	 * running and can cause an unexpected discard
	 * timer interrupt.  The workaround is to read
	 * a normal register prior to the exhaustion of
	 * the discard timer.  The mode pointer register
	 * has no side effects and so serves well for
	 * this purpose.
	 *
	 * Razor #528
	 */
	value = ahd_inb(ahd, offset);
	if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
		ahd_inb(ahd, MODE_PTR);
	return (value);
}

u_int
ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inb_scbram(ahd, offset)
	      | (ahd_inb_scbram(ahd, offset+1) << 8));
}

static uint32_t
ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inw_scbram(ahd, offset)
	      | (ahd_inw_scbram(ahd, offset+2) << 16));
}

static uint64_t
ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
{
	return (ahd_inl_scbram(ahd, offset)
	      | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
}

struct scb *
ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
{
	struct scb* scb;

	if (tag >= AHD_SCB_MAX)
		return (NULL);
	scb = ahd->scb_data.scbindex[tag];
	if (scb != NULL)
		ahd_sync_scb(ahd, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	return (scb);
}
static void
ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	struct map_node *q_hscb_map;
	uint32_t saved_hscb_busaddr;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB (by address) to download,
	 * and we can't disappoint it.  To achieve this, the next
	 * HSCB to download is saved off in ahd->next_queued_hscb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahd->next_queued_hscb;
	q_hscb_map = ahd->next_queued_hscb_map;
	saved_hscb_busaddr = q_hscb->hscb_busaddr;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	q_hscb->hscb_busaddr = saved_hscb_busaddr;
	q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;

	/* Now swap HSCB pointers. */
	ahd->next_queued_hscb = scb->hscb;
	ahd->next_queued_hscb_map = scb->hscb_map;
	scb->hscb = q_hscb;
	scb->hscb_map = q_hscb_map;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
}

/*
 * Tell the sequencer about a new transaction to execute.
 */
void
ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
{
	ahd_swap_with_next_hscb(ahd, scb);

	if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
		panic("Attempt to queue invalid SCB tag %x\n",
		      SCB_GET_TAG(scb));

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;

	if (scb->sg_count != 0)
		ahd_setup_data_scb(ahd, scb);
	else
		ahd_setup_noxfer_scb(ahd, scb);
	ahd_setup_scb_common(ahd, scb);

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
		uint64_t host_dataptr;

		host_dataptr = ahd_le64toh(scb->hscb->dataptr);
		printk("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
		       ahd_name(ahd),
		       SCB_GET_TAG(scb), scb->hscb->scsiid,
		       ahd_le32toh(scb->hscb->hscb_busaddr),
		       (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
		       (u_int)(host_dataptr & 0xFFFFFFFF),
		       ahd_le32toh(scb->hscb->datacnt));
	}
#endif
	/* Tell the adapter about the newly queued SCB */
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
}

/************************** Interrupt Processing ******************************/
static void
ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
{
	ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
			/*offset*/0,
			/*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
}

static void
ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
{
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, 0),
				sizeof(struct target_cmd) * AHD_TMODE_CMDS,
				op);
	}
#endif
}

/*
 * See if the firmware has posted any completed commands
 * into our in-core command complete fifos.
 */
#define AHD_RUN_QOUTFIFO 0x1
#define AHD_RUN_TQINFIFO 0x2
static u_int
ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
{
	u_int retval;

	retval = 0;
	ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
			/*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
			/*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
	if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
	  == ahd->qoutfifonext_valid_tag)
		retval |= AHD_RUN_QOUTFIFO;
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0
	 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
			retval |= AHD_RUN_TQINFIFO;
	}
#endif
	return (retval);
}
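/*
 * Illustrative sketch (not compiled into the driver): how an OSM
 * interrupt handler is expected to consume ahd_intr()'s return value.
 * The Linux OSM's ahd_linux_isr() follows this shape; the exact
 * locking shown here is an assumption of the example.
 */
#if 0 /* example only */
static irqreturn_t
example_isr(int irq, void *dev_id)
{
	struct ahd_softc *ahd = dev_id;
	unsigned long flags;
	int ours;

	ahd_lock(ahd, &flags);
	ours = ahd_intr(ahd);	/* 0 => shared interrupt we don't own. */
	ahd_unlock(ahd, &flags);
	return (ours ? IRQ_HANDLED : IRQ_NONE);
}
#endif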
/*
 * Catch an interrupt from the adapter
 */
int
ahd_intr(struct ahd_softc *ahd)
{
	u_int intstat;

	if ((ahd->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}

	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
	 && (ahd_check_cmdcmpltqueues(ahd) != 0))
		intstat = CMDCMPLT;
	else
		intstat = ahd_inb(ahd, INTSTAT);

	if ((intstat & INT_PEND) == 0)
		return (0);

	if (intstat & CMDCMPLT) {
		ahd_outb(ahd, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
			if (ahd_is_paused(ahd)) {
				/*
				 * Potentially lost SEQINT.
				 * If SEQINTCODE is non-zero,
				 * simulate the SEQINT.
				 */
				if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
					intstat |= SEQINT;
			}
		} else {
			ahd_flush_device_writes(ahd);
		}
		ahd_run_qoutfifo(ahd);
		ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
		ahd->cmdcmplt_total++;
#ifdef AHD_TARGET_MODE
		if ((ahd->flags & AHD_TARGETROLE) != 0)
			ahd_run_tqinfifo(ahd, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & HWERRINT) {
		ahd_handle_hwerrint(ahd);
	} else if ((intstat & (PCIINT|SPLTINT)) != 0) {
		ahd->bus_intr(ahd);
	} else {

		if ((intstat & SEQINT) != 0)
			ahd_handle_seqint(ahd, intstat);

		if ((intstat & SCSIINT) != 0)
			ahd_handle_scsiint(ahd, intstat);
	}
	return (1);
}

/******************************** Private Inlines *****************************/
static inline void
ahd_assert_atn(struct ahd_softc *ahd)
{
	ahd_outb(ahd, SCSISIGO, ATNO);
}

/*
 * Determine if the current connection has a packetized
 * agreement.  This does not necessarily mean that we
 * are currently in a packetized transfer.  We could
 * just as easily be sending or receiving a message.
 */
static int
ahd_currently_packetized(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;
	int packetized;

	saved_modes = ahd_save_modes(ahd);
	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
		/*
		 * The packetized bit refers to the last
		 * connection, not the current one.  Check
		 * for non-zero LQISTATE instead.
		 */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		packetized = ahd_inb(ahd, LQISTATE) != 0;
	} else {
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
	}
	ahd_restore_modes(ahd, saved_modes);
	return (packetized);
}

static inline int
ahd_set_active_fifo(struct ahd_softc *ahd)
{
	u_int active_fifo;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	switch (active_fifo) {
	case 0:
	case 1:
		ahd_set_modes(ahd, active_fifo, active_fifo);
		return (1);
	default:
		return (0);
	}
}

static inline void
ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl)
{
	ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL);
}

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static inline void
ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_calc_residual(ahd, scb);
}

static inline void
ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_handle_scb_status(ahd, scb);
	else
		ahd_done(ahd, scb);
}
/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero
 */
static void
ahd_restart(struct ahd_softc *ahd)
{

	ahd_pause(ahd);

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* No more pending messages */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SCSISIGO, 0);		/* De-assert BSY */
	ahd_outb(ahd, MSG_OUT, NOP);		/* No message to send */
	ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
	ahd_outb(ahd, SEQINTCTL, 0);
	ahd_outb(ahd, LASTPHASE, P_BUSFREE);
	ahd_outb(ahd, SEQ_FLAGS, 0);
	ahd_outb(ahd, SAVED_SCSIID, 0xFF);
	ahd_outb(ahd, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete, and a reset could
	 * occur before the increment, leaving the kernel believing
	 * the command arrived while the sequencer does not.
	 */
	ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);

	/* Always allow reselection */
	ahd_outb(ahd, SCSISEQ1,
		 ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Clear any pending sequencer interrupt.  It is no
	 * longer relevant since we're resetting the Program
	 * Counter.
	 */
	ahd_outb(ahd, CLRINT, CLRSEQINT);

	ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
	ahd_unpause(ahd);
}

static void
ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
{
	ahd_mode_state saved_modes;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
		printk("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
#endif
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, fifo, fifo);
	ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
		ahd_outb(ahd, CCSGCTL, CCSGRESET);
	ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
	ahd_outb(ahd, SG_STATE, 0);
	ahd_restore_modes(ahd, saved_modes);
}

/************************* Input/Output Queues ********************************/
/*
 * Flush any completed commands that are sitting in the command
 * complete queues down on the chip but have yet to be DMA'ed back up.
 */
static void
ahd_flush_qoutfifo(struct ahd_softc *ahd)
{
	struct scb *scb;
	ahd_mode_state saved_modes;
	u_int saved_scbptr;
	u_int ccscbctl;
	u_int scbid;
	u_int next_scbid;

	saved_modes = ahd_save_modes(ahd);

	/*
	 * Flush the good status FIFO for completed packetized commands.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	saved_scbptr = ahd_get_scbptr(ahd);
	while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
		u_int fifo_mode;
		u_int i;

		scbid = ahd_inw(ahd, GSFIFO);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: Warning - GSFIFO SCB %d invalid\n",
			       ahd_name(ahd), scbid);
			continue;
		}
		/*
		 * Determine if this transaction is still active in
		 * any FIFO.  If it is, we must flush that FIFO to
		 * the host before completing the command.
		 */
		fifo_mode = 0;
rescan_fifos:
		for (i = 0; i < 2; i++) {
			/* Toggle to the other mode. */
			fifo_mode ^= 1;
			ahd_set_modes(ahd, fifo_mode, fifo_mode);

			if (ahd_scb_active_in_fifo(ahd, scb) == 0)
				continue;

			ahd_run_data_fifo(ahd, scb);

			/*
			 * Running this FIFO may cause a CFG4DATA for
			 * this same transaction to assert in the other
			 * FIFO or a new snapshot SAVEPTRS interrupt
			 * in this FIFO.  Even running a FIFO may not
			 * clear the transaction if we are still waiting
			 * for data to drain to the host.  We must loop
			 * until the transaction is not active in either
			 * FIFO just to be sure.  Reset our loop counter
			 * so we will visit both FIFOs again before
			 * declaring this transaction finished.  We
			 * also delay a bit so that status has a chance
			 * to change before we look at this FIFO again.
			 */
			ahd_delay(200);
			goto rescan_fifos;
		}
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_set_scbptr(ahd, scbid);
		if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
		 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
		  || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
		      & SG_LIST_NULL) != 0)) {
			u_int comp_head;

			/*
			 * The transfer completed with a residual.
			 * Place this SCB on the complete DMA list
			 * so that we update our in-core copy of the
			 * SCB before completing the command.
			 */
			ahd_outb(ahd, SCB_SCSI_STATUS, 0);
			ahd_outb(ahd, SCB_SGPTR,
				 ahd_inb_scbram(ahd, SCB_SGPTR)
				 | SG_STATUS_VALID);
			ahd_outw(ahd, SCB_TAG, scbid);
			ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL);
			comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
			if (SCBID_IS_NULL(comp_head)) {
				ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid);
				ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
			} else {
				u_int tail;

				tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL);
				ahd_set_scbptr(ahd, tail);
				ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid);
				ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
				ahd_set_scbptr(ahd, scbid);
			}
		} else
			ahd_complete_scb(ahd, scb);
	}
	ahd_set_scbptr(ahd, saved_scbptr);

	/*
	 * Setup for command channel portion of flush.
	 */
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Wait for any inprogress DMA to complete and clear DMA state
	 * if this is for an SCB in the qinfifo.
	 */
	while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {

		if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
			if ((ccscbctl & ARRDONE) != 0)
				break;
		} else if ((ccscbctl & CCSCBDONE) != 0)
			break;
		ahd_delay(200);
	}
	/*
	 * We leave the sequencer to cleanup in the case of DMA's to
	 * update the qoutfifo.  In all other cases (DMA's to the
	 * chip or a push of an SCB from the COMPLETE_DMA_SCB list),
	 * we disable the DMA engine so that the sequencer will not
	 * attempt to handle the DMA completion.
	 */
	if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0)
		ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));

	/*
	 * Complete any SCBs that just finished
	 * being DMA'ed into the qoutfifo.
	 */
	ahd_run_qoutfifo(ahd);

	saved_scbptr = ahd_get_scbptr(ahd);
	/*
	 * Manually update/complete any completed SCBs that are waiting to be
	 * DMA'ed back up to the host.
	 */
	scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
	while (!SCBID_IS_NULL(scbid)) {
		uint8_t *hscb_ptr;
		u_int i;

		ahd_set_scbptr(ahd, scbid);
		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: Warning - DMA-up and complete "
			       "SCB %d invalid\n", ahd_name(ahd), scbid);
			continue;
		}
		hscb_ptr = (uint8_t *)scb->hscb;
		for (i = 0; i < sizeof(struct hardware_scb); i++)
			*hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);

		ahd_complete_scb(ahd, scb);
		scbid = next_scbid;
	}
	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);

	scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
	while (!SCBID_IS_NULL(scbid)) {

		ahd_set_scbptr(ahd, scbid);
		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: Warning - Complete Qfrz SCB %d invalid\n",
			       ahd_name(ahd), scbid);
			continue;
		}

		ahd_complete_scb(ahd, scb);
		scbid = next_scbid;
	}
	ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);

	scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
	while (!SCBID_IS_NULL(scbid)) {

		ahd_set_scbptr(ahd, scbid);
		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: Warning - Complete SCB %d invalid\n",
			       ahd_name(ahd), scbid);
			continue;
		}

		ahd_complete_scb(ahd, scb);
		scbid = next_scbid;
	}
	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);

	/*
	 * Restore state.
	 */
	ahd_set_scbptr(ahd, saved_scbptr);
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags |= AHD_UPDATE_PEND_CMDS;
}

/*
 * Determine if an SCB for a packetized transaction
 * is active in a FIFO.
 */
static int
ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
{

	/*
	 * The FIFO is only active for our transaction if
	 * the SCBPTR matches the SCB's ID and the firmware
	 * has installed a handler for the FIFO or we have
	 * a pending SAVEPTRS or CFG4DATA interrupt.
	 */
	if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
	 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
	  && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
		return (0);

	return (1);
}

/*
 * Run a data fifo to completion for a transaction we know
 * has completed across the SCSI bus (good status has been
 * received).  We are already set to the correct FIFO mode
 * on entry to this routine.
 *
 * This function attempts to operate exactly as the firmware
 * would when running this FIFO.  Care must be taken to update
 * this routine any time the firmware's FIFO algorithm is
 * changed.
 */
static void
ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
{
	u_int seqintsrc;

	seqintsrc = ahd_inb(ahd, SEQINTSRC);
	if ((seqintsrc & CFG4DATA) != 0) {
		uint32_t datacnt;
		uint32_t sgptr;

		/*
		 * Clear full residual flag.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
		ahd_outb(ahd, SCB_SGPTR, sgptr);

		/*
		 * Load datacnt and address.
		 */
		datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
		if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
			sgptr |= LAST_SEG;
			ahd_outb(ahd, SG_STATE, 0);
		} else
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
		ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
		ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
		ahd_outb(ahd, SG_CACHE_PRE, sgptr);
		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);

		/*
		 * Initialize Residual Fields.
		 */
		ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
		ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);

		/*
		 * Mark the SCB as having a FIFO in use.
		 */
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);

		/*
		 * Install a "fake" handler for this FIFO.
		 */
		ahd_outw(ahd, LONGJMP_ADDR, 0);

		/*
		 * Notify the hardware that we have satisfied
		 * this sequencer interrupt.
		 */
		ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
	} else if ((seqintsrc & SAVEPTRS) != 0) {
		uint32_t sgptr;
		uint32_t resid;

		if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
			/*
			 * Snapshot Save Pointers.  All that
			 * is necessary to clear the snapshot
			 * is a CLRCHN.
			 */
			goto clrchn;
		}

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
			ahd_outb(ahd, CCSGCTL, 0);
		ahd_outb(ahd, SG_STATE, 0);

		/*
		 * Flush the data FIFO.  Strictly speaking, this is
		 * only necessary for Rev A parts.
		 */
		ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);

		/*
		 * Calculate residual.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
		resid = ahd_inl(ahd, SHCNT);
		resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
		ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
		if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
			/*
			 * Must back up to the correct S/G element.
			 * Typically this just means resetting our
			 * low byte to the offset in the SG_CACHE,
			 * but if we wrapped, we have to correct
			 * the other bytes of the sgptr too.
			 */
			if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
			 && (sgptr & 0x80) == 0)
				sgptr -= 0x100;
			sgptr &= ~0xFF;
			sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
			       & SG_ADDR_MASK;
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
		} else if ((resid & AHD_SG_LEN_MASK) == 0) {
			ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
				 sgptr | SG_LIST_NULL);
		}
		/*
		 * Save Pointers.
		 */
		ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
		ahd_outl(ahd, SCB_DATACNT, resid);
		ahd_outl(ahd, SCB_SGPTR, sgptr);
		ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
		ahd_outb(ahd, SEQIMODE,
			 ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
		/*
		 * If the data is to the SCSI bus, we are
		 * done, otherwise wait for FIFOEMP.
		 */
		if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
			goto clrchn;
	} else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
		uint32_t sgptr;
		uint64_t data_addr;
		uint32_t data_len;
		u_int dfcntrl;

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users.  We won't
		 * be using the DMA engine to load segments.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
			ahd_outb(ahd, CCSGCTL, 0);
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
		}

		/*
		 * Wait for the DMA engine to notice that the
		 * host transfer is enabled and that there is
		 * space in the S/G FIFO for new segments before
		 * loading more segments.
		 */
		if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
		 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {

			/*
			 * Determine the offset of the next S/G
			 * element to load.
			 */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			} else {
				struct ahd_dma_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
				data_addr <<= 8;
				data_addr |= sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			}

			/*
			 * Update residual information.
			 */
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24);
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);

			/*
			 * Load the S/G.
			 */
			if (data_len & AHD_DMA_LAST_SEG) {
				sgptr |= LAST_SEG;
				ahd_outb(ahd, SG_STATE, 0);
			}
			ahd_outq(ahd, HADDR, data_addr);
			ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
			ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);

			/*
			 * Advertise the segment to the hardware.
			 */
			dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
			if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
				/*
				 * Use SCSIENWRDIS so that SCSIEN
				 * is never modified by this
				 * operation.
				 */
				dfcntrl |= SCSIENWRDIS;
			}
			ahd_outb(ahd, DFCNTRL, dfcntrl);
		}
	} else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {

		/*
		 * Transfer completed to the end of SG list
		 * and has flushed to the host.
		 */
		ahd_outb(ahd, SCB_SGPTR,
			 ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
		goto clrchn;
	} else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
clrchn:
		/*
		 * Clear any handler for this FIFO, decrement
		 * the FIFO use count for the SCB, and release
		 * the FIFO.
		 */
		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
		ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
	}
}

/*
 * Look for entries in the QoutFIFO that have completed.
 * The valid_tag completion field indicates the validity
 * of the entry - the valid value toggles each time through
 * the queue.  We use the sg_status field in the completion
 * entry to avoid referencing the hscb if the completion
 * occurred with no errors and no residual.  sg_status is
 * a copy of the first byte (little endian) of the sgptr
 * hscb field.
 */
Reset*/TRUE); 1901 printk("%s: Issued Bus Reset.\n", ahd_name(ahd)); 1902 break; 1903 case P_COMMAND: 1904 { 1905 struct ahd_devinfo devinfo; 1906 struct scb *scb; 1907 u_int scbid; 1908 1909 /* 1910 * If a target takes us into the command phase 1911 * assume that it has been externally reset and 1912 * has thus lost our previous packetized negotiation 1913 * agreement. Since we have not sent an identify 1914 * message and may not have fully qualified the 1915 * connection, we change our command to TUR, assert 1916 * ATN and ABORT the task when we go to message in 1917 * phase. The OSM will see the REQUEUE_REQUEST 1918 * status and retry the command. 1919 */ 1920 scbid = ahd_get_scbptr(ahd); 1921 scb = ahd_lookup_scb(ahd, scbid); 1922 if (scb == NULL) { 1923 printk("Invalid phase with no valid SCB. " 1924 "Resetting bus.\n"); 1925 ahd_reset_channel(ahd, 'A', 1926 /*Initiate Reset*/TRUE); 1927 break; 1928 } 1929 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), 1930 SCB_GET_TARGET(ahd, scb), 1931 SCB_GET_LUN(scb), 1932 SCB_GET_CHANNEL(ahd, scb), 1933 ROLE_INITIATOR); 1934 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 1935 AHD_TRANS_ACTIVE, /*paused*/TRUE); 1936 ahd_set_syncrate(ahd, &devinfo, /*period*/0, 1937 /*offset*/0, /*ppr_options*/0, 1938 AHD_TRANS_ACTIVE, /*paused*/TRUE); 1939 /* Hand-craft TUR command */ 1940 ahd_outb(ahd, SCB_CDB_STORE, 0); 1941 ahd_outb(ahd, SCB_CDB_STORE+1, 0); 1942 ahd_outb(ahd, SCB_CDB_STORE+2, 0); 1943 ahd_outb(ahd, SCB_CDB_STORE+3, 0); 1944 ahd_outb(ahd, SCB_CDB_STORE+4, 0); 1945 ahd_outb(ahd, SCB_CDB_STORE+5, 0); 1946 ahd_outb(ahd, SCB_CDB_LEN, 6); 1947 scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE); 1948 scb->hscb->control |= MK_MESSAGE; 1949 ahd_outb(ahd, SCB_CONTROL, scb->hscb->control); 1950 ahd_outb(ahd, MSG_OUT, HOST_MSG); 1951 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); 1952 /* 1953 * The lun is 0, regardless of the SCB's lun 1954 * as we have not sent an identify message. 1955 */ 1956 ahd_outb(ahd, SAVED_LUN, 0); 1957 ahd_outb(ahd, SEQ_FLAGS, 0); 1958 ahd_assert_atn(ahd); 1959 scb->flags &= ~SCB_PACKETIZED; 1960 scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET; 1961 ahd_freeze_devq(ahd, scb); 1962 ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); 1963 ahd_freeze_scb(scb); 1964 1965 /* Notify XPT */ 1966 ahd_send_async(ahd, devinfo.channel, devinfo.target, 1967 CAM_LUN_WILDCARD, AC_SENT_BDR); 1968 1969 /* 1970 * Allow the sequencer to continue with 1971 * non-pack processing. 1972 */ 1973 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 1974 ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT); 1975 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { 1976 ahd_outb(ahd, CLRLQOINT1, 0); 1977 } 1978 #ifdef AHD_DEBUG 1979 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 1980 ahd_print_path(ahd, scb); 1981 printk("Unexpected command phase from " 1982 "packetized target\n"); 1983 } 1984 #endif 1985 break; 1986 } 1987 } 1988 break; 1989 } 1990 case CFG4OVERRUN: 1991 { 1992 struct scb *scb; 1993 u_int scb_index; 1994 1995 #ifdef AHD_DEBUG 1996 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 1997 printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd), 1998 ahd_inb(ahd, MODE_PTR)); 1999 } 2000 #endif 2001 scb_index = ahd_get_scbptr(ahd); 2002 scb = ahd_lookup_scb(ahd, scb_index); 2003 if (scb == NULL) { 2004 /* 2005 * Attempt to transfer to an SCB that is 2006 * not outstanding. 
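 * Respond by asserting ATN and queueing an ABORT TASK message so the target abandons the task it is attempting to transfer data for.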
2007 */ 2008 ahd_assert_atn(ahd); 2009 ahd_outb(ahd, MSG_OUT, HOST_MSG); 2010 ahd->msgout_buf[0] = ABORT_TASK; 2011 ahd->msgout_len = 1; 2012 ahd->msgout_index = 0; 2013 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2014 /* 2015 * Clear status received flag to prevent any 2016 * attempt to complete this bogus SCB. 2017 */ 2018 ahd_outb(ahd, SCB_CONTROL, 2019 ahd_inb_scbram(ahd, SCB_CONTROL) 2020 & ~STATUS_RCVD); 2021 } 2022 break; 2023 } 2024 case DUMP_CARD_STATE: 2025 { 2026 ahd_dump_card_state(ahd); 2027 break; 2028 } 2029 case PDATA_REINIT: 2030 { 2031 #ifdef AHD_DEBUG 2032 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2033 printk("%s: PDATA_REINIT - DFCNTRL = 0x%x " 2034 "SG_CACHE_SHADOW = 0x%x\n", 2035 ahd_name(ahd), ahd_inb(ahd, DFCNTRL), 2036 ahd_inb(ahd, SG_CACHE_SHADOW)); 2037 } 2038 #endif 2039 ahd_reinitialize_dataptrs(ahd); 2040 break; 2041 } 2042 case HOST_MSG_LOOP: 2043 { 2044 struct ahd_devinfo devinfo; 2045 2046 /* 2047 * The sequencer has encountered a message phase 2048 * that requires host assistance for completion. 2049 * While handling the message phase(s), we will be 2050 * notified by the sequencer after each byte is 2051 * transferred so we can track bus phase changes. 2052 * 2053 * If this is the first time we've seen a HOST_MSG_LOOP 2054 * interrupt, initialize the state of the host message 2055 * loop. 2056 */ 2057 ahd_fetch_devinfo(ahd, &devinfo); 2058 if (ahd->msg_type == MSG_TYPE_NONE) { 2059 struct scb *scb; 2060 u_int scb_index; 2061 u_int bus_phase; 2062 2063 bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 2064 if (bus_phase != P_MESGIN 2065 && bus_phase != P_MESGOUT) { 2066 printk("ahd_intr: HOST_MSG_LOOP bad " 2067 "phase 0x%x\n", bus_phase); 2068 /* 2069 * Probably transitioned to bus free before 2070 * we got here. Just punt the message. 
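 * Dumping card state, clearing interrupt status, and restarting the sequencer discards only a message we could not have delivered anyway.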
2071 */ 2072 ahd_dump_card_state(ahd); 2073 ahd_clear_intstat(ahd); 2074 ahd_restart(ahd); 2075 return; 2076 } 2077 2078 scb_index = ahd_get_scbptr(ahd); 2079 scb = ahd_lookup_scb(ahd, scb_index); 2080 if (devinfo.role == ROLE_INITIATOR) { 2081 if (bus_phase == P_MESGOUT) 2082 ahd_setup_initiator_msgout(ahd, 2083 &devinfo, 2084 scb); 2085 else { 2086 ahd->msg_type = 2087 MSG_TYPE_INITIATOR_MSGIN; 2088 ahd->msgin_index = 0; 2089 } 2090 } 2091 #ifdef AHD_TARGET_MODE 2092 else { 2093 if (bus_phase == P_MESGOUT) { 2094 ahd->msg_type = 2095 MSG_TYPE_TARGET_MSGOUT; 2096 ahd->msgin_index = 0; 2097 } else 2098 ahd_setup_target_msgin(ahd, 2099 &devinfo, 2100 scb); 2101 } 2102 #endif 2103 } 2104 2105 ahd_handle_message_phase(ahd); 2106 break; 2107 } 2108 case NO_MATCH: 2109 { 2110 /* Ensure we don't leave the selection hardware on */ 2111 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 2112 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 2113 2114 printk("%s:%c:%d: no active SCB for reconnecting " 2115 "target - issuing BUS DEVICE RESET\n", 2116 ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4); 2117 printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 2118 "REG0 == 0x%x ACCUM = 0x%x\n", 2119 ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN), 2120 ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM)); 2121 printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 2122 "SINDEX == 0x%x\n", 2123 ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd), 2124 ahd_find_busy_tcl(ahd, 2125 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID), 2126 ahd_inb(ahd, SAVED_LUN))), 2127 ahd_inw(ahd, SINDEX)); 2128 printk("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 2129 "SCB_CONTROL == 0x%x\n", 2130 ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID), 2131 ahd_inb_scbram(ahd, SCB_LUN), 2132 ahd_inb_scbram(ahd, SCB_CONTROL)); 2133 printk("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n", 2134 ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI)); 2135 printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0)); 2136 printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0)); 2137 ahd_dump_card_state(ahd); 2138 ahd->msgout_buf[0] = TARGET_RESET; 2139 ahd->msgout_len = 1; 2140 ahd->msgout_index = 0; 2141 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 2142 ahd_outb(ahd, MSG_OUT, HOST_MSG); 2143 ahd_assert_atn(ahd); 2144 break; 2145 } 2146 case PROTO_VIOLATION: 2147 { 2148 ahd_handle_proto_violation(ahd); 2149 break; 2150 } 2151 case IGN_WIDE_RES: 2152 { 2153 struct ahd_devinfo devinfo; 2154 2155 ahd_fetch_devinfo(ahd, &devinfo); 2156 ahd_handle_ign_wide_residue(ahd, &devinfo); 2157 break; 2158 } 2159 case BAD_PHASE: 2160 { 2161 u_int lastphase; 2162 2163 lastphase = ahd_inb(ahd, LASTPHASE); 2164 printk("%s:%c:%d: unknown scsi bus phase %x, " 2165 "lastphase = 0x%x. Attempting to continue\n", 2166 ahd_name(ahd), 'A', 2167 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), 2168 lastphase, ahd_inb(ahd, SCSISIGI)); 2169 break; 2170 } 2171 case MISSED_BUSFREE: 2172 { 2173 u_int lastphase; 2174 2175 lastphase = ahd_inb(ahd, LASTPHASE); 2176 printk("%s:%c:%d: Missed busfree. " 2177 "Lastphase = 0x%x, Curphase = 0x%x\n", 2178 ahd_name(ahd), 'A', 2179 SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), 2180 lastphase, ahd_inb(ahd, SCSISIGI)); 2181 ahd_restart(ahd); 2182 return; 2183 } 2184 case DATA_OVERRUN: 2185 { 2186 /* 2187 * When the sequencer detects an overrun, it 2188 * places the controller in "BITBUCKET" mode 2189 * and allows the target to complete its transfer. 
2190 * Unfortunately, none of the counters get updated 2191 * when the controller is in this mode, so we have 2192 * no way of knowing how large the overrun was. 2193 */ 2194 struct scb *scb; 2195 u_int scbindex; 2196 #ifdef AHD_DEBUG 2197 u_int lastphase; 2198 #endif 2199 2200 scbindex = ahd_get_scbptr(ahd); 2201 scb = ahd_lookup_scb(ahd, scbindex); 2202 #ifdef AHD_DEBUG 2203 lastphase = ahd_inb(ahd, LASTPHASE); 2204 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2205 ahd_print_path(ahd, scb); 2206 printk("data overrun detected %s. Tag == 0x%x.\n", 2207 ahd_lookup_phase_entry(lastphase)->phasemsg, 2208 SCB_GET_TAG(scb)); 2209 ahd_print_path(ahd, scb); 2210 printk("%s seen Data Phase. Length = %ld. " 2211 "NumSGs = %d.\n", 2212 ahd_inb(ahd, SEQ_FLAGS) & DPHASE 2213 ? "Have" : "Haven't", 2214 ahd_get_transfer_length(scb), scb->sg_count); 2215 ahd_dump_sglist(scb); 2216 } 2217 #endif 2218 2219 /* 2220 * Set this and it will take effect when the 2221 * target does a command complete. 2222 */ 2223 ahd_freeze_devq(ahd, scb); 2224 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); 2225 ahd_freeze_scb(scb); 2226 break; 2227 } 2228 case MKMSG_FAILED: 2229 { 2230 struct ahd_devinfo devinfo; 2231 struct scb *scb; 2232 u_int scbid; 2233 2234 ahd_fetch_devinfo(ahd, &devinfo); 2235 printk("%s:%c:%d:%d: Attempt to issue message failed\n", 2236 ahd_name(ahd), devinfo.channel, devinfo.target, 2237 devinfo.lun); 2238 scbid = ahd_get_scbptr(ahd); 2239 scb = ahd_lookup_scb(ahd, scbid); 2240 if (scb != NULL 2241 && (scb->flags & SCB_RECOVERY_SCB) != 0) 2242 /* 2243 * Ensure that we didn't put a second instance of this 2244 * SCB into the QINFIFO. 2245 */ 2246 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), 2247 SCB_GET_CHANNEL(ahd, scb), 2248 SCB_GET_LUN(scb), SCB_GET_TAG(scb), 2249 ROLE_INITIATOR, /*status*/0, 2250 SEARCH_REMOVE); 2251 ahd_outb(ahd, SCB_CONTROL, 2252 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); 2253 break; 2254 } 2255 case TASKMGMT_FUNC_COMPLETE: 2256 { 2257 u_int scbid; 2258 struct scb *scb; 2259 2260 scbid = ahd_get_scbptr(ahd); 2261 scb = ahd_lookup_scb(ahd, scbid); 2262 if (scb != NULL) { 2263 u_int lun; 2264 u_int tag; 2265 cam_status error; 2266 2267 ahd_print_path(ahd, scb); 2268 printk("Task Management Func 0x%x Complete\n", 2269 scb->hscb->task_management); 2270 lun = CAM_LUN_WILDCARD; 2271 tag = SCB_LIST_NULL; 2272 2273 switch (scb->hscb->task_management) { 2274 case SIU_TASKMGMT_ABORT_TASK: 2275 tag = SCB_GET_TAG(scb); 2276 fallthrough; 2277 case SIU_TASKMGMT_ABORT_TASK_SET: 2278 case SIU_TASKMGMT_CLEAR_TASK_SET: 2279 lun = scb->hscb->lun; 2280 error = CAM_REQ_ABORTED; 2281 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 2282 'A', lun, tag, ROLE_INITIATOR, 2283 error); 2284 break; 2285 case SIU_TASKMGMT_LUN_RESET: 2286 lun = scb->hscb->lun; 2287 fallthrough; 2288 case SIU_TASKMGMT_TARGET_RESET: 2289 { 2290 struct ahd_devinfo devinfo; 2291 2292 ahd_scb_devinfo(ahd, &devinfo, scb); 2293 error = CAM_BDR_SENT; 2294 ahd_handle_devreset(ahd, &devinfo, lun, 2295 CAM_BDR_SENT, 2296 lun != CAM_LUN_WILDCARD 2297 ? "Lun Reset" 2298 : "Target Reset", 2299 /*verbose_level*/0); 2300 break; 2301 } 2302 default: 2303 panic("Unexpected TaskMgmt Func\n"); 2304 break; 2305 } 2306 } 2307 break; 2308 } 2309 case TASKMGMT_CMD_CMPLT_OKAY: 2310 { 2311 u_int scbid; 2312 struct scb *scb; 2313 2314 /* 2315 * An ABORT TASK TMF failed to be delivered before 2316 * the targeted command completed normally. 
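 * The SCB may still sit in the QINFIFO a second time to carry the TMF, so zero its task-management field to suppress a second completion interrupt and purge the duplicate from the QINFIFO.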
2317 */ 2318 scbid = ahd_get_scbptr(ahd); 2319 scb = ahd_lookup_scb(ahd, scbid); 2320 if (scb != NULL) { 2321 /* 2322 * Remove the second instance of this SCB from 2323 * the QINFIFO if it is still there. 2324 */ 2325 ahd_print_path(ahd, scb); 2326 printk("SCB completes before TMF\n"); 2327 /* 2328 * Handle losing the race. Wait until any 2329 * current selection completes. We will then 2330 * set the TMF back to zero in this SCB so that 2331 * the sequencer doesn't bother to issue another 2332 * sequencer interrupt for its completion. 2333 */ 2334 while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 2335 && (ahd_inb(ahd, SSTAT0) & SELDO) == 0 2336 && (ahd_inb(ahd, SSTAT1) & SELTO) == 0) 2337 ; 2338 ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0); 2339 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), 2340 SCB_GET_CHANNEL(ahd, scb), 2341 SCB_GET_LUN(scb), SCB_GET_TAG(scb), 2342 ROLE_INITIATOR, /*status*/0, 2343 SEARCH_REMOVE); 2344 } 2345 break; 2346 } 2347 case TRACEPOINT0: 2348 case TRACEPOINT1: 2349 case TRACEPOINT2: 2350 case TRACEPOINT3: 2351 printk("%s: Tracepoint %d\n", ahd_name(ahd), 2352 seqintcode - TRACEPOINT0); 2353 break; 2354 case NO_SEQINT: 2355 break; 2356 case SAW_HWERR: 2357 ahd_handle_hwerrint(ahd); 2358 break; 2359 default: 2360 printk("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd), 2361 seqintcode); 2362 break; 2363 } 2364 /* 2365 * The sequencer is paused immediately on 2366 * a SEQINT, so we should restart it when 2367 * we're done. 2368 */ 2369 ahd_unpause(ahd); 2370 } 2371 2372 static void 2373 ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) 2374 { 2375 struct scb *scb; 2376 u_int status0; 2377 u_int status3; 2378 u_int status; 2379 u_int lqistat1; 2380 u_int lqostat0; 2381 u_int scbid; 2382 u_int busfreetime; 2383 2384 ahd_update_modes(ahd); 2385 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2386 2387 status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR); 2388 status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO); 2389 status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 2390 lqistat1 = ahd_inb(ahd, LQISTAT1); 2391 lqostat0 = ahd_inb(ahd, LQOSTAT0); 2392 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; 2393 2394 /* 2395 * Ignore external resets after a bus reset. 2396 */ 2397 if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE)) { 2398 ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); 2399 return; 2400 } 2401 2402 /* 2403 * Clear bus reset flag 2404 */ 2405 ahd->flags &= ~AHD_BUS_RESET_ACTIVE; 2406 2407 if ((status0 & (SELDI|SELDO)) != 0) { 2408 u_int simode0; 2409 2410 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 2411 simode0 = ahd_inb(ahd, SIMODE0); 2412 status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO); 2413 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2414 } 2415 scbid = ahd_get_scbptr(ahd); 2416 scb = ahd_lookup_scb(ahd, scbid); 2417 if (scb != NULL 2418 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 2419 scb = NULL; 2420 2421 if ((status0 & IOERR) != 0) { 2422 u_int now_lvd; 2423 2424 now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40; 2425 printk("%s: Transceiver State Has Changed to %s mode\n", 2426 ahd_name(ahd), now_lvd ? "LVD" : "SE"); 2427 ahd_outb(ahd, CLRSINT0, CLRIOERR); 2428 /* 2429 * A change in I/O mode is equivalent to a bus reset. 2430 */ 2431 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2432 ahd_pause(ahd); 2433 ahd_setup_iocell_workaround(ahd); 2434 ahd_unpause(ahd); 2435 } else if ((status0 & OVERRUN) != 0) { 2436 2437 printk("%s: SCSI offset overrun detected. 
Resetting bus.\n", 2438 ahd_name(ahd)); 2439 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2440 } else if ((status & SCSIRSTI) != 0) { 2441 2442 printk("%s: Someone reset channel A\n", ahd_name(ahd)); 2443 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE); 2444 } else if ((status & SCSIPERR) != 0) { 2445 2446 /* Make sure the sequencer is in a safe location. */ 2447 ahd_clear_critical_section(ahd); 2448 2449 ahd_handle_transmission_error(ahd); 2450 } else if (lqostat0 != 0) { 2451 2452 printk("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0); 2453 ahd_outb(ahd, CLRLQOINT0, lqostat0); 2454 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) 2455 ahd_outb(ahd, CLRLQOINT1, 0); 2456 } else if ((status & SELTO) != 0) { 2457 /* Stop the selection */ 2458 ahd_outb(ahd, SCSISEQ0, 0); 2459 2460 /* Make sure the sequencer is in a safe location. */ 2461 ahd_clear_critical_section(ahd); 2462 2463 /* No more pending messages */ 2464 ahd_clear_msg_state(ahd); 2465 2466 /* Clear interrupt state */ 2467 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 2468 2469 /* 2470 * Although the driver does not care about the 2471 * 'Selection in Progress' status bit, the busy 2472 * LED does. SELINGO is only cleared by a successful 2473 * selection, so we must manually clear it to ensure 2474 * the LED turns off just in case no future successful 2475 * selections occur (e.g. no devices on the bus). 2476 */ 2477 ahd_outb(ahd, CLRSINT0, CLRSELINGO); 2478 2479 scbid = ahd_inw(ahd, WAITING_TID_HEAD); 2480 scb = ahd_lookup_scb(ahd, scbid); 2481 if (scb == NULL) { 2482 printk("%s: ahd_intr - referenced scb not " 2483 "valid during SELTO scb(0x%x)\n", 2484 ahd_name(ahd), scbid); 2485 ahd_dump_card_state(ahd); 2486 } else { 2487 struct ahd_devinfo devinfo; 2488 #ifdef AHD_DEBUG 2489 if ((ahd_debug & AHD_SHOW_SELTO) != 0) { 2490 ahd_print_path(ahd, scb); 2491 printk("Saw Selection Timeout for SCB 0x%x\n", 2492 scbid); 2493 } 2494 #endif 2495 ahd_scb_devinfo(ahd, &devinfo, scb); 2496 ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT); 2497 ahd_freeze_devq(ahd, scb); 2498 2499 /* 2500 * Cancel any pending transactions on the device 2501 * now that it seems to be missing. This will 2502 * also revert us to async/narrow transfers until 2503 * we can renegotiate with the device. 2504 */ 2505 ahd_handle_devreset(ahd, &devinfo, 2506 CAM_LUN_WILDCARD, 2507 CAM_SEL_TIMEOUT, 2508 "Selection Timeout", 2509 /*verbose_level*/1); 2510 } 2511 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2512 ahd_iocell_first_selection(ahd); 2513 ahd_unpause(ahd); 2514 } else if ((status0 & (SELDI|SELDO)) != 0) { 2515 2516 ahd_iocell_first_selection(ahd); 2517 ahd_unpause(ahd); 2518 } else if (status3 != 0) { 2519 printk("%s: SCSI Cell parity error SSTAT3 == 0x%x\n", 2520 ahd_name(ahd), status3); 2521 ahd_outb(ahd, CLRSINT3, status3); 2522 } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) { 2523 2524 /* Make sure the sequencer is in a safe location. */ 2525 ahd_clear_critical_section(ahd); 2526 2527 ahd_handle_lqiphase_error(ahd, lqistat1); 2528 } else if ((lqistat1 & LQICRCI_NLQ) != 0) { 2529 /* 2530 * This status can be delayed during some 2531 * streaming operations. The SCSIPHASE 2532 * handler has already dealt with this case 2533 * so just clear the error.
2534 */ 2535 ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ); 2536 } else if ((status & BUSFREE) != 0 2537 || (lqistat1 & LQOBUSFREE) != 0) { 2538 u_int lqostat1; 2539 int restart; 2540 int clear_fifo; 2541 int packetized; 2542 u_int mode; 2543 2544 /* 2545 * Clear our selection hardware as soon as possible. 2546 * We may have an entry in the waiting Q for this target 2547 * that is affected by this busfree, and we don't want to 2548 * go about selecting the target while we handle the event. 2549 */ 2550 ahd_outb(ahd, SCSISEQ0, 0); 2551 2552 /* Make sure the sequencer is in a safe location. */ 2553 ahd_clear_critical_section(ahd); 2554 2555 /* 2556 * Determine what we were up to at the time of 2557 * the busfree. 2558 */ 2559 mode = AHD_MODE_SCSI; 2560 busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; 2561 lqostat1 = ahd_inb(ahd, LQOSTAT1); 2562 switch (busfreetime) { 2563 case BUSFREE_DFF0: 2564 case BUSFREE_DFF1: 2565 { 2566 mode = busfreetime == BUSFREE_DFF0 2567 ? AHD_MODE_DFF0 : AHD_MODE_DFF1; 2568 ahd_set_modes(ahd, mode, mode); 2569 scbid = ahd_get_scbptr(ahd); 2570 scb = ahd_lookup_scb(ahd, scbid); 2571 if (scb == NULL) { 2572 printk("%s: Invalid SCB %d in DFF%d " 2573 "during unexpected busfree\n", 2574 ahd_name(ahd), scbid, mode); 2575 packetized = 0; 2576 } else 2577 packetized = (scb->flags & SCB_PACKETIZED) != 0; 2578 clear_fifo = 1; 2579 break; 2580 } 2581 case BUSFREE_LQO: 2582 clear_fifo = 0; 2583 packetized = 1; 2584 break; 2585 default: 2586 clear_fifo = 0; 2587 packetized = (lqostat1 & LQOBUSFREE) != 0; 2588 if (!packetized 2589 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE 2590 && (ahd_inb(ahd, SSTAT0) & SELDI) == 0 2591 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0 2592 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0)) 2593 /* 2594 * Assume packetized if we are not 2595 * on the bus in a non-packetized 2596 * capacity and any pending selection 2597 * was a packetized selection. 2598 */ 2599 packetized = 1; 2600 break; 2601 } 2602 2603 #ifdef AHD_DEBUG 2604 if ((ahd_debug & AHD_SHOW_MISC) != 0) 2605 printk("Saw Busfree. Busfreetime = 0x%x.\n", 2606 busfreetime); 2607 #endif 2608 /* 2609 * Busfrees that occur in non-packetized phases are 2610 * handled by the nonpkt_busfree handler. 2611 */ 2612 if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) { 2613 restart = ahd_handle_pkt_busfree(ahd, busfreetime); 2614 } else { 2615 packetized = 0; 2616 restart = ahd_handle_nonpkt_busfree(ahd); 2617 } 2618 /* 2619 * Clear the busfree interrupt status. The setting of 2620 * the interrupt is a pulse, so in a perfect world, we 2621 * would not need to muck with the ENBUSFREE logic. This 2622 * would ensure that if the bus moves on to another 2623 * connection, busfree protection is still in force. If 2624 * BUSFREEREV is broken, however, we must manually clear 2625 * ENBUSFREE if the busfree occurred during a non-pack 2626 * connection so that we don't get false positives during 2627 * future packetized connections. 2628 */ 2629 ahd_outb(ahd, CLRSINT1, CLRBUSFREE); 2630 if (packetized == 0 2631 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0) 2632 ahd_outb(ahd, SIMODE1, 2633 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE); 2634 2635 if (clear_fifo) 2636 ahd_clear_fifo(ahd, mode); 2637 2638 ahd_clear_msg_state(ahd); 2639 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2640 if (restart) { 2641 ahd_restart(ahd); 2642 } else { 2643 ahd_unpause(ahd); 2644 } 2645 } else { 2646 printk("%s: Missing case in ahd_handle_scsiint.
status = %x\n", 2647 ahd_name(ahd), status); 2648 ahd_dump_card_state(ahd); 2649 ahd_clear_intstat(ahd); 2650 ahd_unpause(ahd); 2651 } 2652 } 2653 2654 static void 2655 ahd_handle_transmission_error(struct ahd_softc *ahd) 2656 { 2657 struct scb *scb; 2658 u_int scbid; 2659 u_int lqistat1; 2660 u_int msg_out; 2661 u_int curphase; 2662 u_int lastphase; 2663 u_int perrdiag; 2664 u_int cur_col; 2665 int silent; 2666 2667 scb = NULL; 2668 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2669 lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ); 2670 ahd_inb(ahd, LQISTAT2); 2671 if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0 2672 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) { 2673 u_int lqistate; 2674 2675 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 2676 lqistate = ahd_inb(ahd, LQISTATE); 2677 if ((lqistate >= 0x1E && lqistate <= 0x24) 2678 || (lqistate == 0x29)) { 2679 #ifdef AHD_DEBUG 2680 if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { 2681 printk("%s: NLQCRC found via LQISTATE\n", 2682 ahd_name(ahd)); 2683 } 2684 #endif 2685 lqistat1 |= LQICRCI_NLQ; 2686 } 2687 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2688 } 2689 2690 ahd_outb(ahd, CLRLQIINT1, lqistat1); 2691 lastphase = ahd_inb(ahd, LASTPHASE); 2692 curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 2693 perrdiag = ahd_inb(ahd, PERRDIAG); 2694 msg_out = INITIATOR_ERROR; 2695 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR); 2696 2697 /* 2698 * Try to find the SCB associated with this error. 2699 */ 2700 silent = FALSE; 2701 if (lqistat1 == 0 2702 || (lqistat1 & LQICRCI_NLQ) != 0) { 2703 if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0) 2704 ahd_set_active_fifo(ahd); 2705 scbid = ahd_get_scbptr(ahd); 2706 scb = ahd_lookup_scb(ahd, scbid); 2707 if (scb != NULL && SCB_IS_SILENT(scb)) 2708 silent = TRUE; 2709 } 2710 2711 cur_col = 0; 2712 if (silent == FALSE) { 2713 printk("%s: Transmission error detected\n", ahd_name(ahd)); 2714 ahd_lqistat1_print(lqistat1, &cur_col, 50); 2715 ahd_lastphase_print(lastphase, &cur_col, 50); 2716 ahd_scsisigi_print(curphase, &cur_col, 50); 2717 ahd_perrdiag_print(perrdiag, &cur_col, 50); 2718 printk("\n"); 2719 ahd_dump_card_state(ahd); 2720 } 2721 2722 if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) { 2723 if (silent == FALSE) { 2724 printk("%s: Gross protocol error during incoming " 2725 "packet. lqistat1 == 0x%x. Resetting bus.\n", 2726 ahd_name(ahd), lqistat1); 2727 } 2728 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2729 return; 2730 } else if ((lqistat1 & LQICRCI_LQ) != 0) { 2731 /* 2732 * A CRC error has been detected on an incoming LQ. 2733 * The bus is currently hung on the last ACK. 2734 * Hit LQIRETRY to release the last ack, and 2735 * wait for the sequencer to determine that ATNO 2736 * is asserted while in message out to take us 2737 * to our host message loop. No NONPACKREQ or 2738 * LQIPHASE type errors will occur in this 2739 * scenario. After this first LQIRETRY, the LQI 2740 * manager will be in ISELO where it will 2741 * happily sit until another packet phase begins. 2742 * Unexpected bus free detection is enabled 2743 * through any phases that occur after we release 2744 * this last ack until the LQI manager sees a 2745 * packet phase. This implies we may have to 2746 * ignore a perfectly valid "unexpected busfree" 2747 * after our "initiator detected error" message is 2748 * sent. A busfree is the expected response after 2749 * we tell the target that its L_Q was corrupted.
2750 * (SPI4R09 10.7.3.3.3) 2751 */ 2752 ahd_outb(ahd, LQCTL2, LQIRETRY); 2753 printk("LQIRetry for LQICRCI_LQ to release ACK\n"); 2754 } else if ((lqistat1 & LQICRCI_NLQ) != 0) { 2755 /* 2756 * We detected a CRC error in a NON-LQ packet. 2757 * The hardware has varying behavior in this situation 2758 * depending on whether this packet was part of a 2759 * stream or not. 2760 * 2761 * PKT by PKT mode: 2762 * The hardware has already acked the complete packet. 2763 * If the target honors our outstanding ATN condition, 2764 * we should be (or soon will be) in MSGOUT phase. 2765 * This will trigger the LQIPHASE_LQ status bit as the 2766 * hardware was expecting another LQ. Unexpected 2767 * busfree detection is enabled. Once LQIPHASE_LQ is 2768 * true (first entry into host message loop is much 2769 * the same), we must clear LQIPHASE_LQ and hit 2770 * LQIRETRY so the hardware is ready to handle 2771 * a future LQ. NONPACKREQ will not be asserted again 2772 * once we hit LQIRETRY until another packet is 2773 * processed. The target may either go busfree 2774 * or start another packet in response to our message. 2775 * 2776 * Read Streaming P0 asserted: 2777 * If we raise ATN and the target completes the entire 2778 * stream (P0 asserted during the last packet), the 2779 * hardware will ack all data and return to the ISTART 2780 * state. When the target responds to our ATN condition, 2781 * LQIPHASE_LQ will be asserted. We should respond to 2782 * this with an LQIRETRY to prepare for any future 2783 * packets. NONPACKREQ will not be asserted again 2784 * once we hit LQIRETRY until another packet is 2785 * processed. The target may either go busfree or 2786 * start another packet in response to our message. 2787 * Busfree detection is enabled. 2788 * 2789 * Read Streaming P0 not asserted: 2790 * If we raise ATN and the target transitions to 2791 * MSGOUT in or after a packet where P0 is not 2792 * asserted, the hardware will assert LQIPHASE_NLQ. 2793 * We should respond to the LQIPHASE_NLQ with an 2794 * LQIRETRY. Should the target stay in a non-pkt 2795 * phase after we send our message, the hardware 2796 * will assert LQIPHASE_LQ. Recovery is then just as 2797 * listed above for the read streaming with P0 asserted. 2798 * Busfree detection is enabled. 2799 */ 2800 if (silent == FALSE) 2801 printk("LQICRC_NLQ\n"); 2802 if (scb == NULL) { 2803 printk("%s: No SCB valid for LQICRC_NLQ. " 2804 "Resetting bus\n", ahd_name(ahd)); 2805 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2806 return; 2807 } 2808 } else if ((lqistat1 & LQIBADLQI) != 0) { 2809 printk("Need to handle BADLQI!\n"); 2810 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2811 return; 2812 } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) { 2813 if ((curphase & ~P_DATAIN_DT) != 0) { 2814 /* Ack the byte so we can continue. */ 2815 if (silent == FALSE) 2816 printk("Acking %s to clear perror\n", 2817 ahd_lookup_phase_entry(curphase)->phasemsg); 2818 ahd_inb(ahd, SCSIDAT); 2819 } 2820 2821 if (curphase == P_MESGIN) 2822 msg_out = MSG_PARITY_ERROR; 2823 } 2824 2825 /* 2826 * We've set the hardware to assert ATN if we 2827 * get a parity error on "in" phases, so all we 2828 * need to do is stuff the message buffer with 2829 * the appropriate message. "In" phases have set 2830 * msg_out to something other than NOP.
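 * send_msg_perror is picked up by the host message loop, which transmits the single-byte error message once the target enters its message-out phase.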
2831 */ 2832 ahd->send_msg_perror = msg_out; 2833 if (scb != NULL && msg_out == INITIATOR_ERROR) 2834 scb->flags |= SCB_TRANSMISSION_ERROR; 2835 ahd_outb(ahd, MSG_OUT, HOST_MSG); 2836 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2837 ahd_unpause(ahd); 2838 } 2839 2840 static void 2841 ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1) 2842 { 2843 /* 2844 * Clear the sources of the interrupts. 2845 */ 2846 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2847 ahd_outb(ahd, CLRLQIINT1, lqistat1); 2848 2849 /* 2850 * If the "illegal" phase changes were in response 2851 * to our ATN to flag a CRC error, AND we ended up 2852 * on packet boundaries, clear the error, restart the 2853 * LQI manager as appropriate, and go on our merry 2854 * way toward sending the message. Otherwise, reset 2855 * the bus to clear the error. 2856 */ 2857 ahd_set_active_fifo(ahd); 2858 if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0 2859 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) { 2860 if ((lqistat1 & LQIPHASE_LQ) != 0) { 2861 printk("LQIRETRY for LQIPHASE_LQ\n"); 2862 ahd_outb(ahd, LQCTL2, LQIRETRY); 2863 } else if ((lqistat1 & LQIPHASE_NLQ) != 0) { 2864 printk("LQIRETRY for LQIPHASE_NLQ\n"); 2865 ahd_outb(ahd, LQCTL2, LQIRETRY); 2866 } else 2867 panic("ahd_handle_lqiphase_error: No phase errors\n"); 2868 ahd_dump_card_state(ahd); 2869 ahd_outb(ahd, CLRINT, CLRSCSIINT); 2870 ahd_unpause(ahd); 2871 } else { 2872 printk("Resetting Channel for LQI Phase error\n"); 2873 ahd_dump_card_state(ahd); 2874 ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); 2875 } 2876 } 2877 2878 /* 2879 * Packetized unexpected or expected busfree. 2880 * Entered in mode based on busfreetime. 2881 */ 2882 static int 2883 ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime) 2884 { 2885 u_int lqostat1; 2886 2887 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 2888 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 2889 lqostat1 = ahd_inb(ahd, LQOSTAT1); 2890 if ((lqostat1 & LQOBUSFREE) != 0) { 2891 struct scb *scb; 2892 u_int scbid; 2893 u_int saved_scbptr; 2894 u_int waiting_h; 2895 u_int waiting_t; 2896 u_int next; 2897 2898 /* 2899 * The LQO manager detected an unexpected busfree 2900 * either: 2901 * 2902 * 1) During an outgoing LQ. 2903 * 2) After an outgoing LQ but before the first 2904 * REQ of the command packet. 2905 * 3) During an outgoing command packet. 2906 * 2907 * In all cases, CURRSCB is pointing to the 2908 * SCB that encountered the failure. Clean 2909 * up the queue, clear SELDO and LQOBUSFREE, 2910 * and allow the sequencer to restart the select 2911 * out at its leisure. 2912 */ 2913 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 2914 scbid = ahd_inw(ahd, CURRSCB); 2915 scb = ahd_lookup_scb(ahd, scbid); 2916 if (scb == NULL) 2917 panic("SCB not valid during LQOBUSFREE"); 2918 /* 2919 * Clear the status. 2920 */ 2921 ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE); 2922 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) 2923 ahd_outb(ahd, CLRLQOINT1, 0); 2924 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 2925 ahd_flush_device_writes(ahd); 2926 ahd_outb(ahd, CLRSINT0, CLRSELDO); 2927 2928 /* 2929 * Return the LQO manager to its idle loop. It will 2930 * not do this automatically if the busfree occurs 2931 * after the first REQ of either the LQ or command 2932 * packet or between the LQ and command packet. 2933 */ 2934 ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE); 2935 2936 /* 2937 * Update the waiting for selection queue so 2938 * we restart on the correct SCB.
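 * WAITING_TID_HEAD/TAIL and SCB_NEXT2 form a linked list in SCB RAM; the failed SCB is placed back at the head so the sequencer's next selection retries this command first.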
2939 */ 2940 waiting_h = ahd_inw(ahd, WAITING_TID_HEAD); 2941 saved_scbptr = ahd_get_scbptr(ahd); 2942 if (waiting_h != scbid) { 2943 2944 ahd_outw(ahd, WAITING_TID_HEAD, scbid); 2945 waiting_t = ahd_inw(ahd, WAITING_TID_TAIL); 2946 if (waiting_t == waiting_h) { 2947 ahd_outw(ahd, WAITING_TID_TAIL, scbid); 2948 next = SCB_LIST_NULL; 2949 } else { 2950 ahd_set_scbptr(ahd, waiting_h); 2951 next = ahd_inw_scbram(ahd, SCB_NEXT2); 2952 } 2953 ahd_set_scbptr(ahd, scbid); 2954 ahd_outw(ahd, SCB_NEXT2, next); 2955 } 2956 ahd_set_scbptr(ahd, saved_scbptr); 2957 if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) { 2958 if (SCB_IS_SILENT(scb) == FALSE) { 2959 ahd_print_path(ahd, scb); 2960 printk("Probable outgoing LQ CRC error. " 2961 "Retrying command\n"); 2962 } 2963 scb->crc_retry_count++; 2964 } else { 2965 ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); 2966 ahd_freeze_scb(scb); 2967 ahd_freeze_devq(ahd, scb); 2968 } 2969 /* Return unpausing the sequencer. */ 2970 return (0); 2971 } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) { 2972 /* 2973 * Ignore what are really parity errors that 2974 * occur on the last REQ of a free running 2975 * clock prior to going busfree. Some drives 2976 * do not properly active negate just before 2977 * going busfree, resulting in a parity glitch. 2978 */ 2979 ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE); 2980 #ifdef AHD_DEBUG 2981 if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0) 2982 printk("%s: Parity on last REQ detected " 2983 "during busfree phase.\n", 2984 ahd_name(ahd)); 2985 #endif 2986 /* Return unpausing the sequencer. */ 2987 return (0); 2988 } 2989 if (ahd->src_mode != AHD_MODE_SCSI) { 2990 u_int scbid; 2991 struct scb *scb; 2992 2993 scbid = ahd_get_scbptr(ahd); 2994 scb = ahd_lookup_scb(ahd, scbid); 2995 ahd_print_path(ahd, scb); 2996 printk("Unexpected PKT busfree condition\n"); 2997 ahd_dump_card_state(ahd); 2998 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', 2999 SCB_GET_LUN(scb), SCB_GET_TAG(scb), 3000 ROLE_INITIATOR, CAM_UNEXP_BUSFREE); 3001 3002 /* Return restarting the sequencer. */ 3003 return (1); 3004 } 3005 printk("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); 3006 ahd_dump_card_state(ahd); 3007 /* Restart the sequencer. */ 3008 return (1); 3009 } 3010 3011 /* 3012 * Non-packetized unexpected or expected busfree. 3013 */ 3014 static int 3015 ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) 3016 { 3017 struct ahd_devinfo devinfo; 3018 struct scb *scb; 3019 u_int lastphase; 3020 u_int saved_scsiid; 3021 u_int saved_lun; 3022 u_int target; 3023 u_int initiator_role_id; 3024 u_int scbid; 3025 u_int ppr_busfree; 3026 int printerror; 3027 3028 /* 3029 * Look at what phase we were last in. If it was message out, 3030 * chances are pretty good that the busfree was in response 3031 * to one of our abort requests.
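 * Otherwise the busfree may be the target's response to a rejected negotiation message; those cases are handled individually below.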
3032 */ 3033 lastphase = ahd_inb(ahd, LASTPHASE); 3034 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); 3035 saved_lun = ahd_inb(ahd, SAVED_LUN); 3036 target = SCSIID_TARGET(ahd, saved_scsiid); 3037 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 3038 ahd_compile_devinfo(&devinfo, initiator_role_id, 3039 target, saved_lun, 'A', ROLE_INITIATOR); 3040 printerror = 1; 3041 3042 scbid = ahd_get_scbptr(ahd); 3043 scb = ahd_lookup_scb(ahd, scbid); 3044 if (scb != NULL 3045 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) 3046 scb = NULL; 3047 3048 ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0; 3049 if (lastphase == P_MESGOUT) { 3050 u_int tag; 3051 3052 tag = SCB_LIST_NULL; 3053 if (ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK, TRUE) 3054 || ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK_SET, TRUE)) { 3055 int found; 3056 int sent_msg; 3057 3058 if (scb == NULL) { 3059 ahd_print_devinfo(ahd, &devinfo); 3060 printk("Abort for unidentified " 3061 "connection completed.\n"); 3062 /* restart the sequencer. */ 3063 return (1); 3064 } 3065 sent_msg = ahd->msgout_buf[ahd->msgout_index - 1]; 3066 ahd_print_path(ahd, scb); 3067 printk("SCB %d - Abort%s Completed.\n", 3068 SCB_GET_TAG(scb), 3069 sent_msg == ABORT_TASK ? "" : " Tag"); 3070 3071 if (sent_msg == ABORT_TASK) 3072 tag = SCB_GET_TAG(scb); 3073 3074 if ((scb->flags & SCB_EXTERNAL_RESET) != 0) { 3075 /* 3076 * This abort is in response to an 3077 * unexpected switch to command phase 3078 * for a packetized connection. Since 3079 * the identify message was never sent, 3080 * "saved lun" is 0. We really want to 3081 * abort only the SCB that encountered 3082 * this error, which could have a different 3083 * lun. The SCB will be retried so the OS 3084 * will see the UA after renegotiating to 3085 * packetized. 3086 */ 3087 tag = SCB_GET_TAG(scb); 3088 saved_lun = scb->hscb->lun; 3089 } 3090 found = ahd_abort_scbs(ahd, target, 'A', saved_lun, 3091 tag, ROLE_INITIATOR, 3092 CAM_REQ_ABORTED); 3093 printk("found == 0x%x\n", found); 3094 printerror = 0; 3095 } else if (ahd_sent_msg(ahd, AHDMSG_1B, 3096 TARGET_RESET, TRUE)) { 3097 ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, 3098 CAM_BDR_SENT, "Bus Device Reset", 3099 /*verbose_level*/0); 3100 printerror = 0; 3101 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, FALSE) 3102 && ppr_busfree == 0) { 3103 struct ahd_initiator_tinfo *tinfo; 3104 struct ahd_tmode_tstate *tstate; 3105 3106 /* 3107 * PPR Rejected. 3108 * 3109 * If the previous negotiation was packetized, 3110 * this could be because the device has been 3111 * reset without our knowledge. Force our 3112 * current negotiation to async and retry the 3113 * negotiation. Otherwise retry the command 3114 * with non-ppr negotiation. 3115 */ 3116 #ifdef AHD_DEBUG 3117 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3118 printk("PPR negotiation rejected busfree.\n"); 3119 #endif 3120 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, 3121 devinfo.our_scsiid, 3122 devinfo.target, &tstate); 3123 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) { 3124 ahd_set_width(ahd, &devinfo, 3125 MSG_EXT_WDTR_BUS_8_BIT, 3126 AHD_TRANS_CUR, 3127 /*paused*/TRUE); 3128 ahd_set_syncrate(ahd, &devinfo, 3129 /*period*/0, /*offset*/0, 3130 /*ppr_options*/0, 3131 AHD_TRANS_CUR, 3132 /*paused*/TRUE); 3133 /* 3134 * The expect PPR busfree handler below 3135 * will effect the retry and necessary 3136 * abort. 
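 * (This is the MSG_FLAG_EXPECT_PPR_BUSFREE case checked near the end of this routine.)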
3137 */ 3138 } else { 3139 tinfo->curr.transport_version = 2; 3140 tinfo->goal.transport_version = 2; 3141 tinfo->goal.ppr_options = 0; 3142 if (scb != NULL) { 3143 /* 3144 * Remove any SCBs in the waiting 3145 * for selection queue that may 3146 * also be for this target so that 3147 * command ordering is preserved. 3148 */ 3149 ahd_freeze_devq(ahd, scb); 3150 ahd_qinfifo_requeue_tail(ahd, scb); 3151 } 3152 printerror = 0; 3153 } 3154 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, FALSE) 3155 && ppr_busfree == 0) { 3156 /* 3157 * Negotiation Rejected. Go-narrow and 3158 * retry command. 3159 */ 3160 #ifdef AHD_DEBUG 3161 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3162 printk("WDTR negotiation rejected busfree.\n"); 3163 #endif 3164 ahd_set_width(ahd, &devinfo, 3165 MSG_EXT_WDTR_BUS_8_BIT, 3166 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3167 /*paused*/TRUE); 3168 if (scb != NULL) { 3169 /* 3170 * Remove any SCBs in the waiting for 3171 * selection queue that may also be for 3172 * this target so that command ordering 3173 * is preserved. 3174 */ 3175 ahd_freeze_devq(ahd, scb); 3176 ahd_qinfifo_requeue_tail(ahd, scb); 3177 } 3178 printerror = 0; 3179 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, FALSE) 3180 && ppr_busfree == 0) { 3181 /* 3182 * Negotiation Rejected. Go-async and 3183 * retry command. 3184 */ 3185 #ifdef AHD_DEBUG 3186 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3187 printk("SDTR negotiation rejected busfree.\n"); 3188 #endif 3189 ahd_set_syncrate(ahd, &devinfo, 3190 /*period*/0, /*offset*/0, 3191 /*ppr_options*/0, 3192 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3193 /*paused*/TRUE); 3194 if (scb != NULL) { 3195 /* 3196 * Remove any SCBs in the waiting for 3197 * selection queue that may also be for 3198 * this target so that command ordering 3199 * is preserved. 3200 */ 3201 ahd_freeze_devq(ahd, scb); 3202 ahd_qinfifo_requeue_tail(ahd, scb); 3203 } 3204 printerror = 0; 3205 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 3206 && ahd_sent_msg(ahd, AHDMSG_1B, 3207 INITIATOR_ERROR, TRUE)) { 3208 3209 #ifdef AHD_DEBUG 3210 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3211 printk("Expected IDE Busfree\n"); 3212 #endif 3213 printerror = 0; 3214 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE) 3215 && ahd_sent_msg(ahd, AHDMSG_1B, 3216 MESSAGE_REJECT, TRUE)) { 3217 3218 #ifdef AHD_DEBUG 3219 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3220 printk("Expected QAS Reject Busfree\n"); 3221 #endif 3222 printerror = 0; 3223 } 3224 } 3225 3226 /* 3227 * The busfree required flag is honored at the end of 3228 * the message phases. We check it last in case we 3229 * had to send some other message that caused a busfree. 
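 * If the IU_REQ setting changed, every command for the device is aborted and requeued so it restarts under the new agreement; otherwise only this SCB is returned to the OS with a requeue status.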
3230 */ 3231 if (scb != NULL && printerror != 0 3232 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) 3233 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { 3234 3235 ahd_freeze_devq(ahd, scb); 3236 ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); 3237 ahd_freeze_scb(scb); 3238 if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) { 3239 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 3240 SCB_GET_CHANNEL(ahd, scb), 3241 SCB_GET_LUN(scb), SCB_LIST_NULL, 3242 ROLE_INITIATOR, CAM_REQ_ABORTED); 3243 } else { 3244 #ifdef AHD_DEBUG 3245 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3246 printk("PPR Negotiation Busfree.\n"); 3247 #endif 3248 ahd_done(ahd, scb); 3249 } 3250 printerror = 0; 3251 } 3252 if (printerror != 0) { 3253 int aborted; 3254 3255 aborted = 0; 3256 if (scb != NULL) { 3257 u_int tag; 3258 3259 if ((scb->hscb->control & TAG_ENB) != 0) 3260 tag = SCB_GET_TAG(scb); 3261 else 3262 tag = SCB_LIST_NULL; 3263 ahd_print_path(ahd, scb); 3264 aborted = ahd_abort_scbs(ahd, target, 'A', 3265 SCB_GET_LUN(scb), tag, 3266 ROLE_INITIATOR, 3267 CAM_UNEXP_BUSFREE); 3268 } else { 3269 /* 3270 * We had not fully identified this connection, 3271 * so we cannot abort anything. 3272 */ 3273 printk("%s: ", ahd_name(ahd)); 3274 } 3275 printk("Unexpected busfree %s, %d SCBs aborted, " 3276 "PRGMCNT == 0x%x\n", 3277 ahd_lookup_phase_entry(lastphase)->phasemsg, 3278 aborted, 3279 ahd_inw(ahd, PRGMCNT)); 3280 ahd_dump_card_state(ahd); 3281 if (lastphase != P_BUSFREE) 3282 ahd_force_renegotiation(ahd, &devinfo); 3283 } 3284 /* Always restart the sequencer. */ 3285 return (1); 3286 } 3287 3288 static void 3289 ahd_handle_proto_violation(struct ahd_softc *ahd) 3290 { 3291 struct ahd_devinfo devinfo; 3292 struct scb *scb; 3293 u_int scbid; 3294 u_int seq_flags; 3295 u_int curphase; 3296 u_int lastphase; 3297 int found; 3298 3299 ahd_fetch_devinfo(ahd, &devinfo); 3300 scbid = ahd_get_scbptr(ahd); 3301 scb = ahd_lookup_scb(ahd, scbid); 3302 seq_flags = ahd_inb(ahd, SEQ_FLAGS); 3303 curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; 3304 lastphase = ahd_inb(ahd, LASTPHASE); 3305 if ((seq_flags & NOT_IDENTIFIED) != 0) { 3306 3307 /* 3308 * The reconnecting target either did not send an 3309 * identify message, or did, but we didn't find an SCB 3310 * to match. 3311 */ 3312 ahd_print_devinfo(ahd, &devinfo); 3313 printk("Target did not send an IDENTIFY message. " 3314 "LASTPHASE = 0x%x.\n", lastphase); 3315 scb = NULL; 3316 } else if (scb == NULL) { 3317 /* 3318 * We don't seem to have an SCB active for this 3319 * transaction. Print an error and reset the bus. 3320 */ 3321 ahd_print_devinfo(ahd, &devinfo); 3322 printk("No SCB found during protocol violation\n"); 3323 goto proto_violation_reset; 3324 } else { 3325 ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL); 3326 if ((seq_flags & NO_CDB_SENT) != 0) { 3327 ahd_print_path(ahd, scb); 3328 printk("No or incomplete CDB sent to device.\n"); 3329 } else if ((ahd_inb_scbram(ahd, SCB_CONTROL) 3330 & STATUS_RCVD) == 0) { 3331 /* 3332 * The target never bothered to provide status to 3333 * us prior to completing the command. Since we don't 3334 * know the disposition of this command, we must attempt 3335 * to abort it. Assert ATN and prepare to send an abort 3336 * message. 
3337 */ 3338 ahd_print_path(ahd, scb); 3339 printk("Completed command without status.\n"); 3340 } else { 3341 ahd_print_path(ahd, scb); 3342 printk("Unknown protocol violation.\n"); 3343 ahd_dump_card_state(ahd); 3344 } 3345 } 3346 if ((lastphase & ~P_DATAIN_DT) == 0 3347 || lastphase == P_COMMAND) { 3348 proto_violation_reset: 3349 /* 3350 * Target either went directly to data 3351 * phase or didn't respond to our ATN. 3352 * The only safe thing to do is to blow 3353 * it away with a bus reset. 3354 */ 3355 found = ahd_reset_channel(ahd, 'A', TRUE); 3356 printk("%s: Issued Channel %c Bus Reset. " 3357 "%d SCBs aborted\n", ahd_name(ahd), 'A', found); 3358 } else { 3359 /* 3360 * Leave the selection hardware off in case 3361 * this abort attempt will affect yet to 3362 * be sent commands. 3363 */ 3364 ahd_outb(ahd, SCSISEQ0, 3365 ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 3366 ahd_assert_atn(ahd); 3367 ahd_outb(ahd, MSG_OUT, HOST_MSG); 3368 if (scb == NULL) { 3369 ahd_print_devinfo(ahd, &devinfo); 3370 ahd->msgout_buf[0] = ABORT_TASK; 3371 ahd->msgout_len = 1; 3372 ahd->msgout_index = 0; 3373 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 3374 } else { 3375 ahd_print_path(ahd, scb); 3376 scb->flags |= SCB_ABORT; 3377 } 3378 printk("Protocol violation %s. Attempting to abort.\n", 3379 ahd_lookup_phase_entry(curphase)->phasemsg); 3380 } 3381 } 3382 3383 /* 3384 * Force renegotiation to occur the next time we initiate 3385 * a command to the current device. 3386 */ 3387 static void 3388 ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 3389 { 3390 struct ahd_initiator_tinfo *targ_info; 3391 struct ahd_tmode_tstate *tstate; 3392 3393 #ifdef AHD_DEBUG 3394 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 3395 ahd_print_devinfo(ahd, devinfo); 3396 printk("Forcing renegotiation\n"); 3397 } 3398 #endif 3399 targ_info = ahd_fetch_transinfo(ahd, 3400 devinfo->channel, 3401 devinfo->our_scsiid, 3402 devinfo->target, 3403 &tstate); 3404 ahd_update_neg_request(ahd, devinfo, tstate, 3405 targ_info, AHD_NEG_IF_NON_ASYNC); 3406 } 3407 3408 #define AHD_MAX_STEPS 2000 3409 static void 3410 ahd_clear_critical_section(struct ahd_softc *ahd) 3411 { 3412 ahd_mode_state saved_modes; 3413 int stepping; 3414 int steps; 3415 int first_instr; 3416 u_int simode0; 3417 u_int simode1; 3418 u_int simode3; 3419 u_int lqimode0; 3420 u_int lqimode1; 3421 u_int lqomode0; 3422 u_int lqomode1; 3423 3424 if (ahd->num_critical_sections == 0) 3425 return; 3426 3427 stepping = FALSE; 3428 steps = 0; 3429 first_instr = 0; 3430 simode0 = 0; 3431 simode1 = 0; 3432 simode3 = 0; 3433 lqimode0 = 0; 3434 lqimode1 = 0; 3435 lqomode0 = 0; 3436 lqomode1 = 0; 3437 saved_modes = ahd_save_modes(ahd); 3438 for (;;) { 3439 struct cs *cs; 3440 u_int seqaddr; 3441 u_int i; 3442 3443 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 3444 seqaddr = ahd_inw(ahd, CURADDR); 3445 3446 cs = ahd->critical_sections; 3447 for (i = 0; i < ahd->num_critical_sections; i++, cs++) { 3448 if (cs->begin < seqaddr && cs->end >= seqaddr) 3449 break; 3450 } 3451 3452 if (i == ahd->num_critical_sections) 3453 break; 3454 3455 if (steps > AHD_MAX_STEPS) { 3456 printk("%s: Infinite loop in critical section\n" 3457 "%s: First Instruction 0x%x now 0x%x\n", 3458 ahd_name(ahd), ahd_name(ahd), first_instr, 3459 seqaddr); 3460 ahd_dump_card_state(ahd); 3461 panic("critical section loop"); 3462 } 3463 3464 steps++; 3465 #ifdef AHD_DEBUG 3466 if ((ahd_debug & AHD_SHOW_MISC) != 0) 3467 printk("%s: Single stepping at 0x%x\n", ahd_name(ahd), 3468 seqaddr); 3469 #endif 
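/* On the first step through a critical section, save and disable every SCSI, LQI, and LQO interrupt mask so stray status cannot disturb single stepping; the saved masks are restored once the sequencer steps clear of the section. */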
3470 if (stepping == FALSE) { 3471 3472 first_instr = seqaddr; 3473 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 3474 simode0 = ahd_inb(ahd, SIMODE0); 3475 simode3 = ahd_inb(ahd, SIMODE3); 3476 lqimode0 = ahd_inb(ahd, LQIMODE0); 3477 lqimode1 = ahd_inb(ahd, LQIMODE1); 3478 lqomode0 = ahd_inb(ahd, LQOMODE0); 3479 lqomode1 = ahd_inb(ahd, LQOMODE1); 3480 ahd_outb(ahd, SIMODE0, 0); 3481 ahd_outb(ahd, SIMODE3, 0); 3482 ahd_outb(ahd, LQIMODE0, 0); 3483 ahd_outb(ahd, LQIMODE1, 0); 3484 ahd_outb(ahd, LQOMODE0, 0); 3485 ahd_outb(ahd, LQOMODE1, 0); 3486 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 3487 simode1 = ahd_inb(ahd, SIMODE1); 3488 /* 3489 * We don't clear ENBUSFREE. Unfortunately 3490 * we cannot re-enable busfree detection within 3491 * the current connection, so we must leave it 3492 * on while single stepping. 3493 */ 3494 ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE); 3495 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP); 3496 stepping = TRUE; 3497 } 3498 ahd_outb(ahd, CLRSINT1, CLRBUSFREE); 3499 ahd_outb(ahd, CLRINT, CLRSCSIINT); 3500 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); 3501 ahd_outb(ahd, HCNTRL, ahd->unpause); 3502 while (!ahd_is_paused(ahd)) 3503 ahd_delay(200); 3504 ahd_update_modes(ahd); 3505 } 3506 if (stepping) { 3507 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 3508 ahd_outb(ahd, SIMODE0, simode0); 3509 ahd_outb(ahd, SIMODE3, simode3); 3510 ahd_outb(ahd, LQIMODE0, lqimode0); 3511 ahd_outb(ahd, LQIMODE1, lqimode1); 3512 ahd_outb(ahd, LQOMODE0, lqomode0); 3513 ahd_outb(ahd, LQOMODE1, lqomode1); 3514 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 3515 ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP); 3516 ahd_outb(ahd, SIMODE1, simode1); 3517 /* 3518 * SCSIINT seems to glitch occasionally when 3519 * the interrupt masks are restored. Clear SCSIINT 3520 * one more time so that only persistent errors 3521 * are seen as a real interrupt. 3522 */ 3523 ahd_outb(ahd, CLRINT, CLRSCSIINT); 3524 } 3525 ahd_restore_modes(ahd, saved_modes); 3526 } 3527 3528 /* 3529 * Clear any pending interrupt status. 
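 * LQI and LQO sources are cleared before the SSTAT registers, with CLRINT written last, so that no already-latched condition immediately re-posts the interrupt.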
3530 */ 3531 static void 3532 ahd_clear_intstat(struct ahd_softc *ahd) 3533 { 3534 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 3535 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 3536 /* Clear any interrupt conditions this may have caused */ 3537 ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2 3538 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD); 3539 ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT 3540 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI 3541 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ); 3542 ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ 3543 |CLRLQOATNPKT|CLRLQOTCRC); 3544 ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS 3545 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT); 3546 if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { 3547 ahd_outb(ahd, CLRLQOINT0, 0); 3548 ahd_outb(ahd, CLRLQOINT1, 0); 3549 } 3550 ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR); 3551 ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI 3552 |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT); 3553 ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO 3554 |CLRIOERR|CLROVERRUN); 3555 ahd_outb(ahd, CLRINT, CLRSCSIINT); 3556 } 3557 3558 /**************************** Debugging Routines ******************************/ 3559 #ifdef AHD_DEBUG 3560 uint32_t ahd_debug = AHD_DEBUG_OPTS; 3561 #endif 3562 3563 #if 0 3564 void 3565 ahd_print_scb(struct scb *scb) 3566 { 3567 struct hardware_scb *hscb; 3568 int i; 3569 3570 hscb = scb->hscb; 3571 printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", 3572 (void *)scb, 3573 hscb->control, 3574 hscb->scsiid, 3575 hscb->lun, 3576 hscb->cdb_len); 3577 printk("Shared Data: "); 3578 for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) 3579 printk("%#02x", hscb->shared_data.idata.cdb[i]); 3580 printk(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n", 3581 (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), 3582 (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF), 3583 ahd_le32toh(hscb->datacnt), 3584 ahd_le32toh(hscb->sgptr), 3585 SCB_GET_TAG(scb)); 3586 ahd_dump_sglist(scb); 3587 } 3588 #endif /* 0 */ 3589 3590 /************************* Transfer Negotiation *******************************/ 3591 /* 3592 * Allocate per target mode instance (ID we respond to as a target) 3593 * transfer negotiation data structures. 3594 */ 3595 static struct ahd_tmode_tstate * 3596 ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) 3597 { 3598 struct ahd_tmode_tstate *master_tstate; 3599 struct ahd_tmode_tstate *tstate; 3600 int i; 3601 3602 master_tstate = ahd->enabled_targets[ahd->our_id]; 3603 if (ahd->enabled_targets[scsi_id] != NULL 3604 && ahd->enabled_targets[scsi_id] != master_tstate) 3605 panic("%s: ahd_alloc_tstate - Target already allocated", 3606 ahd_name(ahd)); 3607 tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); 3608 if (tstate == NULL) 3609 return (NULL); 3610 3611 /* 3612 * If we have allocated a master tstate, copy user settings from 3613 * the master tstate (taken from SRAM or the EEPROM) for this 3614 * channel, but reset our current and goal settings to async/narrow 3615 * until an initiator talks to us. 
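 * Zeroing the curr and goal transinfo below yields exactly that: a period and offset of 0 (async) and a width of MSG_EXT_WDTR_BUS_8_BIT (narrow).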
3616 */ 3617 if (master_tstate != NULL) { 3618 memcpy(tstate, master_tstate, sizeof(*tstate)); 3619 memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); 3620 for (i = 0; i < 16; i++) { 3621 memset(&tstate->transinfo[i].curr, 0, 3622 sizeof(tstate->transinfo[i].curr)); 3623 memset(&tstate->transinfo[i].goal, 0, 3624 sizeof(tstate->transinfo[i].goal)); 3625 } 3626 } else 3627 memset(tstate, 0, sizeof(*tstate)); 3628 ahd->enabled_targets[scsi_id] = tstate; 3629 return (tstate); 3630 } 3631 3632 #ifdef AHD_TARGET_MODE 3633 /* 3634 * Free per target mode instance (ID we respond to as a target) 3635 * transfer negotiation data structures. 3636 */ 3637 static void 3638 ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) 3639 { 3640 struct ahd_tmode_tstate *tstate; 3641 3642 /* 3643 * Don't clean up our "master" tstate. 3644 * It has our default user settings. 3645 */ 3646 if (scsi_id == ahd->our_id 3647 && force == FALSE) 3648 return; 3649 3650 tstate = ahd->enabled_targets[scsi_id]; 3651 kfree(tstate); 3652 ahd->enabled_targets[scsi_id] = NULL; 3653 } 3654 #endif 3655 3656 /* 3657 * Called when we have an active connection to a target on the bus, 3658 * this function finds the nearest period to the input period limited 3659 * by the capabilities of the bus and the sync settings for 3660 * the target. 3661 */ 3662 static void 3663 ahd_devlimited_syncrate(struct ahd_softc *ahd, 3664 struct ahd_initiator_tinfo *tinfo, 3665 u_int *period, u_int *ppr_options, role_t role) 3666 { 3667 struct ahd_transinfo *transinfo; 3668 u_int maxsync; 3669 3670 if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0 3671 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) { 3672 maxsync = AHD_SYNCRATE_PACED; 3673 } else { 3674 maxsync = AHD_SYNCRATE_ULTRA; 3675 /* Can't do DT related options on an SE bus */ 3676 *ppr_options &= MSG_EXT_PPR_QAS_REQ; 3677 } 3678 /* 3679 * Never allow a value higher than our current goal 3680 * period; otherwise we may allow a target initiated 3681 * negotiation to go above the limit as set by the 3682 * user. In the case of an initiator initiated 3683 * sync negotiation, we limit based on the user 3684 * setting. This allows the system to still accept 3685 * incoming negotiations even if target initiated 3686 * negotiation is not performed. 3687 */ 3688 if (role == ROLE_TARGET) 3689 transinfo = &tinfo->user; 3690 else 3691 transinfo = &tinfo->goal; 3692 *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN); 3693 if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { 3694 maxsync = max(maxsync, (u_int)AHD_SYNCRATE_ULTRA2); 3695 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 3696 } 3697 if (transinfo->period == 0) { 3698 *period = 0; 3699 *ppr_options = 0; 3700 } else { 3701 *period = max(*period, (u_int)transinfo->period); 3702 ahd_find_syncrate(ahd, period, ppr_options, maxsync); 3703 } 3704 } 3705 3706 /* 3707 * Look up the valid period to SCSIRATE conversion in our table. 3708 * Return the period and offset that should be sent to the target 3709 * if this was the beginning of an SDTR. 3710 */ 3711 void 3712 ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, 3713 u_int *ppr_options, u_int maxsync) 3714 { 3715 if (*period < maxsync) 3716 *period = maxsync; 3717 3718 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0 3719 && *period > AHD_SYNCRATE_MIN_DT) 3720 *ppr_options &= ~MSG_EXT_PPR_DT_REQ; 3721 3722 if (*period > AHD_SYNCRATE_MIN) 3723 *period = 0; 3724 3725 /* Honor PPR option conformance rules.
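 * Each option is only meaningful at rates and modes that support it: RTI requires paced transfers, without IU only DT and QAS survive, and without DT only QAS remains.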
*/ 3726 if (*period > AHD_SYNCRATE_PACED) 3727 *ppr_options &= ~MSG_EXT_PPR_RTI; 3728 3729 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0) 3730 *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ); 3731 3732 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0) 3733 *ppr_options &= MSG_EXT_PPR_QAS_REQ; 3734 3735 /* Skip all PACED only entries if IU is not available */ 3736 if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0 3737 && *period < AHD_SYNCRATE_DT) 3738 *period = AHD_SYNCRATE_DT; 3739 3740 /* Skip all DT only entries if DT is not available */ 3741 if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 3742 && *period < AHD_SYNCRATE_ULTRA2) 3743 *period = AHD_SYNCRATE_ULTRA2; 3744 } 3745 3746 /* 3747 * Truncate the given synchronous offset to a value the 3748 * current adapter type and syncrate are capable of. 3749 */ 3750 static void 3751 ahd_validate_offset(struct ahd_softc *ahd, 3752 struct ahd_initiator_tinfo *tinfo, 3753 u_int period, u_int *offset, int wide, 3754 role_t role) 3755 { 3756 u_int maxoffset; 3757 3758 /* Limit offset to what we can do */ 3759 if (period == 0) 3760 maxoffset = 0; 3761 else if (period <= AHD_SYNCRATE_PACED) { 3762 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) 3763 maxoffset = MAX_OFFSET_PACED_BUG; 3764 else 3765 maxoffset = MAX_OFFSET_PACED; 3766 } else 3767 maxoffset = MAX_OFFSET_NON_PACED; 3768 *offset = min(*offset, maxoffset); 3769 if (tinfo != NULL) { 3770 if (role == ROLE_TARGET) 3771 *offset = min(*offset, (u_int)tinfo->user.offset); 3772 else 3773 *offset = min(*offset, (u_int)tinfo->goal.offset); 3774 } 3775 } 3776 3777 /* 3778 * Truncate the given transfer width parameter to a value the 3779 * current adapter type is capable of. 3780 */ 3781 static void 3782 ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, 3783 u_int *bus_width, role_t role) 3784 { 3785 switch (*bus_width) { 3786 default: 3787 if (ahd->features & AHD_WIDE) { 3788 /* Respond Wide */ 3789 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3790 break; 3791 } 3792 fallthrough; 3793 case MSG_EXT_WDTR_BUS_8_BIT: 3794 *bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3795 break; 3796 } 3797 if (tinfo != NULL) { 3798 if (role == ROLE_TARGET) 3799 *bus_width = min((u_int)tinfo->user.width, *bus_width); 3800 else 3801 *bus_width = min((u_int)tinfo->goal.width, *bus_width); 3802 } 3803 } 3804 3805 /* 3806 * Update the bitmask of targets with which the controller should 3807 * negotiate at the next convenient opportunity. This currently 3808 * means the next time we send the initial identify messages for 3809 * a new transaction. 3810 */ 3811 int 3812 ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 3813 struct ahd_tmode_tstate *tstate, 3814 struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type) 3815 { 3816 u_int auto_negotiate_orig; 3817 3818 auto_negotiate_orig = tstate->auto_negotiate; 3819 if (neg_type == AHD_NEG_ALWAYS) { 3820 /* 3821 * Force our "current" settings to be 3822 * unknown so that, unless a bus reset 3823 * occurs, the need to renegotiate is 3824 * recorded persistently.
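 *
 * For example (the target and mask values are illustrative
 * only): with devinfo describing target 3 on channel A,
 * target_mask is 0x0008. AHD_NEG_ALWAYS sets curr.period to
 * AHD_PERIOD_UNKNOWN, which cannot equal a real goal.period,
 * so the comparison below turns on bit 0x0008 in
 * tstate->auto_negotiate and the routine reports that the
 * mask changed.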
3825 */ 3826 if ((ahd->features & AHD_WIDE) != 0) 3827 tinfo->curr.width = AHD_WIDTH_UNKNOWN; 3828 tinfo->curr.period = AHD_PERIOD_UNKNOWN; 3829 tinfo->curr.offset = AHD_OFFSET_UNKNOWN; 3830 } 3831 if (tinfo->curr.period != tinfo->goal.period 3832 || tinfo->curr.width != tinfo->goal.width 3833 || tinfo->curr.offset != tinfo->goal.offset 3834 || tinfo->curr.ppr_options != tinfo->goal.ppr_options 3835 || (neg_type == AHD_NEG_IF_NON_ASYNC 3836 && (tinfo->goal.offset != 0 3837 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT 3838 || tinfo->goal.ppr_options != 0))) 3839 tstate->auto_negotiate |= devinfo->target_mask; 3840 else 3841 tstate->auto_negotiate &= ~devinfo->target_mask; 3842 3843 return (auto_negotiate_orig != tstate->auto_negotiate); 3844 } 3845 3846 /* 3847 * Update the user/goal/curr tables of synchronous negotiation 3848 * parameters as well as, in the case of a current or active update, 3849 * any data structures on the host controller. In the case of an 3850 * active update, the specified target is currently talking to us on 3851 * the bus, so the transfer parameter update must take effect 3852 * immediately. 3853 */ 3854 void 3855 ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 3856 u_int period, u_int offset, u_int ppr_options, 3857 u_int type, int paused) 3858 { 3859 struct ahd_initiator_tinfo *tinfo; 3860 struct ahd_tmode_tstate *tstate; 3861 u_int old_period; 3862 u_int old_offset; 3863 u_int old_ppr; 3864 int active; 3865 int update_needed; 3866 3867 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; 3868 update_needed = 0; 3869 3870 if (period == 0 || offset == 0) { 3871 period = 0; 3872 offset = 0; 3873 } 3874 3875 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 3876 devinfo->target, &tstate); 3877 3878 if ((type & AHD_TRANS_USER) != 0) { 3879 tinfo->user.period = period; 3880 tinfo->user.offset = offset; 3881 tinfo->user.ppr_options = ppr_options; 3882 } 3883 3884 if ((type & AHD_TRANS_GOAL) != 0) { 3885 tinfo->goal.period = period; 3886 tinfo->goal.offset = offset; 3887 tinfo->goal.ppr_options = ppr_options; 3888 } 3889 3890 old_period = tinfo->curr.period; 3891 old_offset = tinfo->curr.offset; 3892 old_ppr = tinfo->curr.ppr_options; 3893 3894 if ((type & AHD_TRANS_CUR) != 0 3895 && (old_period != period 3896 || old_offset != offset 3897 || old_ppr != ppr_options)) { 3898 3899 update_needed++; 3900 3901 tinfo->curr.period = period; 3902 tinfo->curr.offset = offset; 3903 tinfo->curr.ppr_options = ppr_options; 3904 3905 ahd_send_async(ahd, devinfo->channel, devinfo->target, 3906 CAM_LUN_WILDCARD, AC_TRANSFER_NEG); 3907 if (bootverbose) { 3908 if (offset != 0) { 3909 int options; 3910 3911 printk("%s: target %d synchronous with " 3912 "period = 0x%x, offset = 0x%x", 3913 ahd_name(ahd), devinfo->target, 3914 period, offset); 3915 options = 0; 3916 if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { 3917 printk("(RDSTRM"); 3918 options++; 3919 } 3920 if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { 3921 printk("%s", options ? "|DT" : "(DT"); 3922 options++; 3923 } 3924 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { 3925 printk("%s", options ? "|IU" : "(IU"); 3926 options++; 3927 } 3928 if ((ppr_options & MSG_EXT_PPR_RTI) != 0) { 3929 printk("%s", options ? "|RTI" : "(RTI"); 3930 options++; 3931 } 3932 if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { 3933 printk("%s", options ? 
"|QAS" : "(QAS"); 3934 options++; 3935 } 3936 if (options != 0) 3937 printk(")\n"); 3938 else 3939 printk("\n"); 3940 } else { 3941 printk("%s: target %d using " 3942 "asynchronous transfers%s\n", 3943 ahd_name(ahd), devinfo->target, 3944 (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0 3945 ? "(QAS)" : ""); 3946 } 3947 } 3948 } 3949 /* 3950 * Always refresh the neg-table to handle the case of the 3951 * sequencer setting the ENATNO bit for a MK_MESSAGE request. 3952 * We will always renegotiate in that case if this is a 3953 * packetized request. Also manage the busfree expected flag 3954 * from this common routine so that we catch changes due to 3955 * WDTR or SDTR messages. 3956 */ 3957 if ((type & AHD_TRANS_CUR) != 0) { 3958 if (!paused) 3959 ahd_pause(ahd); 3960 ahd_update_neg_table(ahd, devinfo, &tinfo->curr); 3961 if (!paused) 3962 ahd_unpause(ahd); 3963 if (ahd->msg_type != MSG_TYPE_NONE) { 3964 if ((old_ppr & MSG_EXT_PPR_IU_REQ) 3965 != (ppr_options & MSG_EXT_PPR_IU_REQ)) { 3966 #ifdef AHD_DEBUG 3967 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 3968 ahd_print_devinfo(ahd, devinfo); 3969 printk("Expecting IU Change busfree\n"); 3970 } 3971 #endif 3972 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE 3973 | MSG_FLAG_IU_REQ_CHANGED; 3974 } 3975 if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) { 3976 #ifdef AHD_DEBUG 3977 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 3978 printk("PPR with IU_REQ outstanding\n"); 3979 #endif 3980 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE; 3981 } 3982 } 3983 } 3984 3985 update_needed += ahd_update_neg_request(ahd, devinfo, tstate, 3986 tinfo, AHD_NEG_TO_GOAL); 3987 3988 if (update_needed && active) 3989 ahd_update_pending_scbs(ahd); 3990 } 3991 3992 /* 3993 * Update the user/goal/curr tables of wide negotiation 3994 * parameters as well as, in the case of a current or active update, 3995 * any data structures on the host controller. In the case of an 3996 * active update, the specified target is currently talking to us on 3997 * the bus, so the transfer parameter update must take effect 3998 * immediately. 
3999 */ 4000 void 4001 ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4002 u_int width, u_int type, int paused) 4003 { 4004 struct ahd_initiator_tinfo *tinfo; 4005 struct ahd_tmode_tstate *tstate; 4006 u_int oldwidth; 4007 int active; 4008 int update_needed; 4009 4010 active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; 4011 update_needed = 0; 4012 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 4013 devinfo->target, &tstate); 4014 4015 if ((type & AHD_TRANS_USER) != 0) 4016 tinfo->user.width = width; 4017 4018 if ((type & AHD_TRANS_GOAL) != 0) 4019 tinfo->goal.width = width; 4020 4021 oldwidth = tinfo->curr.width; 4022 if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) { 4023 4024 update_needed++; 4025 4026 tinfo->curr.width = width; 4027 ahd_send_async(ahd, devinfo->channel, devinfo->target, 4028 CAM_LUN_WILDCARD, AC_TRANSFER_NEG); 4029 if (bootverbose) { 4030 printk("%s: target %d using %dbit transfers\n", 4031 ahd_name(ahd), devinfo->target, 4032 8 * (0x01 << width)); 4033 } 4034 } 4035 4036 if ((type & AHD_TRANS_CUR) != 0) { 4037 if (!paused) 4038 ahd_pause(ahd); 4039 ahd_update_neg_table(ahd, devinfo, &tinfo->curr); 4040 if (!paused) 4041 ahd_unpause(ahd); 4042 } 4043 4044 update_needed += ahd_update_neg_request(ahd, devinfo, tstate, 4045 tinfo, AHD_NEG_TO_GOAL); 4046 if (update_needed && active) 4047 ahd_update_pending_scbs(ahd); 4048 4049 } 4050 4051 /* 4052 * Update the current state of tagged queuing for a given target. 4053 */ 4054 static void 4055 ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd, 4056 struct ahd_devinfo *devinfo, ahd_queue_alg alg) 4057 { 4058 struct scsi_device *sdev = cmd->device; 4059 4060 ahd_platform_set_tags(ahd, sdev, devinfo, alg); 4061 ahd_send_async(ahd, devinfo->channel, devinfo->target, 4062 devinfo->lun, AC_TRANSFER_NEG); 4063 } 4064 4065 static void 4066 ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4067 struct ahd_transinfo *tinfo) 4068 { 4069 ahd_mode_state saved_modes; 4070 u_int period; 4071 u_int ppr_opts; 4072 u_int con_opts; 4073 u_int offset; 4074 u_int saved_negoaddr; 4075 uint8_t iocell_opts[sizeof(ahd->iocell_opts)]; 4076 4077 saved_modes = ahd_save_modes(ahd); 4078 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 4079 4080 saved_negoaddr = ahd_inb(ahd, NEGOADDR); 4081 ahd_outb(ahd, NEGOADDR, devinfo->target); 4082 period = tinfo->period; 4083 offset = tinfo->offset; 4084 memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts)); 4085 ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ 4086 |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI); 4087 con_opts = 0; 4088 if (period == 0) 4089 period = AHD_SYNCRATE_ASYNC; 4090 if (period == AHD_SYNCRATE_160) { 4091 4092 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { 4093 /* 4094 * When the SPI4 spec was finalized, PACE transfers 4095 * were not made a configurable option in the PPR 4096 * message. Instead it is assumed to be enabled for 4097 * any syncrate faster than 80MHz. Nevertheless, 4098 * Harpoon2A4 allows this to be configurable. 4099 * 4100 * Harpoon2A4 also assumes at most 2 data bytes per 4101 * negotiated REQ/ACK offset. Paced transfers take 4102 * 4, so we must adjust our offset. 4103 */ 4104 ppr_opts |= PPROPT_PACE; 4105 offset *= 2; 4106 4107 /* 4108 * Harpoon2A assumed that there would be a 4109 * fallback rate between 160MHz and 80MHz, 4110 * so 7 is used as the period factor rather 4111 * than 8 for 160MHz.
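 *
 * A worked example of the adjustment (values illustrative):
 * when negotiating period factor 0x8 (160MHz) with a REQ/ACK
 * offset of 0x3f on Harpoon2A4, the neg-table is loaded with
 * PPROPT_PACE set, offset 0x7e (doubled), and period factor
 * AHD_SYNCRATE_REVA_160 in place of 0x8.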
4112 */ 4113 period = AHD_SYNCRATE_REVA_160; 4114 } 4115 if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0) 4116 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= 4117 ~AHD_PRECOMP_MASK; 4118 } else { 4119 /* 4120 * Precomp should be disabled for non-paced transfers. 4121 */ 4122 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; 4123 4124 if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0 4125 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0 4126 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) { 4127 /* 4128 * Slow down our CRC interval to be 4129 * compatible with non-packetized 4130 * U160 devices that can't handle a 4131 * CRC at full speed. 4132 */ 4133 con_opts |= ENSLOWCRC; 4134 } 4135 4136 if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { 4137 /* 4138 * On H2A4, revert to a slower slewrate 4139 * on non-paced transfers. 4140 */ 4141 iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= 4142 ~AHD_SLEWRATE_MASK; 4143 } 4144 } 4145 4146 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW); 4147 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]); 4148 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE); 4149 ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]); 4150 4151 ahd_outb(ahd, NEGPERIOD, period); 4152 ahd_outb(ahd, NEGPPROPTS, ppr_opts); 4153 ahd_outb(ahd, NEGOFFSET, offset); 4154 4155 if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT) 4156 con_opts |= WIDEXFER; 4157 4158 /* 4159 * Slow down our CRC interval to be 4160 * compatible with packetized U320 devices 4161 * that can't handle a CRC at full speed. 4162 */ 4163 if (ahd->features & AHD_AIC79XXB_SLOWCRC) { 4164 con_opts |= ENSLOWCRC; 4165 } 4166 4167 /* 4168 * During packetized transfers, the target will 4169 * give us the opportunity to send command packets 4170 * without us asserting attention. 4171 */ 4172 if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0) 4173 con_opts |= ENAUTOATNO; 4174 ahd_outb(ahd, NEGCONOPTS, con_opts); 4175 ahd_outb(ahd, NEGOADDR, saved_negoaddr); 4176 ahd_restore_modes(ahd, saved_modes); 4177 } 4178 4179 /* 4180 * When the transfer settings for a connection change, set up for 4181 * negotiation in pending SCBs to effect the change as quickly as 4182 * possible. We also cancel any negotiations that are scheduled 4183 * for in-flight SCBs that have not been started yet. 4184 */ 4185 static void 4186 ahd_update_pending_scbs(struct ahd_softc *ahd) 4187 { 4188 struct scb *pending_scb; 4189 int pending_scb_count; 4190 int paused; 4191 u_int saved_scbptr; 4192 ahd_mode_state saved_modes; 4193 4194 /* 4195 * Traverse the pending SCB list and ensure that all of the 4196 * SCBs there have the proper settings. We can only safely 4197 * clear the negotiation required flag (setting requires the 4198 * execution queue to be modified) and this is only possible 4199 * if we are not already attempting to select out for this 4200 * SCB. For this reason, all callers only call this routine 4201 * if we are changing the negotiation settings for the currently 4202 * active transaction on the bus.
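 *
 * In outline, the routine makes two passes (a sketch; the code
 * below has the exact conditions):
 *
 *	1) Walk ahd->pending_scbs and clear SCB_AUTO_NEGOTIATE
 *	   and MK_MESSAGE in the host copy of any SCB whose
 *	   target no longer has its auto_negotiate bit set.
 *	2) With the sequencer paused, walk the list again and
 *	   copy each (possibly cleared) MK_MESSAGE bit into the
 *	   SCB_CONTROL byte of the SCB down on the card.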
4203 */ 4204 pending_scb_count = 0; 4205 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { 4206 struct ahd_devinfo devinfo; 4207 struct ahd_tmode_tstate *tstate; 4208 4209 ahd_scb_devinfo(ahd, &devinfo, pending_scb); 4210 ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, 4211 devinfo.target, &tstate); 4212 if ((tstate->auto_negotiate & devinfo.target_mask) == 0 4213 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { 4214 pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; 4215 pending_scb->hscb->control &= ~MK_MESSAGE; 4216 } 4217 ahd_sync_scb(ahd, pending_scb, 4218 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 4219 pending_scb_count++; 4220 } 4221 4222 if (pending_scb_count == 0) 4223 return; 4224 4225 if (ahd_is_paused(ahd)) { 4226 paused = 1; 4227 } else { 4228 paused = 0; 4229 ahd_pause(ahd); 4230 } 4231 4232 /* 4233 * Force the sequencer to reinitialize the selection for 4234 * the command at the head of the execution queue if it 4235 * has already been set up. The negotiation changes may 4236 * affect whether we select-out with ATN. It is only 4237 * safe to clear ENSELO when the bus is not free and no 4238 * selection is in progress or has completed. 4239 */ 4240 saved_modes = ahd_save_modes(ahd); 4241 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 4242 if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0 4243 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0) 4244 ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); 4245 saved_scbptr = ahd_get_scbptr(ahd); 4246 /* Ensure that the hscbs down on the card match the new information */ 4247 LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { 4248 u_int scb_tag; 4249 u_int control; 4250 4251 scb_tag = SCB_GET_TAG(pending_scb); 4252 ahd_set_scbptr(ahd, scb_tag); 4253 control = ahd_inb_scbram(ahd, SCB_CONTROL); 4254 control &= ~MK_MESSAGE; 4255 control |= pending_scb->hscb->control & MK_MESSAGE; 4256 ahd_outb(ahd, SCB_CONTROL, control); 4257 } 4258 ahd_set_scbptr(ahd, saved_scbptr); 4259 ahd_restore_modes(ahd, saved_modes); 4260 4261 if (paused == 0) 4262 ahd_unpause(ahd); 4263 } 4264 4265 /**************************** Pathing Information *****************************/ 4266 static void 4267 ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 4268 { 4269 ahd_mode_state saved_modes; 4270 u_int saved_scsiid; 4271 role_t role; 4272 int our_id; 4273 4274 saved_modes = ahd_save_modes(ahd); 4275 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 4276 4277 if (ahd_inb(ahd, SSTAT0) & TARGET) 4278 role = ROLE_TARGET; 4279 else 4280 role = ROLE_INITIATOR; 4281 4282 if (role == ROLE_TARGET 4283 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { 4284 /* We were selected, so pull our id from TARGIDIN */ 4285 our_id = ahd_inb(ahd, TARGIDIN) & OID; 4286 } else if (role == ROLE_TARGET) 4287 our_id = ahd_inb(ahd, TOWNID); 4288 else 4289 our_id = ahd_inb(ahd, IOWNID); 4290 4291 saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); 4292 ahd_compile_devinfo(devinfo, 4293 our_id, 4294 SCSIID_TARGET(ahd, saved_scsiid), 4295 ahd_inb(ahd, SAVED_LUN), 4296 SCSIID_CHANNEL(ahd, saved_scsiid), 4297 role); 4298 ahd_restore_modes(ahd, saved_modes); 4299 } 4300 4301 void 4302 ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 4303 { 4304 printk("%s:%c:%d:%d: ", ahd_name(ahd), 'A', 4305 devinfo->target, devinfo->lun); 4306 } 4307 4308 static const struct ahd_phase_table_entry* 4309 ahd_lookup_phase_entry(int phase) 4310 { 4311 const struct ahd_phase_table_entry *entry; 4312 const struct ahd_phase_table_entry *last_entry; 4313
4314 /* 4315 * num_phases doesn't include the default entry which 4316 * will be returned if the phase doesn't match. 4317 */ 4318 last_entry = &ahd_phase_table[num_phases]; 4319 for (entry = ahd_phase_table; entry < last_entry; entry++) { 4320 if (phase == entry->phase) 4321 break; 4322 } 4323 return (entry); 4324 } 4325 4326 void 4327 ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target, 4328 u_int lun, char channel, role_t role) 4329 { 4330 devinfo->our_scsiid = our_id; 4331 devinfo->target = target; 4332 devinfo->lun = lun; 4333 devinfo->target_offset = target; 4334 devinfo->channel = channel; 4335 devinfo->role = role; 4336 if (channel == 'B') 4337 devinfo->target_offset += 8; 4338 devinfo->target_mask = (0x01 << devinfo->target_offset); 4339 } 4340 4341 static void 4342 ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4343 struct scb *scb) 4344 { 4345 role_t role; 4346 int our_id; 4347 4348 our_id = SCSIID_OUR_ID(scb->hscb->scsiid); 4349 role = ROLE_INITIATOR; 4350 if ((scb->hscb->control & TARGET_SCB) != 0) 4351 role = ROLE_TARGET; 4352 ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb), 4353 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role); 4354 } 4355 4356 4357 /************************ Message Phase Processing ****************************/ 4358 /* 4359 * When an initiator transaction with the MK_MESSAGE flag either reconnects 4360 * or enters the initial message out phase, we are interrupted. Fill our 4361 * outgoing message buffer with the appropriate message and begin handling 4362 * the message phase(s) manually. 4363 */ 4364 static void 4365 ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4366 struct scb *scb) 4367 { 4368 /* 4369 * To facilitate adding multiple messages together, 4370 * each routine should increment the index and len 4371 * variables instead of setting them explicitly. 4372 */ 4373 ahd->msgout_index = 0; 4374 ahd->msgout_len = 0; 4375 4376 if (ahd_currently_packetized(ahd)) 4377 ahd->msg_flags |= MSG_FLAG_PACKETIZED; 4378 4379 if (ahd->send_msg_perror 4380 && ahd_inb(ahd, MSG_OUT) == HOST_MSG) { 4381 ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror; 4382 ahd->msgout_len++; 4383 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 4384 #ifdef AHD_DEBUG 4385 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4386 printk("Setting up for Parity Error delivery\n"); 4387 #endif 4388 return; 4389 } else if (scb == NULL) { 4390 printk("%s: WARNING. No pending message for " 4391 "I_T msgin. 
Issuing NO-OP\n", ahd_name(ahd)); 4392 ahd->msgout_buf[ahd->msgout_index++] = NOP; 4393 ahd->msgout_len++; 4394 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 4395 return; 4396 } 4397 4398 if ((scb->flags & SCB_DEVICE_RESET) == 0 4399 && (scb->flags & SCB_PACKETIZED) == 0 4400 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) { 4401 u_int identify_msg; 4402 4403 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); 4404 if ((scb->hscb->control & DISCENB) != 0) 4405 identify_msg |= MSG_IDENTIFY_DISCFLAG; 4406 ahd->msgout_buf[ahd->msgout_index++] = identify_msg; 4407 ahd->msgout_len++; 4408 4409 if ((scb->hscb->control & TAG_ENB) != 0) { 4410 ahd->msgout_buf[ahd->msgout_index++] = 4411 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); 4412 ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb); 4413 ahd->msgout_len += 2; 4414 } 4415 } 4416 4417 if (scb->flags & SCB_DEVICE_RESET) { 4418 ahd->msgout_buf[ahd->msgout_index++] = TARGET_RESET; 4419 ahd->msgout_len++; 4420 ahd_print_path(ahd, scb); 4421 printk("Bus Device Reset Message Sent\n"); 4422 /* 4423 * Clear our selection hardware in advance of 4424 * the busfree. We may have an entry in the waiting 4425 * Q for this target, and we don't want to go about 4426 * selecting while we handle the busfree and blow it 4427 * away. 4428 */ 4429 ahd_outb(ahd, SCSISEQ0, 0); 4430 } else if ((scb->flags & SCB_ABORT) != 0) { 4431 4432 if ((scb->hscb->control & TAG_ENB) != 0) { 4433 ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK; 4434 } else { 4435 ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK_SET; 4436 } 4437 ahd->msgout_len++; 4438 ahd_print_path(ahd, scb); 4439 printk("Abort%s Message Sent\n", 4440 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); 4441 /* 4442 * Clear our selection hardware in advance of 4443 * the busfree. We may have an entry in the waiting 4444 * Q for this target, and we don't want to go about 4445 * selecting while we handle the busfree and blow it 4446 * away. 4447 */ 4448 ahd_outb(ahd, SCSISEQ0, 0); 4449 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { 4450 ahd_build_transfer_msg(ahd, devinfo); 4451 /* 4452 * Clear our selection hardware in advance of potential 4453 * PPR IU status change busfree. We may have an entry in 4454 * the waiting Q for this target, and we don't want to go 4455 * about selecting while we handle the busfree and blow 4456 * it away. 4457 */ 4458 ahd_outb(ahd, SCSISEQ0, 0); 4459 } else { 4460 printk("ahd_intr: AWAITING_MSG for an SCB that " 4461 "does not have a waiting message\n"); 4462 printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, 4463 devinfo->target_mask); 4464 panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x " 4465 "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control, 4466 ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT), 4467 scb->flags); 4468 } 4469 4470 /* 4471 * Clear the MK_MESSAGE flag from the SCB so we aren't 4472 * asked to send this message again. 4473 */ 4474 ahd_outb(ahd, SCB_CONTROL, 4475 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); 4476 scb->hscb->control &= ~MK_MESSAGE; 4477 ahd->msgout_index = 0; 4478 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 4479 } 4480 4481 /* 4482 * Build an appropriate transfer negotiation message for the 4483 * currently active target. 4484 */ 4485 static void 4486 ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 4487 { 4488 /* 4489 * We need to initiate transfer negotiations. 4490 * If our current and goal settings are identical, 4491 * we want to renegotiate due to a check condition. 
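 *
 * The message chosen further below follows a simple precedence
 * (a sketch; the code has the exact conditions):
 *
 *	doppr                -> a single PPR carrying width,
 *	                        period, offset and options
 *	dowide (with dosync) -> WDTR first; the SDTR follows in
 *	                        a later message-out phase
 *	dosync alone         -> SDTR only
 *
 * If curr and goal already match, we still force either a WDTR
 * (on wide controllers) or a zero-offset SDTR so the target
 * sees an explicit renegotiation.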
4492 */ 4493 struct ahd_initiator_tinfo *tinfo; 4494 struct ahd_tmode_tstate *tstate; 4495 int dowide; 4496 int dosync; 4497 int doppr; 4498 u_int period; 4499 u_int ppr_options; 4500 u_int offset; 4501 4502 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 4503 devinfo->target, &tstate); 4504 /* 4505 * Filter our period based on the current connection. 4506 * If we can't perform DT transfers on this segment (not in LVD 4507 * mode for instance), then our decision to issue a PPR message 4508 * may change. 4509 */ 4510 period = tinfo->goal.period; 4511 offset = tinfo->goal.offset; 4512 ppr_options = tinfo->goal.ppr_options; 4513 /* Target initiated PPR is not allowed in the SCSI spec */ 4514 if (devinfo->role == ROLE_TARGET) 4515 ppr_options = 0; 4516 ahd_devlimited_syncrate(ahd, tinfo, &period, 4517 &ppr_options, devinfo->role); 4518 dowide = tinfo->curr.width != tinfo->goal.width; 4519 dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; 4520 /* 4521 * Only use PPR if we have options that need it, even if the device 4522 * claims to support it. There might be an expander in the way 4523 * that doesn't. 4524 */ 4525 doppr = ppr_options != 0; 4526 4527 if (!dowide && !dosync && !doppr) { 4528 dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; 4529 dosync = tinfo->goal.offset != 0; 4530 } 4531 4532 if (!dowide && !dosync && !doppr) { 4533 /* 4534 * Force async with a WDTR message if we have a wide bus, 4535 * or just issue an SDTR with a 0 offset. 4536 */ 4537 if ((ahd->features & AHD_WIDE) != 0) 4538 dowide = 1; 4539 else 4540 dosync = 1; 4541 4542 if (bootverbose) { 4543 ahd_print_devinfo(ahd, devinfo); 4544 printk("Ensuring async\n"); 4545 } 4546 } 4547 /* Target initiated PPR is not allowed in the SCSI spec */ 4548 if (devinfo->role == ROLE_TARGET) 4549 doppr = 0; 4550 4551 /* 4552 * Both the PPR message and SDTR message require the 4553 * goal syncrate to be limited to what the target device 4554 * is capable of handling (based on whether an LVD->SE 4555 * expander is on the bus), so combine these two cases. 4556 * Regardless, guarantee that if we are using WDTR and SDTR 4557 * messages, WDTR comes first. 4558 */ 4559 if (doppr || (dosync && !dowide)) { 4560 4561 offset = tinfo->goal.offset; 4562 ahd_validate_offset(ahd, tinfo, period, &offset, 4563 doppr ? tinfo->goal.width 4564 : tinfo->curr.width, 4565 devinfo->role); 4566 if (doppr) { 4567 ahd_construct_ppr(ahd, devinfo, period, offset, 4568 tinfo->goal.width, ppr_options); 4569 } else { 4570 ahd_construct_sdtr(ahd, devinfo, period, offset); 4571 } 4572 } else { 4573 ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width); 4574 } 4575 } 4576 4577 /* 4578 * Build a synchronous negotiation message in our message 4579 * buffer based on the input parameters. 4580 */ 4581 static void 4582 ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4583 u_int period, u_int offset) 4584 { 4585 if (offset == 0) 4586 period = AHD_ASYNC_XFER_PERIOD; 4587 ahd->msgout_index += spi_populate_sync_msg( 4588 ahd->msgout_buf + ahd->msgout_index, period, offset); 4589 ahd->msgout_len += 5; 4590 if (bootverbose) { 4591 printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", 4592 ahd_name(ahd), devinfo->channel, devinfo->target, 4593 devinfo->lun, period, offset); 4594 } 4595 } 4596 4597 /* 4598 * Build a wide negotiation message in our message 4599 * buffer based on the input parameters.
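 *
 * For reference, the extended messages built by this routine
 * and its SDTR/PPR neighbors use the fixed layouts from the
 * SPI specifications (byte values in hex):
 *
 *	SDTR: 01 03 01 [period] [offset]          (5 bytes)
 *	WDTR: 01 02 03 [width exponent]           (4 bytes)
 *	PPR:  01 06 04 [period] 00 [offset]
 *	      [width exponent] [ppr_options]      (8 bytes)
 *
 * which is why msgout_len grows by 5, 4 and 8 respectively.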
4600 */ 4601 static void 4602 ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4603 u_int bus_width) 4604 { 4605 ahd->msgout_index += spi_populate_width_msg( 4606 ahd->msgout_buf + ahd->msgout_index, bus_width); 4607 ahd->msgout_len += 4; 4608 if (bootverbose) { 4609 printk("(%s:%c:%d:%d): Sending WDTR %x\n", 4610 ahd_name(ahd), devinfo->channel, devinfo->target, 4611 devinfo->lun, bus_width); 4612 } 4613 } 4614 4615 /* 4616 * Build a parallel protocol request message in our message 4617 * buffer based on the input parameters. 4618 */ 4619 static void 4620 ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 4621 u_int period, u_int offset, u_int bus_width, 4622 u_int ppr_options) 4623 { 4624 /* 4625 * Always request precompensation from 4626 * the other target if we are running 4627 * at paced syncrates. 4628 */ 4629 if (period <= AHD_SYNCRATE_PACED) 4630 ppr_options |= MSG_EXT_PPR_PCOMP_EN; 4631 if (offset == 0) 4632 period = AHD_ASYNC_XFER_PERIOD; 4633 ahd->msgout_index += spi_populate_ppr_msg( 4634 ahd->msgout_buf + ahd->msgout_index, period, offset, 4635 bus_width, ppr_options); 4636 ahd->msgout_len += 8; 4637 if (bootverbose) { 4638 printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 4639 "offset %x, ppr_options %x\n", ahd_name(ahd), 4640 devinfo->channel, devinfo->target, devinfo->lun, 4641 bus_width, period, offset, ppr_options); 4642 } 4643 } 4644 4645 /* 4646 * Clear any active message state. 4647 */ 4648 static void 4649 ahd_clear_msg_state(struct ahd_softc *ahd) 4650 { 4651 ahd_mode_state saved_modes; 4652 4653 saved_modes = ahd_save_modes(ahd); 4654 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 4655 ahd->send_msg_perror = 0; 4656 ahd->msg_flags = MSG_FLAG_NONE; 4657 ahd->msgout_len = 0; 4658 ahd->msgin_index = 0; 4659 ahd->msg_type = MSG_TYPE_NONE; 4660 if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { 4661 /* 4662 * The target didn't care to respond to our 4663 * message request, so clear ATN. 4664 */ 4665 ahd_outb(ahd, CLRSINT1, CLRATNO); 4666 } 4667 ahd_outb(ahd, MSG_OUT, NOP); 4668 ahd_outb(ahd, SEQ_FLAGS2, 4669 ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); 4670 ahd_restore_modes(ahd, saved_modes); 4671 } 4672 4673 /* 4674 * Manual message loop handler. 4675 */ 4676 static void 4677 ahd_handle_message_phase(struct ahd_softc *ahd) 4678 { 4679 struct ahd_devinfo devinfo; 4680 u_int bus_phase; 4681 int end_session; 4682 4683 ahd_fetch_devinfo(ahd, &devinfo); 4684 end_session = FALSE; 4685 bus_phase = ahd_inb(ahd, LASTPHASE); 4686 4687 if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) { 4688 printk("LQIRETRY for LQIPHASE_OUTPKT\n"); 4689 ahd_outb(ahd, LQCTL2, LQIRETRY); 4690 } 4691 reswitch: 4692 switch (ahd->msg_type) { 4693 case MSG_TYPE_INITIATOR_MSGOUT: 4694 { 4695 int lastbyte; 4696 int phasemis; 4697 int msgdone; 4698 4699 if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0) 4700 panic("HOST_MSG_LOOP interrupt with no active message"); 4701 4702 #ifdef AHD_DEBUG 4703 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4704 ahd_print_devinfo(ahd, &devinfo); 4705 printk("INITIATOR_MSG_OUT"); 4706 } 4707 #endif 4708 phasemis = bus_phase != P_MESGOUT; 4709 if (phasemis) { 4710 #ifdef AHD_DEBUG 4711 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4712 printk(" PHASEMIS %s\n", 4713 ahd_lookup_phase_entry(bus_phase) 4714 ->phasemsg); 4715 } 4716 #endif 4717 if (bus_phase == P_MESGIN) { 4718 /* 4719 * Change gears and see if 4720 * this message is of interest to 4721 * us or should be passed back to 4722 * the sequencer.
4723 */ 4724 ahd_outb(ahd, CLRSINT1, CLRATNO); 4725 ahd->send_msg_perror = 0; 4726 ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; 4727 ahd->msgin_index = 0; 4728 goto reswitch; 4729 } 4730 end_session = TRUE; 4731 break; 4732 } 4733 4734 if (ahd->send_msg_perror) { 4735 ahd_outb(ahd, CLRSINT1, CLRATNO); 4736 ahd_outb(ahd, CLRSINT1, CLRREQINIT); 4737 #ifdef AHD_DEBUG 4738 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4739 printk(" byte 0x%x\n", ahd->send_msg_perror); 4740 #endif 4741 /* 4742 * If we are notifying the target of a CRC error 4743 * during packetized operations, the target is 4744 * within its rights to acknowledge our message 4745 * with a busfree. 4746 */ 4747 if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0 4748 && ahd->send_msg_perror == INITIATOR_ERROR) 4749 ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE; 4750 4751 ahd_outb(ahd, RETURN_2, ahd->send_msg_perror); 4752 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); 4753 break; 4754 } 4755 4756 msgdone = ahd->msgout_index == ahd->msgout_len; 4757 if (msgdone) { 4758 /* 4759 * The target has requested a retry. 4760 * Re-assert ATN, reset our message index to 4761 * 0, and try again. 4762 */ 4763 ahd->msgout_index = 0; 4764 ahd_assert_atn(ahd); 4765 } 4766 4767 lastbyte = ahd->msgout_index == (ahd->msgout_len - 1); 4768 if (lastbyte) { 4769 /* Last byte is signified by dropping ATN */ 4770 ahd_outb(ahd, CLRSINT1, CLRATNO); 4771 } 4772 4773 /* 4774 * Clear our interrupt status and present 4775 * the next byte on the bus. 4776 */ 4777 ahd_outb(ahd, CLRSINT1, CLRREQINIT); 4778 #ifdef AHD_DEBUG 4779 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4780 printk(" byte 0x%x\n", 4781 ahd->msgout_buf[ahd->msgout_index]); 4782 #endif 4783 ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]); 4784 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); 4785 break; 4786 } 4787 case MSG_TYPE_INITIATOR_MSGIN: 4788 { 4789 int phasemis; 4790 int message_done; 4791 4792 #ifdef AHD_DEBUG 4793 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4794 ahd_print_devinfo(ahd, &devinfo); 4795 printk("INITIATOR_MSG_IN"); 4796 } 4797 #endif 4798 phasemis = bus_phase != P_MESGIN; 4799 if (phasemis) { 4800 #ifdef AHD_DEBUG 4801 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4802 printk(" PHASEMIS %s\n", 4803 ahd_lookup_phase_entry(bus_phase) 4804 ->phasemsg); 4805 } 4806 #endif 4807 ahd->msgin_index = 0; 4808 if (bus_phase == P_MESGOUT 4809 && (ahd->send_msg_perror != 0 4810 || (ahd->msgout_len != 0 4811 && ahd->msgout_index == 0))) { 4812 ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; 4813 goto reswitch; 4814 } 4815 end_session = TRUE; 4816 break; 4817 } 4818 4819 /* Pull the byte in without acking it */ 4820 ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS); 4821 #ifdef AHD_DEBUG 4822 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 4823 printk(" byte 0x%x\n", 4824 ahd->msgin_buf[ahd->msgin_index]); 4825 #endif 4826 4827 message_done = ahd_parse_msg(ahd, &devinfo); 4828 4829 if (message_done) { 4830 /* 4831 * Clear our incoming message buffer in case there 4832 * is another message following this one. 4833 */ 4834 ahd->msgin_index = 0; 4835 4836 /* 4837 * If this message elicited a response, 4838 * assert ATN so the target takes us to the 4839 * message out phase.
4840 */ 4841 if (ahd->msgout_len != 0) { 4842 #ifdef AHD_DEBUG 4843 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { 4844 ahd_print_devinfo(ahd, &devinfo); 4845 printk("Asserting ATN for response\n"); 4846 } 4847 #endif 4848 ahd_assert_atn(ahd); 4849 } 4850 } else 4851 ahd->msgin_index++; 4852 4853 if (message_done == MSGLOOP_TERMINATED) { 4854 end_session = TRUE; 4855 } else { 4856 /* Ack the byte */ 4857 ahd_outb(ahd, CLRSINT1, CLRREQINIT); 4858 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ); 4859 } 4860 break; 4861 } 4862 case MSG_TYPE_TARGET_MSGIN: 4863 { 4864 int msgdone; 4865 int msgout_request; 4866 4867 /* 4868 * By default, the message loop will continue. 4869 */ 4870 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); 4871 4872 if (ahd->msgout_len == 0) 4873 panic("Target MSGIN with no active message"); 4874 4875 /* 4876 * If we interrupted a mesgout session, the initiator 4877 * will not know this until our first REQ. So, we 4878 * only honor mesgout requests after we've sent our 4879 * first byte. 4880 */ 4881 if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0 4882 && ahd->msgout_index > 0) 4883 msgout_request = TRUE; 4884 else 4885 msgout_request = FALSE; 4886 4887 if (msgout_request) { 4888 4889 /* 4890 * Change gears and see if 4891 * this message is of interest to 4892 * us or should be passed back to 4893 * the sequencer. 4894 */ 4895 ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; 4896 ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO); 4897 ahd->msgin_index = 0; 4898 /* Dummy read to REQ for first byte */ 4899 ahd_inb(ahd, SCSIDAT); 4900 ahd_outb(ahd, SXFRCTL0, 4901 ahd_inb(ahd, SXFRCTL0) | SPIOEN); 4902 break; 4903 } 4904 4905 msgdone = ahd->msgout_index == ahd->msgout_len; 4906 if (msgdone) { 4907 ahd_outb(ahd, SXFRCTL0, 4908 ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); 4909 end_session = TRUE; 4910 break; 4911 } 4912 4913 /* 4914 * Present the next byte on the bus. 4915 */ 4916 ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); 4917 ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]); 4918 break; 4919 } 4920 case MSG_TYPE_TARGET_MSGOUT: 4921 { 4922 int lastbyte; 4923 int msgdone; 4924 4925 /* 4926 * By default, the message loop will continue. 4927 */ 4928 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); 4929 4930 /* 4931 * The initiator signals that this is 4932 * the last byte by dropping ATN. 4933 */ 4934 lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0; 4935 4936 /* 4937 * Read the latched byte, but turn off SPIOEN first 4938 * so that we don't inadvertently cause a REQ for the 4939 * next byte. 4940 */ 4941 ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); 4942 ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT); 4943 msgdone = ahd_parse_msg(ahd, &devinfo); 4944 if (msgdone == MSGLOOP_TERMINATED) { 4945 /* 4946 * The message is *really* done in that it caused 4947 * us to go to bus free. The sequencer has already 4948 * been reset at this point, so pull the ejection 4949 * handle. 4950 */ 4951 return; 4952 } 4953 4954 ahd->msgin_index++; 4955 4956 /* 4957 * XXX Read spec about initiator dropping ATN too soon 4958 * and use msgdone to detect it. 4959 */ 4960 if (msgdone == MSGLOOP_MSGCOMPLETE) { 4961 ahd->msgin_index = 0; 4962 4963 /* 4964 * If this message elicited a response, transition 4965 * to the Message in phase and send it.
4966 */ 4967 if (ahd->msgout_len != 0) { 4968 ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO); 4969 ahd_outb(ahd, SXFRCTL0, 4970 ahd_inb(ahd, SXFRCTL0) | SPIOEN); 4971 ahd->msg_type = MSG_TYPE_TARGET_MSGIN; 4972 ahd->msgin_index = 0; 4973 break; 4974 } 4975 } 4976 4977 if (lastbyte) 4978 end_session = TRUE; 4979 else { 4980 /* Ask for the next byte. */ 4981 ahd_outb(ahd, SXFRCTL0, 4982 ahd_inb(ahd, SXFRCTL0) | SPIOEN); 4983 } 4984 4985 break; 4986 } 4987 default: 4988 panic("Unknown REQINIT message type"); 4989 } 4990 4991 if (end_session) { 4992 if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) { 4993 printk("%s: Returning to Idle Loop\n", 4994 ahd_name(ahd)); 4995 ahd_clear_msg_state(ahd); 4996 4997 /* 4998 * Perform the equivalent of a clear_target_state. 4999 */ 5000 ahd_outb(ahd, LASTPHASE, P_BUSFREE); 5001 ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT); 5002 ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); 5003 } else { 5004 ahd_clear_msg_state(ahd); 5005 ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP); 5006 } 5007 } 5008 } 5009 5010 /* 5011 * See if we sent a particular extended message to the target. 5012 * If "full" is true, return true only if the target saw the full 5013 * message. If "full" is false, return true if the target saw at 5014 * least the first byte of the message. 5015 */ 5016 static int 5017 ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full) 5018 { 5019 int found; 5020 u_int index; 5021 5022 found = FALSE; 5023 index = 0; 5024 5025 while (index < ahd->msgout_len) { 5026 if (ahd->msgout_buf[index] == EXTENDED_MESSAGE) { 5027 u_int end_index; 5028 5029 end_index = index + 1 + ahd->msgout_buf[index + 1]; 5030 if (ahd->msgout_buf[index+2] == msgval 5031 && type == AHDMSG_EXT) { 5032 5033 if (full) { 5034 if (ahd->msgout_index > end_index) 5035 found = TRUE; 5036 } else if (ahd->msgout_index > index) 5037 found = TRUE; 5038 } 5039 index = end_index; 5040 } else if (ahd->msgout_buf[index] >= SIMPLE_QUEUE_TAG 5041 && ahd->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) { 5042 5043 /* Skip tag type and tag id or residue param*/ 5044 index += 2; 5045 } else { 5046 /* Single byte message */ 5047 if (type == AHDMSG_1B 5048 && ahd->msgout_index > index 5049 && (ahd->msgout_buf[index] == msgval 5050 || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 5051 && msgval == MSG_IDENTIFYFLAG))) 5052 found = TRUE; 5053 index++; 5054 } 5055 5056 if (found) 5057 break; 5058 } 5059 return (found); 5060 } 5061 5062 /* 5063 * Wait for a complete incoming message, parse it, and respond accordingly. 5064 */ 5065 static int 5066 ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 5067 { 5068 struct ahd_initiator_tinfo *tinfo; 5069 struct ahd_tmode_tstate *tstate; 5070 int reject; 5071 int done; 5072 int response; 5073 5074 done = MSGLOOP_IN_PROG; 5075 response = FALSE; 5076 reject = FALSE; 5077 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, 5078 devinfo->target, &tstate); 5079 5080 /* 5081 * Parse as much of the message as is available, 5082 * rejecting it if we don't support it. When 5083 * the entire message is available and has been 5084 * handled, return MSGLOOP_MSGCOMPLETE, indicating 5085 * that we have parsed an entire message. 5086 * 5087 * In the case of extended messages, we accept the length 5088 * byte outright and perform more checking once we know the 5089 * extended message type. 
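 *
 * A sketch of the incremental handling for an inbound SDTR
 * (buffer contents in hex; the period and offset values are
 * illustrative):
 *
 *	01             -> extended message, keep reading
 *	01 03          -> length byte accepted outright
 *	01 03 01       -> SDTR, but wait for both arguments
 *	01 03 01 0a 3f -> msgin_index == MSG_EXT_SDTR_LEN + 1,
 *	                  so validate period 0x0a/offset 0x3f
 *	                  and act on the complete message.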
5090 */ 5091 switch (ahd->msgin_buf[0]) { 5092 case DISCONNECT: 5093 case SAVE_POINTERS: 5094 case COMMAND_COMPLETE: 5095 case RESTORE_POINTERS: 5096 case IGNORE_WIDE_RESIDUE: 5097 /* 5098 * End our message loop as these are messages 5099 * the sequencer handles on its own. 5100 */ 5101 done = MSGLOOP_TERMINATED; 5102 break; 5103 case MESSAGE_REJECT: 5104 response = ahd_handle_msg_reject(ahd, devinfo); 5105 fallthrough; 5106 case NOP: 5107 done = MSGLOOP_MSGCOMPLETE; 5108 break; 5109 case EXTENDED_MESSAGE: 5110 { 5111 /* Wait for enough of the message to begin validation */ 5112 if (ahd->msgin_index < 2) 5113 break; 5114 switch (ahd->msgin_buf[2]) { 5115 case EXTENDED_SDTR: 5116 { 5117 u_int period; 5118 u_int ppr_options; 5119 u_int offset; 5120 u_int saved_offset; 5121 5122 if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 5123 reject = TRUE; 5124 break; 5125 } 5126 5127 /* 5128 * Wait until we have both args before validating 5129 * and acting on this message. 5130 * 5131 * Add one to MSG_EXT_SDTR_LEN to account for 5132 * the extended message preamble. 5133 */ 5134 if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 5135 break; 5136 5137 period = ahd->msgin_buf[3]; 5138 ppr_options = 0; 5139 saved_offset = offset = ahd->msgin_buf[4]; 5140 ahd_devlimited_syncrate(ahd, tinfo, &period, 5141 &ppr_options, devinfo->role); 5142 ahd_validate_offset(ahd, tinfo, period, &offset, 5143 tinfo->curr.width, devinfo->role); 5144 if (bootverbose) { 5145 printk("(%s:%c:%d:%d): Received " 5146 "SDTR period %x, offset %x\n\t" 5147 "Filtered to period %x, offset %x\n", 5148 ahd_name(ahd), devinfo->channel, 5149 devinfo->target, devinfo->lun, 5150 ahd->msgin_buf[3], saved_offset, 5151 period, offset); 5152 } 5153 ahd_set_syncrate(ahd, devinfo, period, 5154 offset, ppr_options, 5155 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5156 /*paused*/TRUE); 5157 5158 /* 5159 * See if we initiated Sync Negotiation 5160 * and didn't have to fall down to async 5161 * transfers. 5162 */ 5163 if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, TRUE)) { 5164 /* We started it */ 5165 if (saved_offset != offset) { 5166 /* Went too low - force async */ 5167 reject = TRUE; 5168 } 5169 } else { 5170 /* 5171 * Send our own SDTR in reply 5172 */ 5173 if (bootverbose 5174 && devinfo->role == ROLE_INITIATOR) { 5175 printk("(%s:%c:%d:%d): Target " 5176 "Initiated SDTR\n", 5177 ahd_name(ahd), devinfo->channel, 5178 devinfo->target, devinfo->lun); 5179 } 5180 ahd->msgout_index = 0; 5181 ahd->msgout_len = 0; 5182 ahd_construct_sdtr(ahd, devinfo, 5183 period, offset); 5184 ahd->msgout_index = 0; 5185 response = TRUE; 5186 } 5187 done = MSGLOOP_MSGCOMPLETE; 5188 break; 5189 } 5190 case EXTENDED_WDTR: 5191 { 5192 u_int bus_width; 5193 u_int saved_width; 5194 u_int sending_reply; 5195 5196 sending_reply = FALSE; 5197 if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 5198 reject = TRUE; 5199 break; 5200 } 5201 5202 /* 5203 * Wait until we have our arg before validating 5204 * and acting on this message. 5205 * 5206 * Add one to MSG_EXT_WDTR_LEN to account for 5207 * the extended message preamble. 
5208 */ 5209 if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 5210 break; 5211 5212 bus_width = ahd->msgin_buf[3]; 5213 saved_width = bus_width; 5214 ahd_validate_width(ahd, tinfo, &bus_width, 5215 devinfo->role); 5216 if (bootverbose) { 5217 printk("(%s:%c:%d:%d): Received WDTR " 5218 "%x filtered to %x\n", 5219 ahd_name(ahd), devinfo->channel, 5220 devinfo->target, devinfo->lun, 5221 saved_width, bus_width); 5222 } 5223 5224 if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, TRUE)) { 5225 /* 5226 * Don't send a WDTR back to the 5227 * target, since we asked first. 5228 * If the width went higher than our 5229 * request, reject it. 5230 */ 5231 if (saved_width > bus_width) { 5232 reject = TRUE; 5233 printk("(%s:%c:%d:%d): requested %dBit " 5234 "transfers. Rejecting...\n", 5235 ahd_name(ahd), devinfo->channel, 5236 devinfo->target, devinfo->lun, 5237 8 * (0x01 << bus_width)); 5238 bus_width = 0; 5239 } 5240 } else { 5241 /* 5242 * Send our own WDTR in reply 5243 */ 5244 if (bootverbose 5245 && devinfo->role == ROLE_INITIATOR) { 5246 printk("(%s:%c:%d:%d): Target " 5247 "Initiated WDTR\n", 5248 ahd_name(ahd), devinfo->channel, 5249 devinfo->target, devinfo->lun); 5250 } 5251 ahd->msgout_index = 0; 5252 ahd->msgout_len = 0; 5253 ahd_construct_wdtr(ahd, devinfo, bus_width); 5254 ahd->msgout_index = 0; 5255 response = TRUE; 5256 sending_reply = TRUE; 5257 } 5258 /* 5259 * After a wide message, we are async, but 5260 * some devices don't seem to honor this portion 5261 * of the spec. Force a renegotiation of the 5262 * sync component of our transfer agreement even 5263 * if our goal is async. By updating our width 5264 * after forcing the negotiation, we avoid 5265 * renegotiating for width. 5266 */ 5267 ahd_update_neg_request(ahd, devinfo, tstate, 5268 tinfo, AHD_NEG_ALWAYS); 5269 ahd_set_width(ahd, devinfo, bus_width, 5270 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5271 /*paused*/TRUE); 5272 if (sending_reply == FALSE && reject == FALSE) { 5273 5274 /* 5275 * We will always have an SDTR to send. 5276 */ 5277 ahd->msgout_index = 0; 5278 ahd->msgout_len = 0; 5279 ahd_build_transfer_msg(ahd, devinfo); 5280 ahd->msgout_index = 0; 5281 response = TRUE; 5282 } 5283 done = MSGLOOP_MSGCOMPLETE; 5284 break; 5285 } 5286 case EXTENDED_PPR: 5287 { 5288 u_int period; 5289 u_int offset; 5290 u_int bus_width; 5291 u_int ppr_options; 5292 u_int saved_width; 5293 u_int saved_offset; 5294 u_int saved_ppr_options; 5295 5296 if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) { 5297 reject = TRUE; 5298 break; 5299 } 5300 5301 /* 5302 * Wait until we have all args before validating 5303 * and acting on this message. 5304 * 5305 * Add one to MSG_EXT_PPR_LEN to account for 5306 * the extended message preamble. 5307 */ 5308 if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1)) 5309 break; 5310 5311 period = ahd->msgin_buf[3]; 5312 offset = ahd->msgin_buf[5]; 5313 bus_width = ahd->msgin_buf[6]; 5314 saved_width = bus_width; 5315 ppr_options = ahd->msgin_buf[7]; 5316 /* 5317 * According to the spec, a DT only 5318 * period factor with no DT option 5319 * set implies async. 5320 */ 5321 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 5322 && period <= 9) 5323 offset = 0; 5324 saved_ppr_options = ppr_options; 5325 saved_offset = offset; 5326 5327 /* 5328 * Transfer options are only available if we 5329 * are negotiating wide. 
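 *
 * For example (illustrative), a PPR requesting 8bit width but
 * with the DT, IU and QAS options set is filtered below to QAS
 * alone, since DT and IU transfers require a wide bus.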
5330 */ 5331 if (bus_width == 0) 5332 ppr_options &= MSG_EXT_PPR_QAS_REQ; 5333 5334 ahd_validate_width(ahd, tinfo, &bus_width, 5335 devinfo->role); 5336 ahd_devlimited_syncrate(ahd, tinfo, &period, 5337 &ppr_options, devinfo->role); 5338 ahd_validate_offset(ahd, tinfo, period, &offset, 5339 bus_width, devinfo->role); 5340 5341 if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, TRUE)) { 5342 /* 5343 * If we are unable to do any of the 5344 * requested options (we went too low), 5345 * then we'll have to reject the message. 5346 */ 5347 if (saved_width > bus_width 5348 || saved_offset != offset 5349 || saved_ppr_options != ppr_options) { 5350 reject = TRUE; 5351 period = 0; 5352 offset = 0; 5353 bus_width = 0; 5354 ppr_options = 0; 5355 } 5356 } else { 5357 if (devinfo->role != ROLE_TARGET) 5358 printk("(%s:%c:%d:%d): Target " 5359 "Initiated PPR\n", 5360 ahd_name(ahd), devinfo->channel, 5361 devinfo->target, devinfo->lun); 5362 else 5363 printk("(%s:%c:%d:%d): Initiator " 5364 "Initiated PPR\n", 5365 ahd_name(ahd), devinfo->channel, 5366 devinfo->target, devinfo->lun); 5367 ahd->msgout_index = 0; 5368 ahd->msgout_len = 0; 5369 ahd_construct_ppr(ahd, devinfo, period, offset, 5370 bus_width, ppr_options); 5371 ahd->msgout_index = 0; 5372 response = TRUE; 5373 } 5374 if (bootverbose) { 5375 printk("(%s:%c:%d:%d): Received PPR width %x, " 5376 "period %x, offset %x, options %x\n" 5377 "\tFiltered to width %x, period %x, " 5378 "offset %x, options %x\n", 5379 ahd_name(ahd), devinfo->channel, 5380 devinfo->target, devinfo->lun, 5381 saved_width, ahd->msgin_buf[3], 5382 saved_offset, saved_ppr_options, 5383 bus_width, period, offset, ppr_options); 5384 } 5385 ahd_set_width(ahd, devinfo, bus_width, 5386 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5387 /*paused*/TRUE); 5388 ahd_set_syncrate(ahd, devinfo, period, 5389 offset, ppr_options, 5390 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5391 /*paused*/TRUE); 5392 5393 done = MSGLOOP_MSGCOMPLETE; 5394 break; 5395 } 5396 default: 5397 /* Unknown extended message. Reject it. */ 5398 reject = TRUE; 5399 break; 5400 } 5401 break; 5402 } 5403 #ifdef AHD_TARGET_MODE 5404 case TARGET_RESET: 5405 ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD, 5406 CAM_BDR_SENT, 5407 "Bus Device Reset Received", 5408 /*verbose_level*/0); 5409 ahd_restart(ahd); 5410 done = MSGLOOP_TERMINATED; 5411 break; 5412 case ABORT_TASK: 5413 case ABORT_TASK_SET: 5414 case CLEAR_TASK_SET: 5415 { 5416 int tag; 5417 5418 /* Target mode messages */ 5419 if (devinfo->role != ROLE_TARGET) { 5420 reject = TRUE; 5421 break; 5422 } 5423 tag = SCB_LIST_NULL; 5424 if (ahd->msgin_buf[0] == ABORT_TASK) 5425 tag = ahd_inb(ahd, INITIATOR_TAG); 5426 ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, 5427 devinfo->lun, tag, ROLE_TARGET, 5428 CAM_REQ_ABORTED); 5429 5430 tstate = ahd->enabled_targets[devinfo->our_scsiid]; 5431 if (tstate != NULL) { 5432 struct ahd_tmode_lstate* lstate; 5433 5434 lstate = tstate->enabled_luns[devinfo->lun]; 5435 if (lstate != NULL) { 5436 ahd_queue_lstate_event(ahd, lstate, 5437 devinfo->our_scsiid, 5438 ahd->msgin_buf[0], 5439 /*arg*/tag); 5440 ahd_send_lstate_events(ahd, lstate); 5441 } 5442 } 5443 ahd_restart(ahd); 5444 done = MSGLOOP_TERMINATED; 5445 break; 5446 } 5447 #endif 5448 case QAS_REQUEST: 5449 #ifdef AHD_DEBUG 5450 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) 5451 printk("%s: QAS request. 
SCSISIGI == 0x%x\n", 5452 ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); 5453 #endif 5454 ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; 5455 fallthrough; 5456 case TERMINATE_IO_PROC: 5457 default: 5458 reject = TRUE; 5459 break; 5460 } 5461 5462 if (reject) { 5463 /* 5464 * Setup to reject the message. 5465 */ 5466 ahd->msgout_index = 0; 5467 ahd->msgout_len = 1; 5468 ahd->msgout_buf[0] = MESSAGE_REJECT; 5469 done = MSGLOOP_MSGCOMPLETE; 5470 response = TRUE; 5471 } 5472 5473 if (done != MSGLOOP_IN_PROG && !response) 5474 /* Clear the outgoing message buffer */ 5475 ahd->msgout_len = 0; 5476 5477 return (done); 5478 } 5479 5480 /* 5481 * Process a message reject message. 5482 */ 5483 static int 5484 ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 5485 { 5486 /* 5487 * What we care about here is if we had an 5488 * outstanding SDTR or WDTR message for this 5489 * target. If we did, this is a signal that 5490 * the target is refusing negotiation. 5491 */ 5492 struct scb *scb; 5493 struct ahd_initiator_tinfo *tinfo; 5494 struct ahd_tmode_tstate *tstate; 5495 u_int scb_index; 5496 u_int last_msg; 5497 int response = 0; 5498 5499 scb_index = ahd_get_scbptr(ahd); 5500 scb = ahd_lookup_scb(ahd, scb_index); 5501 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, 5502 devinfo->our_scsiid, 5503 devinfo->target, &tstate); 5504 /* Might be necessary */ 5505 last_msg = ahd_inb(ahd, LAST_MSG); 5506 5507 if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) { 5508 if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/TRUE) 5509 && tinfo->goal.period <= AHD_SYNCRATE_PACED) { 5510 /* 5511 * Target may not like our SPI-4 PPR Options. 5512 * Attempt to negotiate 80MHz which will turn 5513 * off these options. 5514 */ 5515 if (bootverbose) { 5516 printk("(%s:%c:%d:%d): PPR Rejected. " 5517 "Trying simple U160 PPR\n", 5518 ahd_name(ahd), devinfo->channel, 5519 devinfo->target, devinfo->lun); 5520 } 5521 tinfo->goal.period = AHD_SYNCRATE_DT; 5522 tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ 5523 | MSG_EXT_PPR_QAS_REQ 5524 | MSG_EXT_PPR_DT_REQ; 5525 } else { 5526 /* 5527 * Target does not support the PPR message. 5528 * Attempt to negotiate SPI-2 style. 5529 */ 5530 if (bootverbose) { 5531 printk("(%s:%c:%d:%d): PPR Rejected. " 5532 "Trying WDTR/SDTR\n", 5533 ahd_name(ahd), devinfo->channel, 5534 devinfo->target, devinfo->lun); 5535 } 5536 tinfo->goal.ppr_options = 0; 5537 tinfo->curr.transport_version = 2; 5538 tinfo->goal.transport_version = 2; 5539 } 5540 ahd->msgout_index = 0; 5541 ahd->msgout_len = 0; 5542 ahd_build_transfer_msg(ahd, devinfo); 5543 ahd->msgout_index = 0; 5544 response = 1; 5545 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) { 5546 5547 /* note 8bit xfers */ 5548 printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 5549 "8bit transfers\n", ahd_name(ahd), 5550 devinfo->channel, devinfo->target, devinfo->lun); 5551 ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 5552 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5553 /*paused*/TRUE); 5554 /* 5555 * No need to clear the sync rate. If the target 5556 * did not accept the command, our syncrate is 5557 * unaffected. If the target started the negotiation, 5558 * but rejected our response, we already cleared the 5559 * sync rate before sending our WDTR. 
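 *
 * Taken together, the branches of this routine implement a
 * simple fallback ladder (a sketch; the code has the exact
 * conditions):
 *
 *	paced PPR rejected -> retry as a U160 PPR (period
 *	                      AHD_SYNCRATE_DT, options limited
 *	                      to IU/QAS/DT)
 *	PPR rejected again -> drop to SPI-2 style WDTR/SDTR
 *	WDTR rejected      -> force 8bit transfers, then
 *	                      negotiate sync separately
 *	SDTR rejected      -> fall back to async transfers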
5560 */ 5561 if (tinfo->goal.offset != tinfo->curr.offset) { 5562 5563 /* Start the sync negotiation */ 5564 ahd->msgout_index = 0; 5565 ahd->msgout_len = 0; 5566 ahd_build_transfer_msg(ahd, devinfo); 5567 ahd->msgout_index = 0; 5568 response = 1; 5569 } 5570 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) { 5571 /* note asynch xfers and clear flag */ 5572 ahd_set_syncrate(ahd, devinfo, /*period*/0, 5573 /*offset*/0, /*ppr_options*/0, 5574 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, 5575 /*paused*/TRUE); 5576 printk("(%s:%c:%d:%d): refuses synchronous negotiation. " 5577 "Using asynchronous transfers\n", 5578 ahd_name(ahd), devinfo->channel, 5579 devinfo->target, devinfo->lun); 5580 } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) { 5581 int tag_type; 5582 int mask; 5583 5584 tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG); 5585 5586 if (tag_type == SIMPLE_QUEUE_TAG) { 5587 printk("(%s:%c:%d:%d): refuses tagged commands. " 5588 "Performing non-tagged I/O\n", ahd_name(ahd), 5589 devinfo->channel, devinfo->target, devinfo->lun); 5590 ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE); 5591 mask = ~0x23; 5592 } else { 5593 printk("(%s:%c:%d:%d): refuses %s tagged commands. " 5594 "Performing simple queue tagged I/O only\n", 5595 ahd_name(ahd), devinfo->channel, devinfo->target, 5596 devinfo->lun, tag_type == ORDERED_QUEUE_TAG 5597 ? "ordered" : "head of queue"); 5598 ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC); 5599 mask = ~0x03; 5600 } 5601 5602 /* 5603 * Resend the identify for this CCB as the target 5604 * may believe that the selection is invalid otherwise. 5605 */ 5606 ahd_outb(ahd, SCB_CONTROL, 5607 ahd_inb_scbram(ahd, SCB_CONTROL) & mask); 5608 scb->hscb->control &= mask; 5609 ahd_set_transaction_tag(scb, /*enabled*/FALSE, 5610 /*type*/SIMPLE_QUEUE_TAG); 5611 ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG); 5612 ahd_assert_atn(ahd); 5613 ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), 5614 SCB_GET_TAG(scb)); 5615 5616 /* 5617 * Requeue all tagged commands for this target 5618 * currently in our possession so they can be 5619 * converted to untagged commands. 5620 */ 5621 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), 5622 SCB_GET_CHANNEL(ahd, scb), 5623 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, 5624 ROLE_INITIATOR, CAM_REQUEUE_REQ, 5625 SEARCH_COMPLETE); 5626 } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) { 5627 /* 5628 * Most likely the device believes that we had 5629 * previously negotiated packetized. 5630 */ 5631 ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE 5632 | MSG_FLAG_IU_REQ_CHANGED; 5633 5634 ahd_force_renegotiation(ahd, devinfo); 5635 ahd->msgout_index = 0; 5636 ahd->msgout_len = 0; 5637 ahd_build_transfer_msg(ahd, devinfo); 5638 ahd->msgout_index = 0; 5639 response = 1; 5640 } else { 5641 /* 5642 * Otherwise, we ignore it. 5643 */ 5644 printk("%s:%c:%d: Message reject for %x -- ignored\n", 5645 ahd_name(ahd), devinfo->channel, devinfo->target, 5646 last_msg); 5647 } 5648 return (response); 5649 } 5650 5651 /* 5652 * Process an ignore wide residue message. 5653 */ 5654 static void 5655 ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) 5656 { 5657 u_int scb_index; 5658 struct scb *scb; 5659 5660 scb_index = ahd_get_scbptr(ahd); 5661 scb = ahd_lookup_scb(ahd, scb_index); 5662 /* 5663 * XXX Actually check data direction in the sequencer? 5664 * Perhaps add datadir to some spare bits in the hscb?
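 *
 * The arithmetic below is easiest to see with an example (the
 * byte counts are illustrative): if the target moved 10 bytes
 * of a 512 byte S/G segment before sending IGNORE WIDE RESIDUE,
 * the last byte of the final 16bit transfer was padding, so the
 * residual count is incremented by one (502 -> 503) and the
 * shadow data address backed up by one, "un-transferring" the
 * pad byte.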
5665 */ 5666 if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0 5667 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) { 5668 /* 5669 * Ignore the message if we haven't 5670 * seen an appropriate data phase yet. 5671 */ 5672 } else { 5673 /* 5674 * If the residual occurred on the last 5675 * transfer and the transfer request was 5676 * expected to end on an odd count, do 5677 * nothing. Otherwise, subtract a byte 5678 * and update the residual count accordingly. 5679 */ 5680 uint32_t sgptr; 5681 5682 sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR); 5683 if ((sgptr & SG_LIST_NULL) != 0 5684 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) 5685 & SCB_XFERLEN_ODD) != 0) { 5686 /* 5687 * If the residual occurred on the last 5688 * transfer and the transfer request was 5689 * expected to end on an odd count, do 5690 * nothing. 5691 */ 5692 } else { 5693 uint32_t data_cnt; 5694 uint64_t data_addr; 5695 uint32_t sglen; 5696 5697 /* Pull in the rest of the sgptr */ 5698 sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); 5699 data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT); 5700 if ((sgptr & SG_LIST_NULL) != 0) { 5701 /* 5702 * The residual data count is not updated 5703 * for the command run to completion case. 5704 * Explicitly zero the count. 5705 */ 5706 data_cnt &= ~AHD_SG_LEN_MASK; 5707 } 5708 data_addr = ahd_inq(ahd, SHADDR); 5709 data_cnt += 1; 5710 data_addr -= 1; 5711 sgptr &= SG_PTR_MASK; 5712 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { 5713 struct ahd_dma64_seg *sg; 5714 5715 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 5716 5717 /* 5718 * The residual sg ptr points to the next S/G 5719 * to load so we must go back one. 5720 */ 5721 sg--; 5722 sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; 5723 if (sg != scb->sg_list 5724 && sglen < (data_cnt & AHD_SG_LEN_MASK)) { 5725 5726 sg--; 5727 sglen = ahd_le32toh(sg->len); 5728 /* 5729 * Preserve High Address and SG_LIST 5730 * bits while setting the count to 1. 5731 */ 5732 data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); 5733 data_addr = ahd_le64toh(sg->addr) 5734 + (sglen & AHD_SG_LEN_MASK) 5735 - 1; 5736 5737 /* 5738 * Increment sg so it points to the 5739 * "next" sg. 5740 */ 5741 sg++; 5742 sgptr = ahd_sg_virt_to_bus(ahd, scb, 5743 sg); 5744 } 5745 } else { 5746 struct ahd_dma_seg *sg; 5747 5748 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 5749 5750 /* 5751 * The residual sg ptr points to the next S/G 5752 * to load so we must go back one. 5753 */ 5754 sg--; 5755 sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; 5756 if (sg != scb->sg_list 5757 && sglen < (data_cnt & AHD_SG_LEN_MASK)) { 5758 5759 sg--; 5760 sglen = ahd_le32toh(sg->len); 5761 /* 5762 * Preserve High Address and SG_LIST 5763 * bits while setting the count to 1. 5764 */ 5765 data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); 5766 data_addr = ahd_le32toh(sg->addr) 5767 + (sglen & AHD_SG_LEN_MASK) 5768 - 1; 5769 5770 /* 5771 * Increment sg so it points to the 5772 * "next" sg. 5773 */ 5774 sg++; 5775 sgptr = ahd_sg_virt_to_bus(ahd, scb, 5776 sg); 5777 } 5778 } 5779 /* 5780 * Toggle the "oddness" of the transfer length 5781 * to handle this mid-transfer ignore wide 5782 * residue. This ensures that the oddness is 5783 * correct for subsequent data transfers. 5784 */ 5785 ahd_outb(ahd, SCB_TASK_ATTRIBUTE, 5786 ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) 5787 ^ SCB_XFERLEN_ODD); 5788 5789 ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); 5790 ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt); 5791 /* 5792 * The FIFO's pointers will be updated if/when the 5793 * sequencer re-enters a data phase. 
5794 */ 5795 } 5796 } 5797 } 5798 5799 5800 /* 5801 * Reinitialize the data pointers for the active transfer 5802 * based on its current residual. 5803 */ 5804 static void 5805 ahd_reinitialize_dataptrs(struct ahd_softc *ahd) 5806 { 5807 struct scb *scb; 5808 ahd_mode_state saved_modes; 5809 u_int scb_index; 5810 u_int wait; 5811 uint32_t sgptr; 5812 uint32_t resid; 5813 uint64_t dataptr; 5814 5815 AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK, 5816 AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK); 5817 5818 scb_index = ahd_get_scbptr(ahd); 5819 scb = ahd_lookup_scb(ahd, scb_index); 5820 5821 /* 5822 * Release and reacquire the FIFO so we 5823 * have a clean slate. 5824 */ 5825 ahd_outb(ahd, DFFSXFRCTL, CLRCHN); 5826 wait = 1000; 5827 while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE)) 5828 ahd_delay(100); 5829 if (wait == 0) { 5830 ahd_print_path(ahd, scb); 5831 printk("ahd_reinitialize_dataptrs: Forcing FIFO free.\n"); 5832 ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); 5833 } 5834 saved_modes = ahd_save_modes(ahd); 5835 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 5836 ahd_outb(ahd, DFFSTAT, 5837 ahd_inb(ahd, DFFSTAT) 5838 | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0)); 5839 5840 /* 5841 * Determine initial values for data_addr and data_cnt 5842 * for resuming the data phase. 5843 */ 5844 sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); 5845 sgptr &= SG_PTR_MASK; 5846 5847 resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16) 5848 | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8) 5849 | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT); 5850 5851 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { 5852 struct ahd_dma64_seg *sg; 5853 5854 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 5855 5856 /* The residual sg_ptr always points to the next sg */ 5857 sg--; 5858 5859 dataptr = ahd_le64toh(sg->addr) 5860 + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) 5861 - resid; 5862 ahd_outl(ahd, HADDR + 4, dataptr >> 32); 5863 } else { 5864 struct ahd_dma_seg *sg; 5865 5866 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); 5867 5868 /* The residual sg_ptr always points to the next sg */ 5869 sg--; 5870 5871 dataptr = ahd_le32toh(sg->addr) 5872 + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) 5873 - resid; 5874 ahd_outb(ahd, HADDR + 4, 5875 (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24); 5876 } 5877 ahd_outl(ahd, HADDR, dataptr); 5878 ahd_outb(ahd, HCNT + 2, resid >> 16); 5879 ahd_outb(ahd, HCNT + 1, resid >> 8); 5880 ahd_outb(ahd, HCNT, resid); 5881 } 5882 5883 /* 5884 * Handle the effects of issuing a bus device reset message. 5885 */ 5886 static void 5887 ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 5888 u_int lun, cam_status status, char *message, 5889 int verbose_level) 5890 { 5891 #ifdef AHD_TARGET_MODE 5892 struct ahd_tmode_tstate* tstate; 5893 #endif 5894 int found; 5895 5896 found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, 5897 lun, SCB_LIST_NULL, devinfo->role, 5898 status); 5899 5900 #ifdef AHD_TARGET_MODE 5901 /* 5902 * Send an immediate notify ccb to all target mord peripheral 5903 * drivers affected by this action. 
5904 */ 5905 tstate = ahd->enabled_targets[devinfo->our_scsiid]; 5906 if (tstate != NULL) { 5907 u_int cur_lun; 5908 u_int max_lun; 5909 5910 if (lun != CAM_LUN_WILDCARD) { 5911 cur_lun = 0; 5912 max_lun = AHD_NUM_LUNS - 1; 5913 } else { 5914 cur_lun = lun; 5915 max_lun = lun; 5916 } 5917 for (;cur_lun <= max_lun; cur_lun++) { 5918 struct ahd_tmode_lstate* lstate; 5919 5920 lstate = tstate->enabled_luns[cur_lun]; 5921 if (lstate == NULL) 5922 continue; 5923 5924 ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, 5925 TARGET_RESET, /*arg*/0); 5926 ahd_send_lstate_events(ahd, lstate); 5927 } 5928 } 5929 #endif 5930 5931 /* 5932 * Go back to async/narrow transfers and renegotiate. 5933 */ 5934 ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 5935 AHD_TRANS_CUR, /*paused*/TRUE); 5936 ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, 5937 /*ppr_options*/0, AHD_TRANS_CUR, 5938 /*paused*/TRUE); 5939 5940 if (status != CAM_SEL_TIMEOUT) 5941 ahd_send_async(ahd, devinfo->channel, devinfo->target, 5942 CAM_LUN_WILDCARD, AC_SENT_BDR); 5943 5944 if (message != NULL && bootverbose) 5945 printk("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd), 5946 message, devinfo->channel, devinfo->target, found); 5947 } 5948 5949 #ifdef AHD_TARGET_MODE 5950 static void 5951 ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, 5952 struct scb *scb) 5953 { 5954 5955 /* 5956 * To facilitate adding multiple messages together, 5957 * each routine should increment the index and len 5958 * variables instead of setting them explicitly. 5959 */ 5960 ahd->msgout_index = 0; 5961 ahd->msgout_len = 0; 5962 5963 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) 5964 ahd_build_transfer_msg(ahd, devinfo); 5965 else 5966 panic("ahd_intr: AWAITING target message with no message"); 5967 5968 ahd->msgout_index = 0; 5969 ahd->msg_type = MSG_TYPE_TARGET_MSGIN; 5970 } 5971 #endif 5972 /**************************** Initialization **********************************/ 5973 static u_int 5974 ahd_sglist_size(struct ahd_softc *ahd) 5975 { 5976 bus_size_t list_size; 5977 5978 list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; 5979 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) 5980 list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; 5981 return (list_size); 5982 } 5983 5984 /* 5985 * Calculate the optimum S/G List allocation size. S/G elements used 5986 * for a given transaction must be physically contiguous. Assume the 5987 * OS will allocate full pages to us, so it doesn't make sense to request 5988 * less than a page. 5989 */ 5990 static u_int 5991 ahd_sglist_allocsize(struct ahd_softc *ahd) 5992 { 5993 bus_size_t sg_list_increment; 5994 bus_size_t sg_list_size; 5995 bus_size_t max_list_size; 5996 bus_size_t best_list_size; 5997 5998 /* Start out with the minimum required for AHD_NSEG. */ 5999 sg_list_increment = ahd_sglist_size(ahd); 6000 sg_list_size = sg_list_increment; 6001 6002 /* Get us as close as possible to a page in size. */ 6003 while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) 6004 sg_list_size += sg_list_increment; 6005 6006 /* 6007 * Try to reduce the amount of wastage by allocating 6008 * multiple pages. 
6009 */ 6010 best_list_size = sg_list_size; 6011 max_list_size = roundup(sg_list_increment, PAGE_SIZE); 6012 if (max_list_size < 4 * PAGE_SIZE) 6013 max_list_size = 4 * PAGE_SIZE; 6014 if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) 6015 max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); 6016 while ((sg_list_size + sg_list_increment) <= max_list_size 6017 && (sg_list_size % PAGE_SIZE) != 0) { 6018 bus_size_t new_mod; 6019 bus_size_t best_mod; 6020 6021 sg_list_size += sg_list_increment; 6022 new_mod = sg_list_size % PAGE_SIZE; 6023 best_mod = best_list_size % PAGE_SIZE; 6024 if (new_mod > best_mod || new_mod == 0) { 6025 best_list_size = sg_list_size; 6026 } 6027 } 6028 return (best_list_size); 6029 } 6030 6031 /* 6032 * Allocate a controller structure for a new device 6033 * and perform initial initializion. 6034 */ 6035 struct ahd_softc * 6036 ahd_alloc(void *platform_arg, char *name) 6037 { 6038 struct ahd_softc *ahd; 6039 6040 ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC); 6041 if (!ahd) { 6042 printk("aic7xxx: cannot malloc softc!\n"); 6043 kfree(name); 6044 return NULL; 6045 } 6046 6047 ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC); 6048 if (ahd->seep_config == NULL) { 6049 kfree(ahd); 6050 kfree(name); 6051 return (NULL); 6052 } 6053 LIST_INIT(&ahd->pending_scbs); 6054 /* We don't know our unit number until the OSM sets it */ 6055 ahd->name = name; 6056 ahd->unit = -1; 6057 ahd->description = NULL; 6058 ahd->bus_description = NULL; 6059 ahd->channel = 'A'; 6060 ahd->chip = AHD_NONE; 6061 ahd->features = AHD_FENONE; 6062 ahd->bugs = AHD_BUGNONE; 6063 ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A 6064 | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; 6065 timer_setup(&ahd->stat_timer, ahd_stat_timer, 0); 6066 ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; 6067 ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; 6068 ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; 6069 ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT; 6070 ahd->int_coalescing_stop_threshold = 6071 AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT; 6072 6073 #ifdef AHD_DEBUG 6074 if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { 6075 printk("%s: scb size = 0x%x, hscb size = 0x%x\n", 6076 ahd_name(ahd), (u_int)sizeof(struct scb), 6077 (u_int)sizeof(struct hardware_scb)); 6078 } 6079 #endif 6080 if (ahd_platform_alloc(ahd, platform_arg) != 0) { 6081 ahd_free(ahd); 6082 ahd = NULL; 6083 } 6084 return (ahd); 6085 } 6086 6087 int 6088 ahd_softc_init(struct ahd_softc *ahd) 6089 { 6090 6091 ahd->unpause = 0; 6092 ahd->pause = PAUSE; 6093 return (0); 6094 } 6095 6096 void 6097 ahd_set_unit(struct ahd_softc *ahd, int unit) 6098 { 6099 ahd->unit = unit; 6100 } 6101 6102 void 6103 ahd_set_name(struct ahd_softc *ahd, char *name) 6104 { 6105 kfree(ahd->name); 6106 ahd->name = name; 6107 } 6108 6109 void 6110 ahd_free(struct ahd_softc *ahd) 6111 { 6112 int i; 6113 6114 switch (ahd->init_level) { 6115 default: 6116 case 5: 6117 ahd_shutdown(ahd); 6118 fallthrough; 6119 case 4: 6120 ahd_dmamap_unload(ahd, ahd->shared_data_dmat, 6121 ahd->shared_data_map.dmamap); 6122 fallthrough; 6123 case 3: 6124 ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, 6125 ahd->shared_data_map.dmamap); 6126 ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, 6127 ahd->shared_data_map.dmamap); 6128 fallthrough; 6129 case 2: 6130 ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); 6131 break; 6132 case 1: 6133 break; 6134 case 0: 6135 break; 6136 } 6137 6138 
	ahd_platform_free(ahd);
	ahd_fini_scbdata(ahd);
	for (i = 0; i < AHD_NUM_TARGETS; i++) {
		struct ahd_tmode_tstate *tstate;

		tstate = ahd->enabled_targets[i];
		if (tstate != NULL) {
#ifdef AHD_TARGET_MODE
			int j;

			for (j = 0; j < AHD_NUM_LUNS; j++) {
				struct ahd_tmode_lstate *lstate;

				lstate = tstate->enabled_luns[j];
				if (lstate != NULL) {
					xpt_free_path(lstate->path);
					kfree(lstate);
				}
			}
#endif
			kfree(tstate);
		}
	}
#ifdef AHD_TARGET_MODE
	if (ahd->black_hole != NULL) {
		xpt_free_path(ahd->black_hole->path);
		kfree(ahd->black_hole);
	}
#endif
	kfree(ahd->name);
	kfree(ahd->seep_config);
	kfree(ahd->saved_stack);
	kfree(ahd);
	return;
}

static void
ahd_shutdown(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;

	/*
	 * Stop periodic timer callbacks.
	 */
	del_timer_sync(&ahd->stat_timer);

	/* This will reset most registers to 0, but not all */
	ahd_reset(ahd, /*reinit*/FALSE);
}

/*
 * Reset the controller and record some information about it
 * that is only available just after a reset. If "reinit" is
 * non-zero, this reset occurred after initial configuration
 * and the caller requests that the chip be fully reinitialized
 * to a runnable state. Chip interrupts are *not* enabled after
 * a reinitialization. The caller must enable interrupts via
 * ahd_intr_enable().
 */
int
ahd_reset(struct ahd_softc *ahd, int reinit)
{
	u_int sxfrctl1;
	int wait;
	uint32_t cmd;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sxfrctl1 = ahd_inb(ahd, SXFRCTL1);

	cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		uint32_t mod_cmd;

		/*
		 * A4 Razor #632
		 * During the assertion of CHIPRST, the chip
		 * does not disable its parity logic prior to
		 * the start of the reset. This may cause a
		 * parity error to be detected and thus a
		 * spurious SERR or PERR assertion. Disable
		 * PERR and SERR responses during the CHIPRST.
		 */
		mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     mod_cmd, /*bytes*/2);
	}
	ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);

	/*
	 * Ensure that the reset has finished. We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		ahd_delay(1000);
	} while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printk("%s: WARNING - Failed chip reset! "
		       "Trying to initialize anyway.\n", ahd_name(ahd));
	}
	ahd_outb(ahd, HCNTRL, ahd->pause);

	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		/*
		 * Clear any latched PCI error status and restore
		 * previous SERR and PERR response enables.
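		 * (The error bits in the PCI status register are
		 * write-one-to-clear, which is why the plain 0xFF write
		 * to PCIR_STATUS + 1 below is sufficient.)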
6256 */ 6257 ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, 6258 0xFF, /*bytes*/1); 6259 ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, 6260 cmd, /*bytes*/2); 6261 } 6262 6263 /* 6264 * Mode should be SCSI after a chip reset, but lets 6265 * set it just to be safe. We touch the MODE_PTR 6266 * register directly so as to bypass the lazy update 6267 * code in ahd_set_modes(). 6268 */ 6269 ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 6270 ahd_outb(ahd, MODE_PTR, 6271 ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI)); 6272 6273 /* 6274 * Restore SXFRCTL1. 6275 * 6276 * We must always initialize STPWEN to 1 before we 6277 * restore the saved values. STPWEN is initialized 6278 * to a tri-state condition which can only be cleared 6279 * by turning it on. 6280 */ 6281 ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); 6282 ahd_outb(ahd, SXFRCTL1, sxfrctl1); 6283 6284 /* Determine chip configuration */ 6285 ahd->features &= ~AHD_WIDE; 6286 if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0) 6287 ahd->features |= AHD_WIDE; 6288 6289 /* 6290 * If a recovery action has forced a chip reset, 6291 * re-initialize the chip to our liking. 6292 */ 6293 if (reinit != 0) 6294 ahd_chip_init(ahd); 6295 6296 return (0); 6297 } 6298 6299 /* 6300 * Determine the number of SCBs available on the controller 6301 */ 6302 static int 6303 ahd_probe_scbs(struct ahd_softc *ahd) { 6304 int i; 6305 6306 AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), 6307 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); 6308 for (i = 0; i < AHD_SCB_MAX; i++) { 6309 int j; 6310 6311 ahd_set_scbptr(ahd, i); 6312 ahd_outw(ahd, SCB_BASE, i); 6313 for (j = 2; j < 64; j++) 6314 ahd_outb(ahd, SCB_BASE+j, 0); 6315 /* Start out life as unallocated (needing an abort) */ 6316 ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE); 6317 if (ahd_inw_scbram(ahd, SCB_BASE) != i) 6318 break; 6319 ahd_set_scbptr(ahd, 0); 6320 if (ahd_inw_scbram(ahd, SCB_BASE) != 0) 6321 break; 6322 } 6323 return (i); 6324 } 6325 6326 static void 6327 ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 6328 { 6329 dma_addr_t *baddr; 6330 6331 baddr = (dma_addr_t *)arg; 6332 *baddr = segs->ds_addr; 6333 } 6334 6335 static void 6336 ahd_initialize_hscbs(struct ahd_softc *ahd) 6337 { 6338 int i; 6339 6340 for (i = 0; i < ahd->scb_data.maxhscbs; i++) { 6341 ahd_set_scbptr(ahd, i); 6342 6343 /* Clear the control byte. */ 6344 ahd_outb(ahd, SCB_CONTROL, 0); 6345 6346 /* Set the next pointer */ 6347 ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL); 6348 } 6349 } 6350 6351 static int 6352 ahd_init_scbdata(struct ahd_softc *ahd) 6353 { 6354 struct scb_data *scb_data; 6355 int i; 6356 6357 scb_data = &ahd->scb_data; 6358 TAILQ_INIT(&scb_data->free_scbs); 6359 for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++) 6360 LIST_INIT(&scb_data->free_scb_lists[i]); 6361 LIST_INIT(&scb_data->any_dev_free_scb_list); 6362 SLIST_INIT(&scb_data->hscb_maps); 6363 SLIST_INIT(&scb_data->sg_maps); 6364 SLIST_INIT(&scb_data->sense_maps); 6365 6366 /* Determine the number of hardware SCBs and initialize them */ 6367 scb_data->maxhscbs = ahd_probe_scbs(ahd); 6368 if (scb_data->maxhscbs == 0) { 6369 printk("%s: No SCB space found\n", ahd_name(ahd)); 6370 return (ENXIO); 6371 } 6372 6373 ahd_initialize_hscbs(ahd); 6374 6375 /* 6376 * Create our DMA tags. These tags define the kinds of device 6377 * accessible memory allocations and memory mappings we will 6378 * need to perform during normal operation. 
6379 * 6380 * Unless we need to further restrict the allocation, we rely 6381 * on the restrictions of the parent dmat, hence the common 6382 * use of MAXADDR and MAXSIZE. 6383 */ 6384 6385 /* DMA tag for our hardware scb structures */ 6386 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, 6387 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 6388 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 6389 /*highaddr*/BUS_SPACE_MAXADDR, 6390 /*filter*/NULL, /*filterarg*/NULL, 6391 PAGE_SIZE, /*nsegments*/1, 6392 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 6393 /*flags*/0, &scb_data->hscb_dmat) != 0) { 6394 goto error_exit; 6395 } 6396 6397 scb_data->init_level++; 6398 6399 /* DMA tag for our S/G structures. */ 6400 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8, 6401 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 6402 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 6403 /*highaddr*/BUS_SPACE_MAXADDR, 6404 /*filter*/NULL, /*filterarg*/NULL, 6405 ahd_sglist_allocsize(ahd), /*nsegments*/1, 6406 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 6407 /*flags*/0, &scb_data->sg_dmat) != 0) { 6408 goto error_exit; 6409 } 6410 #ifdef AHD_DEBUG 6411 if ((ahd_debug & AHD_SHOW_MEMORY) != 0) 6412 printk("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd), 6413 ahd_sglist_allocsize(ahd)); 6414 #endif 6415 6416 scb_data->init_level++; 6417 6418 /* DMA tag for our sense buffers. We allocate in page sized chunks */ 6419 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, 6420 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 6421 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 6422 /*highaddr*/BUS_SPACE_MAXADDR, 6423 /*filter*/NULL, /*filterarg*/NULL, 6424 PAGE_SIZE, /*nsegments*/1, 6425 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 6426 /*flags*/0, &scb_data->sense_dmat) != 0) { 6427 goto error_exit; 6428 } 6429 6430 scb_data->init_level++; 6431 6432 /* Perform initial CCB allocation */ 6433 ahd_alloc_scbs(ahd); 6434 6435 if (scb_data->numscbs == 0) { 6436 printk("%s: ahd_init_scbdata - " 6437 "Unable to allocate initial scbs\n", 6438 ahd_name(ahd)); 6439 goto error_exit; 6440 } 6441 6442 /* 6443 * Note that we were successful 6444 */ 6445 return (0); 6446 6447 error_exit: 6448 6449 return (ENOMEM); 6450 } 6451 6452 static struct scb * 6453 ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag) 6454 { 6455 struct scb *scb; 6456 6457 /* 6458 * Look on the pending list. 6459 */ 6460 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 6461 if (SCB_GET_TAG(scb) == tag) 6462 return (scb); 6463 } 6464 6465 /* 6466 * Then on all of the collision free lists. 6467 */ 6468 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { 6469 struct scb *list_scb; 6470 6471 list_scb = scb; 6472 do { 6473 if (SCB_GET_TAG(list_scb) == tag) 6474 return (list_scb); 6475 list_scb = LIST_NEXT(list_scb, collision_links); 6476 } while (list_scb); 6477 } 6478 6479 /* 6480 * And finally on the generic free list. 
6481 */ 6482 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { 6483 if (SCB_GET_TAG(scb) == tag) 6484 return (scb); 6485 } 6486 6487 return (NULL); 6488 } 6489 6490 static void 6491 ahd_fini_scbdata(struct ahd_softc *ahd) 6492 { 6493 struct scb_data *scb_data; 6494 6495 scb_data = &ahd->scb_data; 6496 if (scb_data == NULL) 6497 return; 6498 6499 switch (scb_data->init_level) { 6500 default: 6501 case 7: 6502 { 6503 struct map_node *sns_map; 6504 6505 while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) { 6506 SLIST_REMOVE_HEAD(&scb_data->sense_maps, links); 6507 ahd_dmamap_unload(ahd, scb_data->sense_dmat, 6508 sns_map->dmamap); 6509 ahd_dmamem_free(ahd, scb_data->sense_dmat, 6510 sns_map->vaddr, sns_map->dmamap); 6511 kfree(sns_map); 6512 } 6513 ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); 6514 } 6515 fallthrough; 6516 case 6: 6517 { 6518 struct map_node *sg_map; 6519 6520 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) { 6521 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 6522 ahd_dmamap_unload(ahd, scb_data->sg_dmat, 6523 sg_map->dmamap); 6524 ahd_dmamem_free(ahd, scb_data->sg_dmat, 6525 sg_map->vaddr, sg_map->dmamap); 6526 kfree(sg_map); 6527 } 6528 ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); 6529 } 6530 fallthrough; 6531 case 5: 6532 { 6533 struct map_node *hscb_map; 6534 6535 while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) { 6536 SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links); 6537 ahd_dmamap_unload(ahd, scb_data->hscb_dmat, 6538 hscb_map->dmamap); 6539 ahd_dmamem_free(ahd, scb_data->hscb_dmat, 6540 hscb_map->vaddr, hscb_map->dmamap); 6541 kfree(hscb_map); 6542 } 6543 ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat); 6544 } 6545 fallthrough; 6546 case 4: 6547 case 3: 6548 case 2: 6549 case 1: 6550 case 0: 6551 break; 6552 } 6553 } 6554 6555 /* 6556 * DSP filter Bypass must be enabled until the first selection 6557 * after a change in bus mode (Razor #491 and #493). 
6558 */ 6559 static void 6560 ahd_setup_iocell_workaround(struct ahd_softc *ahd) 6561 { 6562 ahd_mode_state saved_modes; 6563 6564 saved_modes = ahd_save_modes(ahd); 6565 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 6566 ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) 6567 | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS); 6568 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); 6569 #ifdef AHD_DEBUG 6570 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6571 printk("%s: Setting up iocell workaround\n", ahd_name(ahd)); 6572 #endif 6573 ahd_restore_modes(ahd, saved_modes); 6574 ahd->flags &= ~AHD_HAD_FIRST_SEL; 6575 } 6576 6577 static void 6578 ahd_iocell_first_selection(struct ahd_softc *ahd) 6579 { 6580 ahd_mode_state saved_modes; 6581 u_int sblkctl; 6582 6583 if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0) 6584 return; 6585 saved_modes = ahd_save_modes(ahd); 6586 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 6587 sblkctl = ahd_inb(ahd, SBLKCTL); 6588 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 6589 #ifdef AHD_DEBUG 6590 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6591 printk("%s: iocell first selection\n", ahd_name(ahd)); 6592 #endif 6593 if ((sblkctl & ENAB40) != 0) { 6594 ahd_outb(ahd, DSPDATACTL, 6595 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); 6596 #ifdef AHD_DEBUG 6597 if ((ahd_debug & AHD_SHOW_MISC) != 0) 6598 printk("%s: BYPASS now disabled\n", ahd_name(ahd)); 6599 #endif 6600 } 6601 ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); 6602 ahd_outb(ahd, CLRINT, CLRSCSIINT); 6603 ahd_restore_modes(ahd, saved_modes); 6604 ahd->flags |= AHD_HAD_FIRST_SEL; 6605 } 6606 6607 /*************************** SCB Management ***********************************/ 6608 static void 6609 ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx) 6610 { 6611 struct scb_list *free_list; 6612 struct scb_tailq *free_tailq; 6613 struct scb *first_scb; 6614 6615 scb->flags |= SCB_ON_COL_LIST; 6616 AHD_SET_SCB_COL_IDX(scb, col_idx); 6617 free_list = &ahd->scb_data.free_scb_lists[col_idx]; 6618 free_tailq = &ahd->scb_data.free_scbs; 6619 first_scb = LIST_FIRST(free_list); 6620 if (first_scb != NULL) { 6621 LIST_INSERT_AFTER(first_scb, scb, collision_links); 6622 } else { 6623 LIST_INSERT_HEAD(free_list, scb, collision_links); 6624 TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe); 6625 } 6626 } 6627 6628 static void 6629 ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb) 6630 { 6631 struct scb_list *free_list; 6632 struct scb_tailq *free_tailq; 6633 struct scb *first_scb; 6634 u_int col_idx; 6635 6636 scb->flags &= ~SCB_ON_COL_LIST; 6637 col_idx = AHD_GET_SCB_COL_IDX(ahd, scb); 6638 free_list = &ahd->scb_data.free_scb_lists[col_idx]; 6639 free_tailq = &ahd->scb_data.free_scbs; 6640 first_scb = LIST_FIRST(free_list); 6641 if (first_scb == scb) { 6642 struct scb *next_scb; 6643 6644 /* 6645 * Maintain order in the collision free 6646 * lists for fairness if this device has 6647 * other colliding tags active. 6648 */ 6649 next_scb = LIST_NEXT(scb, collision_links); 6650 if (next_scb != NULL) { 6651 TAILQ_INSERT_AFTER(free_tailq, scb, 6652 next_scb, links.tqe); 6653 } 6654 TAILQ_REMOVE(free_tailq, scb, links.tqe); 6655 } 6656 LIST_REMOVE(scb, collision_links); 6657 } 6658 6659 /* 6660 * Get a free scb. If there are none, see if we can allocate a new SCB. 
6661 */ 6662 struct scb * 6663 ahd_get_scb(struct ahd_softc *ahd, u_int col_idx) 6664 { 6665 struct scb *scb; 6666 int tries; 6667 6668 tries = 0; 6669 look_again: 6670 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { 6671 if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) { 6672 ahd_rem_col_list(ahd, scb); 6673 goto found; 6674 } 6675 } 6676 if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) { 6677 6678 if (tries++ != 0) 6679 return (NULL); 6680 ahd_alloc_scbs(ahd); 6681 goto look_again; 6682 } 6683 LIST_REMOVE(scb, links.le); 6684 if (col_idx != AHD_NEVER_COL_IDX 6685 && (scb->col_scb != NULL) 6686 && (scb->col_scb->flags & SCB_ACTIVE) == 0) { 6687 LIST_REMOVE(scb->col_scb, links.le); 6688 ahd_add_col_list(ahd, scb->col_scb, col_idx); 6689 } 6690 found: 6691 scb->flags |= SCB_ACTIVE; 6692 return (scb); 6693 } 6694 6695 /* 6696 * Return an SCB resource to the free list. 6697 */ 6698 void 6699 ahd_free_scb(struct ahd_softc *ahd, struct scb *scb) 6700 { 6701 /* Clean up for the next user */ 6702 scb->flags = SCB_FLAG_NONE; 6703 scb->hscb->control = 0; 6704 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL; 6705 6706 if (scb->col_scb == NULL) { 6707 6708 /* 6709 * No collision possible. Just free normally. 6710 */ 6711 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 6712 scb, links.le); 6713 } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) { 6714 6715 /* 6716 * The SCB we might have collided with is on 6717 * a free collision list. Put both SCBs on 6718 * the generic list. 6719 */ 6720 ahd_rem_col_list(ahd, scb->col_scb); 6721 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 6722 scb, links.le); 6723 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 6724 scb->col_scb, links.le); 6725 } else if ((scb->col_scb->flags 6726 & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE 6727 && (scb->col_scb->hscb->control & TAG_ENB) != 0) { 6728 6729 /* 6730 * The SCB we might collide with on the next allocation 6731 * is still active in a non-packetized, tagged, context. 6732 * Put us on the SCB collision list. 6733 */ 6734 ahd_add_col_list(ahd, scb, 6735 AHD_GET_SCB_COL_IDX(ahd, scb->col_scb)); 6736 } else { 6737 /* 6738 * The SCB we might collide with on the next allocation 6739 * is either active in a packetized context, or free. 6740 * Since we can't collide, put this SCB on the generic 6741 * free list. 
6742 */ 6743 LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, 6744 scb, links.le); 6745 } 6746 6747 ahd_platform_scb_free(ahd, scb); 6748 } 6749 6750 static void 6751 ahd_alloc_scbs(struct ahd_softc *ahd) 6752 { 6753 struct scb_data *scb_data; 6754 struct scb *next_scb; 6755 struct hardware_scb *hscb; 6756 struct map_node *hscb_map; 6757 struct map_node *sg_map; 6758 struct map_node *sense_map; 6759 uint8_t *segs; 6760 uint8_t *sense_data; 6761 dma_addr_t hscb_busaddr; 6762 dma_addr_t sg_busaddr; 6763 dma_addr_t sense_busaddr; 6764 int newcount; 6765 int i; 6766 6767 scb_data = &ahd->scb_data; 6768 if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC) 6769 /* Can't allocate any more */ 6770 return; 6771 6772 if (scb_data->scbs_left != 0) { 6773 int offset; 6774 6775 offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left; 6776 hscb_map = SLIST_FIRST(&scb_data->hscb_maps); 6777 hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; 6778 hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb)); 6779 } else { 6780 hscb_map = kmalloc(sizeof(*hscb_map), GFP_ATOMIC); 6781 6782 if (hscb_map == NULL) 6783 return; 6784 6785 /* Allocate the next batch of hardware SCBs */ 6786 if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, 6787 (void **)&hscb_map->vaddr, 6788 BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) { 6789 kfree(hscb_map); 6790 return; 6791 } 6792 6793 SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links); 6794 6795 ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, 6796 hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, 6797 &hscb_map->physaddr, /*flags*/0); 6798 6799 hscb = (struct hardware_scb *)hscb_map->vaddr; 6800 hscb_busaddr = hscb_map->physaddr; 6801 scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb); 6802 } 6803 6804 if (scb_data->sgs_left != 0) { 6805 int offset; 6806 6807 offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd)) 6808 - scb_data->sgs_left) * ahd_sglist_size(ahd); 6809 sg_map = SLIST_FIRST(&scb_data->sg_maps); 6810 segs = sg_map->vaddr + offset; 6811 sg_busaddr = sg_map->physaddr + offset; 6812 } else { 6813 sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC); 6814 6815 if (sg_map == NULL) 6816 return; 6817 6818 /* Allocate the next batch of S/G lists */ 6819 if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat, 6820 (void **)&sg_map->vaddr, 6821 BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) { 6822 kfree(sg_map); 6823 return; 6824 } 6825 6826 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 6827 6828 ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, 6829 sg_map->vaddr, ahd_sglist_allocsize(ahd), 6830 ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0); 6831 6832 segs = sg_map->vaddr; 6833 sg_busaddr = sg_map->physaddr; 6834 scb_data->sgs_left = 6835 ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); 6836 #ifdef AHD_DEBUG 6837 if (ahd_debug & AHD_SHOW_MEMORY) 6838 printk("Mapped SG data\n"); 6839 #endif 6840 } 6841 6842 if (scb_data->sense_left != 0) { 6843 int offset; 6844 6845 offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left); 6846 sense_map = SLIST_FIRST(&scb_data->sense_maps); 6847 sense_data = sense_map->vaddr + offset; 6848 sense_busaddr = sense_map->physaddr + offset; 6849 } else { 6850 sense_map = kmalloc(sizeof(*sense_map), GFP_ATOMIC); 6851 6852 if (sense_map == NULL) 6853 return; 6854 6855 /* Allocate the next batch of sense buffers */ 6856 if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat, 6857 (void **)&sense_map->vaddr, 6858 BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { 6859 kfree(sense_map); 6860 return; 6861 } 6862 6863 
SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links); 6864 6865 ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap, 6866 sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, 6867 &sense_map->physaddr, /*flags*/0); 6868 6869 sense_data = sense_map->vaddr; 6870 sense_busaddr = sense_map->physaddr; 6871 scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; 6872 #ifdef AHD_DEBUG 6873 if (ahd_debug & AHD_SHOW_MEMORY) 6874 printk("Mapped sense data\n"); 6875 #endif 6876 } 6877 6878 newcount = min(scb_data->sense_left, scb_data->scbs_left); 6879 newcount = min(newcount, scb_data->sgs_left); 6880 newcount = min(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs)); 6881 for (i = 0; i < newcount; i++) { 6882 struct scb_platform_data *pdata; 6883 u_int col_tag; 6884 6885 next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC); 6886 if (next_scb == NULL) 6887 break; 6888 6889 pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); 6890 if (pdata == NULL) { 6891 kfree(next_scb); 6892 break; 6893 } 6894 next_scb->platform_data = pdata; 6895 next_scb->hscb_map = hscb_map; 6896 next_scb->sg_map = sg_map; 6897 next_scb->sense_map = sense_map; 6898 next_scb->sg_list = segs; 6899 next_scb->sense_data = sense_data; 6900 next_scb->sense_busaddr = sense_busaddr; 6901 memset(hscb, 0, sizeof(*hscb)); 6902 next_scb->hscb = hscb; 6903 hscb->hscb_busaddr = ahd_htole32(hscb_busaddr); 6904 6905 /* 6906 * The sequencer always starts with the second entry. 6907 * The first entry is embedded in the scb. 6908 */ 6909 next_scb->sg_list_busaddr = sg_busaddr; 6910 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) 6911 next_scb->sg_list_busaddr 6912 += sizeof(struct ahd_dma64_seg); 6913 else 6914 next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg); 6915 next_scb->ahd_softc = ahd; 6916 next_scb->flags = SCB_FLAG_NONE; 6917 next_scb->hscb->tag = ahd_htole16(scb_data->numscbs); 6918 col_tag = scb_data->numscbs ^ 0x100; 6919 next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag); 6920 if (next_scb->col_scb != NULL) 6921 next_scb->col_scb->col_scb = next_scb; 6922 ahd_free_scb(ahd, next_scb); 6923 hscb++; 6924 hscb_busaddr += sizeof(*hscb); 6925 segs += ahd_sglist_size(ahd); 6926 sg_busaddr += ahd_sglist_size(ahd); 6927 sense_data += AHD_SENSE_BUFSIZE; 6928 sense_busaddr += AHD_SENSE_BUFSIZE; 6929 scb_data->numscbs++; 6930 scb_data->sense_left--; 6931 scb_data->scbs_left--; 6932 scb_data->sgs_left--; 6933 } 6934 } 6935 6936 void 6937 ahd_controller_info(struct ahd_softc *ahd, char *buf) 6938 { 6939 const char *speed; 6940 const char *type; 6941 int len; 6942 6943 len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]); 6944 buf += len; 6945 6946 speed = "Ultra320 "; 6947 if ((ahd->features & AHD_WIDE) != 0) { 6948 type = "Wide "; 6949 } else { 6950 type = "Single "; 6951 } 6952 len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ", 6953 speed, type, ahd->channel, ahd->our_id); 6954 buf += len; 6955 6956 sprintf(buf, "%s, %d SCBs", ahd->bus_description, 6957 ahd->scb_data.maxhscbs); 6958 } 6959 6960 static const char *channel_strings[] = { 6961 "Primary Low", 6962 "Primary High", 6963 "Secondary Low", 6964 "Secondary High" 6965 }; 6966 6967 static const char *termstat_strings[] = { 6968 "Terminated Correctly", 6969 "Over Terminated", 6970 "Under Terminated", 6971 "Not Configured" 6972 }; 6973 6974 /***************************** Timer Facilities *******************************/ 6975 static void 6976 ahd_timer_reset(struct timer_list *timer, int usec) 6977 { 6978 del_timer(timer); 6979 timer->expires = jiffies + (usec * HZ)/1000000; 
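	/*
	 * (usec * HZ) / 10^6 converts microseconds to timer ticks;
	 * with HZ == 250, for example, a 2,000,000us interval becomes
	 * 500 ticks. Note that the multiply happens before the divide,
	 * so callers must keep usec below INT_MAX / HZ.
	 */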
	add_timer(timer);
}

/*
 * Start the board, ready for normal operation
 */
int
ahd_init(struct ahd_softc *ahd)
{
	uint8_t *next_vaddr;
	dma_addr_t next_baddr;
	size_t driver_data_size;
	int i;
	int error;
	u_int warn_user;
	uint8_t current_sensing;
	uint8_t fstat;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);

	ahd->stack_size = ahd_probe_stack_size(ahd);
	ahd->saved_stack = kmalloc_array(ahd->stack_size, sizeof(uint16_t),
					 GFP_ATOMIC);
	if (ahd->saved_stack == NULL)
		return (ENOMEM);

	/*
	 * Verify that the compiler hasn't over-aggressively
	 * padded important structures.
	 */
	if (sizeof(struct hardware_scb) != 64)
		panic("Hardware SCB size is incorrect");

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0)
		ahd->flags |= AHD_SEQUENCER_DEBUG;
#endif

	/*
	 * Default to allowing initiator operations.
	 */
	ahd->flags |= AHD_INITIATORROLE;

	/*
	 * Only allow target mode features if this unit has them enabled.
	 */
	if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
		ahd->features &= ~AHD_TARGETMODE;

	ahd->init_level++;

	/*
	 * DMA tag for our command fifos and other data in system memory
	 * the card's sequencer must be able to access. For initiator
	 * roles, we need to allocate space for the qoutfifo. When providing
	 * for the target mode role, we must additionally provide space for
	 * the incoming target command fifo.
	 */
	driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo)
			 + sizeof(struct hardware_scb);
	if ((ahd->features & AHD_TARGETMODE) != 0)
		driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd);
	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
		driver_data_size += PKT_OVERRUN_BUFSIZE;
	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       driver_data_size,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &ahd->shared_data_dmat) != 0) {
		return (ENOMEM);
	}

	ahd->init_level++;

	/* Allocation of driver data */
	if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat,
			     (void **)&ahd->shared_data_map.vaddr,
			     BUS_DMA_NOWAIT,
			     &ahd->shared_data_map.dmamap) != 0) {
		return (ENOMEM);
	}

	ahd->init_level++;

	/* And permanently map it in */
	ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
			ahd->shared_data_map.vaddr, driver_data_size,
			ahd_dmamap_cb, &ahd->shared_data_map.physaddr,
			/*flags*/0);
	ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr;
	next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE];
	next_baddr = ahd->shared_data_map.physaddr
		   + AHD_QOUT_SIZE*sizeof(struct ahd_completion);
	if ((ahd->features & AHD_TARGETMODE) != 0) {
		ahd->targetcmds = (struct target_cmd *)next_vaddr;
		next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
		next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
	}

	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
		ahd->overrun_buf = next_vaddr;
		next_vaddr += PKT_OVERRUN_BUFSIZE;
		next_baddr += PKT_OVERRUN_BUFSIZE;
	}

	/*
	 * We need one SCB to serve as the "next SCB". Since the
	 * tag identifier in this SCB will never be used, there is
	 * no point in using a valid HSCB tag from an SCB pulled from
	 * the standard free pool. So, we allocate this "sentinel"
	 * specially from the DMA safe memory chunk used for the QOUTFIFO.
	 */
	ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr;
	ahd->next_queued_hscb_map = &ahd->shared_data_map;
	ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr);

	ahd->init_level++;

	/* Allocate SCB data now that buffer_dmat is initialized */
	if (ahd_init_scbdata(ahd) != 0)
		return (ENOMEM);

	if ((ahd->flags & AHD_INITIATORROLE) == 0)
		ahd->flags &= ~AHD_RESET_BUS_A;

	/*
	 * Before committing these settings to the chip, give
	 * the OSM one last chance to modify our configuration.
	 */
	ahd_platform_init(ahd);

	/* Bring up the chip. */
	ahd_chip_init(ahd);

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);

	if ((ahd->flags & AHD_CURRENT_SENSING) == 0)
		goto init_done;

	/*
	 * Verify termination based on current draw and
	 * warn user if the bus is over/under terminated.
	 */
	error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
				   CURSENSE_ENB);
	if (error != 0) {
		printk("%s: current sensing timeout 1\n", ahd_name(ahd));
		goto init_done;
	}
	for (i = 20, fstat = FLX_FSTAT_BUSY;
	     (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
		error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
		if (error != 0) {
			printk("%s: current sensing timeout 2\n",
			       ahd_name(ahd));
			goto init_done;
		}
	}
	if (i == 0) {
		printk("%s: Timed out during current-sensing test\n",
		       ahd_name(ahd));
		goto init_done;
	}

	/* Latch Current Sensing status. */
	error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
	if (error != 0) {
		printk("%s: current sensing timeout 3\n", ahd_name(ahd));
		goto init_done;
	}

	/* Disable current sensing. */
	ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
		printk("%s: current_sensing == 0x%x\n",
		       ahd_name(ahd), current_sensing);
	}
#endif
	warn_user = 0;
	for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) {
		u_int term_stat;

		term_stat = (current_sensing & FLX_CSTAT_MASK);
		switch (term_stat) {
		case FLX_CSTAT_OVER:
		case FLX_CSTAT_UNDER:
			warn_user++;
			fallthrough;
		case FLX_CSTAT_INVALID:
		case FLX_CSTAT_OKAY:
			if (warn_user == 0 && bootverbose == 0)
				break;
			printk("%s: %s Channel %s\n", ahd_name(ahd),
			       channel_strings[i], termstat_strings[term_stat]);
			break;
		}
	}
	if (warn_user) {
		printk("%s: WARNING. Termination is not configured correctly.\n"
		       "%s: WARNING. SCSI bus operations may FAIL.\n",
		       ahd_name(ahd), ahd_name(ahd));
	}
init_done:
	ahd_restart(ahd);
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US);
	return (0);
}

/*
 * (Re)initialize chip state after a chip reset.
7196 */ 7197 static void 7198 ahd_chip_init(struct ahd_softc *ahd) 7199 { 7200 uint32_t busaddr; 7201 u_int sxfrctl1; 7202 u_int scsiseq_template; 7203 u_int wait; 7204 u_int i; 7205 u_int target; 7206 7207 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 7208 /* 7209 * Take the LED out of diagnostic mode 7210 */ 7211 ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON)); 7212 7213 /* 7214 * Return HS_MAILBOX to its default value. 7215 */ 7216 ahd->hs_mailbox = 0; 7217 ahd_outb(ahd, HS_MAILBOX, 0); 7218 7219 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */ 7220 ahd_outb(ahd, IOWNID, ahd->our_id); 7221 ahd_outb(ahd, TOWNID, ahd->our_id); 7222 sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0; 7223 sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0; 7224 if ((ahd->bugs & AHD_LONG_SETIMO_BUG) 7225 && (ahd->seltime != STIMESEL_MIN)) { 7226 /* 7227 * The selection timer duration is twice as long 7228 * as it should be. Halve it by adding "1" to 7229 * the user specified setting. 7230 */ 7231 sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ; 7232 } else { 7233 sxfrctl1 |= ahd->seltime; 7234 } 7235 7236 ahd_outb(ahd, SXFRCTL0, DFON); 7237 ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN); 7238 ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 7239 7240 /* 7241 * Now that termination is set, wait for up 7242 * to 500ms for our transceivers to settle. If 7243 * the adapter does not have a cable attached, 7244 * the transceivers may never settle, so don't 7245 * complain if we fail here. 7246 */ 7247 for (wait = 10000; 7248 (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; 7249 wait--) 7250 ahd_delay(100); 7251 7252 /* Clear any false bus resets due to the transceivers settling */ 7253 ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); 7254 ahd_outb(ahd, CLRINT, CLRSCSIINT); 7255 7256 /* Initialize mode specific S/G state. */ 7257 for (i = 0; i < 2; i++) { 7258 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); 7259 ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); 7260 ahd_outb(ahd, SG_STATE, 0); 7261 ahd_outb(ahd, CLRSEQINTSRC, 0xFF); 7262 ahd_outb(ahd, SEQIMODE, 7263 ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT 7264 |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD); 7265 } 7266 7267 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 7268 ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN); 7269 ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75); 7270 ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN); 7271 ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR); 7272 if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { 7273 ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE); 7274 } else { 7275 ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE); 7276 } 7277 ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS); 7278 if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX) 7279 /* 7280 * Do not issue a target abort when a split completion 7281 * error occurs. Let our PCIX interrupt handler deal 7282 * with it instead. H2A4 Razor #625 7283 */ 7284 ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS); 7285 7286 if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0) 7287 ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER); 7288 7289 /* 7290 * Tweak IOCELL settings. 
7291 */ 7292 if ((ahd->flags & AHD_HP_BOARD) != 0) { 7293 for (i = 0; i < NUMDSPS; i++) { 7294 ahd_outb(ahd, DSPSELECT, i); 7295 ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT); 7296 } 7297 #ifdef AHD_DEBUG 7298 if ((ahd_debug & AHD_SHOW_MISC) != 0) 7299 printk("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd), 7300 WRTBIASCTL_HP_DEFAULT); 7301 #endif 7302 } 7303 ahd_setup_iocell_workaround(ahd); 7304 7305 /* 7306 * Enable LQI Manager interrupts. 7307 */ 7308 ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT 7309 | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI 7310 | ENLQIOVERI_LQ|ENLQIOVERI_NLQ); 7311 ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC); 7312 /* 7313 * We choose to have the sequencer catch LQOPHCHGINPKT errors 7314 * manually for the command phase at the start of a packetized 7315 * selection case. ENLQOBUSFREE should be made redundant by 7316 * the BUSFREE interrupt, but it seems that some LQOBUSFREE 7317 * events fail to assert the BUSFREE interrupt so we must 7318 * also enable LQOBUSFREE interrupts. 7319 */ 7320 ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE); 7321 7322 /* 7323 * Setup sequencer interrupt handlers. 7324 */ 7325 ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr)); 7326 ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr)); 7327 7328 /* 7329 * Setup SCB Offset registers. 7330 */ 7331 if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { 7332 ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, 7333 pkt_long_lun)); 7334 } else { 7335 ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun)); 7336 } 7337 ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len)); 7338 ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute)); 7339 ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management)); 7340 ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb, 7341 shared_data.idata.cdb)); 7342 ahd_outb(ahd, QNEXTPTR, 7343 offsetof(struct hardware_scb, next_hscb_busaddr)); 7344 ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET); 7345 ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control)); 7346 if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { 7347 ahd_outb(ahd, LUNLEN, 7348 sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1); 7349 } else { 7350 ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN); 7351 } 7352 ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1); 7353 ahd_outb(ahd, MAXCMD, 0xFF); 7354 ahd_outb(ahd, SCBAUTOPTR, 7355 AUSCBPTR_EN | offsetof(struct hardware_scb, tag)); 7356 7357 /* We haven't been enabled for target mode yet. */ 7358 ahd_outb(ahd, MULTARGID, 0); 7359 ahd_outb(ahd, MULTARGID + 1, 0); 7360 7361 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 7362 /* Initialize the negotiation table. */ 7363 if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) { 7364 /* 7365 * Clear the spare bytes in the neg table to avoid 7366 * spurious parity errors. 
7367 */ 7368 for (target = 0; target < AHD_NUM_TARGETS; target++) { 7369 ahd_outb(ahd, NEGOADDR, target); 7370 ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0); 7371 for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++) 7372 ahd_outb(ahd, ANNEXDAT, 0); 7373 } 7374 } 7375 for (target = 0; target < AHD_NUM_TARGETS; target++) { 7376 struct ahd_devinfo devinfo; 7377 struct ahd_initiator_tinfo *tinfo; 7378 struct ahd_tmode_tstate *tstate; 7379 7380 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, 7381 target, &tstate); 7382 ahd_compile_devinfo(&devinfo, ahd->our_id, 7383 target, CAM_LUN_WILDCARD, 7384 'A', ROLE_INITIATOR); 7385 ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); 7386 } 7387 7388 ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR); 7389 ahd_outb(ahd, CLRINT, CLRSCSIINT); 7390 7391 #ifdef NEEDS_MORE_TESTING 7392 /* 7393 * Always enable abort on incoming L_Qs if this feature is 7394 * supported. We use this to catch invalid SCB references. 7395 */ 7396 if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) 7397 ahd_outb(ahd, LQCTL1, ABORTPENDING); 7398 else 7399 #endif 7400 ahd_outb(ahd, LQCTL1, 0); 7401 7402 /* All of our queues are empty */ 7403 ahd->qoutfifonext = 0; 7404 ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID; 7405 ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID); 7406 for (i = 0; i < AHD_QOUT_SIZE; i++) 7407 ahd->qoutfifo[i].valid_tag = 0; 7408 ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD); 7409 7410 ahd->qinfifonext = 0; 7411 for (i = 0; i < AHD_QIN_SIZE; i++) 7412 ahd->qinfifo[i] = SCB_LIST_NULL; 7413 7414 if ((ahd->features & AHD_TARGETMODE) != 0) { 7415 /* All target command blocks start out invalid. */ 7416 for (i = 0; i < AHD_TMODE_CMDS; i++) 7417 ahd->targetcmds[i].cmd_valid = 0; 7418 ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD); 7419 ahd->tqinfifonext = 1; 7420 ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1); 7421 ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); 7422 } 7423 7424 /* Initialize Scratch Ram. */ 7425 ahd_outb(ahd, SEQ_FLAGS, 0); 7426 ahd_outb(ahd, SEQ_FLAGS2, 0); 7427 7428 /* We don't have any waiting selections */ 7429 ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL); 7430 ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL); 7431 ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL); 7432 ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF); 7433 for (i = 0; i < AHD_NUM_TARGETS; i++) 7434 ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL); 7435 7436 /* 7437 * Nobody is waiting to be DMAed into the QOUTFIFO. 7438 */ 7439 ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); 7440 ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL); 7441 ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); 7442 ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); 7443 ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); 7444 7445 /* 7446 * The Freeze Count is 0. 7447 */ 7448 ahd->qfreeze_cnt = 0; 7449 ahd_outw(ahd, QFREEZE_COUNT, 0); 7450 ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0); 7451 7452 /* 7453 * Tell the sequencer where it can find our arrays in memory. 7454 */ 7455 busaddr = ahd->shared_data_map.physaddr; 7456 ahd_outl(ahd, SHARED_DATA_ADDR, busaddr); 7457 ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr); 7458 7459 /* 7460 * Setup the allowed SCSI Sequences based on operational mode. 7461 * If we are a target, we'll enable select in operations once 7462 * we've had a lun enabled. 7463 */ 7464 scsiseq_template = ENAUTOATNP; 7465 if ((ahd->flags & AHD_INITIATORROLE) != 0) 7466 scsiseq_template |= ENRSELI; 7467 ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template); 7468 7469 /* There are no busy SCBs yet. 
*/ 7470 for (target = 0; target < AHD_NUM_TARGETS; target++) { 7471 int lun; 7472 7473 for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++) 7474 ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun)); 7475 } 7476 7477 /* 7478 * Initialize the group code to command length table. 7479 * Vendor Unique codes are set to 0 so we only capture 7480 * the first byte of the cdb. These can be overridden 7481 * when target mode is enabled. 7482 */ 7483 ahd_outb(ahd, CMDSIZE_TABLE, 5); 7484 ahd_outb(ahd, CMDSIZE_TABLE + 1, 9); 7485 ahd_outb(ahd, CMDSIZE_TABLE + 2, 9); 7486 ahd_outb(ahd, CMDSIZE_TABLE + 3, 0); 7487 ahd_outb(ahd, CMDSIZE_TABLE + 4, 15); 7488 ahd_outb(ahd, CMDSIZE_TABLE + 5, 11); 7489 ahd_outb(ahd, CMDSIZE_TABLE + 6, 0); 7490 ahd_outb(ahd, CMDSIZE_TABLE + 7, 0); 7491 7492 /* Tell the sequencer of our initial queue positions */ 7493 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 7494 ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512); 7495 ahd->qinfifonext = 0; 7496 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); 7497 ahd_set_hescb_qoff(ahd, 0); 7498 ahd_set_snscb_qoff(ahd, 0); 7499 ahd_set_sescb_qoff(ahd, 0); 7500 ahd_set_sdscb_qoff(ahd, 0); 7501 7502 /* 7503 * Tell the sequencer which SCB will be the next one it receives. 7504 */ 7505 busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); 7506 ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); 7507 7508 /* 7509 * Default to coalescing disabled. 7510 */ 7511 ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0); 7512 ahd_outw(ahd, CMDS_PENDING, 0); 7513 ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer, 7514 ahd->int_coalescing_maxcmds, 7515 ahd->int_coalescing_mincmds); 7516 ahd_enable_coalescing(ahd, FALSE); 7517 7518 ahd_loadseq(ahd); 7519 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 7520 7521 if (ahd->features & AHD_AIC79XXB_SLOWCRC) { 7522 u_int negodat3 = ahd_inb(ahd, NEGCONOPTS); 7523 7524 negodat3 |= ENSLOWCRC; 7525 ahd_outb(ahd, NEGCONOPTS, negodat3); 7526 negodat3 = ahd_inb(ahd, NEGCONOPTS); 7527 if (!(negodat3 & ENSLOWCRC)) 7528 printk("aic79xx: failed to set the SLOWCRC bit\n"); 7529 else 7530 printk("aic79xx: SLOWCRC bit set\n"); 7531 } 7532 } 7533 7534 /* 7535 * Setup default device and controller settings. 7536 * This should only be called if our probe has 7537 * determined that no configuration data is available. 7538 */ 7539 int 7540 ahd_default_config(struct ahd_softc *ahd) 7541 { 7542 int targ; 7543 7544 ahd->our_id = 7; 7545 7546 /* 7547 * Allocate a tstate to house information for our 7548 * initiator presence on the bus as well as the user 7549 * data for any target mode initiator. 7550 */ 7551 if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { 7552 printk("%s: unable to allocate ahd_tmode_tstate. " 7553 "Failing attach\n", ahd_name(ahd)); 7554 return (ENOMEM); 7555 } 7556 7557 for (targ = 0; targ < AHD_NUM_TARGETS; targ++) { 7558 struct ahd_devinfo devinfo; 7559 struct ahd_initiator_tinfo *tinfo; 7560 struct ahd_tmode_tstate *tstate; 7561 uint16_t target_mask; 7562 7563 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, 7564 targ, &tstate); 7565 /* 7566 * We support SPC2 and SPI4. 
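		 * (Both are expressed below as version 4 in the user
		 * tinfo; the goal/curr settings start conservatively at
		 * version 2 and are raised during negotiation.)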
7567 */ 7568 tinfo->user.protocol_version = 4; 7569 tinfo->user.transport_version = 4; 7570 7571 target_mask = 0x01 << targ; 7572 ahd->user_discenable |= target_mask; 7573 tstate->discenable |= target_mask; 7574 ahd->user_tagenable |= target_mask; 7575 #ifdef AHD_FORCE_160 7576 tinfo->user.period = AHD_SYNCRATE_DT; 7577 #else 7578 tinfo->user.period = AHD_SYNCRATE_160; 7579 #endif 7580 tinfo->user.offset = MAX_OFFSET; 7581 tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM 7582 | MSG_EXT_PPR_WR_FLOW 7583 | MSG_EXT_PPR_HOLD_MCS 7584 | MSG_EXT_PPR_IU_REQ 7585 | MSG_EXT_PPR_QAS_REQ 7586 | MSG_EXT_PPR_DT_REQ; 7587 if ((ahd->features & AHD_RTI) != 0) 7588 tinfo->user.ppr_options |= MSG_EXT_PPR_RTI; 7589 7590 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 7591 7592 /* 7593 * Start out Async/Narrow/Untagged and with 7594 * conservative protocol support. 7595 */ 7596 tinfo->goal.protocol_version = 2; 7597 tinfo->goal.transport_version = 2; 7598 tinfo->curr.protocol_version = 2; 7599 tinfo->curr.transport_version = 2; 7600 ahd_compile_devinfo(&devinfo, ahd->our_id, 7601 targ, CAM_LUN_WILDCARD, 7602 'A', ROLE_INITIATOR); 7603 tstate->tagenable &= ~target_mask; 7604 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 7605 AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); 7606 ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, 7607 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, 7608 /*paused*/TRUE); 7609 } 7610 return (0); 7611 } 7612 7613 /* 7614 * Parse device configuration information. 7615 */ 7616 int 7617 ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc) 7618 { 7619 int targ; 7620 int max_targ; 7621 7622 max_targ = sc->max_targets & CFMAXTARG; 7623 ahd->our_id = sc->brtime_id & CFSCSIID; 7624 7625 /* 7626 * Allocate a tstate to house information for our 7627 * initiator presence on the bus as well as the user 7628 * data for any target mode initiator. 7629 */ 7630 if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { 7631 printk("%s: unable to allocate ahd_tmode_tstate. " 7632 "Failing attach\n", ahd_name(ahd)); 7633 return (ENOMEM); 7634 } 7635 7636 for (targ = 0; targ < max_targ; targ++) { 7637 struct ahd_devinfo devinfo; 7638 struct ahd_initiator_tinfo *tinfo; 7639 struct ahd_transinfo *user_tinfo; 7640 struct ahd_tmode_tstate *tstate; 7641 uint16_t target_mask; 7642 7643 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, 7644 targ, &tstate); 7645 user_tinfo = &tinfo->user; 7646 7647 /* 7648 * We support SPC2 and SPI4. 7649 */ 7650 tinfo->user.protocol_version = 4; 7651 tinfo->user.transport_version = 4; 7652 7653 target_mask = 0x01 << targ; 7654 ahd->user_discenable &= ~target_mask; 7655 tstate->discenable &= ~target_mask; 7656 ahd->user_tagenable &= ~target_mask; 7657 if (sc->device_flags[targ] & CFDISC) { 7658 tstate->discenable |= target_mask; 7659 ahd->user_discenable |= target_mask; 7660 ahd->user_tagenable |= target_mask; 7661 } else { 7662 /* 7663 * Cannot be packetized without disconnection. 
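 *
 * (Packetized operation moves commands and status in information
 * units, which requires the target to disconnect between L_Qs, so a
 * device that is denied disconnection privileges in the SEEPROM
 * cannot run packetized; the flag is cleared here rather than
 * attempting an IU negotiation we could not honor.)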
7664 */ 7665 sc->device_flags[targ] &= ~CFPACKETIZED; 7666 } 7667 7668 user_tinfo->ppr_options = 0; 7669 user_tinfo->period = (sc->device_flags[targ] & CFXFER); 7670 if (user_tinfo->period < CFXFER_ASYNC) { 7671 if (user_tinfo->period <= AHD_PERIOD_10MHz) 7672 user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ; 7673 user_tinfo->offset = MAX_OFFSET; 7674 } else { 7675 user_tinfo->offset = 0; 7676 user_tinfo->period = AHD_ASYNC_XFER_PERIOD; 7677 } 7678 #ifdef AHD_FORCE_160 7679 if (user_tinfo->period <= AHD_SYNCRATE_160) 7680 user_tinfo->period = AHD_SYNCRATE_DT; 7681 #endif 7682 7683 if ((sc->device_flags[targ] & CFPACKETIZED) != 0) { 7684 user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM 7685 | MSG_EXT_PPR_WR_FLOW 7686 | MSG_EXT_PPR_HOLD_MCS 7687 | MSG_EXT_PPR_IU_REQ; 7688 if ((ahd->features & AHD_RTI) != 0) 7689 user_tinfo->ppr_options |= MSG_EXT_PPR_RTI; 7690 } 7691 7692 if ((sc->device_flags[targ] & CFQAS) != 0) 7693 user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ; 7694 7695 if ((sc->device_flags[targ] & CFWIDEB) != 0) 7696 user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT; 7697 else 7698 user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT; 7699 #ifdef AHD_DEBUG 7700 if ((ahd_debug & AHD_SHOW_MISC) != 0) 7701 printk("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width, 7702 user_tinfo->period, user_tinfo->offset, 7703 user_tinfo->ppr_options); 7704 #endif 7705 /* 7706 * Start out Async/Narrow/Untagged and with 7707 * conservative protocol support. 7708 */ 7709 tstate->tagenable &= ~target_mask; 7710 tinfo->goal.protocol_version = 2; 7711 tinfo->goal.transport_version = 2; 7712 tinfo->curr.protocol_version = 2; 7713 tinfo->curr.transport_version = 2; 7714 ahd_compile_devinfo(&devinfo, ahd->our_id, 7715 targ, CAM_LUN_WILDCARD, 7716 'A', ROLE_INITIATOR); 7717 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 7718 AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); 7719 ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, 7720 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, 7721 /*paused*/TRUE); 7722 } 7723 7724 ahd->flags &= ~AHD_SPCHK_ENB_A; 7725 if (sc->bios_control & CFSPARITY) 7726 ahd->flags |= AHD_SPCHK_ENB_A; 7727 7728 ahd->flags &= ~AHD_RESET_BUS_A; 7729 if (sc->bios_control & CFRESETB) 7730 ahd->flags |= AHD_RESET_BUS_A; 7731 7732 ahd->flags &= ~AHD_EXTENDED_TRANS_A; 7733 if (sc->bios_control & CFEXTEND) 7734 ahd->flags |= AHD_EXTENDED_TRANS_A; 7735 7736 ahd->flags &= ~AHD_BIOS_ENABLED; 7737 if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED) 7738 ahd->flags |= AHD_BIOS_ENABLED; 7739 7740 ahd->flags &= ~AHD_STPWLEVEL_A; 7741 if ((sc->adapter_control & CFSTPWLEVEL) != 0) 7742 ahd->flags |= AHD_STPWLEVEL_A; 7743 7744 return (0); 7745 } 7746 7747 /* 7748 * Parse device configuration information. 
7749 */ 7750 int 7751 ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd) 7752 { 7753 int error; 7754 7755 error = ahd_verify_vpd_cksum(vpd); 7756 if (error == 0) 7757 return (EINVAL); 7758 if ((vpd->bios_flags & VPDBOOTHOST) != 0) 7759 ahd->flags |= AHD_BOOT_CHANNEL; 7760 return (0); 7761 } 7762 7763 void 7764 ahd_intr_enable(struct ahd_softc *ahd, int enable) 7765 { 7766 u_int hcntrl; 7767 7768 hcntrl = ahd_inb(ahd, HCNTRL); 7769 hcntrl &= ~INTEN; 7770 ahd->pause &= ~INTEN; 7771 ahd->unpause &= ~INTEN; 7772 if (enable) { 7773 hcntrl |= INTEN; 7774 ahd->pause |= INTEN; 7775 ahd->unpause |= INTEN; 7776 } 7777 ahd_outb(ahd, HCNTRL, hcntrl); 7778 } 7779 7780 static void 7781 ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, 7782 u_int mincmds) 7783 { 7784 if (timer > AHD_TIMER_MAX_US) 7785 timer = AHD_TIMER_MAX_US; 7786 ahd->int_coalescing_timer = timer; 7787 7788 if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX) 7789 maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX; 7790 if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX) 7791 mincmds = AHD_INT_COALESCING_MINCMDS_MAX; 7792 ahd->int_coalescing_maxcmds = maxcmds; 7793 ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK); 7794 ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds); 7795 ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds); 7796 } 7797 7798 static void 7799 ahd_enable_coalescing(struct ahd_softc *ahd, int enable) 7800 { 7801 7802 ahd->hs_mailbox &= ~ENINT_COALESCE; 7803 if (enable) 7804 ahd->hs_mailbox |= ENINT_COALESCE; 7805 ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox); 7806 ahd_flush_device_writes(ahd); 7807 ahd_run_qoutfifo(ahd); 7808 } 7809 7810 /* 7811 * Ensure that the card is paused in a location 7812 * outside of all critical sections and that all 7813 * pending work is completed prior to returning. 7814 * This routine should only be called from outside 7815 * an interrupt context. 7816 */ 7817 void 7818 ahd_pause_and_flushwork(struct ahd_softc *ahd) 7819 { 7820 u_int intstat; 7821 u_int maxloops; 7822 7823 maxloops = 1000; 7824 ahd->flags |= AHD_ALL_INTERRUPTS; 7825 ahd_pause(ahd); 7826 /* 7827 * Freeze the outgoing selections. We do this only 7828 * until we are safely paused without further selections 7829 * pending. 7830 */ 7831 ahd->qfreeze_cnt--; 7832 ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); 7833 ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN); 7834 do { 7835 7836 ahd_unpause(ahd); 7837 /* 7838 * Give the sequencer some time to service 7839 * any active selections. 
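 *
 * (Shape of the loop below, for reference: each pass unpauses the
 * chip, waits 500us, services any posted interrupt, and re-pauses.
 * We only fall out once maxloops expires, or once no interrupt is
 * pending, ENSELO is clear, and no selection is in flight
 * (SELDO/SELINGO); the intstat == 0xFF test catches a removable card
 * that has gone away.)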
*/ 7841 ahd_delay(500); 7842 7843 ahd_intr(ahd); 7844 ahd_pause(ahd); 7845 intstat = ahd_inb(ahd, INTSTAT); 7846 if ((intstat & INT_PEND) == 0) { 7847 ahd_clear_critical_section(ahd); 7848 intstat = ahd_inb(ahd, INTSTAT); 7849 } 7850 } while (--maxloops 7851 && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0) 7852 && ((intstat & INT_PEND) != 0 7853 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 7854 || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)); 7855 7856 if (maxloops == 0) { 7857 printk("Infinite interrupt loop, INTSTAT = %x\n", 7858 ahd_inb(ahd, INTSTAT)); 7859 } 7860 ahd->qfreeze_cnt++; 7861 ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); 7862 7863 ahd_flush_qoutfifo(ahd); 7864 7865 ahd->flags &= ~AHD_ALL_INTERRUPTS; 7866 } 7867 7868 int __maybe_unused 7869 ahd_suspend(struct ahd_softc *ahd) 7870 { 7871 ahd_pause_and_flushwork(ahd); 7872 7873 if (LIST_FIRST(&ahd->pending_scbs) != NULL) { 7874 ahd_unpause(ahd); 7875 return (EBUSY); 7876 } 7877 ahd_shutdown(ahd); 7878 return (0); 7879 } 7880 7881 void __maybe_unused 7882 ahd_resume(struct ahd_softc *ahd) 7883 { 7884 ahd_reset(ahd, /*reinit*/TRUE); 7885 ahd_intr_enable(ahd, TRUE); 7886 ahd_restart(ahd); 7887 } 7888 7889 /************************** Busy Target Table *********************************/ 7890 /* 7891 * Set SCBPTR to the SCB that contains the busy 7892 * table entry for TCL. Return the offset into 7893 * the SCB that contains the entry for TCL. 7894 * saved_scbid is dereferenced and set to the 7895 * scbid that should be restored once manipulation 7896 * of the TCL entry is complete. 7897 */ 7898 static inline u_int 7899 ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl) 7900 { 7901 /* 7902 * Index to the SCB that contains the busy entry. 7903 */ 7904 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 7905 *saved_scbid = ahd_get_scbptr(ahd); 7906 ahd_set_scbptr(ahd, TCL_LUN(tcl) 7907 | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4)); 7908 7909 /* 7910 * And now calculate the SCB offset to the entry. 7911 * Each entry is 2 bytes wide, hence the 7912 * multiplication by 2. 7913 */ 7914 return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS); 7915 } 7916 7917 /* 7918 * Return the untagged transaction id for a given target/channel/lun.
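 *
 * (Worked example: for TCL_TARGET_OFFSET(tcl) == 5 and
 * TCL_LUN(tcl) == 3, ahd_index_busy_tcl() above selects
 * SCBPTR = 3 | ((5 & 0xC) << 4) = 0x43 and returns
 * ((5 & 0x3) << 1) + SCB_DISCONNECTED_LISTS, i.e. the second 16-bit
 * entry of that SCB's disconnected-list area, which
 * ahd_find_busy_tcl() below then reads from SCB RAM.)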
7919 */ 7920 static u_int 7921 ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl) 7922 { 7923 u_int scbid; 7924 u_int scb_offset; 7925 u_int saved_scbptr; 7926 7927 scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); 7928 scbid = ahd_inw_scbram(ahd, scb_offset); 7929 ahd_set_scbptr(ahd, saved_scbptr); 7930 return (scbid); 7931 } 7932 7933 static void 7934 ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid) 7935 { 7936 u_int scb_offset; 7937 u_int saved_scbptr; 7938 7939 scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); 7940 ahd_outw(ahd, scb_offset, scbid); 7941 ahd_set_scbptr(ahd, saved_scbptr); 7942 } 7943 7944 /************************** SCB and SCB queue management **********************/ 7945 static int 7946 ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, 7947 char channel, int lun, u_int tag, role_t role) 7948 { 7949 int targ = SCB_GET_TARGET(ahd, scb); 7950 char chan = SCB_GET_CHANNEL(ahd, scb); 7951 int slun = SCB_GET_LUN(scb); 7952 int match; 7953 7954 match = ((chan == channel) || (channel == ALL_CHANNELS)); 7955 if (match != 0) 7956 match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); 7957 if (match != 0) 7958 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); 7959 if (match != 0) { 7960 #ifdef AHD_TARGET_MODE 7961 int group; 7962 7963 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); 7964 if (role == ROLE_INITIATOR) { 7965 match = (group != XPT_FC_GROUP_TMODE) 7966 && ((tag == SCB_GET_TAG(scb)) 7967 || (tag == SCB_LIST_NULL)); 7968 } else if (role == ROLE_TARGET) { 7969 match = (group == XPT_FC_GROUP_TMODE) 7970 && ((tag == scb->io_ctx->csio.tag_id) 7971 || (tag == SCB_LIST_NULL)); 7972 } 7973 #else /* !AHD_TARGET_MODE */ 7974 match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); 7975 #endif /* AHD_TARGET_MODE */ 7976 } 7977 7978 return match; 7979 } 7980 7981 static void 7982 ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb) 7983 { 7984 int target; 7985 char channel; 7986 int lun; 7987 7988 target = SCB_GET_TARGET(ahd, scb); 7989 lun = SCB_GET_LUN(scb); 7990 channel = SCB_GET_CHANNEL(ahd, scb); 7991 7992 ahd_search_qinfifo(ahd, target, channel, lun, 7993 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, 7994 CAM_REQUEUE_REQ, SEARCH_COMPLETE); 7995 7996 ahd_platform_freeze_devq(ahd, scb); 7997 } 7998 7999 void 8000 ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb) 8001 { 8002 struct scb *prev_scb; 8003 ahd_mode_state saved_modes; 8004 8005 saved_modes = ahd_save_modes(ahd); 8006 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 8007 prev_scb = NULL; 8008 if (ahd_qinfifo_count(ahd) != 0) { 8009 u_int prev_tag; 8010 u_int prev_pos; 8011 8012 prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1); 8013 prev_tag = ahd->qinfifo[prev_pos]; 8014 prev_scb = ahd_lookup_scb(ahd, prev_tag); 8015 } 8016 ahd_qinfifo_requeue(ahd, prev_scb, scb); 8017 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); 8018 ahd_restore_modes(ahd, saved_modes); 8019 } 8020 8021 static void 8022 ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, 8023 struct scb *scb) 8024 { 8025 if (prev_scb == NULL) { 8026 uint32_t busaddr; 8027 8028 busaddr = ahd_le32toh(scb->hscb->hscb_busaddr); 8029 ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); 8030 } else { 8031 prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; 8032 ahd_sync_scb(ahd, prev_scb, 8033 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 8034 } 8035 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); 8036 ahd->qinfifonext++; 8037 scb->hscb->next_hscb_busaddr = 
ahd->next_queued_hscb->hscb_busaddr; 8038 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 8039 } 8040 8041 static int 8042 ahd_qinfifo_count(struct ahd_softc *ahd) 8043 { 8044 u_int qinpos; 8045 u_int wrap_qinpos; 8046 u_int wrap_qinfifonext; 8047 8048 AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); 8049 qinpos = ahd_get_snscb_qoff(ahd); 8050 wrap_qinpos = AHD_QIN_WRAP(qinpos); 8051 wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext); 8052 if (wrap_qinfifonext >= wrap_qinpos) 8053 return (wrap_qinfifonext - wrap_qinpos); 8054 else 8055 return (wrap_qinfifonext 8056 + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos); 8057 } 8058 8059 static void 8060 ahd_reset_cmds_pending(struct ahd_softc *ahd) 8061 { 8062 struct scb *scb; 8063 ahd_mode_state saved_modes; 8064 u_int pending_cmds; 8065 8066 saved_modes = ahd_save_modes(ahd); 8067 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 8068 8069 /* 8070 * Don't count any commands as outstanding that the 8071 * sequencer has already marked for completion. 8072 */ 8073 ahd_flush_qoutfifo(ahd); 8074 8075 pending_cmds = 0; 8076 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 8077 pending_cmds++; 8078 } 8079 ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd)); 8080 ahd_restore_modes(ahd, saved_modes); 8081 ahd->flags &= ~AHD_UPDATE_PEND_CMDS; 8082 } 8083 8084 static void 8085 ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) 8086 { 8087 cam_status ostat; 8088 cam_status cstat; 8089 8090 ostat = ahd_get_transaction_status(scb); 8091 if (ostat == CAM_REQ_INPROG) 8092 ahd_set_transaction_status(scb, status); 8093 cstat = ahd_get_transaction_status(scb); 8094 if (cstat != CAM_REQ_CMP) 8095 ahd_freeze_scb(scb); 8096 ahd_done(ahd, scb); 8097 } 8098 8099 int 8100 ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, 8101 int lun, u_int tag, role_t role, uint32_t status, 8102 ahd_search_action action) 8103 { 8104 struct scb *scb; 8105 struct scb *mk_msg_scb; 8106 struct scb *prev_scb; 8107 ahd_mode_state saved_modes; 8108 u_int qinstart; 8109 u_int qinpos; 8110 u_int qintail; 8111 u_int tid_next; 8112 u_int tid_prev; 8113 u_int scbid; 8114 u_int seq_flags2; 8115 u_int savedscbptr; 8116 uint32_t busaddr; 8117 int found; 8118 int targets; 8119 8120 /* Must be in CCHAN mode */ 8121 saved_modes = ahd_save_modes(ahd); 8122 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 8123 8124 /* 8125 * Halt any pending SCB DMA. The sequencer will reinitiate 8126 * this dma if the qinfifo is not empty once we unpause. 8127 */ 8128 if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR)) 8129 == (CCARREN|CCSCBEN|CCSCBDIR)) { 8130 ahd_outb(ahd, CCSCBCTL, 8131 ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN)); 8132 while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0) 8133 ; 8134 } 8135 /* Determine sequencer's position in the qinfifo. */ 8136 qintail = AHD_QIN_WRAP(ahd->qinfifonext); 8137 qinstart = ahd_get_snscb_qoff(ahd); 8138 qinpos = AHD_QIN_WRAP(qinstart); 8139 found = 0; 8140 prev_scb = NULL; 8141 8142 if (action == SEARCH_PRINT) { 8143 printk("qinstart = %d qinfifonext = %d\nQINFIFO:", 8144 qinstart, ahd->qinfifonext); 8145 } 8146 8147 /* 8148 * Start with an empty queue. Entries that are not chosen 8149 * for removal will be re-added to the queue as we go. 
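 *
 * (Rewinding qinfifonext to qinstart and re-pointing
 * NEXT_QUEUED_SCB_ADDR at the generic next_queued_hscb makes the
 * queue look empty to the sequencer; each ahd_qinfifo_requeue() call
 * below then relinks the kept SCBs, so the survivors end up
 * contiguous and in their original order.)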
8150 */ 8151 ahd->qinfifonext = qinstart; 8152 busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); 8153 ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); 8154 8155 while (qinpos != qintail) { 8156 scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]); 8157 if (scb == NULL) { 8158 printk("qinpos = %d, SCB index = %d\n", 8159 qinpos, ahd->qinfifo[qinpos]); 8160 panic("Loop 1\n"); 8161 } 8162 8163 if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) { 8164 /* 8165 * We found an scb that needs to be acted on. 8166 */ 8167 found++; 8168 switch (action) { 8169 case SEARCH_COMPLETE: 8170 if ((scb->flags & SCB_ACTIVE) == 0) 8171 printk("Inactive SCB in qinfifo\n"); 8172 ahd_done_with_status(ahd, scb, status); 8173 fallthrough; 8174 case SEARCH_REMOVE: 8175 break; 8176 case SEARCH_PRINT: 8177 printk(" 0x%x", ahd->qinfifo[qinpos]); 8178 fallthrough; 8179 case SEARCH_COUNT: 8180 ahd_qinfifo_requeue(ahd, prev_scb, scb); 8181 prev_scb = scb; 8182 break; 8183 } 8184 } else { 8185 ahd_qinfifo_requeue(ahd, prev_scb, scb); 8186 prev_scb = scb; 8187 } 8188 qinpos = AHD_QIN_WRAP(qinpos+1); 8189 } 8190 8191 ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); 8192 8193 if (action == SEARCH_PRINT) 8194 printk("\nWAITING_TID_QUEUES:\n"); 8195 8196 /* 8197 * Search waiting for selection lists. We traverse the 8198 * list of "their ids" waiting for selection and, if 8199 * appropriate, traverse the SCBs of each "their id" 8200 * looking for matches. 8201 */ 8202 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 8203 seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2); 8204 if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) { 8205 scbid = ahd_inw(ahd, MK_MESSAGE_SCB); 8206 mk_msg_scb = ahd_lookup_scb(ahd, scbid); 8207 } else 8208 mk_msg_scb = NULL; 8209 savedscbptr = ahd_get_scbptr(ahd); 8210 tid_next = ahd_inw(ahd, WAITING_TID_HEAD); 8211 tid_prev = SCB_LIST_NULL; 8212 targets = 0; 8213 for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) { 8214 u_int tid_head; 8215 u_int tid_tail; 8216 8217 targets++; 8218 if (targets > AHD_NUM_TARGETS) 8219 panic("TID LIST LOOP"); 8220 8221 if (scbid >= ahd->scb_data.numscbs) { 8222 printk("%s: Waiting TID List inconsistency. " 8223 "SCB index == 0x%x, yet numscbs == 0x%x.", 8224 ahd_name(ahd), scbid, ahd->scb_data.numscbs); 8225 ahd_dump_card_state(ahd); 8226 panic("for safety"); 8227 } 8228 scb = ahd_lookup_scb(ahd, scbid); 8229 if (scb == NULL) { 8230 printk("%s: SCB = 0x%x Not Active!\n", 8231 ahd_name(ahd), scbid); 8232 panic("Waiting TID List traversal\n"); 8233 } 8234 ahd_set_scbptr(ahd, scbid); 8235 tid_next = ahd_inw_scbram(ahd, SCB_NEXT2); 8236 if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, 8237 SCB_LIST_NULL, ROLE_UNKNOWN) == 0) { 8238 tid_prev = scbid; 8239 continue; 8240 } 8241 8242 /* 8243 * We found a list of scbs that needs to be searched. 8244 */ 8245 if (action == SEARCH_PRINT) 8246 printk(" %d ( ", SCB_GET_TARGET(ahd, scb)); 8247 tid_head = scbid; 8248 found += ahd_search_scb_list(ahd, target, channel, 8249 lun, tag, role, status, 8250 action, &tid_head, &tid_tail, 8251 SCB_GET_TARGET(ahd, scb)); 8252 /* 8253 * Check any MK_MESSAGE SCB that is still waiting to 8254 * enter this target's waiting for selection queue. 8255 */ 8256 if (mk_msg_scb != NULL 8257 && ahd_match_scb(ahd, mk_msg_scb, target, channel, 8258 lun, tag, role)) { 8259 8260 /* 8261 * We found an scb that needs to be acted on. 
8262 */ 8263 found++; 8264 switch (action) { 8265 case SEARCH_COMPLETE: 8266 if ((mk_msg_scb->flags & SCB_ACTIVE) == 0) 8267 printk("Inactive SCB pending MK_MSG\n"); 8268 ahd_done_with_status(ahd, mk_msg_scb, status); 8269 fallthrough; 8270 case SEARCH_REMOVE: 8271 { 8272 u_int tail_offset; 8273 8274 printk("Removing MK_MSG scb\n"); 8275 8276 /* 8277 * Reset our tail to the tail of the 8278 * main per-target list. 8279 */ 8280 tail_offset = WAITING_SCB_TAILS 8281 + (2 * SCB_GET_TARGET(ahd, mk_msg_scb)); 8282 ahd_outw(ahd, tail_offset, tid_tail); 8283 8284 seq_flags2 &= ~PENDING_MK_MESSAGE; 8285 ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); 8286 ahd_outw(ahd, CMDS_PENDING, 8287 ahd_inw(ahd, CMDS_PENDING)-1); 8288 mk_msg_scb = NULL; 8289 break; 8290 } 8291 case SEARCH_PRINT: 8292 printk(" 0x%x", SCB_GET_TAG(scb)); 8293 fallthrough; 8294 case SEARCH_COUNT: 8295 break; 8296 } 8297 } 8298 8299 if (mk_msg_scb != NULL 8300 && SCBID_IS_NULL(tid_head) 8301 && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, 8302 SCB_LIST_NULL, ROLE_UNKNOWN)) { 8303 8304 /* 8305 * When removing the last SCB for a target 8306 * queue with a pending MK_MESSAGE scb, we 8307 * must queue the MK_MESSAGE scb. 8308 */ 8309 printk("Queueing mk_msg_scb\n"); 8310 tid_head = ahd_inw(ahd, MK_MESSAGE_SCB); 8311 seq_flags2 &= ~PENDING_MK_MESSAGE; 8312 ahd_outb(ahd, SEQ_FLAGS2, seq_flags2); 8313 mk_msg_scb = NULL; 8314 } 8315 if (tid_head != scbid) 8316 ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next); 8317 if (!SCBID_IS_NULL(tid_head)) 8318 tid_prev = tid_head; 8319 if (action == SEARCH_PRINT) 8320 printk(")\n"); 8321 } 8322 8323 /* Restore saved state. */ 8324 ahd_set_scbptr(ahd, savedscbptr); 8325 ahd_restore_modes(ahd, saved_modes); 8326 return (found); 8327 } 8328 8329 static int 8330 ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, 8331 int lun, u_int tag, role_t role, uint32_t status, 8332 ahd_search_action action, u_int *list_head, 8333 u_int *list_tail, u_int tid) 8334 { 8335 struct scb *scb; 8336 u_int scbid; 8337 u_int next; 8338 u_int prev; 8339 int found; 8340 8341 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8342 found = 0; 8343 prev = SCB_LIST_NULL; 8344 next = *list_head; 8345 *list_tail = SCB_LIST_NULL; 8346 for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) { 8347 if (scbid >= ahd->scb_data.numscbs) { 8348 printk("%s:SCB List inconsistency. 
" 8349 "SCB == 0x%x, yet numscbs == 0x%x.", 8350 ahd_name(ahd), scbid, ahd->scb_data.numscbs); 8351 ahd_dump_card_state(ahd); 8352 panic("for safety"); 8353 } 8354 scb = ahd_lookup_scb(ahd, scbid); 8355 if (scb == NULL) { 8356 printk("%s: SCB = %d Not Active!\n", 8357 ahd_name(ahd), scbid); 8358 panic("Waiting List traversal\n"); 8359 } 8360 ahd_set_scbptr(ahd, scbid); 8361 *list_tail = scbid; 8362 next = ahd_inw_scbram(ahd, SCB_NEXT); 8363 if (ahd_match_scb(ahd, scb, target, channel, 8364 lun, SCB_LIST_NULL, role) == 0) { 8365 prev = scbid; 8366 continue; 8367 } 8368 found++; 8369 switch (action) { 8370 case SEARCH_COMPLETE: 8371 if ((scb->flags & SCB_ACTIVE) == 0) 8372 printk("Inactive SCB in Waiting List\n"); 8373 ahd_done_with_status(ahd, scb, status); 8374 fallthrough; 8375 case SEARCH_REMOVE: 8376 ahd_rem_wscb(ahd, scbid, prev, next, tid); 8377 *list_tail = prev; 8378 if (SCBID_IS_NULL(prev)) 8379 *list_head = next; 8380 break; 8381 case SEARCH_PRINT: 8382 printk("0x%x ", scbid); 8383 fallthrough; 8384 case SEARCH_COUNT: 8385 prev = scbid; 8386 break; 8387 } 8388 if (found > AHD_SCB_MAX) 8389 panic("SCB LIST LOOP"); 8390 } 8391 if (action == SEARCH_COMPLETE 8392 || action == SEARCH_REMOVE) 8393 ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found); 8394 return (found); 8395 } 8396 8397 static void 8398 ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, 8399 u_int tid_cur, u_int tid_next) 8400 { 8401 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8402 8403 if (SCBID_IS_NULL(tid_cur)) { 8404 8405 /* Bypass current TID list */ 8406 if (SCBID_IS_NULL(tid_prev)) { 8407 ahd_outw(ahd, WAITING_TID_HEAD, tid_next); 8408 } else { 8409 ahd_set_scbptr(ahd, tid_prev); 8410 ahd_outw(ahd, SCB_NEXT2, tid_next); 8411 } 8412 if (SCBID_IS_NULL(tid_next)) 8413 ahd_outw(ahd, WAITING_TID_TAIL, tid_prev); 8414 } else { 8415 8416 /* Stitch through tid_cur */ 8417 if (SCBID_IS_NULL(tid_prev)) { 8418 ahd_outw(ahd, WAITING_TID_HEAD, tid_cur); 8419 } else { 8420 ahd_set_scbptr(ahd, tid_prev); 8421 ahd_outw(ahd, SCB_NEXT2, tid_cur); 8422 } 8423 ahd_set_scbptr(ahd, tid_cur); 8424 ahd_outw(ahd, SCB_NEXT2, tid_next); 8425 8426 if (SCBID_IS_NULL(tid_next)) 8427 ahd_outw(ahd, WAITING_TID_TAIL, tid_cur); 8428 } 8429 } 8430 8431 /* 8432 * Manipulate the waiting for selection list and return the 8433 * scb that follows the one that we remove. 8434 */ 8435 static u_int 8436 ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, 8437 u_int prev, u_int next, u_int tid) 8438 { 8439 u_int tail_offset; 8440 8441 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8442 if (!SCBID_IS_NULL(prev)) { 8443 ahd_set_scbptr(ahd, prev); 8444 ahd_outw(ahd, SCB_NEXT, next); 8445 } 8446 8447 /* 8448 * SCBs that have MK_MESSAGE set in them may 8449 * cause the tail pointer to be updated without 8450 * setting the next pointer of the previous tail. 8451 * Only clear the tail if the removed SCB was 8452 * the tail. 8453 */ 8454 tail_offset = WAITING_SCB_TAILS + (2 * tid); 8455 if (SCBID_IS_NULL(next) 8456 && ahd_inw(ahd, tail_offset) == scbid) 8457 ahd_outw(ahd, tail_offset, prev); 8458 8459 ahd_add_scb_to_free_list(ahd, scbid); 8460 return (next); 8461 } 8462 8463 /* 8464 * Add the SCB as selected by SCBPTR onto the on chip list of 8465 * free hardware SCBs. This list is empty/unused if we are not 8466 * performing SCB paging. 8467 */ 8468 static void 8469 ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid) 8470 { 8471 /* XXX Need some other mechanism to designate "free". 
*/ 8472 /* 8473 * Invalidate the tag so that our abort 8474 * routines don't think it's active. 8475 ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL); 8476 */ 8477 } 8478 8479 /******************************** Error Handling ******************************/ 8480 /* 8481 * Abort all SCBs that match the given description (target/channel/lun/tag), 8482 * setting their status to the passed in status if the status has not already 8483 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer 8484 * is paused before it is called. 8485 */ 8486 static int 8487 ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, 8488 int lun, u_int tag, role_t role, uint32_t status) 8489 { 8490 struct scb *scbp; 8491 struct scb *scbp_next; 8492 u_int i, j; 8493 u_int maxtarget; 8494 u_int minlun; 8495 u_int maxlun; 8496 int found; 8497 ahd_mode_state saved_modes; 8498 8499 /* restore this when we're done */ 8500 saved_modes = ahd_save_modes(ahd); 8501 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 8502 8503 found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL, 8504 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 8505 8506 /* 8507 * Clean out the busy target table for any untagged commands. 8508 */ 8509 i = 0; 8510 maxtarget = 16; 8511 if (target != CAM_TARGET_WILDCARD) { 8512 i = target; 8513 if (channel == 'B') 8514 i += 8; 8515 maxtarget = i + 1; 8516 } 8517 8518 if (lun == CAM_LUN_WILDCARD) { 8519 minlun = 0; 8520 maxlun = AHD_NUM_LUNS_NONPKT; 8521 } else if (lun >= AHD_NUM_LUNS_NONPKT) { 8522 minlun = maxlun = 0; 8523 } else { 8524 minlun = lun; 8525 maxlun = lun + 1; 8526 } 8527 8528 if (role != ROLE_TARGET) { 8529 for (;i < maxtarget; i++) { 8530 for (j = minlun;j < maxlun; j++) { 8531 u_int scbid; 8532 u_int tcl; 8533 8534 tcl = BUILD_TCL_RAW(i, 'A', j); 8535 scbid = ahd_find_busy_tcl(ahd, tcl); 8536 scbp = ahd_lookup_scb(ahd, scbid); 8537 if (scbp == NULL 8538 || ahd_match_scb(ahd, scbp, target, channel, 8539 lun, tag, role) == 0) 8540 continue; 8541 ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j)); 8542 } 8543 } 8544 } 8545 8546 /* 8547 * Don't abort commands that have already completed, 8548 * but haven't quite made it up to the host yet. 8549 */ 8550 ahd_flush_qoutfifo(ahd); 8551 8552 /* 8553 * Go through the pending CCB list and look for 8554 * commands for this target that are still active. 8555 * These are other tagged commands that were 8556 * disconnected when the reset occurred. 
*/ 8558 scbp_next = LIST_FIRST(&ahd->pending_scbs); 8559 while (scbp_next != NULL) { 8560 scbp = scbp_next; 8561 scbp_next = LIST_NEXT(scbp, pending_links); 8562 if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) { 8563 cam_status ostat; 8564 8565 ostat = ahd_get_transaction_status(scbp); 8566 if (ostat == CAM_REQ_INPROG) 8567 ahd_set_transaction_status(scbp, status); 8568 if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP) 8569 ahd_freeze_scb(scbp); 8570 if ((scbp->flags & SCB_ACTIVE) == 0) 8571 printk("Inactive SCB on pending list\n"); 8572 ahd_done(ahd, scbp); 8573 found++; 8574 } 8575 } 8576 ahd_restore_modes(ahd, saved_modes); 8577 ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status); 8578 ahd->flags |= AHD_UPDATE_PEND_CMDS; 8579 return found; 8580 } 8581 8582 static void 8583 ahd_reset_current_bus(struct ahd_softc *ahd) 8584 { 8585 uint8_t scsiseq; 8586 8587 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8588 ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST); 8589 scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO); 8590 ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO); 8591 ahd_flush_device_writes(ahd); 8592 ahd_delay(AHD_BUSRESET_DELAY); 8593 /* Turn off the bus reset */ 8594 ahd_outb(ahd, SCSISEQ0, scsiseq); 8595 ahd_flush_device_writes(ahd); 8596 ahd_delay(AHD_BUSRESET_DELAY); 8597 if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) { 8598 /* 8599 * 2A Razor #474 8600 * Certain chip state is not cleared for 8601 * SCSI bus resets that we initiate, so 8602 * we must reset the chip. 8603 */ 8604 ahd_reset(ahd, /*reinit*/TRUE); 8605 ahd_intr_enable(ahd, /*enable*/TRUE); 8606 AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); 8607 } 8608 8609 ahd_clear_intstat(ahd); 8610 } 8611 8612 int 8613 ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) 8614 { 8615 struct ahd_devinfo caminfo; 8616 u_int initiator; 8617 u_int target; 8618 u_int max_scsiid; 8619 int found; 8620 u_int fifo; 8621 u_int next_fifo; 8622 uint8_t scsiseq; 8623 8624 /* 8625 * Check if the last bus reset is cleared 8626 */ 8627 if (ahd->flags & AHD_BUS_RESET_ACTIVE) { 8628 printk("%s: bus reset still active\n", 8629 ahd_name(ahd)); 8630 return 0; 8631 } 8632 ahd->flags |= AHD_BUS_RESET_ACTIVE; 8633 8634 ahd->pending_device = NULL; 8635 8636 ahd_compile_devinfo(&caminfo, 8637 CAM_TARGET_WILDCARD, 8638 CAM_TARGET_WILDCARD, 8639 CAM_LUN_WILDCARD, 8640 channel, ROLE_UNKNOWN); 8641 ahd_pause(ahd); 8642 8643 /* Make sure the sequencer is in a safe location. */ 8644 ahd_clear_critical_section(ahd); 8645 8646 /* 8647 * Run our command complete fifos to ensure that we perform 8648 * completion processing on any commands that 'completed' 8649 * before the reset occurred. 8650 */ 8651 ahd_run_qoutfifo(ahd); 8652 #ifdef AHD_TARGET_MODE 8653 if ((ahd->flags & AHD_TARGETROLE) != 0) { 8654 ahd_run_tqinfifo(ahd, /*paused*/TRUE); 8655 } 8656 #endif 8657 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 8658 8659 /* 8660 * Disable selections so no automatic hardware 8661 * functions will modify chip state. 8662 */ 8663 ahd_outb(ahd, SCSISEQ0, 0); 8664 ahd_outb(ahd, SCSISEQ1, 0); 8665 8666 /* 8667 * Safely shut down our DMA engines. Always start with 8668 * the FIFO that is not currently active (if any are 8669 * actively connected). 8670 */ 8671 next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; 8672 if (next_fifo > CURRFIFO_1) 8673 /* If disconnected, arbitrarily start with FIFO1.
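 *
 * (The do/while below uses the CURRFIFO_1 XOR to ping-pong next_fifo
 * between the two FIFOs, disabling SCSIEN/HDMAEN in each and then
 * pointing CURRFIFO at the now-idle engine, until it wraps back
 * around to the FIFO it started from.)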
*/ 8674 next_fifo = fifo = 0; 8675 do { 8676 next_fifo ^= CURRFIFO_1; 8677 ahd_set_modes(ahd, next_fifo, next_fifo); 8678 ahd_outb(ahd, DFCNTRL, 8679 ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN)); 8680 while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) 8681 ahd_delay(10); 8682 /* 8683 * Set CURRFIFO to the now inactive channel. 8684 */ 8685 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 8686 ahd_outb(ahd, DFFSTAT, next_fifo); 8687 } while (next_fifo != fifo); 8688 8689 /* 8690 * Reset the bus if we are initiating this reset 8691 */ 8692 ahd_clear_msg_state(ahd); 8693 ahd_outb(ahd, SIMODE1, 8694 ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST)); 8695 8696 if (initiate_reset) 8697 ahd_reset_current_bus(ahd); 8698 8699 ahd_clear_intstat(ahd); 8700 8701 /* 8702 * Clean up all the state information for the 8703 * pending transactions on this bus. 8704 */ 8705 found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel, 8706 CAM_LUN_WILDCARD, SCB_LIST_NULL, 8707 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); 8708 8709 /* 8710 * Cleanup anything left in the FIFOs. 8711 */ 8712 ahd_clear_fifo(ahd, 0); 8713 ahd_clear_fifo(ahd, 1); 8714 8715 /* 8716 * Clear SCSI interrupt status 8717 */ 8718 ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); 8719 8720 /* 8721 * Reenable selections 8722 */ 8723 ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST); 8724 scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE); 8725 ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); 8726 8727 max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; 8728 #ifdef AHD_TARGET_MODE 8729 /* 8730 * Send an immediate notify ccb to all target mode peripheral 8731 * drivers affected by this action. 8732 */ 8733 for (target = 0; target <= max_scsiid; target++) { 8734 struct ahd_tmode_tstate* tstate; 8735 u_int lun; 8736 8737 tstate = ahd->enabled_targets[target]; 8738 if (tstate == NULL) 8739 continue; 8740 for (lun = 0; lun < AHD_NUM_LUNS; lun++) { 8741 struct ahd_tmode_lstate* lstate; 8742 8743 lstate = tstate->enabled_luns[lun]; 8744 if (lstate == NULL) 8745 continue; 8746 8747 ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD, 8748 EVENT_TYPE_BUS_RESET, /*arg*/0); 8749 ahd_send_lstate_events(ahd, lstate); 8750 } 8751 } 8752 #endif 8753 /* 8754 * Revert to async/narrow transfers until we renegotiate.
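 *
 * (A bus reset voids the negotiated agreement on every I_T nexus,
 * which is why only the current settings are forced back to
 * async/narrow below (AHD_TRANS_CUR); the goal settings survive and
 * trigger a fresh negotiation on the next command to each device.)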
8755 */ 8756 for (target = 0; target <= max_scsiid; target++) { 8757 8758 if (ahd->enabled_targets[target] == NULL) 8759 continue; 8760 for (initiator = 0; initiator <= max_scsiid; initiator++) { 8761 struct ahd_devinfo devinfo; 8762 8763 ahd_compile_devinfo(&devinfo, target, initiator, 8764 CAM_LUN_WILDCARD, 8765 'A', ROLE_UNKNOWN); 8766 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 8767 AHD_TRANS_CUR, /*paused*/TRUE); 8768 ahd_set_syncrate(ahd, &devinfo, /*period*/0, 8769 /*offset*/0, /*ppr_options*/0, 8770 AHD_TRANS_CUR, /*paused*/TRUE); 8771 } 8772 } 8773 8774 /* Notify the XPT that a bus reset occurred */ 8775 ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD, 8776 CAM_LUN_WILDCARD, AC_BUS_RESET); 8777 8778 ahd_restart(ahd); 8779 8780 return (found); 8781 } 8782 8783 /**************************** Statistics Processing ***************************/ 8784 static void 8785 ahd_stat_timer(struct timer_list *t) 8786 { 8787 struct ahd_softc *ahd = from_timer(ahd, t, stat_timer); 8788 u_long s; 8789 int enint_coal; 8790 8791 ahd_lock(ahd, &s); 8792 8793 enint_coal = ahd->hs_mailbox & ENINT_COALESCE; 8794 if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold) 8795 enint_coal |= ENINT_COALESCE; 8796 else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold) 8797 enint_coal &= ~ENINT_COALESCE; 8798 8799 if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) { 8800 ahd_enable_coalescing(ahd, enint_coal); 8801 #ifdef AHD_DEBUG 8802 if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0) 8803 printk("%s: Interrupt coalescing " 8804 "now %sabled. Cmds %d\n", 8805 ahd_name(ahd), 8806 (enint_coal & ENINT_COALESCE) ? "en" : "dis", 8807 ahd->cmdcmplt_total); 8808 #endif 8809 } 8810 8811 ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1); 8812 ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]; 8813 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0; 8814 ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); 8815 ahd_unlock(ahd, &s); 8816 } 8817 8818 /****************************** Status Processing *****************************/ 8819 8820 static void 8821 ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) 8822 { 8823 struct hardware_scb *hscb; 8824 int paused; 8825 8826 /* 8827 * The sequencer freezes its select-out queue 8828 * anytime a SCSI status error occurs. We must 8829 * handle the error and increment our qfreeze count 8830 * to allow the sequencer to continue. We don't 8831 * bother clearing critical sections here since all 8832 * operations are on data structures that the sequencer 8833 * is not touching once the queue is frozen. 8834 */ 8835 hscb = scb->hscb; 8836 8837 if (ahd_is_paused(ahd)) { 8838 paused = 1; 8839 } else { 8840 paused = 0; 8841 ahd_pause(ahd); 8842 } 8843 8844 /* Freeze the queue until the client sees the error. */ 8845 ahd_freeze_devq(ahd, scb); 8846 ahd_freeze_scb(scb); 8847 ahd->qfreeze_cnt++; 8848 ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); 8849 8850 if (paused == 0) 8851 ahd_unpause(ahd); 8852 8853 /* Don't want to clobber the original sense code */ 8854 if ((scb->flags & SCB_SENSE) != 0) { 8855 /* 8856 * Clear the SCB_SENSE Flag and perform 8857 * a normal command completion. 
8858 */ 8859 scb->flags &= ~SCB_SENSE; 8860 ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); 8861 ahd_done(ahd, scb); 8862 return; 8863 } 8864 ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); 8865 ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status); 8866 switch (hscb->shared_data.istatus.scsi_status) { 8867 case STATUS_PKT_SENSE: 8868 { 8869 struct scsi_status_iu_header *siu; 8870 8871 ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD); 8872 siu = (struct scsi_status_iu_header *)scb->sense_data; 8873 ahd_set_scsi_status(scb, siu->status); 8874 #ifdef AHD_DEBUG 8875 if ((ahd_debug & AHD_SHOW_SENSE) != 0) { 8876 ahd_print_path(ahd, scb); 8877 printk("SCB 0x%x Received PKT Status of 0x%x\n", 8878 SCB_GET_TAG(scb), siu->status); 8879 printk("\tflags = 0x%x, sense len = 0x%x, " 8880 "pktfail = 0x%x\n", 8881 siu->flags, scsi_4btoul(siu->sense_length), 8882 scsi_4btoul(siu->pkt_failures_length)); 8883 } 8884 #endif 8885 if ((siu->flags & SIU_RSPVALID) != 0) { 8886 ahd_print_path(ahd, scb); 8887 if (scsi_4btoul(siu->pkt_failures_length) < 4) { 8888 printk("Unable to parse pkt_failures\n"); 8889 } else { 8890 8891 switch (SIU_PKTFAIL_CODE(siu)) { 8892 case SIU_PFC_NONE: 8893 printk("No packet failure found\n"); 8894 break; 8895 case SIU_PFC_CIU_FIELDS_INVALID: 8896 printk("Invalid Command IU Field\n"); 8897 break; 8898 case SIU_PFC_TMF_NOT_SUPPORTED: 8899 printk("TMF not supported\n"); 8900 break; 8901 case SIU_PFC_TMF_FAILED: 8902 printk("TMF failed\n"); 8903 break; 8904 case SIU_PFC_INVALID_TYPE_CODE: 8905 printk("Invalid L_Q Type code\n"); 8906 break; 8907 case SIU_PFC_ILLEGAL_REQUEST: 8908 printk("Illegal request\n"); 8909 break; 8910 default: 8911 break; 8912 } 8913 } 8914 if (siu->status == SAM_STAT_GOOD) 8915 ahd_set_transaction_status(scb, 8916 CAM_REQ_CMP_ERR); 8917 } 8918 if ((siu->flags & SIU_SNSVALID) != 0) { 8919 scb->flags |= SCB_PKT_SENSE; 8920 #ifdef AHD_DEBUG 8921 if ((ahd_debug & AHD_SHOW_SENSE) != 0) 8922 printk("Sense data available\n"); 8923 #endif 8924 } 8925 ahd_done(ahd, scb); 8926 break; 8927 } 8928 case SAM_STAT_COMMAND_TERMINATED: 8929 case SAM_STAT_CHECK_CONDITION: 8930 { 8931 struct ahd_devinfo devinfo; 8932 struct ahd_dma_seg *sg; 8933 struct scsi_sense *sc; 8934 struct ahd_initiator_tinfo *targ_info; 8935 struct ahd_tmode_tstate *tstate; 8936 struct ahd_transinfo *tinfo; 8937 #ifdef AHD_DEBUG 8938 if (ahd_debug & AHD_SHOW_SENSE) { 8939 ahd_print_path(ahd, scb); 8940 printk("SCB %d: requests Check Status\n", 8941 SCB_GET_TAG(scb)); 8942 } 8943 #endif 8944 8945 if (ahd_perform_autosense(scb) == 0) 8946 break; 8947 8948 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), 8949 SCB_GET_TARGET(ahd, scb), 8950 SCB_GET_LUN(scb), 8951 SCB_GET_CHANNEL(ahd, scb), 8952 ROLE_INITIATOR); 8953 targ_info = ahd_fetch_transinfo(ahd, 8954 devinfo.channel, 8955 devinfo.our_scsiid, 8956 devinfo.target, 8957 &tstate); 8958 tinfo = &targ_info->curr; 8959 sg = scb->sg_list; 8960 sc = (struct scsi_sense *)hscb->shared_data.idata.cdb; 8961 /* 8962 * Save off the residual if there is one. 
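 *
 * (The code below then hand-builds the 6-byte REQUEST SENSE CDB.  As
 * an illustration, for a SCSI-2 target on LUN 1 with a hypothetical
 * 252-byte sense buffer the interesting bytes would be:
 *
 *	sc->opcode = REQUEST_SENSE;	// 0x03
 *	sc->byte2  = 1 << 5;		// LUN in bits 7:5, SCSI-2 only
 *	sc->length = 252;		// allocation length (hypothetical)
 *
 * with the unused and control bytes left zero.)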
*/ 8964 ahd_update_residual(ahd, scb); 8965 #ifdef AHD_DEBUG 8966 if (ahd_debug & AHD_SHOW_SENSE) { 8967 ahd_print_path(ahd, scb); 8968 printk("Sending Sense\n"); 8969 } 8970 #endif 8971 scb->sg_count = 0; 8972 sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb), 8973 ahd_get_sense_bufsize(ahd, scb), 8974 /*last*/TRUE); 8975 sc->opcode = REQUEST_SENSE; 8976 sc->byte2 = 0; 8977 if (tinfo->protocol_version <= SCSI_REV_2 8978 && SCB_GET_LUN(scb) < 8) 8979 sc->byte2 = SCB_GET_LUN(scb) << 5; 8980 sc->unused[0] = 0; 8981 sc->unused[1] = 0; 8982 sc->length = ahd_get_sense_bufsize(ahd, scb); 8983 sc->control = 0; 8984 8985 /* 8986 * We can't allow the target to disconnect. 8987 * This will be an untagged transaction and 8988 * having the target disconnect will make this 8989 * transaction indistinguishable from outstanding 8990 * tagged transactions. 8991 */ 8992 hscb->control = 0; 8993 8994 /* 8995 * This request sense could be because 8996 * the device lost power or in some other 8997 * way has lost our transfer negotiations. 8998 * Renegotiate if appropriate. Unit attention 8999 * errors will be reported before any data 9000 * phases occur. 9001 */ 9002 if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) { 9003 ahd_update_neg_request(ahd, &devinfo, 9004 tstate, targ_info, 9005 AHD_NEG_IF_NON_ASYNC); 9006 } 9007 if (tstate->auto_negotiate & devinfo.target_mask) { 9008 hscb->control |= MK_MESSAGE; 9009 scb->flags &= 9010 ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET); 9011 scb->flags |= SCB_AUTO_NEGOTIATE; 9012 } 9013 hscb->cdb_len = sizeof(*sc); 9014 ahd_setup_data_scb(ahd, scb); 9015 scb->flags |= SCB_SENSE; 9016 ahd_queue_scb(ahd, scb); 9017 break; 9018 } 9019 case SAM_STAT_GOOD: 9020 printk("%s: Interrupted for status of 0???\n", 9021 ahd_name(ahd)); 9022 fallthrough; 9023 default: 9024 ahd_done(ahd, scb); 9025 break; 9026 } 9027 } 9028 9029 static void 9030 ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb) 9031 { 9032 if (scb->hscb->shared_data.istatus.scsi_status != 0) { 9033 ahd_handle_scsi_status(ahd, scb); 9034 } else { 9035 ahd_calc_residual(ahd, scb); 9036 ahd_done(ahd, scb); 9037 } 9038 } 9039 9040 /* 9041 * Calculate the residual for a just completed SCB. 9042 */ 9043 static void 9044 ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb) 9045 { 9046 struct hardware_scb *hscb; 9047 struct initiator_status *spkt; 9048 uint32_t sgptr; 9049 uint32_t resid_sgptr; 9050 uint32_t resid; 9051 9052 /* 9053 * 5 cases. 9054 * 1) No residual. 9055 * SG_STATUS_VALID clear in sgptr. 9056 * 2) Transferless command 9057 * 3) Never performed any transfers. 9058 * sgptr has SG_FULL_RESID set. 9059 * 4) No residual but target did not 9060 * save data pointers after the 9061 * last transfer, so sgptr was 9062 * never updated. 9063 * 5) We have a partial residual. 9064 * Use residual_sgptr to determine 9065 * where we are. 9066 */ 9067 9068 hscb = scb->hscb; 9069 sgptr = ahd_le32toh(hscb->sgptr); 9070 if ((sgptr & SG_STATUS_VALID) == 0) 9071 /* Case 1 */ 9072 return; 9073 sgptr &= ~SG_STATUS_VALID; 9074 9075 if ((sgptr & SG_LIST_NULL) != 0) 9076 /* Case 2 */ 9077 return; 9078 9079 /* 9080 * Residual fields are the same in both 9081 * target and initiator status packets, 9082 * so we can always use the initiator fields 9083 * regardless of the role for this SCB.
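 *
 * (Worked example of case 5: a 6KB transfer over three 2KB segments
 * stops after 512 bytes of the second segment.  residual_datacnt
 * then holds 2048 - 512 = 1536, residual_sgptr points at the third
 * segment, and the walk below backs up one segment and adds the
 * third segment's 2048 bytes, for a total residual of 3584.)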
9084 */ 9085 spkt = &hscb->shared_data.istatus; 9086 resid_sgptr = ahd_le32toh(spkt->residual_sgptr); 9087 if ((sgptr & SG_FULL_RESID) != 0) { 9088 /* Case 3 */ 9089 resid = ahd_get_transfer_length(scb); 9090 } else if ((resid_sgptr & SG_LIST_NULL) != 0) { 9091 /* Case 4 */ 9092 return; 9093 } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { 9094 ahd_print_path(ahd, scb); 9095 printk("data overrun detected Tag == 0x%x.\n", 9096 SCB_GET_TAG(scb)); 9097 ahd_freeze_devq(ahd, scb); 9098 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); 9099 ahd_freeze_scb(scb); 9100 return; 9101 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { 9102 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); 9103 /* NOTREACHED */ 9104 } else { 9105 struct ahd_dma_seg *sg; 9106 9107 /* 9108 * Remainder of the SG where the transfer 9109 * stopped. 9110 */ 9111 resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; 9112 sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK); 9113 9114 /* The residual sg_ptr always points to the next sg */ 9115 sg--; 9116 9117 /* 9118 * Add up the contents of all residual 9119 * SG segments that are after the SG where 9120 * the transfer stopped. 9121 */ 9122 while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { 9123 sg++; 9124 resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; 9125 } 9126 } 9127 if ((scb->flags & SCB_SENSE) == 0) 9128 ahd_set_residual(scb, resid); 9129 else 9130 ahd_set_sense_residual(scb, resid); 9131 9132 #ifdef AHD_DEBUG 9133 if ((ahd_debug & AHD_SHOW_MISC) != 0) { 9134 ahd_print_path(ahd, scb); 9135 printk("Handled %sResidual of %d bytes\n", 9136 (scb->flags & SCB_SENSE) ? "Sense " : "", resid); 9137 } 9138 #endif 9139 } 9140 9141 /******************************* Target Mode **********************************/ 9142 #ifdef AHD_TARGET_MODE 9143 /* 9144 * Add a target mode event to this lun's queue 9145 */ 9146 static void 9147 ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, 9148 u_int initiator_id, u_int event_type, u_int event_arg) 9149 { 9150 struct ahd_tmode_event *event; 9151 int pending; 9152 9153 xpt_freeze_devq(lstate->path, /*count*/1); 9154 if (lstate->event_w_idx >= lstate->event_r_idx) 9155 pending = lstate->event_w_idx - lstate->event_r_idx; 9156 else 9157 pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 9158 - (lstate->event_r_idx - lstate->event_w_idx); 9159 9160 if (event_type == EVENT_TYPE_BUS_RESET 9161 || event_type == TARGET_RESET) { 9162 /* 9163 * Any earlier events are irrelevant, so reset our buffer. 9164 * This has the effect of allowing us to deal with reset 9165 * floods (an external device holding down the reset line) 9166 * without losing the event that is really interesting. 
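 *
 * ("pending" above counts the queued events; the device queue was
 * frozen once per queued event, so when a reset flushes the ring the
 * same count is handed back to xpt_release_devq() to undo the
 * accumulated freezes in one call.)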
9167 */ 9168 lstate->event_r_idx = 0; 9169 lstate->event_w_idx = 0; 9170 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); 9171 } 9172 9173 if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { 9174 xpt_print_path(lstate->path); 9175 printk("immediate event %x:%x lost\n", 9176 lstate->event_buffer[lstate->event_r_idx].event_type, 9177 lstate->event_buffer[lstate->event_r_idx].event_arg); 9178 lstate->event_r_idx++; 9179 if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) 9180 lstate->event_r_idx = 0; 9181 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); 9182 } 9183 9184 event = &lstate->event_buffer[lstate->event_w_idx]; 9185 event->initiator_id = initiator_id; 9186 event->event_type = event_type; 9187 event->event_arg = event_arg; 9188 lstate->event_w_idx++; 9189 if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) 9190 lstate->event_w_idx = 0; 9191 } 9192 9193 /* 9194 * Send any target mode events queued up waiting 9195 * for immediate notify resources. 9196 */ 9197 void 9198 ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate) 9199 { 9200 struct ccb_hdr *ccbh; 9201 struct ccb_immed_notify *inot; 9202 9203 while (lstate->event_r_idx != lstate->event_w_idx 9204 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { 9205 struct ahd_tmode_event *event; 9206 9207 event = &lstate->event_buffer[lstate->event_r_idx]; 9208 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); 9209 inot = (struct ccb_immed_notify *)ccbh; 9210 switch (event->event_type) { 9211 case EVENT_TYPE_BUS_RESET: 9212 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; 9213 break; 9214 default: 9215 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 9216 inot->message_args[0] = event->event_type; 9217 inot->message_args[1] = event->event_arg; 9218 break; 9219 } 9220 inot->initiator_id = event->initiator_id; 9221 inot->sense_len = 0; 9222 xpt_done((union ccb *)inot); 9223 lstate->event_r_idx++; 9224 if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) 9225 lstate->event_r_idx = 0; 9226 } 9227 } 9228 #endif 9229 9230 /******************** Sequencer Program Patching/Download *********************/ 9231 9232 #ifdef AHD_DUMP_SEQ 9233 void 9234 ahd_dumpseq(struct ahd_softc* ahd) 9235 { 9236 int i; 9237 int max_prog; 9238 9239 max_prog = 2048; 9240 9241 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 9242 ahd_outw(ahd, PRGMCNT, 0); 9243 for (i = 0; i < max_prog; i++) { 9244 uint8_t ins_bytes[4]; 9245 9246 ahd_insb(ahd, SEQRAM, ins_bytes, 4); 9247 printk("0x%08x\n", ins_bytes[0] << 24 9248 | ins_bytes[1] << 16 9249 | ins_bytes[2] << 8 9250 | ins_bytes[3]); 9251 } 9252 } 9253 #endif 9254 9255 static void 9256 ahd_loadseq(struct ahd_softc *ahd) 9257 { 9258 struct cs cs_table[NUM_CRITICAL_SECTIONS]; 9259 u_int begin_set[NUM_CRITICAL_SECTIONS]; 9260 u_int end_set[NUM_CRITICAL_SECTIONS]; 9261 const struct patch *cur_patch; 9262 u_int cs_count; 9263 u_int cur_cs; 9264 u_int i; 9265 int downloaded; 9266 u_int skip_addr; 9267 u_int sg_prefetch_cnt; 9268 u_int sg_prefetch_cnt_limit; 9269 u_int sg_prefetch_align; 9270 u_int sg_size; 9271 u_int cacheline_mask; 9272 uint8_t download_consts[DOWNLOAD_CONST_COUNT]; 9273 9274 if (bootverbose) 9275 printk("%s: Downloading Sequencer Program...", 9276 ahd_name(ahd)); 9277 9278 #if DOWNLOAD_CONST_COUNT != 8 9279 #error "Download Const Mismatch" 9280 #endif 9281 /* 9282 * Start out with 0 critical sections 9283 * that apply to this firmware load. 
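 *
 * (A critical section is a span of sequencer code that must not be
 * left half-executed when we pause the chip.  Because patched-out
 * instructions are skipped during download, the begin_set/end_set
 * bookkeeping below translates each section's source addresses into
 * downloaded-instruction addresses before they are saved in
 * cs_table.)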
*/ 9285 cs_count = 0; 9286 cur_cs = 0; 9287 memset(begin_set, 0, sizeof(begin_set)); 9288 memset(end_set, 0, sizeof(end_set)); 9289 9290 /* 9291 * Set up the downloadable constant table. 9292 * 9293 * The computation for the S/G prefetch variables is 9294 * a bit complicated. We would like to always fetch 9295 * in terms of cacheline-sized increments. However, 9296 * if the cacheline is not an even multiple of the 9297 * SG element size or is larger than our SG RAM, using 9298 * just the cache size might leave us with only a portion 9299 * of an SG element at the tail of a prefetch. If the 9300 * cacheline is larger than our S/G prefetch buffer less 9301 * the size of an SG element, we may round down to a cacheline 9302 * that doesn't contain any or all of the S/G of interest 9303 * within the bounds of our S/G ram. Provide variables to 9304 * the sequencer that will allow it to handle these edge 9305 * cases. 9306 */ 9307 /* Start by aligning to the nearest cacheline. */ 9308 sg_prefetch_align = ahd->pci_cachesize; 9309 if (sg_prefetch_align == 0) 9310 sg_prefetch_align = 8; 9311 /* Round down to the nearest power of 2. */ 9312 while (powerof2(sg_prefetch_align) == 0) 9313 sg_prefetch_align--; 9314 9315 cacheline_mask = sg_prefetch_align - 1; 9316 9317 /* 9318 * If the cacheline boundary is greater than half our prefetch RAM 9319 * we risk not being able to fetch even a single complete S/G 9320 * segment if we align to that boundary. 9321 */ 9322 if (sg_prefetch_align > CCSGADDR_MAX/2) 9323 sg_prefetch_align = CCSGADDR_MAX/2; 9324 /* Start by fetching a single cacheline. */ 9325 sg_prefetch_cnt = sg_prefetch_align; 9326 /* 9327 * Increment the prefetch count by cachelines until 9328 * at least one S/G element will fit. 9329 */ 9330 sg_size = sizeof(struct ahd_dma_seg); 9331 if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) 9332 sg_size = sizeof(struct ahd_dma64_seg); 9333 while (sg_prefetch_cnt < sg_size) 9334 sg_prefetch_cnt += sg_prefetch_align; 9335 /* 9336 * If the cacheline is not an even multiple of 9337 * the S/G size, we may only get a partial S/G when 9338 * we align. Add a cacheline if this is the case. 9339 */ 9340 if ((sg_prefetch_align % sg_size) != 0 9341 && (sg_prefetch_cnt < CCSGADDR_MAX)) 9342 sg_prefetch_cnt += sg_prefetch_align; 9343 /* 9344 * Lastly, compute a value that the sequencer can use 9345 * to determine if the remainder of the CCSGRAM buffer 9346 * has a full S/G element in it. 9347 */ 9348 sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1); 9349 download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; 9350 download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit; 9351 download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1); 9352 download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1); 9353 download_consts[SG_SIZEOF] = sg_size; 9354 download_consts[PKT_OVERRUN_BUFOFFSET] = 9355 (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256; 9356 download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN; 9357 download_consts[CACHELINE_MASK] = cacheline_mask; 9358 cur_patch = patches; 9359 downloaded = 0; 9360 skip_addr = 0; 9361 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); 9362 ahd_outw(ahd, PRGMCNT, 0); 9363 9364 for (i = 0; i < sizeof(seqprog)/4; i++) { 9365 if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) { 9366 /* 9367 * Don't download this instruction as it 9368 * is in a patch that was removed.
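 *
 * (Every skipped instruction also shifts the address of everything
 * after it, which is why jump and call targets are rewritten through
 * ahd_resolve_seqaddr() when ahd_download_instr() emits them.)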
*/ 9370 continue; 9371 } 9372 /* 9373 * Move through the CS table until we find a CS 9374 * that might apply to this instruction. 9375 */ 9376 for (; cur_cs < NUM_CRITICAL_SECTIONS; cur_cs++) { 9377 if (critical_sections[cur_cs].end <= i) { 9378 if (begin_set[cs_count] == TRUE 9379 && end_set[cs_count] == FALSE) { 9380 cs_table[cs_count].end = downloaded; 9381 end_set[cs_count] = TRUE; 9382 cs_count++; 9383 } 9384 continue; 9385 } 9386 if (critical_sections[cur_cs].begin <= i 9387 && begin_set[cs_count] == FALSE) { 9388 cs_table[cs_count].begin = downloaded; 9389 begin_set[cs_count] = TRUE; 9390 } 9391 break; 9392 } 9393 ahd_download_instr(ahd, i, download_consts); 9394 downloaded++; 9395 } 9396 9397 ahd->num_critical_sections = cs_count; 9398 if (cs_count != 0) { 9399 9400 cs_count *= sizeof(struct cs); 9401 ahd->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC); 9402 if (ahd->critical_sections == NULL) 9403 panic("ahd_loadseq: Could not malloc"); 9404 } 9405 ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE); 9406 9407 if (bootverbose) { 9408 printk(" %d instructions downloaded\n", downloaded); 9409 printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", 9410 ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags); 9411 } 9412 } 9413 9414 static int 9415 ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch, 9416 u_int start_instr, u_int *skip_addr) 9417 { 9418 const struct patch *cur_patch; 9419 const struct patch *last_patch; 9420 u_int num_patches; 9421 9422 num_patches = ARRAY_SIZE(patches); 9423 last_patch = &patches[num_patches]; 9424 cur_patch = *start_patch; 9425 9426 while (cur_patch < last_patch && start_instr == cur_patch->begin) { 9427 9428 if (cur_patch->patch_func(ahd) == 0) { 9429 9430 /* Start rejecting code */ 9431 *skip_addr = start_instr + cur_patch->skip_instr; 9432 cur_patch += cur_patch->skip_patch; 9433 } else { 9434 /* Accepted this patch. Advance to the next 9435 * one and wait for our instruction pointer to 9436 * hit this point. 9437 */ 9438 cur_patch++; 9439 } 9440 } 9441 9442 *start_patch = cur_patch; 9443 if (start_instr < *skip_addr) 9444 /* Still skipping */ 9445 return (0); 9446 9447 return (1); 9448 } 9449 9450 static u_int 9451 ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address) 9452 { 9453 const struct patch *cur_patch; 9454 int address_offset; 9455 u_int skip_addr; 9456 u_int i; 9457 9458 address_offset = 0; 9459 cur_patch = patches; 9460 skip_addr = 0; 9461 9462 for (i = 0; i < address;) { 9463 9464 ahd_check_patch(ahd, &cur_patch, i, &skip_addr); 9465 9466 if (skip_addr > i) { 9467 int end_addr; 9468 9469 end_addr = min(address, skip_addr); 9470 address_offset += end_addr - i; 9471 i = skip_addr; 9472 } else { 9473 i++; 9474 } 9475 } 9476 return (address - address_offset); 9477 } 9478 9479 static void 9480 ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) 9481 { 9482 union ins_formats instr; 9483 struct ins_format1 *fmt1_ins; 9484 struct ins_format3 *fmt3_ins; 9485 u_int opcode; 9486 9487 /* 9488 * The firmware is always compiled into a little endian format.
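 *
 * (Further down, each word is also given odd parity: the loop counts
 * the ones in bits 0-30 and sets the parity bit when that count is
 * even, so every 32-bit word written to SEQRAM carries an odd number
 * of ones for the hardware's parity check.)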
static void
ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
{
	union ins_formats instr;
	struct ins_format1 *fmt1_ins;
	struct ins_format3 *fmt3_ins;
	u_int opcode;

	/*
	 * The firmware is always compiled into a little-endian format.
	 */
	instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		fmt3_ins = &instr.format3;
		fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
	}
	fallthrough;
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		fallthrough;
	case AIC_OP_ROL:
	{
		int i, count;

		/* Calculate odd parity for the instruction */
		for (i = 0, count = 0; i < 31; i++) {
			uint32_t mask;

			mask = 0x01 << i;
			if ((instr.integer & mask) != 0)
				count++;
		}
		if ((count & 0x01) == 0)
			instr.format1.parity = 1;

		/* The sequencer is a little-endian CPU */
		instr.integer = ahd_htole32(instr.integer);
		ahd_outsb(ahd, SEQRAM, instr.bytes, 4);
		break;
	}
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}
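
/*
 * Parity example (illustrative): an instruction word with only
 * bits 0 and 1 set has an even number of one-bits in positions
 * 0-30, so the loop above sets the parity bit to give the stored
 * 32-bit word odd parity overall (cf. the SQPARERR hard error,
 * which reports sequencer RAM parity failures).
 */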
":(" : "|", 9614 table[entry].name); 9615 printed_mask |= table[entry].mask; 9616 9617 break; 9618 } 9619 if (entry >= num_entries) 9620 break; 9621 } 9622 if (printed_mask != 0) 9623 printed += printk(") "); 9624 else 9625 printed += printk(" "); 9626 if (cur_column != NULL) 9627 *cur_column += printed; 9628 return (printed); 9629 } 9630 9631 void 9632 ahd_dump_card_state(struct ahd_softc *ahd) 9633 { 9634 struct scb *scb; 9635 ahd_mode_state saved_modes; 9636 u_int dffstat; 9637 int paused; 9638 u_int scb_index; 9639 u_int saved_scb_index; 9640 u_int cur_col; 9641 int i; 9642 9643 if (ahd_is_paused(ahd)) { 9644 paused = 1; 9645 } else { 9646 paused = 0; 9647 ahd_pause(ahd); 9648 } 9649 saved_modes = ahd_save_modes(ahd); 9650 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); 9651 printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" 9652 "%s: Dumping Card State at program address 0x%x Mode 0x%x\n", 9653 ahd_name(ahd), 9654 ahd_inw(ahd, CURADDR), 9655 ahd_build_mode_state(ahd, ahd->saved_src_mode, 9656 ahd->saved_dst_mode)); 9657 if (paused) 9658 printk("Card was paused\n"); 9659 9660 if (ahd_check_cmdcmpltqueues(ahd)) 9661 printk("Completions are pending\n"); 9662 9663 /* 9664 * Mode independent registers. 9665 */ 9666 cur_col = 0; 9667 ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50); 9668 ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50); 9669 ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50); 9670 ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50); 9671 ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50); 9672 ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50); 9673 ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50); 9674 ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50); 9675 ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50); 9676 ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50); 9677 ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50); 9678 ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50); 9679 ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50); 9680 ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50); 9681 ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50); 9682 ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50); 9683 ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50); 9684 ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50); 9685 ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50); 9686 ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT), 9687 &cur_col, 50); 9688 ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50); 9689 ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID), 9690 &cur_col, 50); 9691 ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50); 9692 ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50); 9693 ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50); 9694 ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50); 9695 ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50); 9696 ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50); 9697 ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50); 9698 ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50); 9699 ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50); 9700 ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50); 9701 ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50); 9702 ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50); 9703 printk("\n"); 9704 printk("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 
0x%x " 9705 "CURRSCB 0x%x NEXTSCB 0x%x\n", 9706 ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING), 9707 ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB), 9708 ahd_inw(ahd, NEXTSCB)); 9709 cur_col = 0; 9710 /* QINFIFO */ 9711 ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, 9712 CAM_LUN_WILDCARD, SCB_LIST_NULL, 9713 ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT); 9714 saved_scb_index = ahd_get_scbptr(ahd); 9715 printk("Pending list:"); 9716 i = 0; 9717 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { 9718 if (i++ > AHD_SCB_MAX) 9719 break; 9720 cur_col = printk("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb), 9721 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT)); 9722 ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); 9723 ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL), 9724 &cur_col, 60); 9725 ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID), 9726 &cur_col, 60); 9727 } 9728 printk("\nTotal %d\n", i); 9729 9730 printk("Kernel Free SCB list: "); 9731 i = 0; 9732 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { 9733 struct scb *list_scb; 9734 9735 list_scb = scb; 9736 do { 9737 printk("%d ", SCB_GET_TAG(list_scb)); 9738 list_scb = LIST_NEXT(list_scb, collision_links); 9739 } while (list_scb && i++ < AHD_SCB_MAX); 9740 } 9741 9742 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { 9743 if (i++ > AHD_SCB_MAX) 9744 break; 9745 printk("%d ", SCB_GET_TAG(scb)); 9746 } 9747 printk("\n"); 9748 9749 printk("Sequencer Complete DMA-inprog list: "); 9750 scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD); 9751 i = 0; 9752 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9753 ahd_set_scbptr(ahd, scb_index); 9754 printk("%d ", scb_index); 9755 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9756 } 9757 printk("\n"); 9758 9759 printk("Sequencer Complete list: "); 9760 scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD); 9761 i = 0; 9762 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9763 ahd_set_scbptr(ahd, scb_index); 9764 printk("%d ", scb_index); 9765 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9766 } 9767 printk("\n"); 9768 9769 printk("Sequencer DMA-Up and Complete list: "); 9770 scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); 9771 i = 0; 9772 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9773 ahd_set_scbptr(ahd, scb_index); 9774 printk("%d ", scb_index); 9775 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9776 } 9777 printk("\n"); 9778 printk("Sequencer On QFreeze and Complete list: "); 9779 scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); 9780 i = 0; 9781 while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { 9782 ahd_set_scbptr(ahd, scb_index); 9783 printk("%d ", scb_index); 9784 scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); 9785 } 9786 printk("\n"); 9787 ahd_set_scbptr(ahd, saved_scb_index); 9788 dffstat = ahd_inb(ahd, DFFSTAT); 9789 for (i = 0; i < 2; i++) { 9790 #ifdef AHD_DEBUG 9791 struct scb *fifo_scb; 9792 #endif 9793 u_int fifo_scbptr; 9794 9795 ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); 9796 fifo_scbptr = ahd_get_scbptr(ahd); 9797 printk("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n", 9798 ahd_name(ahd), i, 9799 (dffstat & (FIFO0FREE << i)) ? 
"Free" : "Active", 9800 ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr); 9801 cur_col = 0; 9802 ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50); 9803 ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50); 9804 ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50); 9805 ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50); 9806 ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW), 9807 &cur_col, 50); 9808 ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50); 9809 ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50); 9810 ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50); 9811 ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50); 9812 if (cur_col > 50) { 9813 printk("\n"); 9814 cur_col = 0; 9815 } 9816 cur_col += printk("SHADDR = 0x%x%x, SHCNT = 0x%x ", 9817 ahd_inl(ahd, SHADDR+4), 9818 ahd_inl(ahd, SHADDR), 9819 (ahd_inb(ahd, SHCNT) 9820 | (ahd_inb(ahd, SHCNT + 1) << 8) 9821 | (ahd_inb(ahd, SHCNT + 2) << 16))); 9822 if (cur_col > 50) { 9823 printk("\n"); 9824 cur_col = 0; 9825 } 9826 cur_col += printk("HADDR = 0x%x%x, HCNT = 0x%x ", 9827 ahd_inl(ahd, HADDR+4), 9828 ahd_inl(ahd, HADDR), 9829 (ahd_inb(ahd, HCNT) 9830 | (ahd_inb(ahd, HCNT + 1) << 8) 9831 | (ahd_inb(ahd, HCNT + 2) << 16))); 9832 ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50); 9833 #ifdef AHD_DEBUG 9834 if ((ahd_debug & AHD_SHOW_SG) != 0) { 9835 fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr); 9836 if (fifo_scb != NULL) 9837 ahd_dump_sglist(fifo_scb); 9838 } 9839 #endif 9840 } 9841 printk("\nLQIN: "); 9842 for (i = 0; i < 20; i++) 9843 printk("0x%x ", ahd_inb(ahd, LQIN + i)); 9844 printk("\n"); 9845 ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); 9846 printk("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n", 9847 ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE), 9848 ahd_inb(ahd, OPTIONMODE)); 9849 printk("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n", 9850 ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT), 9851 ahd_inb(ahd, MAXCMDCNT)); 9852 printk("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n", 9853 ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID), 9854 ahd_inb(ahd, SAVED_LUN)); 9855 ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50); 9856 printk("\n"); 9857 ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); 9858 cur_col = 0; 9859 ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50); 9860 printk("\n"); 9861 ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); 9862 printk("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n", 9863 ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX), 9864 ahd_inw(ahd, DINDEX)); 9865 printk("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n", 9866 ahd_name(ahd), ahd_get_scbptr(ahd), 9867 ahd_inw_scbram(ahd, SCB_NEXT), 9868 ahd_inw_scbram(ahd, SCB_NEXT2)); 9869 printk("CDB %x %x %x %x %x %x\n", 9870 ahd_inb_scbram(ahd, SCB_CDB_STORE), 9871 ahd_inb_scbram(ahd, SCB_CDB_STORE+1), 9872 ahd_inb_scbram(ahd, SCB_CDB_STORE+2), 9873 ahd_inb_scbram(ahd, SCB_CDB_STORE+3), 9874 ahd_inb_scbram(ahd, SCB_CDB_STORE+4), 9875 ahd_inb_scbram(ahd, SCB_CDB_STORE+5)); 9876 printk("STACK:"); 9877 for (i = 0; i < ahd->stack_size; i++) { 9878 ahd->saved_stack[i] = 9879 ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8); 9880 printk(" 0x%x", ahd->saved_stack[i]); 9881 } 9882 for (i = ahd->stack_size-1; i >= 0; i--) { 9883 ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF); 9884 ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); 9885 } 9886 printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); 9887 ahd_restore_modes(ahd, saved_modes); 

#if 0
void
ahd_dump_scbs(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;
	u_int saved_scb_index;
	int i;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	saved_scb_index = ahd_get_scbptr(ahd);
	for (i = 0; i < AHD_SCB_MAX; i++) {
		ahd_set_scbptr(ahd, i);
		printk("%3d", i);
		printk("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
		       ahd_inb_scbram(ahd, SCB_CONTROL),
		       ahd_inb_scbram(ahd, SCB_SCSIID),
		       ahd_inw_scbram(ahd, SCB_NEXT),
		       ahd_inw_scbram(ahd, SCB_NEXT2),
		       ahd_inl_scbram(ahd, SCB_SGPTR),
		       ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
	}
	printk("\n");
	ahd_set_scbptr(ahd, saved_scb_index);
	ahd_restore_modes(ahd, saved_modes);
}
#endif  /* 0 */

/**************************** Flexport Logic **********************************/
/*
 * Read count 16-bit words from 16-bit word address start_addr from the
 * SEEPROM attached to the controller, into buf, using the controller's
 * SEEPROM reading state machine.  Optionally treat the data as a byte
 * stream in terms of byte order.
 */
int
ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
		 u_int start_addr, u_int count, int bytestream)
{
	u_int cur_addr;
	u_int end_addr;
	int error;

	/*
	 * If we never make it through the loop even once,
	 * we were passed invalid arguments.
	 */
	error = EINVAL;
	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	end_addr = start_addr + count;
	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
		ahd_outb(ahd, SEEADR, cur_addr);
		ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);

		error = ahd_wait_seeprom(ahd);
		if (error)
			break;
		if (bytestream != 0) {
			uint8_t *bytestream_ptr;

			bytestream_ptr = (uint8_t *)buf;
			*bytestream_ptr++ = ahd_inb(ahd, SEEDAT);
			*bytestream_ptr = ahd_inb(ahd, SEEDAT+1);
		} else {
			/*
			 * ahd_inw() already handles machine byte order.
			 */
			*buf = ahd_inw(ahd, SEEDAT);
		}
		buf++;
	}
	return (error);
}
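
/*
 * Usage sketch (hypothetical wrapper): fetch a seeprom_config
 * image from word offset 0 and validate it, assuming the caller
 * has already acquired the SEEPROM and set the proper mode.
 */
#if 0
static int
example_fetch_config(struct ahd_softc *ahd, struct seeprom_config *sc)
{
	int error;

	error = ahd_read_seeprom(ahd, (uint16_t *)sc, /*start_addr*/0,
				 sizeof(*sc)/2, /*bytestream*/FALSE);
	if (error != 0)
		return (error);
	if (ahd_verify_cksum(sc) == 0)
		return (EINVAL);
	return (0);
}
#endif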

/*
 * Write count 16-bit words from buf into the SEEPROM attached to the
 * controller, starting at 16-bit word address start_addr, using the
 * controller's SEEPROM writing state machine.
 */
int
ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
		  u_int start_addr, u_int count)
{
	u_int cur_addr;
	u_int end_addr;
	int error;
	int retval;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	error = ENOENT;

	/* Place the chip into write-enable mode */
	ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
	ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
	error = ahd_wait_seeprom(ahd);
	if (error)
		return (error);

	/*
	 * Write the data.  If we don't get through the loop at
	 * least once, the arguments were invalid.
	 */
	retval = EINVAL;
	end_addr = start_addr + count;
	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
		ahd_outw(ahd, SEEDAT, *buf++);
		ahd_outb(ahd, SEEADR, cur_addr);
		ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);

		retval = ahd_wait_seeprom(ahd);
		if (retval)
			break;
	}

	/*
	 * Disable writes.
	 */
	ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR);
	ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART);
	error = ahd_wait_seeprom(ahd);
	if (error)
		return (error);
	return (retval);
}

/*
 * Wait up to ~25ms (5000 polls at 5us intervals) for the serial
 * eeprom to satisfy our request.
 */
static int
ahd_wait_seeprom(struct ahd_softc *ahd)
{
	int cnt;

	cnt = 5000;
	while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt)
		ahd_delay(5);

	if (cnt == 0)
		return (ETIMEDOUT);
	return (0);
}

/*
 * Validate the two checksums in the per_channel
 * vital product data struct.
 */
static int
ahd_verify_vpd_cksum(struct vpd_config *vpd)
{
	int i;
	int maxaddr;
	uint32_t checksum;
	uint8_t *vpdarray;

	vpdarray = (uint8_t *)vpd;
	maxaddr = offsetof(struct vpd_config, vpd_checksum);
	checksum = 0;
	for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++)
		checksum = checksum + vpdarray[i];
	if (checksum == 0
	 || (-checksum & 0xFF) != vpd->vpd_checksum)
		return (0);

	checksum = 0;
	maxaddr = offsetof(struct vpd_config, checksum);
	for (i = offsetof(struct vpd_config, default_target_flags);
	     i < maxaddr; i++)
		checksum = checksum + vpdarray[i];
	if (checksum == 0
	 || (-checksum & 0xFF) != vpd->checksum)
		return (0);
	return (1);
}

int
ahd_verify_cksum(struct seeprom_config *sc)
{
	int i;
	int maxaddr;
	uint32_t checksum;
	uint16_t *scarray;

	maxaddr = (sizeof(*sc)/2) - 1;
	checksum = 0;
	scarray = (uint16_t *)sc;

	for (i = 0; i < maxaddr; i++)
		checksum = checksum + scarray[i];
	if (checksum == 0
	 || (checksum & 0xFFFF) != sc->checksum) {
		return (0);
	} else {
		return (1);
	}
}
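
/*
 * Checksum example (illustrative): both routines above use the
 * same additive scheme.  For seeprom_config, the 16-bit words
 * preceding the checksum word are summed and truncated to 16
 * bits; if they sum to 0x1ABCD, the stored checksum must be
 * 0xABCD.  The VPD variant sums bytes and stores the negation
 * instead, so (sum + stored_byte) & 0xFF must equal 0.
 */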

int
ahd_acquire_seeprom(struct ahd_softc *ahd)
{
	/*
	 * We should be able to determine the SEEPROM type
	 * from the flexport logic, but unfortunately not
	 * all implementations have this logic and there is
	 * no programmatic method for determining if the logic
	 * is present.
	 */
	return (1);
#if 0
	uint8_t seetype;
	int error;

	error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
	if (error != 0
	 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
		return (0);
	return (1);
#endif
}

void
ahd_release_seeprom(struct ahd_softc *ahd)
{
	/* Currently a no-op */
}

/*
 * Wait at most 2 seconds for flexport arbitration to succeed.
 */
static int
ahd_wait_flexport(struct ahd_softc *ahd)
{
	int cnt;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	cnt = 1000000 * 2 / 5;
	while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt)
		ahd_delay(5);

	if (cnt == 0)
		return (ETIMEDOUT);
	return (0);
}

int
ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value)
{
	int error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (addr > 7)
		panic("ahd_write_flexport: address out of range");
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	ahd_outb(ahd, BRDDAT, value);
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}

int
ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value)
{
	int error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (addr > 7)
		panic("ahd_read_flexport: address out of range");
	ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3));
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	*value = ahd_inb(ahd, BRDDAT);
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}
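
/*
 * Usage sketch (hypothetical): flexport addresses are only 3 bits
 * wide (0-7), and a read-modify-write of a board control bit
 * might look like this, assuming the SCSI mode is already set.
 */
#if 0
static int
example_toggle_flexport_bit(struct ahd_softc *ahd, u_int addr, uint8_t bit)
{
	uint8_t val;
	int error;

	error = ahd_read_flexport(ahd, addr, &val);
	if (error != 0)
		return (error);
	return (ahd_write_flexport(ahd, addr, val ^ bit));
}
#endif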

/************************* Target Mode ****************************************/
#ifdef AHD_TARGET_MODE
cam_status
ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb,
		    struct ahd_tmode_tstate **tstate,
		    struct ahd_tmode_lstate **lstate,
		    int notfound_failure)
{

	if ((ahd->features & AHD_TARGETMODE) == 0)
		return (CAM_REQ_INVALID);

	/*
	 * Handle the 'black hole' device that sucks up
	 * requests to unattached luns on enabled targets.
	 */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*tstate = NULL;
		*lstate = ahd->black_hole;
	} else {
		u_int max_id;

		max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
		if (ccb->ccb_h.target_id >= max_id)
			return (CAM_TID_INVALID);

		if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS)
			return (CAM_LUN_INVALID);

		*tstate = ahd->enabled_targets[ccb->ccb_h.target_id];
		*lstate = NULL;
		if (*tstate != NULL)
			*lstate =
			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
	}

	if (notfound_failure != 0 && *lstate == NULL)
		return (CAM_PATH_INVALID);

	return (CAM_REQ_CMP);
}

void
ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
{
#if NOT_YET
	struct ahd_tmode_tstate *tstate;
	struct ahd_tmode_lstate *lstate;
	struct ccb_en_lun *cel;
	cam_status status;
	u_int target;
	u_int lun;
	u_int target_mask;
	u_long s;
	char channel;

	status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate,
				     /*notfound_failure*/FALSE);

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	if ((ahd->features & AHD_MULTIROLE) != 0) {
		u_int our_id;

		our_id = ahd->our_id;
		if (ccb->ccb_h.target_id != our_id) {
			if ((ahd->features & AHD_MULTI_TID) != 0
			 && (ahd->flags & AHD_INITIATORROLE) != 0) {
				/*
				 * Only allow additional targets if
				 * the initiator role is disabled.
				 * The hardware cannot handle a re-select-in
				 * on the initiator id during a re-select-out
				 * on a different target id.
				 */
				status = CAM_TID_INVALID;
			} else if ((ahd->flags & AHD_INITIATORROLE) != 0
				|| ahd->enabled_luns > 0) {
				/*
				 * Only allow our target id to change
				 * if the initiator role is not configured
				 * and there are no enabled luns which
				 * are attached to the currently registered
				 * scsi id.
				 */
				status = CAM_TID_INVALID;
			}
		}
	}

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	/*
	 * We now have an id that is valid.
	 * If we aren't in target mode, switch modes.
	 */
	if ((ahd->flags & AHD_TARGETROLE) == 0
	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		u_long s;

		printk("Configuring Target Mode\n");
		ahd_lock(ahd, &s);
		if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
			ccb->ccb_h.status = CAM_BUSY;
			ahd_unlock(ahd, &s);
			return;
		}
		ahd->flags |= AHD_TARGETROLE;
		if ((ahd->features & AHD_MULTIROLE) == 0)
			ahd->flags &= ~AHD_INITIATORROLE;
		ahd_pause(ahd);
		ahd_loadseq(ahd);
		ahd_restart(ahd);
		ahd_unlock(ahd, &s);
	}
	cel = &ccb->cel;
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	channel = SIM_CHANNEL(ahd, sim);
	target_mask = 0x01 << target;
	if (channel == 'B')
		target_mask <<= 8;

	if (cel->enable != 0) {
		u_int scsiseq1;

		/* Are we already enabled? */
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printk("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}

		if (cel->grp6_len != 0
		 || cel->grp7_len != 0) {
			/*
			 * Don't (yet?) support vendor
			 * specific commands.
			 */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printk("Non-zero Group Codes\n");
			return;
		}

		/*
		 * Seems to be okay.
		 * Set up our data structures.
		 */
		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
			tstate = ahd_alloc_tstate(ahd, target, channel);
			if (tstate == NULL) {
				xpt_print_path(ccb->ccb_h.path);
				printk("Couldn't allocate tstate\n");
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				return;
			}
		}
		lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printk("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
					 xpt_path_path_id(ccb->ccb_h.path),
					 xpt_path_target_id(ccb->ccb_h.path),
					 xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			kfree(lstate);
			xpt_print_path(ccb->ccb_h.path);
			printk("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		ahd_lock(ahd, &s);
		ahd_pause(ahd);
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = lstate;
			ahd->enabled_luns++;

			if ((ahd->features & AHD_MULTI_TID) != 0) {
				u_int targid_mask;

				targid_mask = ahd_inw(ahd, TARGID);
				targid_mask |= target_mask;
				ahd_outw(ahd, TARGID, targid_mask);
				ahd_update_scsiid(ahd, targid_mask);
			} else {
				u_int our_id;
				char channel;

				channel = SIM_CHANNEL(ahd, sim);
				our_id = SIM_SCSI_ID(ahd, sim);

				/*
				 * This can only happen if selections
				 * are not enabled
				 */
				if (target != our_id) {
					u_int sblkctl;
					char cur_channel;
					int swap;

					sblkctl = ahd_inb(ahd, SBLKCTL);
					cur_channel = (sblkctl & SELBUSB)
						    ? 'B' : 'A';
					if ((ahd->features & AHD_TWIN) == 0)
						cur_channel = 'A';
					swap = cur_channel != channel;
					ahd->our_id = target;

					if (swap)
						ahd_outb(ahd, SBLKCTL,
							 sblkctl ^ SELBUSB);

					ahd_outb(ahd, SCSIID, target);

					if (swap)
						ahd_outb(ahd, SBLKCTL, sblkctl);
				}
			}
		} else
			ahd->black_hole = lstate;
		/* Allow select-in operations */
		if (ahd->black_hole != NULL && ahd->enabled_luns > 0) {
			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
			scsiseq1 |= ENSELI;
			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
			scsiseq1 |= ENSELI;
			ahd_outb(ahd, SCSISEQ1, scsiseq1);
		}
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printk("Lun now enabled for target mode\n");
	} else {
		struct scb *scb;
		int i, empty;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}

		ahd_lock(ahd, &s);

		ccb->ccb_h.status = CAM_REQ_CMP;
		LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
			struct ccb_hdr *ccbh;

			ccbh = &scb->io_ctx->ccb_h;
			if (ccbh->func_code == XPT_CONT_TARGET_IO
			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)) {
				printk("CTIO pending\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;
				ahd_unlock(ahd, &s);
				return;
			}
		}

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printk("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printk("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			ahd_unlock(ahd, &s);
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printk("Target mode disabled\n");
		xpt_free_path(lstate->path);
		kfree(lstate);

		ahd_pause(ahd);
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahd->enabled_luns--;
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahd_free_tstate(ahd, target, channel,
						/*force*/FALSE);
				if (ahd->features & AHD_MULTI_TID) {
					u_int targid_mask;

					targid_mask = ahd_inw(ahd, TARGID);
					targid_mask &= ~target_mask;
					ahd_outw(ahd, TARGID, targid_mask);
					ahd_update_scsiid(ahd, targid_mask);
				}
			}
		} else {
			ahd->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahd->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq1;

			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ1, scsiseq1);

			if ((ahd->features & AHD_MULTIROLE) == 0) {
				printk("Configuring Initiator Mode\n");
				ahd->flags &= ~AHD_TARGETROLE;
				ahd->flags |= AHD_INITIATORROLE;
				ahd_pause(ahd);
				ahd_loadseq(ahd);
				ahd_restart(ahd);
				/*
				 * Unpaused.  The extra unpause
				 * that follows is harmless.
				 */
			}
		}
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
	}
#endif
}

static void
ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
{
#if NOT_YET
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahd->features & AHD_MULTI_TID) == 0)
		panic("ahd_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahd->features & AHD_ULTRA2) != 0)
		scsiid = ahd_inb(ahd, SCSIID_ULTRA2);
	else
		scsiid = ahd_inb(ahd, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/* ffs counts from 1 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			our_id = ahd->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahd->features & AHD_ULTRA2) != 0)
		ahd_outb(ahd, SCSIID_ULTRA2, scsiid);
	else
		ahd_outb(ahd, SCSIID, scsiid);
#endif
}

static void
ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
{
	struct target_cmd *cmd;

	ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD);
	while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) {
		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahd_handle_target_cmd(ahd, cmd) != 0)
			break;

		cmd->cmd_valid = 0;
		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
				sizeof(struct target_cmd),
				BUS_DMASYNC_PREREAD);
		ahd->tqinfifonext++;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 */
		if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			u_int hs_mailbox;

			hs_mailbox = ahd_inb(ahd, HS_MAILBOX);
			hs_mailbox &= ~HOST_TQINPOS;
			hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS;
			ahd_outb(ahd, HS_MAILBOX, hs_mailbox);
		}
	}
}

static int
ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_tmode_lstate *lstate;
	struct ccb_accept_tio *atio;
	uint8_t *byte;
	int initiator;
	int target;
	int lun;

	initiator = SCSIID_TARGET(ahd, cmd->scsiid);
	target = SCSIID_OUR_ID(cmd->scsiid);
	lun = (cmd->identify & MSG_IDENTIFY_LUNMASK);

	byte = cmd->bytes;
	tstate = ahd->enabled_targets[target];
	lstate = NULL;
	if (tstate != NULL)
		lstate = tstate->enabled_luns[lun];

	/*
	 * Commands for disabled luns go to the black hole driver.
	 */
	if (lstate == NULL)
		lstate = ahd->black_hole;

	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
	if (atio == NULL) {
		ahd->flags |= AHD_TQINFIFO_BLOCKED;
		/*
		 * Wait for more ATIOs from the peripheral driver for this lun.
		 */
		return (1);
	} else
		ahd->flags &= ~AHD_TQINFIFO_BLOCKED;
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_TQIN) != 0)
		printk("Incoming command from %d for %d:%d%s\n",
		       initiator, target, lun,
		       lstate == ahd->black_hole ? "(Black Holed)" : "");
#endif
	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);

	if (lstate == ahd->black_hole) {
		/* Fill in the wildcards */
		atio->ccb_h.target_id = target;
		atio->ccb_h.target_lun = lun;
	}

	/*
	 * Package it up and send it off to
	 * whoever has this lun enabled.
	 */
	atio->sense_len = 0;
	atio->init_id = initiator;
	if (byte[0] != 0xFF) {
		/* Tag was included */
		atio->tag_action = *byte++;
		atio->tag_id = *byte++;
		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
	} else {
		atio->ccb_h.flags = 0;
	}
	byte++;
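
	/*
	 * CDB length decode (illustrative): the top three bits of the
	 * opcode select the command group.  For example, READ(6)
	 * (0x08) is group 0 -> a 6-byte CDB, READ(10) (0x28) is
	 * group 1 -> 10 bytes, and READ(16) (0x88) is group 4 ->
	 * 16 bytes.
	 */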
10643 */ 10644 return (1); 10645 } else 10646 ahd->flags &= ~AHD_TQINFIFO_BLOCKED; 10647 #ifdef AHD_DEBUG 10648 if ((ahd_debug & AHD_SHOW_TQIN) != 0) 10649 printk("Incoming command from %d for %d:%d%s\n", 10650 initiator, target, lun, 10651 lstate == ahd->black_hole ? "(Black Holed)" : ""); 10652 #endif 10653 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); 10654 10655 if (lstate == ahd->black_hole) { 10656 /* Fill in the wildcards */ 10657 atio->ccb_h.target_id = target; 10658 atio->ccb_h.target_lun = lun; 10659 } 10660 10661 /* 10662 * Package it up and send it off to 10663 * whomever has this lun enabled. 10664 */ 10665 atio->sense_len = 0; 10666 atio->init_id = initiator; 10667 if (byte[0] != 0xFF) { 10668 /* Tag was included */ 10669 atio->tag_action = *byte++; 10670 atio->tag_id = *byte++; 10671 atio->ccb_h.flags = CAM_TAG_ACTION_VALID; 10672 } else { 10673 atio->ccb_h.flags = 0; 10674 } 10675 byte++; 10676 10677 /* Okay. Now determine the cdb size based on the command code */ 10678 switch (*byte >> CMD_GROUP_CODE_SHIFT) { 10679 case 0: 10680 atio->cdb_len = 6; 10681 break; 10682 case 1: 10683 case 2: 10684 atio->cdb_len = 10; 10685 break; 10686 case 4: 10687 atio->cdb_len = 16; 10688 break; 10689 case 5: 10690 atio->cdb_len = 12; 10691 break; 10692 case 3: 10693 default: 10694 /* Only copy the opcode. */ 10695 atio->cdb_len = 1; 10696 printk("Reserved or VU command code type encountered\n"); 10697 break; 10698 } 10699 10700 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); 10701 10702 atio->ccb_h.status |= CAM_CDB_RECVD; 10703 10704 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { 10705 /* 10706 * We weren't allowed to disconnect. 10707 * We're hanging on the bus until a 10708 * continue target I/O comes in response 10709 * to this accept tio. 10710 */ 10711 #ifdef AHD_DEBUG 10712 if ((ahd_debug & AHD_SHOW_TQIN) != 0) 10713 printk("Received Immediate Command %d:%d:%d - %p\n", 10714 initiator, target, lun, ahd->pending_device); 10715 #endif 10716 ahd->pending_device = lstate; 10717 ahd_freeze_ccb((union ccb *)atio); 10718 atio->ccb_h.flags |= CAM_DIS_DISCONNECT; 10719 } 10720 xpt_done((union ccb*)atio); 10721 return (0); 10722 } 10723 10724 #endif 10725