1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Aic94xx SAS/SATA driver hardware interface. 4 * 5 * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> 7 */ 8 9 #include <linux/pci.h> 10 #include <linux/slab.h> 11 #include <linux/delay.h> 12 #include <linux/module.h> 13 #include <linux/firmware.h> 14 15 #include "aic94xx.h" 16 #include "aic94xx_reg.h" 17 #include "aic94xx_hwi.h" 18 #include "aic94xx_seq.h" 19 #include "aic94xx_dump.h" 20 21 u32 MBAR0_SWB_SIZE; 22 23 /* ---------- Initialization ---------- */ 24 25 static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha) 26 { 27 /* adapter came with a sas address */ 28 if (asd_ha->hw_prof.sas_addr[0]) 29 return 0; 30 31 return sas_request_addr(asd_ha->sas_ha.shost, 32 asd_ha->hw_prof.sas_addr); 33 } 34 35 static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha) 36 { 37 int i; 38 39 for (i = 0; i < ASD_MAX_PHYS; i++) { 40 if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0) 41 continue; 42 /* Set a phy's address only if it has none. 43 */ 44 ASD_DPRINTK("setting phy%d addr to %llx\n", i, 45 SAS_ADDR(asd_ha->hw_prof.sas_addr)); 46 memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, 47 asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE); 48 } 49 } 50 51 /* ---------- PHY initialization ---------- */ 52 53 static void asd_init_phy_identify(struct asd_phy *phy) 54 { 55 phy->identify_frame = phy->id_frm_tok->vaddr; 56 57 memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); 58 59 phy->identify_frame->dev_type = SAS_END_DEVICE; 60 if (phy->sas_phy.role & PHY_ROLE_INITIATOR) 61 phy->identify_frame->initiator_bits = phy->sas_phy.iproto; 62 if (phy->sas_phy.role & PHY_ROLE_TARGET) 63 phy->identify_frame->target_bits = phy->sas_phy.tproto; 64 memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr, 65 SAS_ADDR_SIZE); 66 phy->identify_frame->phy_id = phy->sas_phy.id; 67 } 68 69 static int asd_init_phy(struct asd_phy *phy) 70 { 71 struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha; 72 struct asd_sas_phy *sas_phy = &phy->sas_phy; 73 74 sas_phy->enabled = 1; 75 sas_phy->iproto = SAS_PROTOCOL_ALL; 76 sas_phy->tproto = 0; 77 sas_phy->role = PHY_ROLE_INITIATOR; 78 sas_phy->oob_mode = OOB_NOT_CONNECTED; 79 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 80 81 phy->id_frm_tok = asd_alloc_coherent(asd_ha, 82 sizeof(*phy->identify_frame), 83 GFP_KERNEL); 84 if (!phy->id_frm_tok) { 85 asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id); 86 return -ENOMEM; 87 } else 88 asd_init_phy_identify(phy); 89 90 memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd)); 91 92 return 0; 93 } 94 95 static void asd_init_ports(struct asd_ha_struct *asd_ha) 96 { 97 int i; 98 99 spin_lock_init(&asd_ha->asd_ports_lock); 100 for (i = 0; i < ASD_MAX_PHYS; i++) { 101 struct asd_port *asd_port = &asd_ha->asd_ports[i]; 102 103 memset(asd_port->sas_addr, 0, SAS_ADDR_SIZE); 104 memset(asd_port->attached_sas_addr, 0, SAS_ADDR_SIZE); 105 asd_port->phy_mask = 0; 106 asd_port->num_phys = 0; 107 } 108 } 109 110 static int asd_init_phys(struct asd_ha_struct *asd_ha) 111 { 112 u8 i; 113 u8 phy_mask = asd_ha->hw_prof.enabled_phys; 114 115 for (i = 0; i < ASD_MAX_PHYS; i++) { 116 struct asd_phy *phy = &asd_ha->phys[i]; 117 118 phy->phy_desc = &asd_ha->hw_prof.phy_desc[i]; 119 phy->asd_port = NULL; 120 121 phy->sas_phy.enabled = 0; 122 phy->sas_phy.id = i; 123 phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0]; 124 phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0]; 125 phy->sas_phy.ha = &asd_ha->sas_ha; 126 phy->sas_phy.lldd_phy = phy; 127 } 128 129 /* Now enable and initialize only the enabled phys. */ 130 for_each_phy(phy_mask, phy_mask, i) { 131 int err = asd_init_phy(&asd_ha->phys[i]); 132 if (err) 133 return err; 134 } 135 136 return 0; 137 } 138 139 /* ---------- Sliding windows ---------- */ 140 141 static int asd_init_sw(struct asd_ha_struct *asd_ha) 142 { 143 struct pci_dev *pcidev = asd_ha->pcidev; 144 int err; 145 u32 v; 146 147 /* Unlock MBARs */ 148 err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v); 149 if (err) { 150 asd_printk("couldn't access conf. space of %s\n", 151 pci_name(pcidev)); 152 goto Err; 153 } 154 if (v) 155 err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v); 156 if (err) { 157 asd_printk("couldn't write to MBAR_KEY of %s\n", 158 pci_name(pcidev)); 159 goto Err; 160 } 161 162 /* Set sliding windows A, B and C to point to proper internal 163 * memory regions. 164 */ 165 pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR); 166 pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB, 167 REG_BASE_ADDR_CSEQCIO); 168 pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI); 169 asd_ha->io_handle[0].swa_base = REG_BASE_ADDR; 170 asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO; 171 asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI; 172 MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80; 173 if (!asd_ha->iospace) { 174 /* MBAR1 will point to OCM (On Chip Memory) */ 175 pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR); 176 asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR; 177 } 178 spin_lock_init(&asd_ha->iolock); 179 Err: 180 return err; 181 } 182 183 /* ---------- SCB initialization ---------- */ 184 185 /** 186 * asd_init_scbs - manually allocate the first SCB. 187 * @asd_ha: pointer to host adapter structure 188 * 189 * This allocates the very first SCB which would be sent to the 190 * sequencer for execution. Its bus address is written to 191 * CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of 192 * the _next_ scb to be DMA-ed to the host adapter is read from the last 193 * SCB DMA-ed to the host adapter, we have to always stay one step 194 * ahead of the sequencer and keep one SCB already allocated. 195 */ 196 static int asd_init_scbs(struct asd_ha_struct *asd_ha) 197 { 198 struct asd_seq_data *seq = &asd_ha->seq; 199 int bitmap_bytes; 200 201 /* allocate the index array and bitmap */ 202 asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs; 203 asd_ha->seq.tc_index_array = kcalloc(asd_ha->seq.tc_index_bitmap_bits, 204 sizeof(void *), 205 GFP_KERNEL); 206 if (!asd_ha->seq.tc_index_array) 207 return -ENOMEM; 208 209 bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8; 210 bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); 211 asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); 212 if (!asd_ha->seq.tc_index_bitmap) { 213 kfree(asd_ha->seq.tc_index_array); 214 asd_ha->seq.tc_index_array = NULL; 215 return -ENOMEM; 216 } 217 218 spin_lock_init(&seq->tc_index_lock); 219 220 seq->next_scb.size = sizeof(struct scb); 221 seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL, 222 &seq->next_scb.dma_handle); 223 if (!seq->next_scb.vaddr) { 224 kfree(asd_ha->seq.tc_index_bitmap); 225 kfree(asd_ha->seq.tc_index_array); 226 asd_ha->seq.tc_index_bitmap = NULL; 227 asd_ha->seq.tc_index_array = NULL; 228 return -ENOMEM; 229 } 230 231 seq->pending = 0; 232 spin_lock_init(&seq->pend_q_lock); 233 INIT_LIST_HEAD(&seq->pend_q); 234 235 return 0; 236 } 237 238 static void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha) 239 { 240 asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE; 241 asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE; 242 ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n", 243 asd_ha->hw_prof.max_scbs, 244 asd_ha->hw_prof.max_ddbs); 245 } 246 247 /* ---------- Done List initialization ---------- */ 248 249 static void asd_dl_tasklet_handler(unsigned long); 250 251 static int asd_init_dl(struct asd_ha_struct *asd_ha) 252 { 253 asd_ha->seq.actual_dl 254 = asd_alloc_coherent(asd_ha, 255 ASD_DL_SIZE * sizeof(struct done_list_struct), 256 GFP_KERNEL); 257 if (!asd_ha->seq.actual_dl) 258 return -ENOMEM; 259 asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr; 260 asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE; 261 asd_ha->seq.dl_next = 0; 262 tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler, 263 (unsigned long) asd_ha); 264 265 return 0; 266 } 267 268 /* ---------- EDB and ESCB init ---------- */ 269 270 static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, gfp_t gfp_flags) 271 { 272 struct asd_seq_data *seq = &asd_ha->seq; 273 int i; 274 275 seq->edb_arr = kmalloc_array(seq->num_edbs, sizeof(*seq->edb_arr), 276 gfp_flags); 277 if (!seq->edb_arr) 278 return -ENOMEM; 279 280 for (i = 0; i < seq->num_edbs; i++) { 281 seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE, 282 gfp_flags); 283 if (!seq->edb_arr[i]) 284 goto Err_unroll; 285 memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE); 286 } 287 288 ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs); 289 290 return 0; 291 292 Err_unroll: 293 for (i-- ; i >= 0; i--) 294 asd_free_coherent(asd_ha, seq->edb_arr[i]); 295 kfree(seq->edb_arr); 296 seq->edb_arr = NULL; 297 298 return -ENOMEM; 299 } 300 301 static int asd_alloc_escbs(struct asd_ha_struct *asd_ha, 302 gfp_t gfp_flags) 303 { 304 struct asd_seq_data *seq = &asd_ha->seq; 305 struct asd_ascb *escb; 306 int i, escbs; 307 308 seq->escb_arr = kmalloc_array(seq->num_escbs, sizeof(*seq->escb_arr), 309 gfp_flags); 310 if (!seq->escb_arr) 311 return -ENOMEM; 312 313 escbs = seq->num_escbs; 314 escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags); 315 if (!escb) { 316 asd_printk("couldn't allocate list of escbs\n"); 317 goto Err; 318 } 319 seq->num_escbs -= escbs; /* subtract what was not allocated */ 320 ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs); 321 322 for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next, 323 struct asd_ascb, 324 list)) { 325 seq->escb_arr[i] = escb; 326 escb->scb->header.opcode = EMPTY_SCB; 327 } 328 329 return 0; 330 Err: 331 kfree(seq->escb_arr); 332 seq->escb_arr = NULL; 333 return -ENOMEM; 334 335 } 336 337 static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha) 338 { 339 struct asd_seq_data *seq = &asd_ha->seq; 340 int i, k, z = 0; 341 342 for (i = 0; i < seq->num_escbs; i++) { 343 struct asd_ascb *ascb = seq->escb_arr[i]; 344 struct empty_scb *escb = &ascb->scb->escb; 345 346 ascb->edb_index = z; 347 348 escb->num_valid = ASD_EDBS_PER_SCB; 349 350 for (k = 0; k < ASD_EDBS_PER_SCB; k++) { 351 struct sg_el *eb = &escb->eb[k]; 352 struct asd_dma_tok *edb = seq->edb_arr[z++]; 353 354 memset(eb, 0, sizeof(*eb)); 355 eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle)); 356 eb->size = cpu_to_le32(((u32) edb->size)); 357 } 358 } 359 } 360 361 /** 362 * asd_init_escbs -- allocate and initialize empty scbs 363 * @asd_ha: pointer to host adapter structure 364 * 365 * An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers. 366 * They transport sense data, etc. 367 */ 368 static int asd_init_escbs(struct asd_ha_struct *asd_ha) 369 { 370 struct asd_seq_data *seq = &asd_ha->seq; 371 int err = 0; 372 373 /* Allocate two empty data buffers (edb) per sequencer. */ 374 int edbs = 2*(1+asd_ha->hw_prof.num_phys); 375 376 seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB; 377 seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB; 378 379 err = asd_alloc_edbs(asd_ha, GFP_KERNEL); 380 if (err) { 381 asd_printk("couldn't allocate edbs\n"); 382 return err; 383 } 384 385 err = asd_alloc_escbs(asd_ha, GFP_KERNEL); 386 if (err) { 387 asd_printk("couldn't allocate escbs\n"); 388 return err; 389 } 390 391 asd_assign_edbs2escbs(asd_ha); 392 /* In order to insure that normal SCBs do not overfill sequencer 393 * memory and leave no space for escbs (halting condition), 394 * we increment pending here by the number of escbs. However, 395 * escbs are never pending. 396 */ 397 seq->pending = seq->num_escbs; 398 seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2; 399 400 return 0; 401 } 402 403 /* ---------- HW initialization ---------- */ 404 405 /** 406 * asd_chip_hardrst -- hard reset the chip 407 * @asd_ha: pointer to host adapter structure 408 * 409 * This takes 16 cycles and is synchronous to CFCLK, which runs 410 * at 200 MHz, so this should take at most 80 nanoseconds. 411 */ 412 int asd_chip_hardrst(struct asd_ha_struct *asd_ha) 413 { 414 int i; 415 int count = 100; 416 u32 reg; 417 418 for (i = 0 ; i < 4 ; i++) { 419 asd_write_reg_dword(asd_ha, COMBIST, HARDRST); 420 } 421 422 do { 423 udelay(1); 424 reg = asd_read_reg_dword(asd_ha, CHIMINT); 425 if (reg & HARDRSTDET) { 426 asd_write_reg_dword(asd_ha, CHIMINT, 427 HARDRSTDET|PORRSTDET); 428 return 0; 429 } 430 } while (--count > 0); 431 432 return -ENODEV; 433 } 434 435 /** 436 * asd_init_chip -- initialize the chip 437 * @asd_ha: pointer to host adapter structure 438 * 439 * Hard resets the chip, disables HA interrupts, downloads the sequnecer 440 * microcode and starts the sequencers. The caller has to explicitly 441 * enable HA interrupts with asd_enable_ints(asd_ha). 442 */ 443 static int asd_init_chip(struct asd_ha_struct *asd_ha) 444 { 445 int err; 446 447 err = asd_chip_hardrst(asd_ha); 448 if (err) { 449 asd_printk("couldn't hard reset %s\n", 450 pci_name(asd_ha->pcidev)); 451 goto out; 452 } 453 454 asd_disable_ints(asd_ha); 455 456 err = asd_init_seqs(asd_ha); 457 if (err) { 458 asd_printk("couldn't init seqs for %s\n", 459 pci_name(asd_ha->pcidev)); 460 goto out; 461 } 462 463 err = asd_start_seqs(asd_ha); 464 if (err) { 465 asd_printk("couldn't start seqs for %s\n", 466 pci_name(asd_ha->pcidev)); 467 goto out; 468 } 469 out: 470 return err; 471 } 472 473 #define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE)) 474 475 static int max_devs = 0; 476 module_param_named(max_devs, max_devs, int, S_IRUGO); 477 MODULE_PARM_DESC(max_devs, "\n" 478 "\tMaximum number of SAS devices to support (not LUs).\n" 479 "\tDefault: 2176, Maximum: 65663.\n"); 480 481 static int max_cmnds = 0; 482 module_param_named(max_cmnds, max_cmnds, int, S_IRUGO); 483 MODULE_PARM_DESC(max_cmnds, "\n" 484 "\tMaximum number of commands queuable.\n" 485 "\tDefault: 512, Maximum: 66047.\n"); 486 487 static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha) 488 { 489 unsigned long dma_addr = OCM_BASE_ADDR; 490 u32 d; 491 492 dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE; 493 asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr); 494 d = asd_read_reg_dword(asd_ha, CTXDOMAIN); 495 d |= 4; 496 asd_write_reg_dword(asd_ha, CTXDOMAIN, d); 497 asd_ha->hw_prof.max_ddbs += MAX_DEVS; 498 } 499 500 static int asd_extend_devctx(struct asd_ha_struct *asd_ha) 501 { 502 dma_addr_t dma_handle; 503 unsigned long dma_addr; 504 u32 d; 505 int size; 506 507 asd_extend_devctx_ocm(asd_ha); 508 509 asd_ha->hw_prof.ddb_ext = NULL; 510 if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) { 511 max_devs = asd_ha->hw_prof.max_ddbs; 512 return 0; 513 } 514 515 size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE; 516 517 asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL); 518 if (!asd_ha->hw_prof.ddb_ext) { 519 asd_printk("couldn't allocate memory for %d devices\n", 520 max_devs); 521 max_devs = asd_ha->hw_prof.max_ddbs; 522 return -ENOMEM; 523 } 524 dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle; 525 dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE); 526 dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE; 527 dma_handle = (dma_addr_t) dma_addr; 528 asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle); 529 d = asd_read_reg_dword(asd_ha, CTXDOMAIN); 530 d &= ~4; 531 asd_write_reg_dword(asd_ha, CTXDOMAIN, d); 532 533 asd_ha->hw_prof.max_ddbs = max_devs; 534 535 return 0; 536 } 537 538 static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha) 539 { 540 dma_addr_t dma_handle; 541 unsigned long dma_addr; 542 u32 d; 543 int size; 544 545 asd_ha->hw_prof.scb_ext = NULL; 546 if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) { 547 max_cmnds = asd_ha->hw_prof.max_scbs; 548 return 0; 549 } 550 551 size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE; 552 553 asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL); 554 if (!asd_ha->hw_prof.scb_ext) { 555 asd_printk("couldn't allocate memory for %d commands\n", 556 max_cmnds); 557 max_cmnds = asd_ha->hw_prof.max_scbs; 558 return -ENOMEM; 559 } 560 dma_handle = asd_ha->hw_prof.scb_ext->dma_handle; 561 dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE); 562 dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE; 563 dma_handle = (dma_addr_t) dma_addr; 564 asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle); 565 d = asd_read_reg_dword(asd_ha, CTXDOMAIN); 566 d &= ~1; 567 asd_write_reg_dword(asd_ha, CTXDOMAIN, d); 568 569 asd_ha->hw_prof.max_scbs = max_cmnds; 570 571 return 0; 572 } 573 574 /** 575 * asd_init_ctxmem -- initialize context memory 576 * @asd_ha: pointer to host adapter structure 577 * 578 * This function sets the maximum number of SCBs and 579 * DDBs which can be used by the sequencer. This is normally 580 * 512 and 128 respectively. If support for more SCBs or more DDBs 581 * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are 582 * initialized here to extend context memory to point to host memory, 583 * thus allowing unlimited support for SCBs and DDBs -- only limited 584 * by host memory. 585 */ 586 static int asd_init_ctxmem(struct asd_ha_struct *asd_ha) 587 { 588 int bitmap_bytes; 589 590 asd_get_max_scb_ddb(asd_ha); 591 asd_extend_devctx(asd_ha); 592 asd_extend_cmdctx(asd_ha); 593 594 /* The kernel wants bitmaps to be unsigned long sized. */ 595 bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8; 596 bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); 597 asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); 598 if (!asd_ha->hw_prof.ddb_bitmap) 599 return -ENOMEM; 600 spin_lock_init(&asd_ha->hw_prof.ddb_lock); 601 602 return 0; 603 } 604 605 int asd_init_hw(struct asd_ha_struct *asd_ha) 606 { 607 int err; 608 u32 v; 609 610 err = asd_init_sw(asd_ha); 611 if (err) 612 return err; 613 614 err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v); 615 if (err) { 616 asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n", 617 pci_name(asd_ha->pcidev)); 618 return err; 619 } 620 err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, 621 v | SC_TMR_DIS); 622 if (err) { 623 asd_printk("couldn't disable split completion timer of %s\n", 624 pci_name(asd_ha->pcidev)); 625 return err; 626 } 627 628 err = asd_read_ocm(asd_ha); 629 if (err) { 630 asd_printk("couldn't read ocm(%d)\n", err); 631 /* While suspicios, it is not an error that we 632 * couldn't read the OCM. */ 633 } 634 635 err = asd_read_flash(asd_ha); 636 if (err) { 637 asd_printk("couldn't read flash(%d)\n", err); 638 /* While suspicios, it is not an error that we 639 * couldn't read FLASH memory. 640 */ 641 } 642 643 asd_init_ctxmem(asd_ha); 644 645 if (asd_get_user_sas_addr(asd_ha)) { 646 asd_printk("No SAS Address provided for %s\n", 647 pci_name(asd_ha->pcidev)); 648 err = -ENODEV; 649 goto Out; 650 } 651 652 asd_propagate_sas_addr(asd_ha); 653 654 err = asd_init_phys(asd_ha); 655 if (err) { 656 asd_printk("couldn't initialize phys for %s\n", 657 pci_name(asd_ha->pcidev)); 658 goto Out; 659 } 660 661 asd_init_ports(asd_ha); 662 663 err = asd_init_scbs(asd_ha); 664 if (err) { 665 asd_printk("couldn't initialize scbs for %s\n", 666 pci_name(asd_ha->pcidev)); 667 goto Out; 668 } 669 670 err = asd_init_dl(asd_ha); 671 if (err) { 672 asd_printk("couldn't initialize the done list:%d\n", 673 err); 674 goto Out; 675 } 676 677 err = asd_init_escbs(asd_ha); 678 if (err) { 679 asd_printk("couldn't initialize escbs\n"); 680 goto Out; 681 } 682 683 err = asd_init_chip(asd_ha); 684 if (err) { 685 asd_printk("couldn't init the chip\n"); 686 goto Out; 687 } 688 Out: 689 return err; 690 } 691 692 /* ---------- Chip reset ---------- */ 693 694 /** 695 * asd_chip_reset -- reset the host adapter, etc 696 * @asd_ha: pointer to host adapter structure of interest 697 * 698 * Called from the ISR. Hard reset the chip. Let everything 699 * timeout. This should be no different than hot-unplugging the 700 * host adapter. Once everything times out we'll init the chip with 701 * a call to asd_init_chip() and enable interrupts with asd_enable_ints(). 702 * XXX finish. 703 */ 704 static void asd_chip_reset(struct asd_ha_struct *asd_ha) 705 { 706 ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev)); 707 asd_chip_hardrst(asd_ha); 708 } 709 710 /* ---------- Done List Routines ---------- */ 711 712 static void asd_dl_tasklet_handler(unsigned long data) 713 { 714 struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data; 715 struct asd_seq_data *seq = &asd_ha->seq; 716 unsigned long flags; 717 718 while (1) { 719 struct done_list_struct *dl = &seq->dl[seq->dl_next]; 720 struct asd_ascb *ascb; 721 722 if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle) 723 break; 724 725 /* find the aSCB */ 726 spin_lock_irqsave(&seq->tc_index_lock, flags); 727 ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index)); 728 spin_unlock_irqrestore(&seq->tc_index_lock, flags); 729 if (unlikely(!ascb)) { 730 ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n"); 731 goto next_1; 732 } else if (ascb->scb->header.opcode == EMPTY_SCB) { 733 goto out; 734 } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) { 735 goto next_1; 736 } 737 spin_lock_irqsave(&seq->pend_q_lock, flags); 738 list_del_init(&ascb->list); 739 seq->pending--; 740 spin_unlock_irqrestore(&seq->pend_q_lock, flags); 741 out: 742 ascb->tasklet_complete(ascb, dl); 743 744 next_1: 745 seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1); 746 if (!seq->dl_next) 747 seq->dl_toggle ^= DL_TOGGLE_MASK; 748 } 749 } 750 751 /* ---------- Interrupt Service Routines ---------- */ 752 753 /** 754 * asd_process_donelist_isr -- schedule processing of done list entries 755 * @asd_ha: pointer to host adapter structure 756 */ 757 static void asd_process_donelist_isr(struct asd_ha_struct *asd_ha) 758 { 759 tasklet_schedule(&asd_ha->seq.dl_tasklet); 760 } 761 762 /** 763 * asd_com_sas_isr -- process device communication interrupt (COMINT) 764 * @asd_ha: pointer to host adapter structure 765 */ 766 static void asd_com_sas_isr(struct asd_ha_struct *asd_ha) 767 { 768 u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT); 769 770 /* clear COMSTAT int */ 771 asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF); 772 773 if (comstat & CSBUFPERR) { 774 asd_printk("%s: command/status buffer dma parity error\n", 775 pci_name(asd_ha->pcidev)); 776 } else if (comstat & CSERR) { 777 int i; 778 u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); 779 dmaerr &= 0xFF; 780 asd_printk("%s: command/status dma error, DMAERR: 0x%02x, " 781 "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n", 782 pci_name(asd_ha->pcidev), 783 dmaerr, 784 asd_read_reg_dword(asd_ha, CSDMAADR), 785 asd_read_reg_dword(asd_ha, CSDMAADR+4)); 786 asd_printk("CSBUFFER:\n"); 787 for (i = 0; i < 8; i++) { 788 asd_printk("%08x %08x %08x %08x\n", 789 asd_read_reg_dword(asd_ha, CSBUFFER), 790 asd_read_reg_dword(asd_ha, CSBUFFER+4), 791 asd_read_reg_dword(asd_ha, CSBUFFER+8), 792 asd_read_reg_dword(asd_ha, CSBUFFER+12)); 793 } 794 asd_dump_seq_state(asd_ha, 0); 795 } else if (comstat & OVLYERR) { 796 u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); 797 dmaerr = (dmaerr >> 8) & 0xFF; 798 asd_printk("%s: overlay dma error:0x%x\n", 799 pci_name(asd_ha->pcidev), 800 dmaerr); 801 } 802 asd_chip_reset(asd_ha); 803 } 804 805 static void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus) 806 { 807 static const char *halt_code[256] = { 808 "UNEXPECTED_INTERRUPT0", 809 "UNEXPECTED_INTERRUPT1", 810 "UNEXPECTED_INTERRUPT2", 811 "UNEXPECTED_INTERRUPT3", 812 "UNEXPECTED_INTERRUPT4", 813 "UNEXPECTED_INTERRUPT5", 814 "UNEXPECTED_INTERRUPT6", 815 "UNEXPECTED_INTERRUPT7", 816 "UNEXPECTED_INTERRUPT8", 817 "UNEXPECTED_INTERRUPT9", 818 "UNEXPECTED_INTERRUPT10", 819 [11 ... 19] = "unknown[11,19]", 820 "NO_FREE_SCB_AVAILABLE", 821 "INVALID_SCB_OPCODE", 822 "INVALID_MBX_OPCODE", 823 "INVALID_ATA_STATE", 824 "ATA_QUEUE_FULL", 825 "ATA_TAG_TABLE_FAULT", 826 "ATA_TAG_MASK_FAULT", 827 "BAD_LINK_QUEUE_STATE", 828 "DMA2CHIM_QUEUE_ERROR", 829 "EMPTY_SCB_LIST_FULL", 830 "unknown[30]", 831 "IN_USE_SCB_ON_FREE_LIST", 832 "BAD_OPEN_WAIT_STATE", 833 "INVALID_STP_AFFILIATION", 834 "unknown[34]", 835 "EXEC_QUEUE_ERROR", 836 "TOO_MANY_EMPTIES_NEEDED", 837 "EMPTY_REQ_QUEUE_ERROR", 838 "Q_MONIRTT_MGMT_ERROR", 839 "TARGET_MODE_FLOW_ERROR", 840 "DEVICE_QUEUE_NOT_FOUND", 841 "START_IRTT_TIMER_ERROR", 842 "ABORT_TASK_ILLEGAL_REQ", 843 [43 ... 255] = "unknown[43,255]" 844 }; 845 846 if (dchstatus & CSEQINT) { 847 u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT); 848 849 if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) { 850 asd_printk("%s: CSEQ arp2int:0x%x\n", 851 pci_name(asd_ha->pcidev), 852 arp2int); 853 } else if (arp2int & ARP2HALTC) 854 asd_printk("%s: CSEQ halted: %s\n", 855 pci_name(asd_ha->pcidev), 856 halt_code[(arp2int>>16)&0xFF]); 857 else 858 asd_printk("%s: CARP2INT:0x%x\n", 859 pci_name(asd_ha->pcidev), 860 arp2int); 861 } 862 if (dchstatus & LSEQINT_MASK) { 863 int lseq; 864 u8 lseq_mask = dchstatus & LSEQINT_MASK; 865 866 for_each_sequencer(lseq_mask, lseq_mask, lseq) { 867 u32 arp2int = asd_read_reg_dword(asd_ha, 868 LmARP2INT(lseq)); 869 if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR 870 | ARP2CIOPERR)) { 871 asd_printk("%s: LSEQ%d arp2int:0x%x\n", 872 pci_name(asd_ha->pcidev), 873 lseq, arp2int); 874 /* XXX we should only do lseq reset */ 875 } else if (arp2int & ARP2HALTC) 876 asd_printk("%s: LSEQ%d halted: %s\n", 877 pci_name(asd_ha->pcidev), 878 lseq,halt_code[(arp2int>>16)&0xFF]); 879 else 880 asd_printk("%s: LSEQ%d ARP2INT:0x%x\n", 881 pci_name(asd_ha->pcidev), lseq, 882 arp2int); 883 } 884 } 885 asd_chip_reset(asd_ha); 886 } 887 888 /** 889 * asd_dch_sas_isr -- process device channel interrupt (DEVINT) 890 * @asd_ha: pointer to host adapter structure 891 */ 892 static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha) 893 { 894 u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS); 895 896 if (dchstatus & CFIFTOERR) { 897 asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev)); 898 asd_chip_reset(asd_ha); 899 } else 900 asd_arp2_err(asd_ha, dchstatus); 901 } 902 903 /** 904 * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR) 905 * @asd_ha: pointer to host adapter structure 906 */ 907 static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha) 908 { 909 u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R); 910 911 if (!(stat0r & ASIERR)) { 912 asd_printk("hmm, EXSI interrupted but no error?\n"); 913 return; 914 } 915 916 if (stat0r & ASIFMTERR) { 917 asd_printk("ASI SEEPROM format error for %s\n", 918 pci_name(asd_ha->pcidev)); 919 } else if (stat0r & ASISEECHKERR) { 920 u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R); 921 asd_printk("ASI SEEPROM checksum 0x%x error for %s\n", 922 stat1r & CHECKSUM_MASK, 923 pci_name(asd_ha->pcidev)); 924 } else { 925 u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR); 926 927 if (!(statr & CPI2ASIMSTERR_MASK)) { 928 ASD_DPRINTK("hmm, ASIERR?\n"); 929 return; 930 } else { 931 u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR); 932 u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR); 933 934 asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, " 935 "count: 0x%x, byteen: 0x%x, targerr: 0x%x " 936 "master id: 0x%x, master err: 0x%x\n", 937 pci_name(asd_ha->pcidev), 938 addr, data, 939 (statr & CPI2ASIBYTECNT_MASK) >> 16, 940 (statr & CPI2ASIBYTEEN_MASK) >> 12, 941 (statr & CPI2ASITARGERR_MASK) >> 8, 942 (statr & CPI2ASITARGMID_MASK) >> 4, 943 (statr & CPI2ASIMSTERR_MASK)); 944 } 945 } 946 asd_chip_reset(asd_ha); 947 } 948 949 /** 950 * asd_hst_pcix_isr -- process host interface interrupts 951 * @asd_ha: pointer to host adapter structure 952 * 953 * Asserted on PCIX errors: target abort, etc. 954 */ 955 static void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha) 956 { 957 u16 status; 958 u32 pcix_status; 959 u32 ecc_status; 960 961 pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status); 962 pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status); 963 pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status); 964 965 if (status & PCI_STATUS_DETECTED_PARITY) 966 asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev)); 967 else if (status & PCI_STATUS_REC_MASTER_ABORT) 968 asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev)); 969 else if (status & PCI_STATUS_REC_TARGET_ABORT) 970 asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev)); 971 else if (status & PCI_STATUS_PARITY) 972 asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev)); 973 else if (pcix_status & RCV_SCE) { 974 asd_printk("received split completion error for %s\n", 975 pci_name(asd_ha->pcidev)); 976 pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); 977 /* XXX: Abort task? */ 978 return; 979 } else if (pcix_status & UNEXP_SC) { 980 asd_printk("unexpected split completion for %s\n", 981 pci_name(asd_ha->pcidev)); 982 pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); 983 /* ignore */ 984 return; 985 } else if (pcix_status & SC_DISCARD) 986 asd_printk("split completion discarded for %s\n", 987 pci_name(asd_ha->pcidev)); 988 else if (ecc_status & UNCOR_ECCERR) 989 asd_printk("uncorrectable ECC error for %s\n", 990 pci_name(asd_ha->pcidev)); 991 asd_chip_reset(asd_ha); 992 } 993 994 /** 995 * asd_hw_isr -- host adapter interrupt service routine 996 * @irq: ignored 997 * @dev_id: pointer to host adapter structure 998 * 999 * The ISR processes done list entries and level 3 error handling. 1000 */ 1001 irqreturn_t asd_hw_isr(int irq, void *dev_id) 1002 { 1003 struct asd_ha_struct *asd_ha = dev_id; 1004 u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT); 1005 1006 if (!chimint) 1007 return IRQ_NONE; 1008 1009 asd_write_reg_dword(asd_ha, CHIMINT, chimint); 1010 (void) asd_read_reg_dword(asd_ha, CHIMINT); 1011 1012 if (chimint & DLAVAIL) 1013 asd_process_donelist_isr(asd_ha); 1014 if (chimint & COMINT) 1015 asd_com_sas_isr(asd_ha); 1016 if (chimint & DEVINT) 1017 asd_dch_sas_isr(asd_ha); 1018 if (chimint & INITERR) 1019 asd_rbi_exsi_isr(asd_ha); 1020 if (chimint & HOSTERR) 1021 asd_hst_pcix_isr(asd_ha); 1022 1023 return IRQ_HANDLED; 1024 } 1025 1026 /* ---------- SCB handling ---------- */ 1027 1028 static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, 1029 gfp_t gfp_flags) 1030 { 1031 extern struct kmem_cache *asd_ascb_cache; 1032 struct asd_seq_data *seq = &asd_ha->seq; 1033 struct asd_ascb *ascb; 1034 unsigned long flags; 1035 1036 ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags); 1037 1038 if (ascb) { 1039 ascb->dma_scb.size = sizeof(struct scb); 1040 ascb->dma_scb.vaddr = dma_pool_zalloc(asd_ha->scb_pool, 1041 gfp_flags, 1042 &ascb->dma_scb.dma_handle); 1043 if (!ascb->dma_scb.vaddr) { 1044 kmem_cache_free(asd_ascb_cache, ascb); 1045 return NULL; 1046 } 1047 asd_init_ascb(asd_ha, ascb); 1048 1049 spin_lock_irqsave(&seq->tc_index_lock, flags); 1050 ascb->tc_index = asd_tc_index_get(seq, ascb); 1051 spin_unlock_irqrestore(&seq->tc_index_lock, flags); 1052 if (ascb->tc_index == -1) 1053 goto undo; 1054 1055 ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index); 1056 } 1057 1058 return ascb; 1059 undo: 1060 dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, 1061 ascb->dma_scb.dma_handle); 1062 kmem_cache_free(asd_ascb_cache, ascb); 1063 ASD_DPRINTK("no index for ascb\n"); 1064 return NULL; 1065 } 1066 1067 /** 1068 * asd_ascb_alloc_list -- allocate a list of aSCBs 1069 * @asd_ha: pointer to host adapter structure 1070 * @num: pointer to integer number of aSCBs 1071 * @gfp_flags: GFP_ flags. 1072 * 1073 * This is the only function which is used to allocate aSCBs. 1074 * It can allocate one or many. If more than one, then they form 1075 * a linked list in two ways: by their list field of the ascb struct 1076 * and by the next_scb field of the scb_header. 1077 * 1078 * Returns NULL if no memory was available, else pointer to a list 1079 * of ascbs. When this function returns, @num would be the number 1080 * of SCBs which were not able to be allocated, 0 if all requested 1081 * were able to be allocated. 1082 */ 1083 struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct 1084 *asd_ha, int *num, 1085 gfp_t gfp_flags) 1086 { 1087 struct asd_ascb *first = NULL; 1088 1089 for ( ; *num > 0; --*num) { 1090 struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags); 1091 1092 if (!ascb) 1093 break; 1094 else if (!first) 1095 first = ascb; 1096 else { 1097 struct asd_ascb *last = list_entry(first->list.prev, 1098 struct asd_ascb, 1099 list); 1100 list_add_tail(&ascb->list, &first->list); 1101 last->scb->header.next_scb = 1102 cpu_to_le64(((u64)ascb->dma_scb.dma_handle)); 1103 } 1104 } 1105 1106 return first; 1107 } 1108 1109 /** 1110 * asd_swap_head_scb -- swap the head scb 1111 * @asd_ha: pointer to host adapter structure 1112 * @ascb: pointer to the head of an ascb list 1113 * 1114 * The sequencer knows the DMA address of the next SCB to be DMAed to 1115 * the host adapter, from initialization or from the last list DMAed. 1116 * seq->next_scb keeps the address of this SCB. The sequencer will 1117 * DMA to the host adapter this list of SCBs. But the head (first 1118 * element) of this list is not known to the sequencer. Here we swap 1119 * the head of the list with the known SCB (memcpy()). 1120 * Only one memcpy() is required per list so it is in our interest 1121 * to keep the list of SCB as long as possible so that the ratio 1122 * of number of memcpy calls to the number of SCB DMA-ed is as small 1123 * as possible. 1124 * 1125 * LOCKING: called with the pending list lock held. 1126 */ 1127 static void asd_swap_head_scb(struct asd_ha_struct *asd_ha, 1128 struct asd_ascb *ascb) 1129 { 1130 struct asd_seq_data *seq = &asd_ha->seq; 1131 struct asd_ascb *last = list_entry(ascb->list.prev, 1132 struct asd_ascb, 1133 list); 1134 struct asd_dma_tok t = ascb->dma_scb; 1135 1136 memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb)); 1137 ascb->dma_scb = seq->next_scb; 1138 ascb->scb = ascb->dma_scb.vaddr; 1139 seq->next_scb = t; 1140 last->scb->header.next_scb = 1141 cpu_to_le64(((u64)seq->next_scb.dma_handle)); 1142 } 1143 1144 /** 1145 * asd_start_scb_timers -- (add and) start timers of SCBs 1146 * @list: pointer to struct list_head of the scbs 1147 * 1148 * If an SCB in the @list has no timer function, assign the default 1149 * one, then start the timer of the SCB. This function is 1150 * intended to be called from asd_post_ascb_list(), just prior to 1151 * posting the SCBs to the sequencer. 1152 */ 1153 static void asd_start_scb_timers(struct list_head *list) 1154 { 1155 struct asd_ascb *ascb; 1156 list_for_each_entry(ascb, list, list) { 1157 if (!ascb->uldd_timer) { 1158 ascb->timer.function = asd_ascb_timedout; 1159 ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; 1160 add_timer(&ascb->timer); 1161 } 1162 } 1163 } 1164 1165 /** 1166 * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter 1167 * @asd_ha: pointer to a host adapter structure 1168 * @ascb: pointer to the first aSCB in the list 1169 * @num: number of aSCBs in the list (to be posted) 1170 * 1171 * See queueing comment in asd_post_escb_list(). 1172 * 1173 * Additional note on queuing: In order to minimize the ratio of memcpy() 1174 * to the number of ascbs sent, we try to batch-send as many ascbs as possible 1175 * in one go. 1176 * Two cases are possible: 1177 * A) can_queue >= num, 1178 * B) can_queue < num. 1179 * Case A: we can send the whole batch at once. Increment "pending" 1180 * in the beginning of this function, when it is checked, in order to 1181 * eliminate races when this function is called by multiple processes. 1182 * Case B: should never happen. 1183 */ 1184 int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, 1185 int num) 1186 { 1187 unsigned long flags; 1188 LIST_HEAD(list); 1189 int can_queue; 1190 1191 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); 1192 can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending; 1193 if (can_queue >= num) 1194 asd_ha->seq.pending += num; 1195 else 1196 can_queue = 0; 1197 1198 if (!can_queue) { 1199 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); 1200 asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev)); 1201 return -SAS_QUEUE_FULL; 1202 } 1203 1204 asd_swap_head_scb(asd_ha, ascb); 1205 1206 __list_add(&list, ascb->list.prev, &ascb->list); 1207 1208 asd_start_scb_timers(&list); 1209 1210 asd_ha->seq.scbpro += num; 1211 list_splice_init(&list, asd_ha->seq.pend_q.prev); 1212 asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro); 1213 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); 1214 1215 return 0; 1216 } 1217 1218 /** 1219 * asd_post_escb_list -- post a list of 1 or more empty scb 1220 * @asd_ha: pointer to a host adapter structure 1221 * @ascb: pointer to the first empty SCB in the list 1222 * @num: number of aSCBs in the list (to be posted) 1223 * 1224 * This is essentially the same as asd_post_ascb_list, but we do not 1225 * increment pending, add those to the pending list or get indexes. 1226 * See asd_init_escbs() and asd_init_post_escbs(). 1227 * 1228 * Since sending a list of ascbs is a superset of sending a single 1229 * ascb, this function exists to generalize this. More specifically, 1230 * when sending a list of those, we want to do only a _single_ 1231 * memcpy() at swap head, as opposed to for each ascb sent (in the 1232 * case of sending them one by one). That is, we want to minimize the 1233 * ratio of memcpy() operations to the number of ascbs sent. The same 1234 * logic applies to asd_post_ascb_list(). 1235 */ 1236 int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, 1237 int num) 1238 { 1239 unsigned long flags; 1240 1241 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); 1242 asd_swap_head_scb(asd_ha, ascb); 1243 asd_ha->seq.scbpro += num; 1244 asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro); 1245 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); 1246 1247 return 0; 1248 } 1249 1250 /* ---------- LED ---------- */ 1251 1252 /** 1253 * asd_turn_led -- turn on/off an LED 1254 * @asd_ha: pointer to host adapter structure 1255 * @phy_id: the PHY id whose LED we want to manupulate 1256 * @op: 1 to turn on, 0 to turn off 1257 */ 1258 void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op) 1259 { 1260 if (phy_id < ASD_MAX_PHYS) { 1261 u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id)); 1262 if (op) 1263 v |= LEDPOL; 1264 else 1265 v &= ~LEDPOL; 1266 asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v); 1267 } 1268 } 1269 1270 /** 1271 * asd_control_led -- enable/disable an LED on the board 1272 * @asd_ha: pointer to host adapter structure 1273 * @phy_id: integer, the phy id 1274 * @op: integer, 1 to enable, 0 to disable the LED 1275 * 1276 * First we output enable the LED, then we set the source 1277 * to be an external module. 1278 */ 1279 void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op) 1280 { 1281 if (phy_id < ASD_MAX_PHYS) { 1282 u32 v; 1283 1284 v = asd_read_reg_dword(asd_ha, GPIOOER); 1285 if (op) 1286 v |= (1 << phy_id); 1287 else 1288 v &= ~(1 << phy_id); 1289 asd_write_reg_dword(asd_ha, GPIOOER, v); 1290 1291 v = asd_read_reg_dword(asd_ha, GPIOCNFGR); 1292 if (op) 1293 v |= (1 << phy_id); 1294 else 1295 v &= ~(1 << phy_id); 1296 asd_write_reg_dword(asd_ha, GPIOCNFGR, v); 1297 } 1298 } 1299 1300 /* ---------- PHY enable ---------- */ 1301 1302 static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id) 1303 { 1304 struct asd_phy *phy = &asd_ha->phys[phy_id]; 1305 1306 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0); 1307 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY), 1308 HOTPLUG_DELAY_TIMEOUT); 1309 1310 /* Get defaults from manuf. sector */ 1311 /* XXX we need defaults for those in case MS is broken. */ 1312 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0), 1313 phy->phy_desc->phy_control_0); 1314 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1), 1315 phy->phy_desc->phy_control_1); 1316 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2), 1317 phy->phy_desc->phy_control_2); 1318 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3), 1319 phy->phy_desc->phy_control_3); 1320 1321 asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id), 1322 ASD_COMINIT_TIMEOUT); 1323 1324 asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id), 1325 phy->id_frm_tok->dma_handle); 1326 1327 asd_control_led(asd_ha, phy_id, 1); 1328 1329 return 0; 1330 } 1331 1332 int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask) 1333 { 1334 u8 phy_m; 1335 u8 i; 1336 int num = 0, k; 1337 struct asd_ascb *ascb; 1338 struct asd_ascb *ascb_list; 1339 1340 if (!phy_mask) { 1341 asd_printk("%s called with phy_mask of 0!?\n", __func__); 1342 return 0; 1343 } 1344 1345 for_each_phy(phy_mask, phy_m, i) { 1346 num++; 1347 asd_enable_phy(asd_ha, i); 1348 } 1349 1350 k = num; 1351 ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL); 1352 if (!ascb_list) { 1353 asd_printk("no memory for control phy ascb list\n"); 1354 return -ENOMEM; 1355 } 1356 num -= k; 1357 1358 ascb = ascb_list; 1359 for_each_phy(phy_mask, phy_m, i) { 1360 asd_build_control_phy(ascb, i, ENABLE_PHY); 1361 ascb = list_entry(ascb->list.next, struct asd_ascb, list); 1362 } 1363 ASD_DPRINTK("posting %d control phy scbs\n", num); 1364 k = asd_post_ascb_list(asd_ha, ascb_list, num); 1365 if (k) 1366 asd_ascb_free_list(ascb_list); 1367 1368 return k; 1369 } 1370