1 /* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * BSD LICENSE 25 * 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 * All rights reserved. 28 * 29 * Redistribution and use in source and binary forms, with or without 30 * modification, are permitted provided that the following conditions 31 * are met: 32 * 33 * * Redistributions of source code must retain the above copyright 34 * notice, this list of conditions and the following disclaimer. 35 * * Redistributions in binary form must reproduce the above copyright 36 * notice, this list of conditions and the following disclaimer in 37 * the documentation and/or other materials provided with the 38 * distribution. 39 * * Neither the name of Intel Corporation nor the names of its 40 * contributors may be used to endorse or promote products derived 41 * from this software without specific prior written permission. 42 * 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56 #include "isci.h" 57 #include "port.h" 58 #include "request.h" 59 60 #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000) 61 #define SCU_DUMMY_INDEX (0xFFFF) 62 63 #undef C 64 #define C(a) (#a) 65 const char *port_state_name(enum sci_port_states state) 66 { 67 static const char * const strings[] = PORT_STATES; 68 69 return strings[state]; 70 } 71 #undef C 72 73 static struct device *sciport_to_dev(struct isci_port *iport) 74 { 75 int i = iport->physical_port_index; 76 struct isci_port *table; 77 struct isci_host *ihost; 78 79 if (i == SCIC_SDS_DUMMY_PORT) 80 i = SCI_MAX_PORTS+1; 81 82 table = iport - i; 83 ihost = container_of(table, typeof(*ihost), ports[0]); 84 85 return &ihost->pdev->dev; 86 } 87 88 static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) 89 { 90 u8 index; 91 92 proto->all = 0; 93 for (index = 0; index < SCI_MAX_PHYS; index++) { 94 struct isci_phy *iphy = iport->phy_table[index]; 95 96 if (!iphy) 97 continue; 98 sci_phy_get_protocols(iphy, proto); 99 } 100 } 101 102 static u32 sci_port_get_phys(struct isci_port *iport) 103 { 104 u32 index; 105 u32 mask; 106 107 mask = 0; 108 for (index = 0; index < SCI_MAX_PHYS; index++) 109 if (iport->phy_table[index]) 110 mask |= (1 << index); 111 112 return mask; 113 } 114 115 /** 116 * sci_port_get_properties() - This method simply returns the properties 117 * regarding the port, such as: physical index, protocols, sas address, etc. 118 * @port: this parameter specifies the port for which to retrieve the physical 119 * index. 120 * @properties: This parameter specifies the properties structure into which to 121 * copy the requested information. 122 * 123 * Indicate if the user specified a valid port. SCI_SUCCESS This value is 124 * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This 125 * value is returned if the specified port is not valid. When this value is 126 * returned, no data is copied to the properties output parameter. 127 */ 128 enum sci_status sci_port_get_properties(struct isci_port *iport, 129 struct sci_port_properties *prop) 130 { 131 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) 132 return SCI_FAILURE_INVALID_PORT; 133 134 prop->index = iport->logical_port_index; 135 prop->phy_mask = sci_port_get_phys(iport); 136 sci_port_get_sas_address(iport, &prop->local.sas_address); 137 sci_port_get_protocols(iport, &prop->local.protocols); 138 sci_port_get_attached_sas_address(iport, &prop->remote.sas_address); 139 140 return SCI_SUCCESS; 141 } 142 143 static void sci_port_bcn_enable(struct isci_port *iport) 144 { 145 struct isci_phy *iphy; 146 u32 val; 147 int i; 148 149 for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) { 150 iphy = iport->phy_table[i]; 151 if (!iphy) 152 continue; 153 val = readl(&iphy->link_layer_registers->link_layer_control); 154 /* clear the bit by writing 1. */ 155 writel(val, &iphy->link_layer_registers->link_layer_control); 156 } 157 } 158 159 static void isci_port_bc_change_received(struct isci_host *ihost, 160 struct isci_port *iport, 161 struct isci_phy *iphy) 162 { 163 dev_dbg(&ihost->pdev->dev, 164 "%s: isci_phy = %p, sas_phy = %p\n", 165 __func__, iphy, &iphy->sas_phy); 166 167 ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD); 168 sci_port_bcn_enable(iport); 169 } 170 171 static void isci_port_link_up(struct isci_host *isci_host, 172 struct isci_port *iport, 173 struct isci_phy *iphy) 174 { 175 unsigned long flags; 176 struct sci_port_properties properties; 177 unsigned long success = true; 178 179 dev_dbg(&isci_host->pdev->dev, 180 "%s: isci_port = %p\n", 181 __func__, iport); 182 183 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); 184 185 sci_port_get_properties(iport, &properties); 186 187 if (iphy->protocol == SAS_PROTOCOL_SATA) { 188 u64 attached_sas_address; 189 190 iphy->sas_phy.oob_mode = SATA_OOB_MODE; 191 iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis); 192 193 /* 194 * For direct-attached SATA devices, the SCI core will 195 * automagically assign a SAS address to the end device 196 * for the purpose of creating a port. This SAS address 197 * will not be the same as assigned to the PHY and needs 198 * to be obtained from struct sci_port_properties properties. 199 */ 200 attached_sas_address = properties.remote.sas_address.high; 201 attached_sas_address <<= 32; 202 attached_sas_address |= properties.remote.sas_address.low; 203 swab64s(&attached_sas_address); 204 205 memcpy(&iphy->sas_phy.attached_sas_addr, 206 &attached_sas_address, sizeof(attached_sas_address)); 207 } else if (iphy->protocol == SAS_PROTOCOL_SSP) { 208 iphy->sas_phy.oob_mode = SAS_OOB_MODE; 209 iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); 210 211 /* Copy the attached SAS address from the IAF */ 212 memcpy(iphy->sas_phy.attached_sas_addr, 213 iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE); 214 } else { 215 dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__); 216 success = false; 217 } 218 219 iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy); 220 221 spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); 222 223 /* Notify libsas that we have an address frame, if indeed 224 * we've found an SSP, SMP, or STP target */ 225 if (success) 226 isci_host->sas_ha.notify_port_event(&iphy->sas_phy, 227 PORTE_BYTES_DMAED); 228 } 229 230 231 /** 232 * isci_port_link_down() - This function is called by the sci core when a link 233 * becomes inactive. 234 * @isci_host: This parameter specifies the isci host object. 235 * @phy: This parameter specifies the isci phy with the active link. 236 * @port: This parameter specifies the isci port with the active link. 237 * 238 */ 239 static void isci_port_link_down(struct isci_host *isci_host, 240 struct isci_phy *isci_phy, 241 struct isci_port *isci_port) 242 { 243 struct isci_remote_device *isci_device; 244 245 dev_dbg(&isci_host->pdev->dev, 246 "%s: isci_port = %p\n", __func__, isci_port); 247 248 if (isci_port) { 249 250 /* check to see if this is the last phy on this port. */ 251 if (isci_phy->sas_phy.port && 252 isci_phy->sas_phy.port->num_phys == 1) { 253 /* change the state for all devices on this port. The 254 * next task sent to this device will be returned as 255 * SAS_TASK_UNDELIVERED, and the scsi mid layer will 256 * remove the target 257 */ 258 list_for_each_entry(isci_device, 259 &isci_port->remote_dev_list, 260 node) { 261 dev_dbg(&isci_host->pdev->dev, 262 "%s: isci_device = %p\n", 263 __func__, isci_device); 264 set_bit(IDEV_GONE, &isci_device->flags); 265 } 266 } 267 } 268 269 /* Notify libsas of the borken link, this will trigger calls to our 270 * isci_port_deformed and isci_dev_gone functions. 271 */ 272 sas_phy_disconnected(&isci_phy->sas_phy); 273 isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy, 274 PHYE_LOSS_OF_SIGNAL); 275 276 dev_dbg(&isci_host->pdev->dev, 277 "%s: isci_port = %p - Done\n", __func__, isci_port); 278 } 279 280 static bool is_port_ready_state(enum sci_port_states state) 281 { 282 switch (state) { 283 case SCI_PORT_READY: 284 case SCI_PORT_SUB_WAITING: 285 case SCI_PORT_SUB_OPERATIONAL: 286 case SCI_PORT_SUB_CONFIGURING: 287 return true; 288 default: 289 return false; 290 } 291 } 292 293 /* flag dummy rnc hanling when exiting a ready state */ 294 static void port_state_machine_change(struct isci_port *iport, 295 enum sci_port_states state) 296 { 297 struct sci_base_state_machine *sm = &iport->sm; 298 enum sci_port_states old_state = sm->current_state_id; 299 300 if (is_port_ready_state(old_state) && !is_port_ready_state(state)) 301 iport->ready_exit = true; 302 303 sci_change_state(sm, state); 304 iport->ready_exit = false; 305 } 306 307 /** 308 * isci_port_hard_reset_complete() - This function is called by the sci core 309 * when the hard reset complete notification has been received. 310 * @port: This parameter specifies the sci port with the active link. 311 * @completion_status: This parameter specifies the core status for the reset 312 * process. 313 * 314 */ 315 static void isci_port_hard_reset_complete(struct isci_port *isci_port, 316 enum sci_status completion_status) 317 { 318 struct isci_host *ihost = isci_port->owning_controller; 319 320 dev_dbg(&ihost->pdev->dev, 321 "%s: isci_port = %p, completion_status=%x\n", 322 __func__, isci_port, completion_status); 323 324 /* Save the status of the hard reset from the port. */ 325 isci_port->hard_reset_status = completion_status; 326 327 if (completion_status != SCI_SUCCESS) { 328 329 /* The reset failed. The port state is now SCI_PORT_FAILED. */ 330 if (isci_port->active_phy_mask == 0) { 331 int phy_idx = isci_port->last_active_phy; 332 struct isci_phy *iphy = &ihost->phys[phy_idx]; 333 334 /* Generate the link down now to the host, since it 335 * was intercepted by the hard reset state machine when 336 * it really happened. 337 */ 338 isci_port_link_down(ihost, iphy, isci_port); 339 } 340 /* Advance the port state so that link state changes will be 341 * noticed. 342 */ 343 port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING); 344 345 } 346 clear_bit(IPORT_RESET_PENDING, &isci_port->state); 347 wake_up(&ihost->eventq); 348 349 } 350 351 /* This method will return a true value if the specified phy can be assigned to 352 * this port The following is a list of phys for each port that are allowed: - 353 * Port 0 - 3 2 1 0 - Port 1 - 1 - Port 2 - 3 2 - Port 3 - 3 This method 354 * doesn't preclude all configurations. It merely ensures that a phy is part 355 * of the allowable set of phy identifiers for that port. For example, one 356 * could assign phy 3 to port 0 and no other phys. Please refer to 357 * sci_port_is_phy_mask_valid() for information regarding whether the 358 * phy_mask for a port can be supported. bool true if this is a valid phy 359 * assignment for the port false if this is not a valid phy assignment for the 360 * port 361 */ 362 bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index) 363 { 364 struct isci_host *ihost = iport->owning_controller; 365 struct sci_user_parameters *user = &ihost->user_parameters; 366 367 /* Initialize to invalid value. */ 368 u32 existing_phy_index = SCI_MAX_PHYS; 369 u32 index; 370 371 if ((iport->physical_port_index == 1) && (phy_index != 1)) 372 return false; 373 374 if (iport->physical_port_index == 3 && phy_index != 3) 375 return false; 376 377 if (iport->physical_port_index == 2 && 378 (phy_index == 0 || phy_index == 1)) 379 return false; 380 381 for (index = 0; index < SCI_MAX_PHYS; index++) 382 if (iport->phy_table[index] && index != phy_index) 383 existing_phy_index = index; 384 385 /* Ensure that all of the phys in the port are capable of 386 * operating at the same maximum link rate. 387 */ 388 if (existing_phy_index < SCI_MAX_PHYS && 389 user->phys[phy_index].max_speed_generation != 390 user->phys[existing_phy_index].max_speed_generation) 391 return false; 392 393 return true; 394 } 395 396 /** 397 * 398 * @sci_port: This is the port object for which to determine if the phy mask 399 * can be supported. 400 * 401 * This method will return a true value if the port's phy mask can be supported 402 * by the SCU. The following is a list of valid PHY mask configurations for 403 * each port: - Port 0 - [[3 2] 1] 0 - Port 1 - [1] - Port 2 - [[3] 2] 404 * - Port 3 - [3] This method returns a boolean indication specifying if the 405 * phy mask can be supported. true if this is a valid phy assignment for the 406 * port false if this is not a valid phy assignment for the port 407 */ 408 static bool sci_port_is_phy_mask_valid( 409 struct isci_port *iport, 410 u32 phy_mask) 411 { 412 if (iport->physical_port_index == 0) { 413 if (((phy_mask & 0x0F) == 0x0F) 414 || ((phy_mask & 0x03) == 0x03) 415 || ((phy_mask & 0x01) == 0x01) 416 || (phy_mask == 0)) 417 return true; 418 } else if (iport->physical_port_index == 1) { 419 if (((phy_mask & 0x02) == 0x02) 420 || (phy_mask == 0)) 421 return true; 422 } else if (iport->physical_port_index == 2) { 423 if (((phy_mask & 0x0C) == 0x0C) 424 || ((phy_mask & 0x04) == 0x04) 425 || (phy_mask == 0)) 426 return true; 427 } else if (iport->physical_port_index == 3) { 428 if (((phy_mask & 0x08) == 0x08) 429 || (phy_mask == 0)) 430 return true; 431 } 432 433 return false; 434 } 435 436 /* 437 * This method retrieves a currently active (i.e. connected) phy contained in 438 * the port. Currently, the lowest order phy that is connected is returned. 439 * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is 440 * returned if there are no currently active (i.e. connected to a remote end 441 * point) phys contained in the port. All other values specify a struct sci_phy 442 * object that is active in the port. 443 */ 444 static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport) 445 { 446 u32 index; 447 struct isci_phy *iphy; 448 449 for (index = 0; index < SCI_MAX_PHYS; index++) { 450 /* Ensure that the phy is both part of the port and currently 451 * connected to the remote end-point. 452 */ 453 iphy = iport->phy_table[index]; 454 if (iphy && sci_port_active_phy(iport, iphy)) 455 return iphy; 456 } 457 458 return NULL; 459 } 460 461 static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) 462 { 463 /* Check to see if we can add this phy to a port 464 * that means that the phy is not part of a port and that the port does 465 * not already have a phy assinged to the phy index. 466 */ 467 if (!iport->phy_table[iphy->phy_index] && 468 !phy_get_non_dummy_port(iphy) && 469 sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { 470 /* Phy is being added in the stopped state so we are in MPC mode 471 * make logical port index = physical port index 472 */ 473 iport->logical_port_index = iport->physical_port_index; 474 iport->phy_table[iphy->phy_index] = iphy; 475 sci_phy_set_port(iphy, iport); 476 477 return SCI_SUCCESS; 478 } 479 480 return SCI_FAILURE; 481 } 482 483 static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy) 484 { 485 /* Make sure that this phy is part of this port */ 486 if (iport->phy_table[iphy->phy_index] == iphy && 487 phy_get_non_dummy_port(iphy) == iport) { 488 struct isci_host *ihost = iport->owning_controller; 489 490 /* Yep it is assigned to this port so remove it */ 491 sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); 492 iport->phy_table[iphy->phy_index] = NULL; 493 return SCI_SUCCESS; 494 } 495 496 return SCI_FAILURE; 497 } 498 499 void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas) 500 { 501 u32 index; 502 503 sas->high = 0; 504 sas->low = 0; 505 for (index = 0; index < SCI_MAX_PHYS; index++) 506 if (iport->phy_table[index]) 507 sci_phy_get_sas_address(iport->phy_table[index], sas); 508 } 509 510 void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas) 511 { 512 struct isci_phy *iphy; 513 514 /* 515 * Ensure that the phy is both part of the port and currently 516 * connected to the remote end-point. 517 */ 518 iphy = sci_port_get_a_connected_phy(iport); 519 if (iphy) { 520 if (iphy->protocol != SAS_PROTOCOL_SATA) { 521 sci_phy_get_attached_sas_address(iphy, sas); 522 } else { 523 sci_phy_get_sas_address(iphy, sas); 524 sas->low += iphy->phy_index; 525 } 526 } else { 527 sas->high = 0; 528 sas->low = 0; 529 } 530 } 531 532 /** 533 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround 534 * 535 * @sci_port: logical port on which we need to create the remote node context 536 * @rni: remote node index for this remote node context. 537 * 538 * This routine will construct a dummy remote node context data structure 539 * This structure will be posted to the hardware to work around a scheduler 540 * error in the hardware. 541 */ 542 static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) 543 { 544 union scu_remote_node_context *rnc; 545 546 rnc = &iport->owning_controller->remote_node_context_table[rni]; 547 548 memset(rnc, 0, sizeof(union scu_remote_node_context)); 549 550 rnc->ssp.remote_sas_address_hi = 0; 551 rnc->ssp.remote_sas_address_lo = 0; 552 553 rnc->ssp.remote_node_index = rni; 554 rnc->ssp.remote_node_port_width = 1; 555 rnc->ssp.logical_port_index = iport->physical_port_index; 556 557 rnc->ssp.nexus_loss_timer_enable = false; 558 rnc->ssp.check_bit = false; 559 rnc->ssp.is_valid = true; 560 rnc->ssp.is_remote_node_context = true; 561 rnc->ssp.function_number = 0; 562 rnc->ssp.arbitration_wait_time = 0; 563 } 564 565 /* 566 * construct a dummy task context data structure. This 567 * structure will be posted to the hardwre to work around a scheduler error 568 * in the hardware. 569 */ 570 static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag) 571 { 572 struct isci_host *ihost = iport->owning_controller; 573 struct scu_task_context *task_context; 574 575 task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; 576 memset(task_context, 0, sizeof(struct scu_task_context)); 577 578 task_context->initiator_request = 1; 579 task_context->connection_rate = 1; 580 task_context->logical_port_index = iport->physical_port_index; 581 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 582 task_context->task_index = ISCI_TAG_TCI(tag); 583 task_context->valid = SCU_TASK_CONTEXT_VALID; 584 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 585 task_context->remote_node_index = iport->reserved_rni; 586 task_context->do_not_dma_ssp_good_response = 1; 587 task_context->task_phase = 0x01; 588 } 589 590 static void sci_port_destroy_dummy_resources(struct isci_port *iport) 591 { 592 struct isci_host *ihost = iport->owning_controller; 593 594 if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG) 595 isci_free_tag(ihost, iport->reserved_tag); 596 597 if (iport->reserved_rni != SCU_DUMMY_INDEX) 598 sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, 599 1, iport->reserved_rni); 600 601 iport->reserved_rni = SCU_DUMMY_INDEX; 602 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; 603 } 604 605 void sci_port_setup_transports(struct isci_port *iport, u32 device_id) 606 { 607 u8 index; 608 609 for (index = 0; index < SCI_MAX_PHYS; index++) { 610 if (iport->active_phy_mask & (1 << index)) 611 sci_phy_setup_transport(iport->phy_table[index], device_id); 612 } 613 } 614 615 static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy) 616 { 617 sci_phy_resume(iphy); 618 iport->enabled_phy_mask |= 1 << iphy->phy_index; 619 } 620 621 static void sci_port_activate_phy(struct isci_port *iport, 622 struct isci_phy *iphy, 623 u8 flags) 624 { 625 struct isci_host *ihost = iport->owning_controller; 626 627 if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME)) 628 sci_phy_resume(iphy); 629 630 iport->active_phy_mask |= 1 << iphy->phy_index; 631 632 sci_controller_clear_invalid_phy(ihost, iphy); 633 634 if (flags & PF_NOTIFY) 635 isci_port_link_up(ihost, iport, iphy); 636 } 637 638 void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, 639 bool do_notify_user) 640 { 641 struct isci_host *ihost = iport->owning_controller; 642 643 iport->active_phy_mask &= ~(1 << iphy->phy_index); 644 iport->enabled_phy_mask &= ~(1 << iphy->phy_index); 645 if (!iport->active_phy_mask) 646 iport->last_active_phy = iphy->phy_index; 647 648 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; 649 650 /* Re-assign the phy back to the LP as if it were a narrow port for APC 651 * mode. For MPC mode, the phy will remain in the port. 652 */ 653 if (iport->owning_controller->oem_parameters.controller.mode_type == 654 SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) 655 writel(iphy->phy_index, 656 &iport->port_pe_configuration_register[iphy->phy_index]); 657 658 if (do_notify_user == true) 659 isci_port_link_down(ihost, iphy, iport); 660 } 661 662 static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy) 663 { 664 struct isci_host *ihost = iport->owning_controller; 665 666 /* 667 * Check to see if we have alreay reported this link as bad and if 668 * not go ahead and tell the SCI_USER that we have discovered an 669 * invalid link. 670 */ 671 if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { 672 ihost->invalid_phy_mask |= 1 << iphy->phy_index; 673 dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); 674 } 675 } 676 677 /** 678 * sci_port_general_link_up_handler - phy can be assigned to port? 679 * @sci_port: sci_port object for which has a phy that has gone link up. 680 * @sci_phy: This is the struct isci_phy object that has gone link up. 681 * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy 682 * 683 * Determine if this phy can be assigned to this port . If the phy is 684 * not a valid PHY for this port then the function will notify the user. 685 * A PHY can only be part of a port if it's attached SAS ADDRESS is the 686 * same as all other PHYs in the same port. 687 */ 688 static void sci_port_general_link_up_handler(struct isci_port *iport, 689 struct isci_phy *iphy, 690 u8 flags) 691 { 692 struct sci_sas_address port_sas_address; 693 struct sci_sas_address phy_sas_address; 694 695 sci_port_get_attached_sas_address(iport, &port_sas_address); 696 sci_phy_get_attached_sas_address(iphy, &phy_sas_address); 697 698 /* If the SAS address of the new phy matches the SAS address of 699 * other phys in the port OR this is the first phy in the port, 700 * then activate the phy and allow it to be used for operations 701 * in this port. 702 */ 703 if ((phy_sas_address.high == port_sas_address.high && 704 phy_sas_address.low == port_sas_address.low) || 705 iport->active_phy_mask == 0) { 706 struct sci_base_state_machine *sm = &iport->sm; 707 708 sci_port_activate_phy(iport, iphy, flags); 709 if (sm->current_state_id == SCI_PORT_RESETTING) 710 port_state_machine_change(iport, SCI_PORT_READY); 711 } else 712 sci_port_invalid_link_up(iport, iphy); 713 } 714 715 716 717 /** 718 * This method returns false if the port only has a single phy object assigned. 719 * If there are no phys or more than one phy then the method will return 720 * true. 721 * @sci_port: The port for which the wide port condition is to be checked. 722 * 723 * bool true Is returned if this is a wide ported port. false Is returned if 724 * this is a narrow port. 725 */ 726 static bool sci_port_is_wide(struct isci_port *iport) 727 { 728 u32 index; 729 u32 phy_count = 0; 730 731 for (index = 0; index < SCI_MAX_PHYS; index++) { 732 if (iport->phy_table[index] != NULL) { 733 phy_count++; 734 } 735 } 736 737 return phy_count != 1; 738 } 739 740 /** 741 * This method is called by the PHY object when the link is detected. if the 742 * port wants the PHY to continue on to the link up state then the port 743 * layer must return true. If the port object returns false the phy object 744 * must halt its attempt to go link up. 745 * @sci_port: The port associated with the phy object. 746 * @sci_phy: The phy object that is trying to go link up. 747 * 748 * true if the phy object can continue to the link up condition. true Is 749 * returned if this phy can continue to the ready state. false Is returned if 750 * can not continue on to the ready state. This notification is in place for 751 * wide ports and direct attached phys. Since there are no wide ported SATA 752 * devices this could become an invalid port configuration. 753 */ 754 bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy) 755 { 756 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && 757 (iphy->protocol == SAS_PROTOCOL_SATA)) { 758 if (sci_port_is_wide(iport)) { 759 sci_port_invalid_link_up(iport, iphy); 760 return false; 761 } else { 762 struct isci_host *ihost = iport->owning_controller; 763 struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]); 764 writel(iphy->phy_index, 765 &dst_port->port_pe_configuration_register[iphy->phy_index]); 766 } 767 } 768 769 return true; 770 } 771 772 static void port_timeout(struct timer_list *t) 773 { 774 struct sci_timer *tmr = from_timer(tmr, t, timer); 775 struct isci_port *iport = container_of(tmr, typeof(*iport), timer); 776 struct isci_host *ihost = iport->owning_controller; 777 unsigned long flags; 778 u32 current_state; 779 780 spin_lock_irqsave(&ihost->scic_lock, flags); 781 782 if (tmr->cancel) 783 goto done; 784 785 current_state = iport->sm.current_state_id; 786 787 if (current_state == SCI_PORT_RESETTING) { 788 /* if the port is still in the resetting state then the timeout 789 * fired before the reset completed. 790 */ 791 port_state_machine_change(iport, SCI_PORT_FAILED); 792 } else if (current_state == SCI_PORT_STOPPED) { 793 /* if the port is stopped then the start request failed In this 794 * case stay in the stopped state. 795 */ 796 dev_err(sciport_to_dev(iport), 797 "%s: SCIC Port 0x%p failed to stop before timeout.\n", 798 __func__, 799 iport); 800 } else if (current_state == SCI_PORT_STOPPING) { 801 dev_dbg(sciport_to_dev(iport), 802 "%s: port%d: stop complete timeout\n", 803 __func__, iport->physical_port_index); 804 } else { 805 /* The port is in the ready state and we have a timer 806 * reporting a timeout this should not happen. 807 */ 808 dev_err(sciport_to_dev(iport), 809 "%s: SCIC Port 0x%p is processing a timeout operation " 810 "in state %d.\n", __func__, iport, current_state); 811 } 812 813 done: 814 spin_unlock_irqrestore(&ihost->scic_lock, flags); 815 } 816 817 /* --------------------------------------------------------------------------- */ 818 819 /** 820 * This function updates the hardwares VIIT entry for this port. 821 * 822 * 823 */ 824 static void sci_port_update_viit_entry(struct isci_port *iport) 825 { 826 struct sci_sas_address sas_address; 827 828 sci_port_get_sas_address(iport, &sas_address); 829 830 writel(sas_address.high, 831 &iport->viit_registers->initiator_sas_address_hi); 832 writel(sas_address.low, 833 &iport->viit_registers->initiator_sas_address_lo); 834 835 /* This value get cleared just in case its not already cleared */ 836 writel(0, &iport->viit_registers->reserved); 837 838 /* We are required to update the status register last */ 839 writel(SCU_VIIT_ENTRY_ID_VIIT | 840 SCU_VIIT_IPPT_INITIATOR | 841 ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) | 842 SCU_VIIT_STATUS_ALL_VALID, 843 &iport->viit_registers->status); 844 } 845 846 enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport) 847 { 848 u16 index; 849 struct isci_phy *iphy; 850 enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS; 851 852 /* 853 * Loop through all of the phys in this port and find the phy with the 854 * lowest maximum link rate. */ 855 for (index = 0; index < SCI_MAX_PHYS; index++) { 856 iphy = iport->phy_table[index]; 857 if (iphy && sci_port_active_phy(iport, iphy) && 858 iphy->max_negotiated_speed < max_allowed_speed) 859 max_allowed_speed = iphy->max_negotiated_speed; 860 } 861 862 return max_allowed_speed; 863 } 864 865 static void sci_port_suspend_port_task_scheduler(struct isci_port *iport) 866 { 867 u32 pts_control_value; 868 869 pts_control_value = readl(&iport->port_task_scheduler_registers->control); 870 pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND); 871 writel(pts_control_value, &iport->port_task_scheduler_registers->control); 872 } 873 874 /** 875 * sci_port_post_dummy_request() - post dummy/workaround request 876 * @sci_port: port to post task 877 * 878 * Prevent the hardware scheduler from posting new requests to the front 879 * of the scheduler queue causing a starvation problem for currently 880 * ongoing requests. 881 * 882 */ 883 static void sci_port_post_dummy_request(struct isci_port *iport) 884 { 885 struct isci_host *ihost = iport->owning_controller; 886 u16 tag = iport->reserved_tag; 887 struct scu_task_context *tc; 888 u32 command; 889 890 tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; 891 tc->abort = 0; 892 893 command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 894 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | 895 ISCI_TAG_TCI(tag); 896 897 sci_controller_post_request(ihost, command); 898 } 899 900 /** 901 * This routine will abort the dummy request. This will alow the hardware to 902 * power down parts of the silicon to save power. 903 * 904 * @sci_port: The port on which the task must be aborted. 905 * 906 */ 907 static void sci_port_abort_dummy_request(struct isci_port *iport) 908 { 909 struct isci_host *ihost = iport->owning_controller; 910 u16 tag = iport->reserved_tag; 911 struct scu_task_context *tc; 912 u32 command; 913 914 tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; 915 tc->abort = 1; 916 917 command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT | 918 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | 919 ISCI_TAG_TCI(tag); 920 921 sci_controller_post_request(ihost, command); 922 } 923 924 /** 925 * 926 * @sci_port: This is the struct isci_port object to resume. 927 * 928 * This method will resume the port task scheduler for this port object. none 929 */ 930 static void 931 sci_port_resume_port_task_scheduler(struct isci_port *iport) 932 { 933 u32 pts_control_value; 934 935 pts_control_value = readl(&iport->port_task_scheduler_registers->control); 936 pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND); 937 writel(pts_control_value, &iport->port_task_scheduler_registers->control); 938 } 939 940 static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm) 941 { 942 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 943 944 sci_port_suspend_port_task_scheduler(iport); 945 946 iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS; 947 948 if (iport->active_phy_mask != 0) { 949 /* At least one of the phys on the port is ready */ 950 port_state_machine_change(iport, 951 SCI_PORT_SUB_OPERATIONAL); 952 } 953 } 954 955 static void scic_sds_port_ready_substate_waiting_exit( 956 struct sci_base_state_machine *sm) 957 { 958 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 959 sci_port_resume_port_task_scheduler(iport); 960 } 961 962 static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) 963 { 964 u32 index; 965 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 966 struct isci_host *ihost = iport->owning_controller; 967 968 dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n", 969 __func__, iport->physical_port_index); 970 971 for (index = 0; index < SCI_MAX_PHYS; index++) { 972 if (iport->phy_table[index]) { 973 writel(iport->physical_port_index, 974 &iport->port_pe_configuration_register[ 975 iport->phy_table[index]->phy_index]); 976 if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0) 977 sci_port_resume_phy(iport, iport->phy_table[index]); 978 } 979 } 980 981 sci_port_update_viit_entry(iport); 982 983 /* 984 * Post the dummy task for the port so the hardware can schedule 985 * io correctly 986 */ 987 sci_port_post_dummy_request(iport); 988 } 989 990 static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport) 991 { 992 struct isci_host *ihost = iport->owning_controller; 993 u8 phys_index = iport->physical_port_index; 994 union scu_remote_node_context *rnc; 995 u16 rni = iport->reserved_rni; 996 u32 command; 997 998 rnc = &ihost->remote_node_context_table[rni]; 999 1000 rnc->ssp.is_valid = false; 1001 1002 /* ensure the preceding tc abort request has reached the 1003 * controller and give it ample time to act before posting the rnc 1004 * invalidate 1005 */ 1006 readl(&ihost->smu_registers->interrupt_status); /* flush */ 1007 udelay(10); 1008 1009 command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE | 1010 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; 1011 1012 sci_controller_post_request(ihost, command); 1013 } 1014 1015 /** 1016 * 1017 * @object: This is the object which is cast to a struct isci_port object. 1018 * 1019 * This method will perform the actions required by the struct isci_port on 1020 * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports 1021 * the port not ready and suspends the port task scheduler. none 1022 */ 1023 static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm) 1024 { 1025 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 1026 struct isci_host *ihost = iport->owning_controller; 1027 1028 /* 1029 * Kill the dummy task for this port if it has not yet posted 1030 * the hardware will treat this as a NOP and just return abort 1031 * complete. 1032 */ 1033 sci_port_abort_dummy_request(iport); 1034 1035 dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", 1036 __func__, iport->physical_port_index); 1037 1038 if (iport->ready_exit) 1039 sci_port_invalidate_dummy_remote_node(iport); 1040 } 1041 1042 static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm) 1043 { 1044 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 1045 struct isci_host *ihost = iport->owning_controller; 1046 1047 if (iport->active_phy_mask == 0) { 1048 dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", 1049 __func__, iport->physical_port_index); 1050 1051 port_state_machine_change(iport, SCI_PORT_SUB_WAITING); 1052 } else 1053 port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); 1054 } 1055 1056 enum sci_status sci_port_start(struct isci_port *iport) 1057 { 1058 struct isci_host *ihost = iport->owning_controller; 1059 enum sci_status status = SCI_SUCCESS; 1060 enum sci_port_states state; 1061 u32 phy_mask; 1062 1063 state = iport->sm.current_state_id; 1064 if (state != SCI_PORT_STOPPED) { 1065 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", 1066 __func__, port_state_name(state)); 1067 return SCI_FAILURE_INVALID_STATE; 1068 } 1069 1070 if (iport->assigned_device_count > 0) { 1071 /* TODO This is a start failure operation because 1072 * there are still devices assigned to this port. 1073 * There must be no devices assigned to a port on a 1074 * start operation. 1075 */ 1076 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; 1077 } 1078 1079 if (iport->reserved_rni == SCU_DUMMY_INDEX) { 1080 u16 rni = sci_remote_node_table_allocate_remote_node( 1081 &ihost->available_remote_nodes, 1); 1082 1083 if (rni != SCU_DUMMY_INDEX) 1084 sci_port_construct_dummy_rnc(iport, rni); 1085 else 1086 status = SCI_FAILURE_INSUFFICIENT_RESOURCES; 1087 iport->reserved_rni = rni; 1088 } 1089 1090 if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) { 1091 u16 tag; 1092 1093 tag = isci_alloc_tag(ihost); 1094 if (tag == SCI_CONTROLLER_INVALID_IO_TAG) 1095 status = SCI_FAILURE_INSUFFICIENT_RESOURCES; 1096 else 1097 sci_port_construct_dummy_task(iport, tag); 1098 iport->reserved_tag = tag; 1099 } 1100 1101 if (status == SCI_SUCCESS) { 1102 phy_mask = sci_port_get_phys(iport); 1103 1104 /* 1105 * There are one or more phys assigned to this port. Make sure 1106 * the port's phy mask is in fact legal and supported by the 1107 * silicon. 1108 */ 1109 if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) { 1110 port_state_machine_change(iport, 1111 SCI_PORT_READY); 1112 1113 return SCI_SUCCESS; 1114 } 1115 status = SCI_FAILURE; 1116 } 1117 1118 if (status != SCI_SUCCESS) 1119 sci_port_destroy_dummy_resources(iport); 1120 1121 return status; 1122 } 1123 1124 enum sci_status sci_port_stop(struct isci_port *iport) 1125 { 1126 enum sci_port_states state; 1127 1128 state = iport->sm.current_state_id; 1129 switch (state) { 1130 case SCI_PORT_STOPPED: 1131 return SCI_SUCCESS; 1132 case SCI_PORT_SUB_WAITING: 1133 case SCI_PORT_SUB_OPERATIONAL: 1134 case SCI_PORT_SUB_CONFIGURING: 1135 case SCI_PORT_RESETTING: 1136 port_state_machine_change(iport, 1137 SCI_PORT_STOPPING); 1138 return SCI_SUCCESS; 1139 default: 1140 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", 1141 __func__, port_state_name(state)); 1142 return SCI_FAILURE_INVALID_STATE; 1143 } 1144 } 1145 1146 static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout) 1147 { 1148 enum sci_status status = SCI_FAILURE_INVALID_PHY; 1149 struct isci_phy *iphy = NULL; 1150 enum sci_port_states state; 1151 u32 phy_index; 1152 1153 state = iport->sm.current_state_id; 1154 if (state != SCI_PORT_SUB_OPERATIONAL) { 1155 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", 1156 __func__, port_state_name(state)); 1157 return SCI_FAILURE_INVALID_STATE; 1158 } 1159 1160 /* Select a phy on which we can send the hard reset request. */ 1161 for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) { 1162 iphy = iport->phy_table[phy_index]; 1163 if (iphy && !sci_port_active_phy(iport, iphy)) { 1164 /* 1165 * We found a phy but it is not ready select 1166 * different phy 1167 */ 1168 iphy = NULL; 1169 } 1170 } 1171 1172 /* If we have a phy then go ahead and start the reset procedure */ 1173 if (!iphy) 1174 return status; 1175 status = sci_phy_reset(iphy); 1176 1177 if (status != SCI_SUCCESS) 1178 return status; 1179 1180 sci_mod_timer(&iport->timer, timeout); 1181 iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED; 1182 1183 port_state_machine_change(iport, SCI_PORT_RESETTING); 1184 return SCI_SUCCESS; 1185 } 1186 1187 /** 1188 * sci_port_add_phy() - 1189 * @sci_port: This parameter specifies the port in which the phy will be added. 1190 * @sci_phy: This parameter is the phy which is to be added to the port. 1191 * 1192 * This method will add a PHY to the selected port. This method returns an 1193 * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other 1194 * status is a failure to add the phy to the port. 1195 */ 1196 enum sci_status sci_port_add_phy(struct isci_port *iport, 1197 struct isci_phy *iphy) 1198 { 1199 enum sci_status status; 1200 enum sci_port_states state; 1201 1202 sci_port_bcn_enable(iport); 1203 1204 state = iport->sm.current_state_id; 1205 switch (state) { 1206 case SCI_PORT_STOPPED: { 1207 struct sci_sas_address port_sas_address; 1208 1209 /* Read the port assigned SAS Address if there is one */ 1210 sci_port_get_sas_address(iport, &port_sas_address); 1211 1212 if (port_sas_address.high != 0 && port_sas_address.low != 0) { 1213 struct sci_sas_address phy_sas_address; 1214 1215 /* Make sure that the PHY SAS Address matches the SAS Address 1216 * for this port 1217 */ 1218 sci_phy_get_sas_address(iphy, &phy_sas_address); 1219 1220 if (port_sas_address.high != phy_sas_address.high || 1221 port_sas_address.low != phy_sas_address.low) 1222 return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION; 1223 } 1224 return sci_port_set_phy(iport, iphy); 1225 } 1226 case SCI_PORT_SUB_WAITING: 1227 case SCI_PORT_SUB_OPERATIONAL: 1228 status = sci_port_set_phy(iport, iphy); 1229 1230 if (status != SCI_SUCCESS) 1231 return status; 1232 1233 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); 1234 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; 1235 port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); 1236 1237 return status; 1238 case SCI_PORT_SUB_CONFIGURING: 1239 status = sci_port_set_phy(iport, iphy); 1240 1241 if (status != SCI_SUCCESS) 1242 return status; 1243 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY); 1244 1245 /* Re-enter the configuring state since this may be the last phy in 1246 * the port. 1247 */ 1248 port_state_machine_change(iport, 1249 SCI_PORT_SUB_CONFIGURING); 1250 return SCI_SUCCESS; 1251 default: 1252 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", 1253 __func__, port_state_name(state)); 1254 return SCI_FAILURE_INVALID_STATE; 1255 } 1256 } 1257 1258 /** 1259 * sci_port_remove_phy() - 1260 * @sci_port: This parameter specifies the port in which the phy will be added. 1261 * @sci_phy: This parameter is the phy which is to be added to the port. 1262 * 1263 * This method will remove the PHY from the selected PORT. This method returns 1264 * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any 1265 * other status is a failure to add the phy to the port. 1266 */ 1267 enum sci_status sci_port_remove_phy(struct isci_port *iport, 1268 struct isci_phy *iphy) 1269 { 1270 enum sci_status status; 1271 enum sci_port_states state; 1272 1273 state = iport->sm.current_state_id; 1274 1275 switch (state) { 1276 case SCI_PORT_STOPPED: 1277 return sci_port_clear_phy(iport, iphy); 1278 case SCI_PORT_SUB_OPERATIONAL: 1279 status = sci_port_clear_phy(iport, iphy); 1280 if (status != SCI_SUCCESS) 1281 return status; 1282 1283 sci_port_deactivate_phy(iport, iphy, true); 1284 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; 1285 port_state_machine_change(iport, 1286 SCI_PORT_SUB_CONFIGURING); 1287 return SCI_SUCCESS; 1288 case SCI_PORT_SUB_CONFIGURING: 1289 status = sci_port_clear_phy(iport, iphy); 1290 1291 if (status != SCI_SUCCESS) 1292 return status; 1293 sci_port_deactivate_phy(iport, iphy, true); 1294 1295 /* Re-enter the configuring state since this may be the last phy in 1296 * the port 1297 */ 1298 port_state_machine_change(iport, 1299 SCI_PORT_SUB_CONFIGURING); 1300 return SCI_SUCCESS; 1301 default: 1302 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", 1303 __func__, port_state_name(state)); 1304 return SCI_FAILURE_INVALID_STATE; 1305 } 1306 } 1307 1308 enum sci_status sci_port_link_up(struct isci_port *iport, 1309 struct isci_phy *iphy) 1310 { 1311 enum sci_port_states state; 1312 1313 state = iport->sm.current_state_id; 1314 switch (state) { 1315 case SCI_PORT_SUB_WAITING: 1316 /* Since this is the first phy going link up for the port we 1317 * can just enable it and continue 1318 */ 1319 sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME); 1320 1321 port_state_machine_change(iport, 1322 SCI_PORT_SUB_OPERATIONAL); 1323 return SCI_SUCCESS; 1324 case SCI_PORT_SUB_OPERATIONAL: 1325 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); 1326 return SCI_SUCCESS; 1327 case SCI_PORT_RESETTING: 1328 /* TODO We should make sure that the phy that has gone 1329 * link up is the same one on which we sent the reset. It is 1330 * possible that the phy on which we sent the reset is not the 1331 * one that has gone link up and we want to make sure that 1332 * phy being reset comes back. Consider the case where a 1333 * reset is sent but before the hardware processes the reset it 1334 * get a link up on the port because of a hot plug event. 1335 * because of the reset request this phy will go link down 1336 * almost immediately. 1337 */ 1338 1339 /* In the resetting state we don't notify the user regarding 1340 * link up and link down notifications. 1341 */ 1342 sci_port_general_link_up_handler(iport, iphy, PF_RESUME); 1343 return SCI_SUCCESS; 1344 default: 1345 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", 1346 __func__, port_state_name(state)); 1347 return SCI_FAILURE_INVALID_STATE; 1348 } 1349 } 1350 1351 enum sci_status sci_port_link_down(struct isci_port *iport, 1352 struct isci_phy *iphy) 1353 { 1354 enum sci_port_states state; 1355 1356 state = iport->sm.current_state_id; 1357 switch (state) { 1358 case SCI_PORT_SUB_OPERATIONAL: 1359 sci_port_deactivate_phy(iport, iphy, true); 1360 1361 /* If there are no active phys left in the port, then 1362 * transition the port to the WAITING state until such time 1363 * as a phy goes link up 1364 */ 1365 if (iport->active_phy_mask == 0) 1366 port_state_machine_change(iport, 1367 SCI_PORT_SUB_WAITING); 1368 return SCI_SUCCESS; 1369 case SCI_PORT_RESETTING: 1370 /* In the resetting state we don't notify the user regarding 1371 * link up and link down notifications. */ 1372 sci_port_deactivate_phy(iport, iphy, false); 1373 return SCI_SUCCESS; 1374 default: 1375 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", 1376 __func__, port_state_name(state)); 1377 return SCI_FAILURE_INVALID_STATE; 1378 } 1379 } 1380 1381 enum sci_status sci_port_start_io(struct isci_port *iport, 1382 struct isci_remote_device *idev, 1383 struct isci_request *ireq) 1384 { 1385 enum sci_port_states state; 1386 1387 state = iport->sm.current_state_id; 1388 switch (state) { 1389 case SCI_PORT_SUB_WAITING: 1390 return SCI_FAILURE_INVALID_STATE; 1391 case SCI_PORT_SUB_OPERATIONAL: 1392 iport->started_request_count++; 1393 return SCI_SUCCESS; 1394 default: 1395 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", 1396 __func__, port_state_name(state)); 1397 return SCI_FAILURE_INVALID_STATE; 1398 } 1399 } 1400 1401 enum sci_status sci_port_complete_io(struct isci_port *iport, 1402 struct isci_remote_device *idev, 1403 struct isci_request *ireq) 1404 { 1405 enum sci_port_states state; 1406 1407 state = iport->sm.current_state_id; 1408 switch (state) { 1409 case SCI_PORT_STOPPED: 1410 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n", 1411 __func__, port_state_name(state)); 1412 return SCI_FAILURE_INVALID_STATE; 1413 case SCI_PORT_STOPPING: 1414 sci_port_decrement_request_count(iport); 1415 1416 if (iport->started_request_count == 0) 1417 port_state_machine_change(iport, 1418 SCI_PORT_STOPPED); 1419 break; 1420 case SCI_PORT_READY: 1421 case SCI_PORT_RESETTING: 1422 case SCI_PORT_FAILED: 1423 case SCI_PORT_SUB_WAITING: 1424 case SCI_PORT_SUB_OPERATIONAL: 1425 sci_port_decrement_request_count(iport); 1426 break; 1427 case SCI_PORT_SUB_CONFIGURING: 1428 sci_port_decrement_request_count(iport); 1429 if (iport->started_request_count == 0) { 1430 port_state_machine_change(iport, 1431 SCI_PORT_SUB_OPERATIONAL); 1432 } 1433 break; 1434 } 1435 return SCI_SUCCESS; 1436 } 1437 1438 static void sci_port_enable_port_task_scheduler(struct isci_port *iport) 1439 { 1440 u32 pts_control_value; 1441 1442 /* enable the port task scheduler in a suspended state */ 1443 pts_control_value = readl(&iport->port_task_scheduler_registers->control); 1444 pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND); 1445 writel(pts_control_value, &iport->port_task_scheduler_registers->control); 1446 } 1447 1448 static void sci_port_disable_port_task_scheduler(struct isci_port *iport) 1449 { 1450 u32 pts_control_value; 1451 1452 pts_control_value = readl(&iport->port_task_scheduler_registers->control); 1453 pts_control_value &= 1454 ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND)); 1455 writel(pts_control_value, &iport->port_task_scheduler_registers->control); 1456 } 1457 1458 static void sci_port_post_dummy_remote_node(struct isci_port *iport) 1459 { 1460 struct isci_host *ihost = iport->owning_controller; 1461 u8 phys_index = iport->physical_port_index; 1462 union scu_remote_node_context *rnc; 1463 u16 rni = iport->reserved_rni; 1464 u32 command; 1465 1466 rnc = &ihost->remote_node_context_table[rni]; 1467 rnc->ssp.is_valid = true; 1468 1469 command = SCU_CONTEXT_COMMAND_POST_RNC_32 | 1470 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; 1471 1472 sci_controller_post_request(ihost, command); 1473 1474 /* ensure hardware has seen the post rnc command and give it 1475 * ample time to act before sending the suspend 1476 */ 1477 readl(&ihost->smu_registers->interrupt_status); /* flush */ 1478 udelay(10); 1479 1480 command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX | 1481 phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni; 1482 1483 sci_controller_post_request(ihost, command); 1484 } 1485 1486 static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm) 1487 { 1488 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 1489 1490 if (iport->sm.previous_state_id == SCI_PORT_STOPPING) { 1491 /* 1492 * If we enter this state becasuse of a request to stop 1493 * the port then we want to disable the hardwares port 1494 * task scheduler. */ 1495 sci_port_disable_port_task_scheduler(iport); 1496 } 1497 } 1498 1499 static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm) 1500 { 1501 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 1502 1503 /* Enable and suspend the port task scheduler */ 1504 sci_port_enable_port_task_scheduler(iport); 1505 } 1506 1507 static void sci_port_ready_state_enter(struct sci_base_state_machine *sm) 1508 { 1509 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 1510 struct isci_host *ihost = iport->owning_controller; 1511 u32 prev_state; 1512 1513 prev_state = iport->sm.previous_state_id; 1514 if (prev_state == SCI_PORT_RESETTING) 1515 isci_port_hard_reset_complete(iport, SCI_SUCCESS); 1516 else 1517 dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n", 1518 __func__, iport->physical_port_index); 1519 1520 /* Post and suspend the dummy remote node context for this port. */ 1521 sci_port_post_dummy_remote_node(iport); 1522 1523 /* Start the ready substate machine */ 1524 port_state_machine_change(iport, 1525 SCI_PORT_SUB_WAITING); 1526 } 1527 1528 static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm) 1529 { 1530 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 1531 1532 sci_del_timer(&iport->timer); 1533 } 1534 1535 static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm) 1536 { 1537 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 1538 1539 sci_del_timer(&iport->timer); 1540 1541 sci_port_destroy_dummy_resources(iport); 1542 } 1543 1544 static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) 1545 { 1546 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 1547 1548 isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); 1549 } 1550 1551 void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout) 1552 { 1553 int phy_index; 1554 u32 phy_mask = iport->active_phy_mask; 1555 1556 if (timeout) 1557 ++iport->hang_detect_users; 1558 else if (iport->hang_detect_users > 1) 1559 --iport->hang_detect_users; 1560 else 1561 iport->hang_detect_users = 0; 1562 1563 if (timeout || (iport->hang_detect_users == 0)) { 1564 for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { 1565 if ((phy_mask >> phy_index) & 1) { 1566 writel(timeout, 1567 &iport->phy_table[phy_index] 1568 ->link_layer_registers 1569 ->link_layer_hang_detection_timeout); 1570 } 1571 } 1572 } 1573 } 1574 /* --------------------------------------------------------------------------- */ 1575 1576 static const struct sci_base_state sci_port_state_table[] = { 1577 [SCI_PORT_STOPPED] = { 1578 .enter_state = sci_port_stopped_state_enter, 1579 .exit_state = sci_port_stopped_state_exit 1580 }, 1581 [SCI_PORT_STOPPING] = { 1582 .exit_state = sci_port_stopping_state_exit 1583 }, 1584 [SCI_PORT_READY] = { 1585 .enter_state = sci_port_ready_state_enter, 1586 }, 1587 [SCI_PORT_SUB_WAITING] = { 1588 .enter_state = sci_port_ready_substate_waiting_enter, 1589 .exit_state = scic_sds_port_ready_substate_waiting_exit, 1590 }, 1591 [SCI_PORT_SUB_OPERATIONAL] = { 1592 .enter_state = sci_port_ready_substate_operational_enter, 1593 .exit_state = sci_port_ready_substate_operational_exit 1594 }, 1595 [SCI_PORT_SUB_CONFIGURING] = { 1596 .enter_state = sci_port_ready_substate_configuring_enter 1597 }, 1598 [SCI_PORT_RESETTING] = { 1599 .exit_state = sci_port_resetting_state_exit 1600 }, 1601 [SCI_PORT_FAILED] = { 1602 .enter_state = sci_port_failed_state_enter, 1603 } 1604 }; 1605 1606 void sci_port_construct(struct isci_port *iport, u8 index, 1607 struct isci_host *ihost) 1608 { 1609 sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED); 1610 1611 iport->logical_port_index = SCIC_SDS_DUMMY_PORT; 1612 iport->physical_port_index = index; 1613 iport->active_phy_mask = 0; 1614 iport->enabled_phy_mask = 0; 1615 iport->last_active_phy = 0; 1616 iport->ready_exit = false; 1617 1618 iport->owning_controller = ihost; 1619 1620 iport->started_request_count = 0; 1621 iport->assigned_device_count = 0; 1622 iport->hang_detect_users = 0; 1623 1624 iport->reserved_rni = SCU_DUMMY_INDEX; 1625 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; 1626 1627 sci_init_timer(&iport->timer, port_timeout); 1628 1629 iport->port_task_scheduler_registers = NULL; 1630 1631 for (index = 0; index < SCI_MAX_PHYS; index++) 1632 iport->phy_table[index] = NULL; 1633 } 1634 1635 void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) 1636 { 1637 struct isci_host *ihost = iport->owning_controller; 1638 1639 /* notify the user. */ 1640 isci_port_bc_change_received(ihost, iport, iphy); 1641 } 1642 1643 static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport) 1644 { 1645 wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state)); 1646 } 1647 1648 int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, 1649 struct isci_phy *iphy) 1650 { 1651 unsigned long flags; 1652 enum sci_status status; 1653 int ret = TMF_RESP_FUNC_COMPLETE; 1654 1655 dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n", 1656 __func__, iport); 1657 1658 spin_lock_irqsave(&ihost->scic_lock, flags); 1659 set_bit(IPORT_RESET_PENDING, &iport->state); 1660 1661 #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT 1662 status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); 1663 1664 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1665 1666 if (status == SCI_SUCCESS) { 1667 wait_port_reset(ihost, iport); 1668 1669 dev_dbg(&ihost->pdev->dev, 1670 "%s: iport = %p; hard reset completion\n", 1671 __func__, iport); 1672 1673 if (iport->hard_reset_status != SCI_SUCCESS) { 1674 ret = TMF_RESP_FUNC_FAILED; 1675 1676 dev_err(&ihost->pdev->dev, 1677 "%s: iport = %p; hard reset failed (0x%x)\n", 1678 __func__, iport, iport->hard_reset_status); 1679 } 1680 } else { 1681 clear_bit(IPORT_RESET_PENDING, &iport->state); 1682 wake_up(&ihost->eventq); 1683 ret = TMF_RESP_FUNC_FAILED; 1684 1685 dev_err(&ihost->pdev->dev, 1686 "%s: iport = %p; sci_port_hard_reset call" 1687 " failed 0x%x\n", 1688 __func__, iport, status); 1689 1690 } 1691 return ret; 1692 } 1693 1694 int isci_ata_check_ready(struct domain_device *dev) 1695 { 1696 struct isci_port *iport = dev->port->lldd_port; 1697 struct isci_host *ihost = dev_to_ihost(dev); 1698 struct isci_remote_device *idev; 1699 unsigned long flags; 1700 int rc = 0; 1701 1702 spin_lock_irqsave(&ihost->scic_lock, flags); 1703 idev = isci_lookup_device(dev); 1704 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1705 1706 if (!idev) 1707 goto out; 1708 1709 if (test_bit(IPORT_RESET_PENDING, &iport->state)) 1710 goto out; 1711 1712 rc = !!iport->active_phy_mask; 1713 out: 1714 isci_put_device(idev); 1715 1716 return rc; 1717 } 1718 1719 void isci_port_deformed(struct asd_sas_phy *phy) 1720 { 1721 struct isci_host *ihost = phy->ha->lldd_ha; 1722 struct isci_port *iport = phy->port->lldd_port; 1723 unsigned long flags; 1724 int i; 1725 1726 /* we got a port notification on a port that was subsequently 1727 * torn down and libsas is just now catching up 1728 */ 1729 if (!iport) 1730 return; 1731 1732 spin_lock_irqsave(&ihost->scic_lock, flags); 1733 for (i = 0; i < SCI_MAX_PHYS; i++) { 1734 if (iport->active_phy_mask & 1 << i) 1735 break; 1736 } 1737 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1738 1739 if (i >= SCI_MAX_PHYS) 1740 dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n", 1741 __func__, (long) (iport - &ihost->ports[0])); 1742 } 1743 1744 void isci_port_formed(struct asd_sas_phy *phy) 1745 { 1746 struct isci_host *ihost = phy->ha->lldd_ha; 1747 struct isci_phy *iphy = to_iphy(phy); 1748 struct asd_sas_port *port = phy->port; 1749 struct isci_port *iport = NULL; 1750 unsigned long flags; 1751 int i; 1752 1753 /* initial ports are formed as the driver is still initializing, 1754 * wait for that process to complete 1755 */ 1756 wait_for_start(ihost); 1757 1758 spin_lock_irqsave(&ihost->scic_lock, flags); 1759 for (i = 0; i < SCI_MAX_PORTS; i++) { 1760 iport = &ihost->ports[i]; 1761 if (iport->active_phy_mask & 1 << iphy->phy_index) 1762 break; 1763 } 1764 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1765 1766 if (i >= SCI_MAX_PORTS) 1767 iport = NULL; 1768 1769 port->lldd_port = iport; 1770 } 1771