1 /* 2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 3 * 4 * Copyright (c) 2008-2009 USI Co., Ltd. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 14 * substantially similar to the "NO WARRANTY" disclaimer below 15 * ("Disclaimer") and any redistribution must be conditioned upon 16 * including a substantially similar Disclaimer requirement for further 17 * binary redistribution. 18 * 3. Neither the names of the above-listed copyright holders nor the names 19 * of any contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * Alternatively, this software may be distributed under the terms of the 23 * GNU General Public License ("GPL") version 2 as published by the Free 24 * Software Foundation. 25 * 26 * NO WARRANTY 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGES. 
 *
 */
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
#include "pm80xx_tracepoints.h"

/**
 * read_main_config_table - read the configure table and save it.
 * @pm8001_ha: our hba card information
 *
 * Snapshot the SPC MPI main configuration table from chip MMIO into the
 * driver-local cache (main_cfg_tbl.pm8001_tbl).  The raw hex offsets are
 * the fixed field positions of that table.
 */
static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;

	pm8001_ha->main_cfg_tbl.pm8001_tbl.signature =
		pm8001_mr32(address, 0x00);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
		pm8001_mr32(address, 0x04);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev =
		pm8001_mr32(address, 0x08);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io =
		pm8001_mr32(address, 0x0C);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl =
		pm8001_mr32(address, 0x10);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
		pm8001_mr32(address, 0x14);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset =
		pm8001_mr32(address, 0x18);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
		pm8001_mr32(address, MAIN_IBQ_OFFSET);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
		pm8001_mr32(address, MAIN_OBQ_OFFSET);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag =
		pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);

	/* read analog Setting offset from the configuration table */
	/* ("anolog" is a historical typo in the struct field name) */
	pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
		pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);

	/* read Error Dump Offset and Length */
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
}

/**
 * read_general_status_table - read the general status table and save it.
 * @pm8001_ha: our hba card information
 *
 * Mirror the whole general status table (GST), including the reserved
 * words, into gs_tbl.pm8001_tbl.
 */
static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->general_stat_tbl_addr;

	pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate =
		pm8001_mr32(address, 0x00);
	pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 =
		pm8001_mr32(address, 0x04);
	pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 =
		pm8001_mr32(address, 0x08);
	pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt =
		pm8001_mr32(address, 0x0C);
	pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt =
		pm8001_mr32(address, 0x10);
	pm8001_ha->gs_tbl.pm8001_tbl.rsvd =
		pm8001_mr32(address, 0x14);
	/* eight consecutive per-phy state words at 0x18..0x34 */
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] =
		pm8001_mr32(address, 0x18);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] =
		pm8001_mr32(address, 0x1C);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] =
		pm8001_mr32(address, 0x20);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] =
		pm8001_mr32(address, 0x24);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] =
		pm8001_mr32(address, 0x28);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] =
		pm8001_mr32(address, 0x2C);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] =
		pm8001_mr32(address, 0x30);
	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] =
		pm8001_mr32(address, 0x34);
	pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val =
		pm8001_mr32(address, 0x38);
	pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] =
		pm8001_mr32(address, 0x3C);
	pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] =
		pm8001_mr32(address, 0x40);
	/* eight recovery error info words at 0x44..0x60 */
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] =
		pm8001_mr32(address, 0x44);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] =
		pm8001_mr32(address, 0x48);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] =
		pm8001_mr32(address, 0x4C);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] =
		pm8001_mr32(address, 0x50);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] =
		pm8001_mr32(address, 0x54);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] =
		pm8001_mr32(address, 0x58);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] =
		pm8001_mr32(address, 0x5C);
	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] =
		pm8001_mr32(address, 0x60);
}

/**
 * read_inbnd_queue_table - read the inbound queue table and save it.
 * @pm8001_ha: our hba card information
 *
 * For every possible inbound queue (entries are 0x20 bytes apart), cache
 * which PCI BAR and offset hold the queue's producer index (PI) register.
 */
static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
	for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
		u32 offset = i * 0x20;
		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
			get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
		pm8001_ha->inbnd_q_tbl[i].pi_offset =
			pm8001_mr32(address, (offset + 0x18));
	}
}

/**
 * read_outbnd_queue_table - read the outbound queue table and save it.
 * @pm8001_ha: our hba card information
 *
 * Same idea as read_inbnd_queue_table(), but outbound entries are 0x24
 * bytes apart and the cached register is the consumer index (CI).
 */
static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
		u32 offset = i * 0x24;
		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
			get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
		pm8001_ha->outbnd_q_tbl[i].ci_offset =
			pm8001_mr32(address, (offset + 0x18));
	}
}

/**
 * init_default_table_values - init the default table.
 * @pm8001_ha: our hba card information
 *
 * Fill the driver-local copies of the main configuration table and of the
 * inbound/outbound queue tables with their defaults; the values are pushed
 * to the HBA afterwards by the update_* helpers.
 */
static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	u32 offsetib, offsetob;
	void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
	void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
	u32 ib_offset = pm8001_ha->ib_offset;
	u32 ob_offset = pm8001_ha->ob_offset;
	u32 ci_offset = pm8001_ha->ci_offset;
	u32 pi_offset = pm8001_ha->pi_offset;

	/* no per-phy event routing overrides by default */
	pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
		0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
		0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;

	/* AAP1 and IOP event logs: DMA address, size, option (0x01) */
	pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr =
		pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr =
		pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size =
		PM8001_EVENT_LOG_SIZE;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr =
		pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr =
		pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size =
		PM8001_EVENT_LOG_SIZE;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01;
	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01;

	for (i = 0; i < pm8001_ha->max_q_num; i++) {
		/* element count | IOMB size in bits 29:16 | bit30=0: inbound */
		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
		pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
			pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi;
		pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
			pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo;
		pm8001_ha->inbnd_q_tbl[i].base_virt =
			(u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr;
		pm8001_ha->inbnd_q_tbl[i].total_length =
			pm8001_ha->memoryMap.region[ib_offset + i].total_len;
		pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
			pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi;
		pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
			pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
		pm8001_ha->inbnd_q_tbl[i].ci_virt =
			pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
		/* zero the in-memory consumer index before enabling the queue */
		pm8001_write_32(pm8001_ha->inbnd_q_tbl[i].ci_virt, 0, 0);
		offsetib = i * 0x20;
		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
			get_pci_bar_index(pm8001_mr32(addressib,
				(offsetib + 0x14)));
		pm8001_ha->inbnd_q_tbl[i].pi_offset =
			pm8001_mr32(addressib, (offsetib + 0x18));
		pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
		pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
	}
	for (i = 0; i < pm8001_ha->max_q_num; i++) {
		/* element count | IOMB size in bits 29:16 | bit30=1: outbound */
		pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
		pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
			pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi;
		pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
			pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo;
		pm8001_ha->outbnd_q_tbl[i].base_virt =
			(u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr;
		pm8001_ha->outbnd_q_tbl[i].total_length =
			pm8001_ha->memoryMap.region[ob_offset + i].total_len;
		pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
			pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi;
		pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
			pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo;
		/* count 0 | delay 10 in bits 23:16 | MSI-X vector = queue index
		 * in bits 31:24 - assumed field layout per MPI spec; confirm */
		pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay =
			0 | (10 << 16) | (i << 24);
		pm8001_ha->outbnd_q_tbl[i].pi_virt =
			pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
		/* zero the in-memory producer index before enabling the queue */
		pm8001_write_32(pm8001_ha->outbnd_q_tbl[i].pi_virt, 0, 0);
		offsetob = i * 0x24;
		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
			get_pci_bar_index(pm8001_mr32(addressob,
				offsetob + 0x14));
		pm8001_ha->outbnd_q_tbl[i].ci_offset =
			pm8001_mr32(addressob, (offsetob + 0x18));
		pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
		pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
	}
}

/**
 * update_main_config_table - update the main default table to the HBA.
 * @pm8001_ha: our hba card information
 *
 * Write the host-configurable part of the cached main configuration table
 * (offsets 0x24..0x70) back to the chip.
 */
static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;

	pm8001_mw32(address, 0x24,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
	pm8001_mw32(address, 0x28,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
	pm8001_mw32(address, 0x2C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
	pm8001_mw32(address, 0x30,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
	pm8001_mw32(address, 0x34,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
	pm8001_mw32(address, 0x38,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_ITNexus_event_pid0_3);
	pm8001_mw32(address, 0x3C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_ITNexus_event_pid4_7);
	pm8001_mw32(address, 0x40,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_ssp_event_pid0_3);
	pm8001_mw32(address, 0x44,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_ssp_event_pid4_7);
	pm8001_mw32(address, 0x48,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_smp_event_pid0_3);
	pm8001_mw32(address, 0x4C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.
					outbound_tgt_smp_event_pid4_7);
	pm8001_mw32(address, 0x50,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
	pm8001_mw32(address, 0x54,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
	pm8001_mw32(address, 0x58,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
	pm8001_mw32(address, 0x5C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
	pm8001_mw32(address, 0x60,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
	pm8001_mw32(address, 0x64,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
	pm8001_mw32(address, 0x68,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
	pm8001_mw32(address, 0x6C,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
	pm8001_mw32(address, 0x70,
		pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
}

/**
 * update_inbnd_queue_table - update the inbound queue table to the HBA.
 * @pm8001_ha: our hba card information
 * @number: entry in the queue
 *
 * Push one cached inbound queue descriptor (entry stride 0x20) to the
 * chip's inbound queue configuration table.
 */
static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
				     int number)
{
	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
	u16 offset = number * 0x20;

	pm8001_mw32(address, offset + 0x00,
		pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
	pm8001_mw32(address, offset + 0x04,
		pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
	pm8001_mw32(address, offset + 0x08,
		pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
	pm8001_mw32(address, offset + 0x0C,
		pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
	pm8001_mw32(address, offset + 0x10,
		pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
}

/**
 * update_outbnd_queue_table - update the outbound queue table to the HBA.
 * @pm8001_ha: our hba card information
 * @number: entry in the queue
 *
 * Push one cached outbound queue descriptor (entry stride 0x24) to the
 * chip, including the interrupt vector/count/delay word at +0x1C.
 */
static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
				      int number)
{
	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
	u16 offset = number * 0x24;

	pm8001_mw32(address, offset + 0x00,
		pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
	pm8001_mw32(address, offset + 0x04,
		pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
	pm8001_mw32(address, offset + 0x08,
		pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
	pm8001_mw32(address, offset + 0x0C,
		pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
	pm8001_mw32(address, offset + 0x10,
		pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
	pm8001_mw32(address, offset + 0x1C,
		pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
}

/**
 * pm8001_bar4_shift - function is called to shift BAR base address
 * @pm8001_ha : our hba card information
 * @shiftValue : shifting value in memory bar.
 *
 * Program the inbound AXI translation register so that the BAR4 window
 * exposes chip-internal address @shiftValue, then poll (up to ~1 s) until
 * the register reads back the written value.
 *
 * Return: 0 on success, -1 if the readback never matched (timeout).
 */
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
{
	u32 regVal;
	unsigned long start;

	/* program the inbound AXI translation Lower Address */
	pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);

	/* confirm the setting is written */
	start = jiffies + HZ; /* 1 sec */
	do {
		regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
	} while ((regVal != shiftValue) && time_before(jiffies, start));

	if (regVal != shiftValue) {
		pm8001_dbg(pm8001_ha, INIT,
			   "TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW = 0x%x\n",
			   regVal);
		return -1;
	}
	return 0;
}

/**
 * mpi_set_phys_g3_with_ssc
 * @pm8001_ha: our hba card information
 * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc.
 *
 * NOTE(review): @SSCbit is never referenced in the body - every phy is
 * written with the same constant 0x80001501 regardless; confirm whether
 * the parameter was meant to select the G3 with/without-SSC bit.
 */
static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
				     u32 SSCbit)
{
	u32 offset, i;
	unsigned long flags;

#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074
#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074
#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12
#define PHY_G3_WITH_SSC_BIT_SHIFT 13
#define SNW3_PHY_CAPABILITIES_PARITY 31

   /*
    * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
    * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
    */
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (-1 == pm8001_bar4_shift(pm8001_ha,
				SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) {
		/* BAR shift failed; bail out without touching the phys */
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return;
	}

	for (i = 0; i < 4; i++) {
		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
	}
	/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
	if (-1 == pm8001_bar4_shift(pm8001_ha,
				SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return;
	}
	for (i = 4; i < 8; i++) {
		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
	}
	/*************************************************************
	Change the SSC upspreading value to 0x0 so that upspreading is disabled.
	Device MABC SMOD0 Controls
	Address: (via MEMBASE-III):
	Using shifted destination address 0x0_0000: with Offset 0xD8

	31:28 R/W Reserved Do not change
	27:24 R/W SAS_SMOD_SPRDUP 0000
	23:20 R/W SAS_SMOD_SPRDDN 0000
	19:0  R/W Reserved Do not change
	Upon power-up this register will read as 0x8990c016,
	and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
	so that the written value will be 0x8090c016.
	This will ensure only down-spreading SSC is enabled on the SPC.

	NOTE(review): the code below writes 0x8000C016, which also clears
	SAS_SMOD_SPRDDN (bits 23:20), not the 0x8090c016 described above -
	confirm against the SPC register specification.
	*************************************************************/
	/* read before write - presumably to latch/flush the register; verify */
	pm8001_cr32(pm8001_ha, 2, 0xd8);
	pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);

	/*set the shifted destination address to 0x0 to avoid error operation */
	pm8001_bar4_shift(pm8001_ha, 0x0);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return;
}

/**
 * mpi_set_open_retry_interval_reg
 * @pm8001_ha: our hba card information
 * @interval: interval time for each OPEN_REJECT (RETRY). The units are in 1us.
 *
 * Program the OPEN_REJECT (RETRY) interval register of all eight phys.
 * Phys 0-3 are reached through the 0x0003_0000 BAR4 window, phys 4-7
 * through 0x0004_0000; each phy's register block is 0x4000 apart.
 * Only the low 16 bits of @interval are used.
 */
static void mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
					    u32 interval)
{
	u32 offset;
	u32 value;
	u32 i;
	unsigned long flags;

#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
#define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4
#define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4
#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF

	value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	/* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
	if (-1 == pm8001_bar4_shift(pm8001_ha,
				OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return;
	}
	for (i = 0; i < 4; i++) {
		offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
		pm8001_cw32(pm8001_ha, 2, offset, value);
	}

	if (-1 == pm8001_bar4_shift(pm8001_ha,
				OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return;
	}
	for (i = 4; i < 8; i++) {
		offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
		pm8001_cw32(pm8001_ha, 2, offset, value);
	}
	/*set the shifted destination address to 0x0 to avoid error operation */
	pm8001_bar4_shift(pm8001_ha, 0x0);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return;
}

/**
 * mpi_init_check - check firmware initialization status.
 * @pm8001_ha: our hba card information
 *
 * Ring the inbound doorbell to tell the SPC firmware that the
 * configuration tables were updated, wait (up to ~1 s) for the firmware
 * to acknowledge by clearing the doorbell bit, then verify that the GST
 * reports the MPI initialized state with no error code.
 *
 * Return: 0 on success, -1 on timeout or MPI init failure.
 */
static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 max_wait_count;
	u32 value;
	u32 gst_len_mpistate;

	/* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
	table is updated */
	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE);
	/* wait until Inbound DoorBell Clear Register toggled */
	max_wait_count = 1 * 1000 * 1000;/* 1 sec */
	do {
		udelay(1);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
		value &= SPC_MSGU_CFG_TABLE_UPDATE;
	} while ((value != 0) && (--max_wait_count));

	if (!max_wait_count)
		return -1;
	/* check the MPI-State for initialization */
	gst_len_mpistate =
		pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
			    GST_GSTLEN_MPIS_OFFSET);
	if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK))
		return -1;
	/* check MPI Initialization error: upper 16 bits must read zero */
	gst_len_mpistate = gst_len_mpistate >> 16;
	if (0x0000 != gst_len_mpistate)
		return -1;
	return 0;
}

/**
 * check_fw_ready - The LLDD check if the FW is ready, if not, return error.
 * @pm8001_ha: our hba card information
 *
 * Inspect the MSGU scratch pad registers for AAP1/IOP error states, then
 * poll (up to ~1 s) until both sides report ready.
 *
 * Return: 0 when ready, -1 on error state or timeout.
 */
static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
{
	u32 value, value1;
	u32 max_wait_count;

	/* check error state */
	value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
	value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
	/* check AAP error */
	if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) {
		/* error state; scratch pad 0 is read for the error detail
		 * (value is not otherwise used here) */
		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
		return -1;
	}

	/* check IOP error */
	if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) {
		/* error state; scratch pad 3 is read for the error detail */
		value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
		return -1;
	}

	/* bit 4-31 of scratch pad1 should be zeros if it is not
	in error state*/
	if (value & SCRATCH_PAD1_STATE_MASK) {
		/* error case */
		pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
		return -1;
	}

	/* bit 2, 4-31 of scratch pad2 should be zeros if it is not
	in error state */
	if (value1 & SCRATCH_PAD2_STATE_MASK) {
		/* error case */
		return -1;
	}

	max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */

	/* wait until scratch pad 1 and 2 registers in ready state */
	do {
		udelay(1);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
			& SCRATCH_PAD1_RDY;
		value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
			& SCRATCH_PAD2_RDY;
		if ((--max_wait_count) == 0)
			return -1;
	} while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY));
	return 0;
}

/*
 * init_pci_device_addresses - locate the MPI tables behind scratch pad 0.
 *
 * Scratch pad 0 (offset 0x44) encodes a logical PCI BAR number in bits
 * 31:26 and the main configuration table offset in bits 25:0; the other
 * table addresses are then read from fixed fields of that table
 * (+0x18 GST, +0x1C inbound queues, +0x20 outbound queues).
 */
static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *base_addr;
	u32 value;
	u32 offset;
	u32 pcibar;
	u32 pcilogic;

	value = pm8001_cr32(pm8001_ha, 0, 0x44);
	offset = value & 0x03FFFFFF;
	pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 Offset: %x\n", offset);
	pcilogic = (value & 0xFC000000) >> 26;
	pcibar = get_pci_bar_index(pcilogic);
	pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
	pm8001_ha->main_cfg_tbl_addr = base_addr =
		pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
	pm8001_ha->general_stat_tbl_addr =
		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18);
	pm8001_ha->inbnd_q_tbl_addr =
		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C);
	pm8001_ha->outbnd_q_tbl_addr =
		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20);
}

/**
 * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
 * @pm8001_ha: our hba card information
 *
 * Sequence: optional BAR4 shift (8081/0042 parts), firmware-ready check,
 * table discovery and defaults, push tables to the HBA, per-device phy
 * tweaks, then doorbell + MPI state check.
 *
 * Return: 0 on success, -1 on BAR shift failure, -EBUSY when the
 * firmware is not ready or MPI initialization fails.
 */
static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
{
	u32 i = 0;
	u16 deviceid;

	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
	/* 8081 controllers need BAR shift to access MPI space
	* as this is shared with BIOS data */
	if (deviceid == 0x8081 || deviceid == 0x0042) {
		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Shift Bar4 to 0x%x failed\n",
				   GSM_SM_BASE);
			return -1;
		}
	}
	/* check the firmware status */
	if (-1 == check_fw_ready(pm8001_ha)) {
		pm8001_dbg(pm8001_ha, FAIL, "Firmware is not ready!\n");
		return -EBUSY;
	}

	/* Initialize pci space address eg: mpi offset */
	init_pci_device_addresses(pm8001_ha);
	init_default_table_values(pm8001_ha);
	read_main_config_table(pm8001_ha);
	read_general_status_table(pm8001_ha);
	read_inbnd_queue_table(pm8001_ha);
	read_outbnd_queue_table(pm8001_ha);
	/* update main config table ,inbound table and outbound table */
	update_main_config_table(pm8001_ha);
	for (i = 0; i < pm8001_ha->max_q_num; i++)
		update_inbnd_queue_table(pm8001_ha, i);
	for (i = 0; i < pm8001_ha->max_q_num; i++)
		update_outbnd_queue_table(pm8001_ha, i);
	/* 8081 controller donot require these operations */
	if (deviceid != 0x8081 && deviceid != 0x0042) {
		mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
		/* 7->130ms, 34->500ms, 119->1.5s */
		mpi_set_open_retry_interval_reg(pm8001_ha, 119);
	}
	/* notify firmware update finished and check initialization status */
	if (0 == mpi_init_check(pm8001_ha)) {
		pm8001_dbg(pm8001_ha, INIT, "MPI initialize successful!\n");
	} else
		return -EBUSY;
	/*This register is a 16-bit timer with a resolution of 1us. This is the
	timer used for interrupt delay/coalescing in the PCIe Application Layer.
	Zero is not a valid value. A value of 1 in the register will cause the
	interrupts to be normal. A value greater than 1 will cause coalescing
	delays.*/
	pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1);
	pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0);
	return 0;
}

/*
 * mpi_uninit_check - ask the firmware to stop the MPI and wait for the
 * general status table to reach the UNINIT state.
 *
 * Return: 0 on success, -1 on doorbell or state-transition timeout.
 */
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 max_wait_count;
	u32 value;
	u32 gst_len_mpistate;
	u16 deviceid;

	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
	/* same BAR shift requirement as pm8001_chip_init() */
	if (deviceid == 0x8081 || deviceid == 0x0042) {
		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Shift Bar4 to 0x%x failed\n",
				   GSM_SM_BASE);
			return -1;
		}
	}
	init_pci_device_addresses(pm8001_ha);
	/* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
	table is stop */
	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET);

	/* wait until Inbound DoorBell Clear Register toggled */
	max_wait_count = 1 * 1000 * 1000;/* 1 sec */
	do {
		udelay(1);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
		value &= SPC_MSGU_CFG_TABLE_RESET;
	} while ((value != 0) && (--max_wait_count));

	if (!max_wait_count) {
		pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:IBDB value/=0x%x\n",
			   value);
		return -1;
	}

	/* check the MPI-State for termination in progress */
	/* wait until Inbound DoorBell Clear Register toggled */
	max_wait_count = 1 * 1000 *
1000; /* 1 sec */
	do {
		udelay(1);
		gst_len_mpistate =
			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
				    GST_GSTLEN_MPIS_OFFSET);
		if (GST_MPI_STATE_UNINIT ==
			(gst_len_mpistate & GST_MPI_STATE_MASK))
			break;
	} while (--max_wait_count);
	if (!max_wait_count) {
		pm8001_dbg(pm8001_ha, FAIL, " TIME OUT MPI State = 0x%x\n",
			   gst_len_mpistate & GST_MPI_STATE_MASK);
		return -1;
	}
	return 0;
}

/**
 * soft_reset_ready_check - Function to check FW is ready for soft reset.
 * @pm8001_ha: our hba card information
 *
 * First uninitializes the MPI, then checks the "FW ready for reset" bit
 * in scratch pad 2.  If it is not set, an NMI is triggered twice through
 * the RB6 window and the bit is re-checked after 100 ms.
 *
 * Return: 0 when the firmware is ready for reset, -1 otherwise.
 */
static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 regVal, regVal1, regVal2;

	if (mpi_uninit_check(pm8001_ha) != 0) {
		pm8001_dbg(pm8001_ha, FAIL, "MPI state is not ready\n");
		return -1;
	}
	/* read the scratch pad 2 register bit 2 */
	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
		& SCRATCH_PAD2_FWRDY_RST;
	if (regVal == SCRATCH_PAD2_FWRDY_RST) {
		pm8001_dbg(pm8001_ha, INIT, "Firmware is ready for reset.\n");
	} else {
		unsigned long flags;
		/* Trigger NMI twice via RB6 */
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			pm8001_dbg(pm8001_ha, FAIL,
				   "Shift Bar4 to 0x%x failed\n",
				   RB6_ACCESS_REG);
			return -1;
		}
		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
			    RB6_MAGIC_NUMBER_RST);
		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST);
		/* wait for 100 ms */
		mdelay(100);
		regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) &
			SCRATCH_PAD2_FWRDY_RST;
		if (regVal != SCRATCH_PAD2_FWRDY_RST) {
			/* dump all four scratch pads for diagnostics */
			regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
			regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
			pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT:MSGU_SCRATCH_PAD1=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
				   regVal1, regVal2);
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD0 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0));
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD3 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3));
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			return -1;
		}
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	}
	return 0;
}

/**
 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
 * the FW register status to the originated status.
 * @pm8001_ha: our hba card information
 */
static int
pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
{
	u32 regVal, toggleVal;
	u32 max_wait_count;
	u32 regVal1, regVal2, regVal3;
	u32 signature = 0x252acbcd; /* for host scratch pad0 */
	unsigned long flags;

	/* step1: Check FW is ready for soft reset */
	if (soft_reset_ready_check(pm8001_ha) != 0) {
		pm8001_dbg(pm8001_ha, FAIL, "FW is not ready\n");
		return -1;
	}

	/* step 2: clear NMI status register on AAP1 and IOP, write the same
	value to clear */
	/* map 0x60000 to BAR4(0x20), BAR2(win) */
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
			   MBIC_AAP1_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
	pm8001_dbg(pm8001_ha, INIT, "MBIC - NMI Enable VPE0 (IOP)= 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
	/* map 0x70000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
			   MBIC_IOP_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
	pm8001_dbg(pm8001_ha, INIT,
		   "MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);

	/* log then mask/acknowledge the PCIe event and error interrupts so
	 * the reset starts from a clean interrupt state
	 */
	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
	pm8001_dbg(pm8001_ha, INIT, "PCIE -Event Interrupt Enable = 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
	pm8001_dbg(pm8001_ha, INIT, "PCIE - Event Interrupt = 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
	pm8001_dbg(pm8001_ha, INIT, "PCIE -Error Interrupt Enable = 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
	pm8001_dbg(pm8001_ha, INIT, "PCIE - Error Interrupt = 0x%x\n", regVal);
	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);

	/* read the scratch pad 1 register bit 2 */
	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
		& SCRATCH_PAD1_RST;
	/* the bit is expected to have flipped once the reset completes */
	toggleVal = regVal ^ SCRATCH_PAD1_RST;

	/* set signature in host scratch pad0 register to tell SPC that the
	 * host performs the soft reset
	 */
	pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature);

	/* read required registers for confirming */
	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
			   GSM_ADDR_BASE);
		return -1;
	}
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x0(0x00007b88)-GSM Configuration and Reset = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));

	/* step 3: host read GSM Configuration and Reset register */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
	/* Put those bits to low */
	/* GSM XCBI offset = 0x70 0000
	0x00 Bit 13 COM_SLV_SW_RSTB 1
	0x00 Bit 12 QSSP_SW_RSTB 1
	0x00 Bit 11 RAAE_SW_RSTB 1
	0x00 Bit 9 RB_1_SW_RSTB 1
	0x00 Bit 8 SM_SW_RSTB 1
	*/
	regVal &= ~(0x00003b00);
	/* host write GSM Configuration and Reset register */
	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM Configuration and Reset is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));

	/* step 4: */
	/* disable GSM - Read Address Parity Check */
	regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n",
		   regVal1);
	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK));

	/* disable GSM - Write Address Parity Check */
	regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700040 - Write Address Parity Check Enable = 0x%x\n",
		   regVal2);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK));

	/* disable GSM - Write Data Parity Check */
	regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
	pm8001_dbg(pm8001_ha, INIT, "GSM 0x300048 - Write Data Parity Check Enable = 0x%x\n",
		   regVal3);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x300048 - Write Data Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK));

	/* step 5: delay 10 usec */
	udelay(10);
	/* step 5-b: set GPIO-0 output control to tristate anyway */
	if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, INIT, "Shift Bar4 to 0x%x failed\n",
			   GPIO_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
	pm8001_dbg(pm8001_ha, INIT, "GPIO Output Control Register: = 0x%x\n",
		   regVal);
	/* set GPIO-0 output control to tri-state */
	regVal &= 0xFFFFFFFC;
	pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);

	/* Step 6: Reset the IOP and AAP1 */
	/* map 0x00000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n",
			   SPC_TOP_LEVEL_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting IOP/AAP1:= 0x%x\n",
		   regVal);
	regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 7: Reset the BDMA/OSSP */
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	pm8001_dbg(pm8001_ha, INIT, "Top Register before resetting BDMA/OSSP: = 0x%x\n",
		   regVal);
	regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 8: delay 10 usec */
	udelay(10);

	/* step 9: bring the BDMA and OSSP out of reset */
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	pm8001_dbg(pm8001_ha, INIT,
		   "Top Register before bringing up BDMA/OSSP:= 0x%x\n",
		   regVal);
	regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 10: delay 10 usec */
	udelay(10);

	/* step 11: reads and sets the GSM Configuration and Reset Register */
	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "SPC Shift Bar4 to 0x%x failed\n",
			   GSM_ADDR_BASE);
		return -1;
	}
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x0 (0x00007b88)-GSM Configuration and Reset = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
	/* Put those bits to high */
	/* GSM XCBI offset = 0x70 0000
	0x00 Bit 13 COM_SLV_SW_RSTB 1
	0x00 Bit 12 QSSP_SW_RSTB 1
	0x00 Bit 11 RAAE_SW_RSTB 1
	0x00 Bit 9 RB_1_SW_RSTB 1
	0x00 Bit 8 SM_SW_RSTB 1
	*/
	regVal |= (GSM_CONFIG_RESET_VALUE);
	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
	pm8001_dbg(pm8001_ha, INIT, "GSM (0x00004088 ==> 0x00007b88) - GSM Configuration and Reset is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET));

	/* step 12: Restore GSM - Read Address Parity Check */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
	/* just for debugging */
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700038 - Read Address Parity Check Enable = 0x%x\n",
		   regVal);
	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
	pm8001_dbg(pm8001_ha, INIT, "GSM 0x700038 - Read Address Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK));
	/* Restore GSM - Write Address Parity Check */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700040 - Write Address Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK));
	/* Restore GSM - Write Data Parity Check */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
	pm8001_dbg(pm8001_ha, INIT,
		   "GSM 0x700048 - Write Data Parity Check Enable is set to = 0x%x\n",
		   pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK));

	/* step 13: bring the IOP and AAP1 out of reset */
	/* map 0x00000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "Shift Bar4 to 0x%x failed\n",
			   SPC_TOP_LEVEL_ADDR_BASE);
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 14: delay 10 usec - Normal Mode */
	udelay(10);
	/* check Soft Reset Normal mode or Soft Reset HDA mode */
	if (signature == SPC_SOFT_RESET_SIGNATURE) {
		/* step 15 (Normal Mode): wait until scratch pad1 register
		 * bit 2 toggled
		 */
		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
		do {
			udelay(1);
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
				SCRATCH_PAD1_RST;
		} while ((regVal != toggleVal) && (--max_wait_count));

		if (!max_wait_count) {
			regVal = pm8001_cr32(pm8001_ha, 0,
					     MSGU_SCRATCH_PAD_1);
			pm8001_dbg(pm8001_ha, FAIL, "TIMEOUT : ToggleVal 0x%x,MSGU_SCRATCH_PAD1 = 0x%x\n",
				   toggleVal, regVal);
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD0 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_0));
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD2 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_2));
			pm8001_dbg(pm8001_ha, FAIL,
				   "SCRATCH_PAD3 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_3));
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			return -1;
		}

		/* step 16 (Normal) - Clear ODMR and ODCR */
		pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);

		/* step 17 (Normal Mode): wait for the FW and IOP to get
		 * ready - 1 sec timeout
		 */
		/* Wait for the SPC Configuration Table to be ready */
		if (check_fw_ready(pm8001_ha) == -1) {
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
			/* return error if MPI Configuration Table not ready */
			pm8001_dbg(pm8001_ha, INIT,
				   "FW not ready SCRATCH_PAD1 = 0x%x\n",
				   regVal);
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
			/* return error if MPI Configuration Table not ready */
			pm8001_dbg(pm8001_ha, INIT,
				   "FW not ready SCRATCH_PAD2 = 0x%x\n",
				   regVal);
			pm8001_dbg(pm8001_ha, INIT,
				   "SCRATCH_PAD0 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_0));
			pm8001_dbg(pm8001_ha, INIT,
				   "SCRATCH_PAD3 value = 0x%x\n",
				   pm8001_cr32(pm8001_ha, 0,
					       MSGU_SCRATCH_PAD_3));
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			return -1;
		}
	}
	/* restore the default BAR4 window before releasing the lock */
	pm8001_bar4_shift(pm8001_ha, 0);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);

	pm8001_dbg(pm8001_ha, INIT, "SPC soft reset Complete\n");
	return 0;
}

/**
 * pm8001_hw_chip_rst - hard reset the SPC chip via the top-level reset
 * register and wait ~20 ms for the firmware to reload.
 * @pm8001_ha: our hba card information
 */
static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
{
	u32 i;
	u32 regVal;
	pm8001_dbg(pm8001_ha, INIT, "chip reset start\n");

	/* do SPC chip reset.
*/ 1130 regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); 1131 regVal &= ~(SPC_REG_RESET_DEVICE); 1132 pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); 1133 1134 /* delay 10 usec */ 1135 udelay(10); 1136 1137 /* bring chip reset out of reset */ 1138 regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET); 1139 regVal |= SPC_REG_RESET_DEVICE; 1140 pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal); 1141 1142 /* delay 10 usec */ 1143 udelay(10); 1144 1145 /* wait for 20 msec until the firmware gets reloaded */ 1146 i = 20; 1147 do { 1148 mdelay(1); 1149 } while ((--i) != 0); 1150 1151 pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n"); 1152 } 1153 1154 /** 1155 * pm8001_chip_iounmap - which mapped when initialized. 1156 * @pm8001_ha: our hba card information 1157 */ 1158 void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) 1159 { 1160 s8 bar, logical = 0; 1161 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { 1162 /* 1163 ** logical BARs for SPC: 1164 ** bar 0 and 1 - logical BAR0 1165 ** bar 2 and 3 - logical BAR1 1166 ** bar4 - logical BAR2 1167 ** bar5 - logical BAR3 1168 ** Skip the appropriate assignments: 1169 */ 1170 if ((bar == 1) || (bar == 3)) 1171 continue; 1172 if (pm8001_ha->io_mem[logical].memvirtaddr) { 1173 iounmap(pm8001_ha->io_mem[logical].memvirtaddr); 1174 logical++; 1175 } 1176 } 1177 } 1178 1179 #ifndef PM8001_USE_MSIX 1180 /** 1181 * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt 1182 * @pm8001_ha: our hba card information 1183 */ 1184 static void 1185 pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) 1186 { 1187 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); 1188 pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); 1189 } 1190 1191 /** 1192 * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt 1193 * @pm8001_ha: our hba card information 1194 */ 1195 static void 1196 pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) 1197 { 1198 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 
ODMR_MASK_ALL); 1199 } 1200 1201 #else 1202 1203 /** 1204 * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt 1205 * @pm8001_ha: our hba card information 1206 * @int_vec_idx: interrupt number to enable 1207 */ 1208 static void 1209 pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha, 1210 u32 int_vec_idx) 1211 { 1212 u32 msi_index; 1213 u32 value; 1214 msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; 1215 msi_index += MSIX_TABLE_BASE; 1216 pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE); 1217 value = (1 << int_vec_idx); 1218 pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value); 1219 1220 } 1221 1222 /** 1223 * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt 1224 * @pm8001_ha: our hba card information 1225 * @int_vec_idx: interrupt number to disable 1226 */ 1227 static void 1228 pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, 1229 u32 int_vec_idx) 1230 { 1231 u32 msi_index; 1232 msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE; 1233 msi_index += MSIX_TABLE_BASE; 1234 pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE); 1235 } 1236 #endif 1237 1238 /** 1239 * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt 1240 * @pm8001_ha: our hba card information 1241 * @vec: unused 1242 */ 1243 static void 1244 pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) 1245 { 1246 #ifdef PM8001_USE_MSIX 1247 pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); 1248 #else 1249 pm8001_chip_intx_interrupt_enable(pm8001_ha); 1250 #endif 1251 } 1252 1253 /** 1254 * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt 1255 * @pm8001_ha: our hba card information 1256 * @vec: unused 1257 */ 1258 static void 1259 pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) 1260 { 1261 #ifdef PM8001_USE_MSIX 1262 pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); 1263 #else 1264 pm8001_chip_intx_interrupt_disable(pm8001_ha); 1265 #endif 1266 } 1267 1268 /** 1269 * 
pm8001_mpi_msg_free_get - get the free message buffer for transfer 1270 * inbound queue. 1271 * @circularQ: the inbound queue we want to transfer to HBA. 1272 * @messageSize: the message size of this transfer, normally it is 64 bytes 1273 * @messagePtr: the pointer to message. 1274 */ 1275 int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, 1276 u16 messageSize, void **messagePtr) 1277 { 1278 u32 offset, consumer_index; 1279 struct mpi_msg_hdr *msgHeader; 1280 u8 bcCount = 1; /* only support single buffer */ 1281 1282 /* Checks is the requested message size can be allocated in this queue*/ 1283 if (messageSize > IOMB_SIZE_SPCV) { 1284 *messagePtr = NULL; 1285 return -1; 1286 } 1287 1288 /* Stores the new consumer index */ 1289 consumer_index = pm8001_read_32(circularQ->ci_virt); 1290 circularQ->consumer_index = cpu_to_le32(consumer_index); 1291 if (((circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE) == 1292 le32_to_cpu(circularQ->consumer_index)) { 1293 *messagePtr = NULL; 1294 return -1; 1295 } 1296 /* get memory IOMB buffer address */ 1297 offset = circularQ->producer_idx * messageSize; 1298 /* increment to next bcCount element */ 1299 circularQ->producer_idx = (circularQ->producer_idx + bcCount) 1300 % PM8001_MPI_QUEUE; 1301 /* Adds that distance to the base of the region virtual address plus 1302 the message header size*/ 1303 msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset); 1304 *messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr); 1305 return 0; 1306 } 1307 1308 /** 1309 * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to 1310 * FW to tell the fw to get this message from IOMB. 1311 * @pm8001_ha: our hba card information 1312 * @circularQ: the inbound queue we want to transfer to HBA. 1313 * @opCode: the operation code represents commands which LLDD and fw recognized. 1314 * @payload: the command payload of each operation command. 
 * @nb: size in bytes of the command payload
 * @responseQueue: queue to interrupt on w/ command response (if any)
 *
 * Return: 0 on success, -EINVAL for a bad queue index, -ENOMEM when no free
 * IOMB slot is available.
 */
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
			 struct inbound_queue_table *circularQ,
			 u32 opCode, void *payload, size_t nb,
			 u32 responseQueue)
{
	u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
	void *pMessage;
	unsigned long flags;
	int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
	int rv;
	/* the first dword of every payload is the host tag */
	u32 htag = le32_to_cpu(*(__le32 *)payload);

	trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index,
		circularQ->producer_idx, le32_to_cpu(circularQ->consumer_index));

	if (WARN_ON(q_index >= pm8001_ha->max_q_num))
		return -EINVAL;

	spin_lock_irqsave(&circularQ->iq_lock, flags);
	rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
				     &pMessage);
	if (rv < 0) {
		pm8001_dbg(pm8001_ha, IO, "No free mpi buffer\n");
		rv = -ENOMEM;
		goto done;
	}

	/* clamp the payload to the IOMB body and zero the remainder */
	if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr)))
		nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr);
	memcpy(pMessage, payload, nb);
	if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size)
		memset(pMessage + nb, 0, pm8001_ha->iomb_size -
		       (nb + sizeof(struct mpi_msg_hdr)));

	/*Build the header*/
	Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
		| ((responseQueue & 0x3F) << 16)
		| ((category & 0xF) << 12) | (opCode & 0xFFF));

	/* the header lives in the 4 bytes just before the message body */
	pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header));
	/*Update the PI to the firmware*/
	pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
		    circularQ->pi_offset, circularQ->producer_idx);
	pm8001_dbg(pm8001_ha, DEVIO,
		   "INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
		   responseQueue, opCode, circularQ->producer_idx,
		   circularQ->consumer_index);
done:
	spin_unlock_irqrestore(&circularQ->iq_lock, flags);
	return rv;
}

/**
 * pm8001_mpi_msg_free_set - free an outbound queue entry once consumed and
 * hand the updated consumer index back to the SPC.
 * @pm8001_ha: our hba card information
 * @pMsg: message body previously returned by pm8001_mpi_msg_consume()
 * @circularQ: the outbound queue table
 * @bc: buffer count occupied by the message
 *
 * Return: always 0 (a mismatched header is only logged).
 */
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
			    struct outbound_queue_table *circularQ, u8 bc)
{
	u32 producer_index;
	struct mpi_msg_hdr *msgHeader;
	struct mpi_msg_hdr *pOutBoundMsgHeader;

	msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
	pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
				circularQ->consumer_idx * pm8001_ha->iomb_size);
	if (pOutBoundMsgHeader != msgHeader) {
		/* message is not the current consumer slot - log and bail */
		pm8001_dbg(pm8001_ha, FAIL,
			   "consumer_idx = %d msgHeader = %p\n",
			   circularQ->consumer_idx, msgHeader);

		/* Update the producer index from SPC */
		producer_index = pm8001_read_32(circularQ->pi_virt);
		circularQ->producer_index = cpu_to_le32(producer_index);
		pm8001_dbg(pm8001_ha, FAIL,
			   "consumer_idx = %d producer_index = %dmsgHeader = %p\n",
			   circularQ->consumer_idx,
			   circularQ->producer_index, msgHeader);
		return 0;
	}
	/* free the circular queue buffer elements associated with the message*/
	circularQ->consumer_idx = (circularQ->consumer_idx + bc)
				  % PM8001_MPI_QUEUE;
	/* update the CI of outbound queue */
	pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
		    circularQ->consumer_idx);
	/* Update the producer index from SPC*/
	producer_index = pm8001_read_32(circularQ->pi_virt);
	circularQ->producer_index = cpu_to_le32(producer_index);
	pm8001_dbg(pm8001_ha, IO, " CI=%d PI=%d\n",
		   circularQ->consumer_idx, circularQ->producer_index);
	return 0;
}

/**
 * pm8001_mpi_msg_consume- get the MPI message from outbound queue
 * message table.
 * @pm8001_ha: our hba card information
 * @circularQ: the outbound queue table.
 * @messagePtr1: the message contents of this outbound message.
 * @pBC: the message size.
 */
u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
			   struct outbound_queue_table *circularQ,
			   void **messagePtr1, u8 *pBC)
{
	struct mpi_msg_hdr *msgHeader;
	__le32 msgHeader_tmp;
	u32 header_tmp;
	do {
		/* If there are not-yet-delivered messages ... */
		if (le32_to_cpu(circularQ->producer_index)
			!= circularQ->consumer_idx) {
			/*Get the pointer to the circular queue buffer element*/
			msgHeader = (struct mpi_msg_hdr *)
				(circularQ->base_virt +
				circularQ->consumer_idx * pm8001_ha->iomb_size);
			/* read header */
			header_tmp = pm8001_read_32(msgHeader);
			msgHeader_tmp = cpu_to_le32(header_tmp);
			pm8001_dbg(pm8001_ha, DEVIO,
				   "outbound opcode msgheader:%x ci=%d pi=%d\n",
				   msgHeader_tmp, circularQ->consumer_idx,
				   circularQ->producer_index);
			/* bit 31 set means the entry is valid */
			if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
				if (OPC_OUB_SKIP_ENTRY !=
				    (le32_to_cpu(msgHeader_tmp) & 0xfff)) {
					/* real message - hand body and buffer
					 * count back to the caller; the slot is
					 * freed later via msg_free_set()
					 */
					*messagePtr1 =
						((u8 *)msgHeader) +
						sizeof(struct mpi_msg_hdr);
					*pBC = (u8)((le32_to_cpu(msgHeader_tmp)
						     >> 24) & 0x1f);
					pm8001_dbg(pm8001_ha, IO,
						   ": CI=%d PI=%d msgHeader=%x\n",
						   circularQ->consumer_idx,
						   circularQ->producer_index,
						   msgHeader_tmp);
					return MPI_IO_STATUS_SUCCESS;
				} else {
					/* SKIP entry - consume it silently and
					 * advance by its buffer count
					 */
					circularQ->consumer_idx =
						(circularQ->consumer_idx +
						((le32_to_cpu(msgHeader_tmp)
						  >> 24) & 0x1f))
						% PM8001_MPI_QUEUE;
					msgHeader_tmp = 0;
					pm8001_write_32(msgHeader, 0, 0);
					/* update the CI of outbound queue */
					pm8001_cw32(pm8001_ha,
						    circularQ->ci_pci_bar,
						    circularQ->ci_offset,
						    circularQ->consumer_idx);
				}
			} else {
				/* invalid entry - consume and report failure */
				circularQ->consumer_idx =
					(circularQ->consumer_idx +
					((le32_to_cpu(msgHeader_tmp) >> 24) &
					 0x1f)) % PM8001_MPI_QUEUE;
				msgHeader_tmp = 0;
				pm8001_write_32(msgHeader, 0, 0);
				/* update the CI of outbound queue */
				pm8001_cw32(pm8001_ha,
					    circularQ->ci_pci_bar,
					    circularQ->ci_offset,
					    circularQ->consumer_idx);
				return MPI_IO_STATUS_FAIL;
			}
		} else {
			u32 producer_index;
			void *pi_virt = circularQ->pi_virt;
			/* spurious interrupt during setup if
			 * kexec-ing and driver doing a doorbell access
			 * with the pre-kexec oq interrupt setup
			 */
			if (!pi_virt)
				break;
			/* Update the producer index from SPC */
			producer_index = pm8001_read_32(pi_virt);
			circularQ->producer_index = cpu_to_le32(producer_index);
		}
	} while (le32_to_cpu(circularQ->producer_index) !=
		 circularQ->consumer_idx);
	/* while we don't have any more not-yet-delivered message */
	/* report empty */
	return MPI_IO_STATUS_BUSY;
}

/**
 * pm8001_work_fn - deferred-work dispatcher for events queued by
 * pm8001_handle_event().
 * @work: embedded work_struct inside a struct pm8001_work
 */
void pm8001_work_fn(struct work_struct *work)
{
	struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
	struct pm8001_device *pm8001_dev;
	struct domain_device *dev;

	/*
	 * So far, all users of this stash an associated structure here.
	 * If we get here, and this pointer is null, then the action
	 * was cancelled. This nullification happens when the device
	 * goes away.
	 */
	if (pw->handler != IO_FATAL_ERROR) {
		pm8001_dev = pw->data; /* Most stash device structure */
		if ((pm8001_dev == NULL)
		    || ((pw->handler != IO_XFER_ERROR_BREAK)
			&& (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
			kfree(pw);
			return;
		}
	}

	switch (pw->handler) {
	case IO_XFER_ERROR_BREAK:
	{	/* This one stashes the sas_task instead */
		struct sas_task *t = (struct sas_task *)pm8001_dev;
		u32 tag;
		struct pm8001_ccb_info *ccb;
		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
		unsigned long flags, flags1;
		struct task_status_struct *ts;
		int i;

		if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC)
			break; /* Task still on lu */
		spin_lock_irqsave(&pm8001_ha->lock, flags);

		spin_lock_irqsave(&t->task_state_lock, flags1);
		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			break; /* Task got completed by another */
		}
		spin_unlock_irqrestore(&t->task_state_lock, flags1);

		/* Search for a possible ccb that matches the task.
		 * The comma expression resets ccb to NULL before each
		 * condition check, so ccb is NULL when the loop runs out
		 * without a break (i.e. no match found).
		 */
		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
			ccb = &pm8001_ha->ccb_info[i];
			tag = ccb->ccb_tag;
			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
				break;
		}
		if (!ccb) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			break; /* Task got freed by another */
		}
		ts = &t->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_QUEUE_FULL;
		pm8001_dev = ccb->device;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&t->task_state_lock, flags1);
		t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
		t->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
				   t, pw->handler, ts->resp, ts->stat);
			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		} else {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			t->task_done(t);
		}
	}	break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
	{	/* This one stashes the sas_task instead */
		struct sas_task *t = (struct sas_task *)pm8001_dev;
		u32 tag;
		struct pm8001_ccb_info *ccb;
		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
		unsigned long flags, flags1;
		int i, ret = 0;

		pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");

		ret = pm8001_query_task(t);

		if (ret == TMF_RESP_FUNC_SUCC)
			pm8001_dbg(pm8001_ha, IO, "...Task on lu\n");
		else if (ret == TMF_RESP_FUNC_COMPLETE)
			pm8001_dbg(pm8001_ha, IO, "...Task NOT on lu\n");
		else
			pm8001_dbg(pm8001_ha, DEVIO, "...query task failed!!!\n");

		spin_lock_irqsave(&pm8001_ha->lock, flags);

		spin_lock_irqsave(&t->task_state_lock, flags1);

		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
				(void)pm8001_abort_task(t);
			break; /* Task got completed by another */
		}

		spin_unlock_irqrestore(&t->task_state_lock, flags1);

		/* Search for a possible ccb that matches the task
		 * (same ccb = NULL comma idiom as above)
		 */
		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
			ccb = &pm8001_ha->ccb_info[i];
			tag = ccb->ccb_tag;
			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
				break;
		}
		if (!ccb) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
				(void)pm8001_abort_task(t);
			break; /* Task got freed by another */
		}

		pm8001_dev = ccb->device;
		dev = pm8001_dev->sas_device;

		switch (ret) {
		case TMF_RESP_FUNC_SUCC: /* task on lu */
			ccb->open_retry = 1; /* Snub completion */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			ret = pm8001_abort_task(t);
			ccb->open_retry = 0;
			switch (ret) {
			case TMF_RESP_FUNC_SUCC:
			case TMF_RESP_FUNC_COMPLETE:
				break;
			default: /* device misbehavior */
				ret = TMF_RESP_FUNC_FAILED;
				pm8001_dbg(pm8001_ha, IO, "...Reset phy\n");
				pm8001_I_T_nexus_reset(dev);
				break;
			}
			break;

		case TMF_RESP_FUNC_COMPLETE: /* task not on lu */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			/* Do we need to abort the task locally? */
			break;

		default: /* device misbehavior */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			ret = TMF_RESP_FUNC_FAILED;
			pm8001_dbg(pm8001_ha, IO, "...Reset phy\n");
			pm8001_I_T_nexus_reset(dev);
		}

		if (ret == TMF_RESP_FUNC_FAILED)
			t = NULL;
		pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
		pm8001_dbg(pm8001_ha, IO, "...Complete\n");
	}	break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_event_handler(dev);
		break;
	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
	case IO_DS_IN_ERROR:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
	case IO_DS_NON_OPERATIONAL:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
	case IO_FATAL_ERROR:
	{
		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
		struct pm8001_ccb_info *ccb;
		struct task_status_struct *ts;
		struct sas_task *task;
		int i;
		u32 tag, device_id;

		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
			ccb = &pm8001_ha->ccb_info[i];
			task = ccb->task;
			/* NOTE(review): ts is derived from task before the
			 * task != NULL test below; only a member offset is
			 * computed here (ts is used solely in the non-NULL
			 * branch), but moving this assignment after the check
			 * would be cleaner.
			 */
			ts = &task->task_status;
			tag = ccb->ccb_tag;
			/* check if tag is NULL */
			if (!tag) {
				pm8001_dbg(pm8001_ha, FAIL,
					   "tag Null\n");
				continue;
			}
			if (task != NULL) {
				dev = task->dev;
				if (!dev) {
					pm8001_dbg(pm8001_ha, FAIL,
						   "dev is NULL\n");
					continue;
				}
				/*complete sas task and update to top layer */
				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
				ts->resp = SAS_TASK_COMPLETE;
				task->task_done(task);
			} else if (tag != 0xFFFFFFFF) {
				/* complete the internal commands/non-sas task */
				pm8001_dev = ccb->device;
				if (pm8001_dev->dcompletion) {
					complete(pm8001_dev->dcompletion);
					pm8001_dev->dcompletion = NULL;
				}
complete(pm8001_ha->nvmd_completion); 1721 pm8001_tag_free(pm8001_ha, tag); 1722 } 1723 } 1724 /* Deregister all the device ids */ 1725 for (i = 0; i < PM8001_MAX_DEVICES; i++) { 1726 pm8001_dev = &pm8001_ha->devices[i]; 1727 device_id = pm8001_dev->device_id; 1728 if (device_id) { 1729 PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); 1730 pm8001_free_dev(pm8001_dev); 1731 } 1732 } 1733 } break; 1734 } 1735 kfree(pw); 1736 } 1737 1738 int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, 1739 int handler) 1740 { 1741 struct pm8001_work *pw; 1742 int ret = 0; 1743 1744 pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC); 1745 if (pw) { 1746 pw->pm8001_ha = pm8001_ha; 1747 pw->data = data; 1748 pw->handler = handler; 1749 INIT_WORK(&pw->work, pm8001_work_fn); 1750 queue_work(pm8001_wq, &pw->work); 1751 } else 1752 ret = -ENOMEM; 1753 1754 return ret; 1755 } 1756 1757 static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, 1758 struct pm8001_device *pm8001_ha_dev) 1759 { 1760 int res; 1761 u32 ccb_tag; 1762 struct pm8001_ccb_info *ccb; 1763 struct sas_task *task = NULL; 1764 struct task_abort_req task_abort; 1765 struct inbound_queue_table *circularQ; 1766 u32 opc = OPC_INB_SATA_ABORT; 1767 int ret; 1768 1769 if (!pm8001_ha_dev) { 1770 pm8001_dbg(pm8001_ha, FAIL, "dev is null\n"); 1771 return; 1772 } 1773 1774 task = sas_alloc_slow_task(GFP_ATOMIC); 1775 1776 if (!task) { 1777 pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n"); 1778 return; 1779 } 1780 1781 task->task_done = pm8001_task_done; 1782 1783 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); 1784 if (res) 1785 return; 1786 1787 ccb = &pm8001_ha->ccb_info[ccb_tag]; 1788 ccb->device = pm8001_ha_dev; 1789 ccb->ccb_tag = ccb_tag; 1790 ccb->task = task; 1791 1792 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 1793 1794 memset(&task_abort, 0, sizeof(task_abort)); 1795 task_abort.abort_all = cpu_to_le32(1); 1796 task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); 1797 
task_abort.tag = cpu_to_le32(ccb_tag); 1798 1799 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 1800 sizeof(task_abort), 0); 1801 if (ret) 1802 pm8001_tag_free(pm8001_ha, ccb_tag); 1803 1804 } 1805 1806 static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, 1807 struct pm8001_device *pm8001_ha_dev) 1808 { 1809 struct sata_start_req sata_cmd; 1810 int res; 1811 u32 ccb_tag; 1812 struct pm8001_ccb_info *ccb; 1813 struct sas_task *task = NULL; 1814 struct host_to_dev_fis fis; 1815 struct domain_device *dev; 1816 struct inbound_queue_table *circularQ; 1817 u32 opc = OPC_INB_SATA_HOST_OPSTART; 1818 1819 task = sas_alloc_slow_task(GFP_ATOMIC); 1820 1821 if (!task) { 1822 pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n"); 1823 return; 1824 } 1825 task->task_done = pm8001_task_done; 1826 1827 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); 1828 if (res) { 1829 sas_free_task(task); 1830 pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n"); 1831 return; 1832 } 1833 1834 /* allocate domain device by ourselves as libsas 1835 * is not going to provide any 1836 */ 1837 dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); 1838 if (!dev) { 1839 sas_free_task(task); 1840 pm8001_tag_free(pm8001_ha, ccb_tag); 1841 pm8001_dbg(pm8001_ha, FAIL, 1842 "Domain device cannot be allocated\n"); 1843 return; 1844 } 1845 task->dev = dev; 1846 task->dev->lldd_dev = pm8001_ha_dev; 1847 1848 ccb = &pm8001_ha->ccb_info[ccb_tag]; 1849 ccb->device = pm8001_ha_dev; 1850 ccb->ccb_tag = ccb_tag; 1851 ccb->task = task; 1852 pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; 1853 pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; 1854 1855 memset(&sata_cmd, 0, sizeof(sata_cmd)); 1856 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 1857 1858 /* construct read log FIS */ 1859 memset(&fis, 0, sizeof(struct host_to_dev_fis)); 1860 fis.fis_type = 0x27; 1861 fis.flags = 0x80; 1862 fis.command = ATA_CMD_READ_LOG_EXT; 1863 fis.lbal = 0x10; 1864 fis.sector_count = 0x1; 1865 1866 sata_cmd.tag = 
cpu_to_le32(ccb_tag); 1867 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); 1868 sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); 1869 memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); 1870 1871 res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 1872 sizeof(sata_cmd), 0); 1873 if (res) { 1874 sas_free_task(task); 1875 pm8001_tag_free(pm8001_ha, ccb_tag); 1876 kfree(dev); 1877 } 1878 } 1879 1880 /** 1881 * mpi_ssp_completion- process the event that FW response to the SSP request. 1882 * @pm8001_ha: our hba card information 1883 * @piomb: the message contents of this outbound message. 1884 * 1885 * When FW has completed a ssp request for example a IO request, after it has 1886 * filled the SG data with the data, it will trigger this event representing 1887 * that he has finished the job; please check the corresponding buffer. 1888 * So we will tell the caller who maybe waiting the result to tell upper layer 1889 * that the task has been finished. 
 */
static void
mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct sas_task *t;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 status;
	u32 param;
	u32 tag;
	struct ssp_completion_resp *psspPayload;
	struct task_status_struct *ts;
	struct ssp_response_iu *iu;
	struct pm8001_device *pm8001_dev;
	/* IOMB payload starts 4 bytes into the outbound message */
	psspPayload = (struct ssp_completion_resp *)(piomb + 4);
	status = le32_to_cpu(psspPayload->status);
	tag = le32_to_cpu(psspPayload->tag);
	ccb = &pm8001_ha->ccb_info[tag];
	if ((status == IO_ABORTED) && ccb->open_retry) {
		/* Being completed by another */
		ccb->open_retry = 0;
		return;
	}
	pm8001_dev = ccb->device;
	param = le32_to_cpu(psspPayload->param);

	t = ccb->task;

	if (status && status != IO_UNDERFLOW)
		pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", status);
	if (unlikely(!t || !t->lldd_task || !t->dev))
		return;
	ts = &t->task_status;
	/* Print sas address of IO failed device */
	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
		(status != IO_UNDERFLOW))
		pm8001_dbg(pm8001_ha, FAIL, "SAS Address of IO Failure Drive:%016llx\n",
			   SAS_ADDR(t->dev->sas_addr));

	if (status)
		pm8001_dbg(pm8001_ha, IOERR,
			   "status:0x%x, tag:0x%x, task:0x%p\n",
			   status, tag, t);

	/* Map firmware completion status to libsas task status */
	switch (status) {
	case IO_SUCCESS:
		pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS,param = %d\n",
			   param);
		if (param == 0) {
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_SAM_STAT_GOOD;
		} else {
			/* non-zero param: a response IU is present */
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PROTO_RESPONSE;
			ts->residual = param;
			iu = &psspPayload->ssp_resp_iu;
			sas_ssp_task_response(pm8001_ha->dev, t, iu);
		}
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_ABORTED:
		pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		break;
	case IO_UNDERFLOW:
		/* SSP Completion with error */
		pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW,param = %d\n",
			   param);
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		ts->residual = param;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_NO_DEVICE:
		pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		break;
	case IO_XFER_ERROR_BREAK:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		/* Force the midlayer to retry */
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_PHY_NOT_READY:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_EPROTO;
		break;
	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case IO_OPEN_CNX_ERROR_BREAK:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		/* internal command: escalate nexus loss to the event worker */
		if (!t->uldd_task)
			pm8001_handle_event(pm8001_ha,
					    pm8001_dev,
					    IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
		break;
	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
		break;
	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
		break;
	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
		break;
	case IO_XFER_ERROR_NAK_RECEIVED:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_ERROR_DMA:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_OFFSET_MISMATCH:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_PORT_IN_RESET:
		pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_DS_NON_OPERATIONAL:
		pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		/* internal command: trigger device recovery via event worker */
		if (!t->uldd_task)
			pm8001_handle_event(pm8001_ha,
					    pm8001_dev,
					    IO_DS_NON_OPERATIONAL);
		break;
	case IO_DS_IN_RECOVERY:
		pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_TM_TAG_NOT_FOUND:
		pm8001_dbg(pm8001_ha, IO, "IO_TM_TAG_NOT_FOUND\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
		pm8001_dbg(pm8001_ha, IO, "IO_SSP_EXT_IU_ZERO_LEN_ERROR\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
		/* not allowed case. Therefore, return failed status */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	}
	pm8001_dbg(pm8001_ha, IO, "scsi_status = %x\n",
		   psspPayload->ssp_resp_iu.status);
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		/* upper layer already gave up on this task: free only */
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
			   t, status, ts->resp, ts->stat);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/* in order to force CPU ordering */
		t->task_done(t);
	}
}

/*See the comments for mpi_ssp_completion */
static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct sas_task *t;
	unsigned long flags;
	struct task_status_struct *ts;
	struct pm8001_ccb_info *ccb;
	struct pm8001_device *pm8001_dev;
	struct ssp_event_resp *psspPayload =
		(struct ssp_event_resp *)(piomb + 4);
	u32 event = le32_to_cpu(psspPayload->event);
	u32 tag = le32_to_cpu(psspPayload->tag);
	u32 port_id = le32_to_cpu(psspPayload->port_id);
	u32 dev_id = le32_to_cpu(psspPayload->device_id);

	/* NOTE(review): tag comes from firmware and indexes ccb_info
	 * without a range check — confirm firmware guarantees validity. */
	ccb = &pm8001_ha->ccb_info[tag];
	t = ccb->task;
	pm8001_dev = ccb->device;
	if (event)
		pm8001_dbg(pm8001_ha, FAIL, "sas IO status 0x%x\n", event);
	if (unlikely(!t || !t->lldd_task || !t->dev))
		return;
	ts = &t->task_status;
	pm8001_dbg(pm8001_ha, DEVIO, "port_id = %x,device_id = %x\n",
		   port_id, dev_id);
	switch (event) {
	case IO_OVERFLOW:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_UNDERFLOW\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		ts->residual = 0;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_ERROR_BREAK:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
		/* deferred to the event worker; task is completed there */
		pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
		return;
	case IO_XFER_ERROR_PHY_NOT_READY:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_EPROTO;
		break;
	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case IO_OPEN_CNX_ERROR_BREAK:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		/* internal command: escalate nexus loss to the event worker */
		if (!t->uldd_task)
			pm8001_handle_event(pm8001_ha,
					    pm8001_dev,
					    IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
		break;
	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
		break;
	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
		break;
	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
		break;
	case IO_XFER_ERROR_NAK_RECEIVED:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
		/* deferred to the event worker; task is completed there */
		pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
		return;
	case IO_XFER_ERROR_UNEXPECTED_PHASE:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		break;
	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		break;
	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		break;
	case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		break;
	case IO_XFER_ERROR_OFFSET_MISMATCH:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		break;
	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		break;
	case IO_XFER_CMD_FRAME_ISSUED:
		/* informational only; the IO is still in flight */
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
		return;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
		/* not allowed case. Therefore, return failed status */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		break;
	}
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		/* upper layer already gave up on this task: free only */
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
			   t, event, ts->resp, ts->stat);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/* in order to force CPU ordering */
		t->task_done(t);
	}
}

/*See the comments for mpi_ssp_completion */
static void
mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct sas_task *t;
	struct pm8001_ccb_info *ccb;
	u32 param;
	u32 status;
	u32 tag;
	int i, j;
	u8 sata_addr_low[4];
	u32 temp_sata_addr_low;
	u8 sata_addr_hi[4];
	u32 temp_sata_addr_hi;
	struct sata_completion_resp *psataPayload;
	struct task_status_struct *ts;
	struct ata_task_resp *resp ;
	u32 *sata_resp;
	struct pm8001_device *pm8001_dev;
	unsigned long flags;

	psataPayload = (struct sata_completion_resp *)(piomb + 4);
	status =
		le32_to_cpu(psataPayload->status);
	param = le32_to_cpu(psataPayload->param);
	tag = le32_to_cpu(psataPayload->tag);

	if (!tag) {
		pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
		return;
	}

	ccb = &pm8001_ha->ccb_info[tag];
	t = ccb->task;
	pm8001_dev = ccb->device;

	if (t) {
		if (t->dev && (t->dev->lldd_dev))
			pm8001_dev = t->dev->lldd_dev;
	} else {
		pm8001_dbg(pm8001_ha, FAIL, "task null\n");
		return;
	}

	/* NOTE(review): t was already verified non-NULL above, so the
	 * !t part of this check is dead code — kept as-is for safety. */
	if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
		&& unlikely(!t || !t->lldd_task || !t->dev)) {
		pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
		return;
	}

	ts = &t->task_status;

	if (status)
		pm8001_dbg(pm8001_ha, IOERR,
			   "status:0x%x, tag:0x%x, task::0x%p\n",
			   status, tag, t);

	/* Print sas address of IO failed device */
	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
	    (status != IO_UNDERFLOW)) {
		if (!((t->dev->parent) &&
		      (dev_is_expander(t->dev->parent->dev_type)))) {
			/* Direct-attached SATA: reconstruct the byte-swapped
			 * HBA SAS address plus phy offset for the log. */
			for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++)
				sata_addr_low[i] = pm8001_ha->sas_addr[j];
			for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++)
				sata_addr_hi[i] = pm8001_ha->sas_addr[j];
			memcpy(&temp_sata_addr_low, sata_addr_low,
			       sizeof(sata_addr_low));
			memcpy(&temp_sata_addr_hi, sata_addr_hi,
			       sizeof(sata_addr_hi));
			temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
						|((temp_sata_addr_hi << 8) &
						0xff0000) |
						((temp_sata_addr_hi >> 8)
						& 0xff00) |
						((temp_sata_addr_hi << 24) &
						0xff000000));
			temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
						& 0xff) |
						((temp_sata_addr_low << 8)
						& 0xff0000) |
						((temp_sata_addr_low >> 8)
						& 0xff00) |
						((temp_sata_addr_low << 24)
						& 0xff000000)) +
						pm8001_dev->attached_phy +
						0x10);
			pm8001_dbg(pm8001_ha, FAIL,
				   "SAS Address of IO Failure Drive:%08x%08x\n",
				   temp_sata_addr_hi,
				   temp_sata_addr_low);
		} else {
			pm8001_dbg(pm8001_ha, FAIL,
				   "SAS Address of IO Failure Drive:%016llx\n",
				   SAS_ADDR(t->dev->sas_addr));
		}
	}
	/* Map firmware completion status to libsas task status */
	switch (status) {
	case IO_SUCCESS:
		pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
		if (param == 0) {
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_SAM_STAT_GOOD;
			/* check if response is for SEND READ LOG */
			if (pm8001_dev &&
			    (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
				/* set new bit for abort_all */
				pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
				/* clear bit for read log */
				pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
				pm8001_send_abort_all(pm8001_ha, pm8001_dev);
				/* Free the tag */
				pm8001_tag_free(pm8001_ha, tag);
				sas_free_task(t);
				return;
			}
		} else {
			u8 len;
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PROTO_RESPONSE;
			ts->residual = param;
			pm8001_dbg(pm8001_ha, IO,
				   "SAS_PROTO_RESPONSE len = %d\n",
				   param);
			sata_resp = &psataPayload->sata_resp[0];
			resp = (struct ata_task_resp *)ts->buf;
			/* pick the FIS length by transfer type */
			if (t->ata_task.dma_xfer == 0 &&
			    t->data_dir == DMA_FROM_DEVICE) {
				len = sizeof(struct pio_setup_fis);
				pm8001_dbg(pm8001_ha, IO,
					   "PIO read len = %d\n", len);
			} else if (t->ata_task.use_ncq) {
				len = sizeof(struct set_dev_bits_fis);
				pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
					   len);
			} else {
				len = sizeof(struct dev_to_host_fis);
				pm8001_dbg(pm8001_ha, IO, "other len = %d\n",
					   len);
			}
			if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
				resp->frame_len = len;
				memcpy(&resp->ending_fis[0], sata_resp, len);
				ts->buf_valid_size = sizeof(*resp);
			} else
				pm8001_dbg(pm8001_ha, IO,
					   "response too large\n");
		}
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_ABORTED:
		pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB Tag\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
		/* following cases are to do cases */
	case IO_UNDERFLOW:
		/* SATA Completion with error */
		pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW param = %d\n", param);
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		ts->residual = param;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_NO_DEVICE:
		pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n");
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_ERROR_BREAK:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_INTERRUPTED;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_ERROR_PHY_NOT_READY:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_EPROTO;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_OPEN_CNX_ERROR_BREAK:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		/* internal command: defer to event worker and complete now */
		if (!t->uldd_task) {
			pm8001_handle_event(pm8001_ha,
					    pm8001_dev,
					    IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_QUEUE_FULL;
			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
			return;
		}
		break;
	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
		if (!t->uldd_task) {
			pm8001_handle_event(pm8001_ha,
					    pm8001_dev,
					    IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_QUEUE_FULL;
			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
			return;
		}
		break;
	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		if (!t->uldd_task) {
			pm8001_handle_event(pm8001_ha,
					    pm8001_dev,
					    IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_QUEUE_FULL;
			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
			return;
		}
		break;
	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_ERROR_NAK_RECEIVED:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_ACK_NAK_TIMEOUT\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_ERROR_DMA:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_DMA\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_SATA_LINK_TIMEOUT\n");
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEV_NO_RESPONSE;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_PORT_IN_RESET:
		pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_DS_NON_OPERATIONAL:
		pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		/* internal command: trigger device recovery via event worker */
		if (!t->uldd_task) {
			pm8001_handle_event(pm8001_ha, pm8001_dev,
					    IO_DS_NON_OPERATIONAL);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_QUEUE_FULL;
			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
			return;
		}
		break;
	case IO_DS_IN_RECOVERY:
		pm8001_dbg(pm8001_ha, IO, " IO_DS_IN_RECOVERY\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_DS_IN_ERROR:
		pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_ERROR\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		if (!t->uldd_task) {
			pm8001_handle_event(pm8001_ha, pm8001_dev,
					    IO_DS_IN_ERROR);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_QUEUE_FULL;
			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
			return;
		}
		break;
	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
		/* not allowed case.
		   Therefore, return failed status */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	}
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		/* upper layer already gave up on this task: free only */
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_dbg(pm8001_ha, FAIL,
			   "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
			   t, status, ts->resp, ts->stat);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
	}
}

/*See the comments for mpi_ssp_completion */
static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct sas_task *t;
	struct task_status_struct *ts;
	struct pm8001_ccb_info *ccb;
	struct pm8001_device *pm8001_dev;
	struct sata_event_resp *psataPayload =
		(struct sata_event_resp *)(piomb + 4);
	u32 event = le32_to_cpu(psataPayload->event);
	u32 tag = le32_to_cpu(psataPayload->tag);
	u32 port_id = le32_to_cpu(psataPayload->port_id);
	u32 dev_id = le32_to_cpu(psataPayload->device_id);
	unsigned long flags;

	if (event)
		pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);

	/* Check if this is NCQ error */
	if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
		/* find device using device id */
		pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
		/* send read log extension */
		if (pm8001_dev)
			pm8001_send_read_log(pm8001_ha, pm8001_dev);
		return;
	}

	/* NOTE(review): tag comes from firmware and indexes ccb_info
	 * without a range/zero check — confirm firmware guarantees it. */
	ccb = &pm8001_ha->ccb_info[tag];
	t = ccb->task;
	pm8001_dev = ccb->device;
	if (event)
		pm8001_dbg(pm8001_ha, FAIL, "sata IO status 0x%x\n", event);
	if (unlikely(!t || !t->lldd_task || !t->dev))
		return;
	ts = &t->task_status;
	pm8001_dbg(pm8001_ha, DEVIO,
		   "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
		   port_id, dev_id, tag, event);
	switch (event) {
	case IO_OVERFLOW:
		pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		ts->residual = 0;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		break;
	case IO_XFER_ERROR_BREAK:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_INTERRUPTED;
		break;
	case IO_XFER_ERROR_PHY_NOT_READY:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_EPROTO;
		break;
	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case IO_OPEN_CNX_ERROR_BREAK:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
		break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n");
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEV_NO_RESPONSE;
		/* internal command: defer to event worker and complete now */
		if (!t->uldd_task) {
			pm8001_handle_event(pm8001_ha,
					    pm8001_dev,
					    IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_QUEUE_FULL;
			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
			return;
		}
		break;
	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n");
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
		break;
	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
		break;
	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
		break;
	case IO_XFER_ERROR_NAK_RECEIVED:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_NAK_RECEIVED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_ERROR_PEER_ABORTED:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PEER_ABORTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_REJECTED_NCQ_MODE\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_UNEXPECTED_PHASE:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_UNEXPECTED_PHASE\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_XFER_RDY_OVERRUN\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_OFFSET_MISMATCH:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_OFFSET_MISMATCH\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
		pm8001_dbg(pm8001_ha, IO,
			   "IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_CMD_FRAME_ISSUED:
		/* informational only; the IO is still in flight */
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_CMD_FRAME_ISSUED\n");
		break;
	case IO_XFER_PIO_SETUP_ERROR:
		pm8001_dbg(pm8001_ha, IO, "IO_XFER_PIO_SETUP_ERROR\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", event);
		/* not allowed case. Therefore, return failed status */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	}
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		/* upper layer already gave up on this task: free only */
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_dbg(pm8001_ha, FAIL,
			   "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
			   t, event, ts->resp, ts->stat);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
	}
}

/*See the comments for mpi_ssp_completion */
static void
mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct sas_task *t;
	struct
pm8001_ccb_info *ccb; 2878 unsigned long flags; 2879 u32 status; 2880 u32 tag; 2881 struct smp_completion_resp *psmpPayload; 2882 struct task_status_struct *ts; 2883 struct pm8001_device *pm8001_dev; 2884 2885 psmpPayload = (struct smp_completion_resp *)(piomb + 4); 2886 status = le32_to_cpu(psmpPayload->status); 2887 tag = le32_to_cpu(psmpPayload->tag); 2888 2889 ccb = &pm8001_ha->ccb_info[tag]; 2890 t = ccb->task; 2891 ts = &t->task_status; 2892 pm8001_dev = ccb->device; 2893 if (status) { 2894 pm8001_dbg(pm8001_ha, FAIL, "smp IO status 0x%x\n", status); 2895 pm8001_dbg(pm8001_ha, IOERR, 2896 "status:0x%x, tag:0x%x, task:0x%p\n", 2897 status, tag, t); 2898 } 2899 if (unlikely(!t || !t->lldd_task || !t->dev)) 2900 return; 2901 2902 switch (status) { 2903 case IO_SUCCESS: 2904 pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n"); 2905 ts->resp = SAS_TASK_COMPLETE; 2906 ts->stat = SAS_SAM_STAT_GOOD; 2907 if (pm8001_dev) 2908 atomic_dec(&pm8001_dev->running_req); 2909 break; 2910 case IO_ABORTED: 2911 pm8001_dbg(pm8001_ha, IO, "IO_ABORTED IOMB\n"); 2912 ts->resp = SAS_TASK_COMPLETE; 2913 ts->stat = SAS_ABORTED_TASK; 2914 if (pm8001_dev) 2915 atomic_dec(&pm8001_dev->running_req); 2916 break; 2917 case IO_OVERFLOW: 2918 pm8001_dbg(pm8001_ha, IO, "IO_UNDERFLOW\n"); 2919 ts->resp = SAS_TASK_COMPLETE; 2920 ts->stat = SAS_DATA_OVERRUN; 2921 ts->residual = 0; 2922 if (pm8001_dev) 2923 atomic_dec(&pm8001_dev->running_req); 2924 break; 2925 case IO_NO_DEVICE: 2926 pm8001_dbg(pm8001_ha, IO, "IO_NO_DEVICE\n"); 2927 ts->resp = SAS_TASK_COMPLETE; 2928 ts->stat = SAS_PHY_DOWN; 2929 break; 2930 case IO_ERROR_HW_TIMEOUT: 2931 pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n"); 2932 ts->resp = SAS_TASK_COMPLETE; 2933 ts->stat = SAS_SAM_STAT_BUSY; 2934 break; 2935 case IO_XFER_ERROR_BREAK: 2936 pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); 2937 ts->resp = SAS_TASK_COMPLETE; 2938 ts->stat = SAS_SAM_STAT_BUSY; 2939 break; 2940 case IO_XFER_ERROR_PHY_NOT_READY: 2941 pm8001_dbg(pm8001_ha, IO, 
"IO_XFER_ERROR_PHY_NOT_READY\n"); 2942 ts->resp = SAS_TASK_COMPLETE; 2943 ts->stat = SAS_SAM_STAT_BUSY; 2944 break; 2945 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: 2946 pm8001_dbg(pm8001_ha, IO, 2947 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"); 2948 ts->resp = SAS_TASK_COMPLETE; 2949 ts->stat = SAS_OPEN_REJECT; 2950 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2951 break; 2952 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: 2953 pm8001_dbg(pm8001_ha, IO, 2954 "IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"); 2955 ts->resp = SAS_TASK_COMPLETE; 2956 ts->stat = SAS_OPEN_REJECT; 2957 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2958 break; 2959 case IO_OPEN_CNX_ERROR_BREAK: 2960 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_BREAK\n"); 2961 ts->resp = SAS_TASK_COMPLETE; 2962 ts->stat = SAS_OPEN_REJECT; 2963 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; 2964 break; 2965 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 2966 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"); 2967 ts->resp = SAS_TASK_COMPLETE; 2968 ts->stat = SAS_OPEN_REJECT; 2969 ts->open_rej_reason = SAS_OREJ_UNKNOWN; 2970 pm8001_handle_event(pm8001_ha, 2971 pm8001_dev, 2972 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2973 break; 2974 case IO_OPEN_CNX_ERROR_BAD_DESTINATION: 2975 pm8001_dbg(pm8001_ha, IO, 2976 "IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"); 2977 ts->resp = SAS_TASK_COMPLETE; 2978 ts->stat = SAS_OPEN_REJECT; 2979 ts->open_rej_reason = SAS_OREJ_BAD_DEST; 2980 break; 2981 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: 2982 pm8001_dbg(pm8001_ha, IO, "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"); 2983 ts->resp = SAS_TASK_COMPLETE; 2984 ts->stat = SAS_OPEN_REJECT; 2985 ts->open_rej_reason = SAS_OREJ_CONN_RATE; 2986 break; 2987 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: 2988 pm8001_dbg(pm8001_ha, IO, 2989 "IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"); 2990 ts->resp = SAS_TASK_COMPLETE; 2991 ts->stat = SAS_OPEN_REJECT; 2992 ts->open_rej_reason = SAS_OREJ_WRONG_DEST; 2993 break; 2994 case IO_XFER_ERROR_RX_FRAME: 2995 
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_RX_FRAME\n"); 2996 ts->resp = SAS_TASK_COMPLETE; 2997 ts->stat = SAS_DEV_NO_RESPONSE; 2998 break; 2999 case IO_XFER_OPEN_RETRY_TIMEOUT: 3000 pm8001_dbg(pm8001_ha, IO, "IO_XFER_OPEN_RETRY_TIMEOUT\n"); 3001 ts->resp = SAS_TASK_COMPLETE; 3002 ts->stat = SAS_OPEN_REJECT; 3003 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 3004 break; 3005 case IO_ERROR_INTERNAL_SMP_RESOURCE: 3006 pm8001_dbg(pm8001_ha, IO, "IO_ERROR_INTERNAL_SMP_RESOURCE\n"); 3007 ts->resp = SAS_TASK_COMPLETE; 3008 ts->stat = SAS_QUEUE_FULL; 3009 break; 3010 case IO_PORT_IN_RESET: 3011 pm8001_dbg(pm8001_ha, IO, "IO_PORT_IN_RESET\n"); 3012 ts->resp = SAS_TASK_COMPLETE; 3013 ts->stat = SAS_OPEN_REJECT; 3014 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 3015 break; 3016 case IO_DS_NON_OPERATIONAL: 3017 pm8001_dbg(pm8001_ha, IO, "IO_DS_NON_OPERATIONAL\n"); 3018 ts->resp = SAS_TASK_COMPLETE; 3019 ts->stat = SAS_DEV_NO_RESPONSE; 3020 break; 3021 case IO_DS_IN_RECOVERY: 3022 pm8001_dbg(pm8001_ha, IO, "IO_DS_IN_RECOVERY\n"); 3023 ts->resp = SAS_TASK_COMPLETE; 3024 ts->stat = SAS_OPEN_REJECT; 3025 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 3026 break; 3027 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: 3028 pm8001_dbg(pm8001_ha, IO, 3029 "IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"); 3030 ts->resp = SAS_TASK_COMPLETE; 3031 ts->stat = SAS_OPEN_REJECT; 3032 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 3033 break; 3034 default: 3035 pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status); 3036 ts->resp = SAS_TASK_COMPLETE; 3037 ts->stat = SAS_DEV_NO_RESPONSE; 3038 /* not allowed case. 
Therefore, return failed status */ 3039 break; 3040 } 3041 spin_lock_irqsave(&t->task_state_lock, flags); 3042 t->task_state_flags &= ~SAS_TASK_STATE_PENDING; 3043 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 3044 t->task_state_flags |= SAS_TASK_STATE_DONE; 3045 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { 3046 spin_unlock_irqrestore(&t->task_state_lock, flags); 3047 pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", 3048 t, status, ts->resp, ts->stat); 3049 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 3050 } else { 3051 spin_unlock_irqrestore(&t->task_state_lock, flags); 3052 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 3053 mb();/* in order to force CPU ordering */ 3054 t->task_done(t); 3055 } 3056 } 3057 3058 void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, 3059 void *piomb) 3060 { 3061 struct set_dev_state_resp *pPayload = 3062 (struct set_dev_state_resp *)(piomb + 4); 3063 u32 tag = le32_to_cpu(pPayload->tag); 3064 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; 3065 struct pm8001_device *pm8001_dev = ccb->device; 3066 u32 status = le32_to_cpu(pPayload->status); 3067 u32 device_id = le32_to_cpu(pPayload->device_id); 3068 u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS; 3069 u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS; 3070 pm8001_dbg(pm8001_ha, MSG, "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n", 3071 device_id, pds, nds, status); 3072 complete(pm8001_dev->setds_completion); 3073 ccb->task = NULL; 3074 ccb->ccb_tag = 0xFFFFFFFF; 3075 pm8001_tag_free(pm8001_ha, tag); 3076 } 3077 3078 void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3079 { 3080 struct get_nvm_data_resp *pPayload = 3081 (struct get_nvm_data_resp *)(piomb + 4); 3082 u32 tag = le32_to_cpu(pPayload->tag); 3083 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; 3084 u32 dlen_status = le32_to_cpu(pPayload->dlen_status); 3085 
	/* wake the requester before logging; status is reported below */
	complete(pm8001_ha->nvmd_completion);
	pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n");
	if ((dlen_status & NVMD_STAT) != 0) {
		pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error %x\n",
				dlen_status);
	}
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	pm8001_tag_free(pm8001_ha, tag);
}

/**
 * pm8001_mpi_get_nvmd_resp - firmware response to a GET_NVMD_DATA request.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer (payload starts at piomb + 4)
 *
 * On success copies the returned NVM data from the NVMD DMA region into
 * the requester's buffer (fw_control_context->usrAddr) before completing
 * the waiter; on failure completes the waiter and frees the tag directly.
 * For a TWI device read it also extracts the adapter SAS address from VPD.
 */
void
pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct fw_control_ex *fw_control_context;
	struct get_nvm_data_resp *pPayload =
		(struct get_nvm_data_resp *)(piomb + 4);
	u32 tag = le32_to_cpu(pPayload->tag);
	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
	u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
	u32 ir_tds_bn_dps_das_nvm =
		le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
	void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
	fw_control_context = ccb->fw_control_context;

	pm8001_dbg(pm8001_ha, MSG, "Get nvm data complete!\n");
	if ((dlen_status & NVMD_STAT) != 0) {
		pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error %x\n",
				dlen_status);
		complete(pm8001_ha->nvmd_completion);
		/* We should free tag during failure also, the tag is not being
		 * freed by requesting path anywhere.
		 */
		ccb->task = NULL;
		ccb->ccb_tag = 0xFFFFFFFF;
		pm8001_tag_free(pm8001_ha, tag);
		return;
	}
	if (ir_tds_bn_dps_das_nvm & IPMode) {
		/* indirect mode - IR bit set */
		pm8001_dbg(pm8001_ha, MSG, "Get NVMD success, IR=1\n");
		if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) {
			/* 0x80a80200 identifies the VPD response that carries
			 * the SAS address at offset 4 of the data buffer.
			 */
			if (ir_tds_bn_dps_das_nvm == 0x80a80200) {
				memcpy(pm8001_ha->sas_addr,
				       ((u8 *)virt_addr + 4),
				       SAS_ADDR_SIZE);
				pm8001_dbg(pm8001_ha, MSG, "Get SAS address from VPD successfully!\n");
			}
		} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM)
			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) ||
			((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) {
			/* nothing extra to do for these NVMD sources */
			;
		} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP)
			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) {
			;
		} else {
			/* Should not be happened*/
			pm8001_dbg(pm8001_ha, MSG,
				   "(IR=1)Wrong Device type 0x%x\n",
				   ir_tds_bn_dps_das_nvm);
		}
	} else /* direct mode */{
		pm8001_dbg(pm8001_ha, MSG,
			   "Get NVMD success, IR=0, dataLen=%d\n",
			   (dlen_status & NVMD_LEN) >> 24);
	}
	/* Though fw_control_context is freed below, usrAddr still needs
	 * to be updated as this holds the response to the request function
	 */
	memcpy(fw_control_context->usrAddr,
		pm8001_ha->memoryMap.region[NVMD].virt_ptr,
		fw_control_context->len);
	kfree(ccb->fw_control_context);
	/* To avoid race condition, complete should be
	 * called after the message is copied to
	 * fw_control_context->usrAddr
	 */
	complete(pm8001_ha->nvmd_completion);
	pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n");
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	pm8001_tag_free(pm8001_ha, tag);
}

/**
 * pm8001_mpi_local_phy_ctl - firmware response to a local PHY control request.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer (payload starts at piomb + 4)
 */
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	u32 tag;
	struct local_phy_ctl_resp *pPayload =
		(struct
local_phy_ctl_resp *)(piomb + 4);
	u32 status = le32_to_cpu(pPayload->status);
	u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
	u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
	tag = le32_to_cpu(pPayload->tag);
	if (status != 0) {
		pm8001_dbg(pm8001_ha, MSG,
			   "%x phy execute %x phy op failed!\n",
			   phy_id, phy_op);
	} else {
		pm8001_dbg(pm8001_ha, MSG,
			   "%x phy execute %x phy op success!\n",
			   phy_id, phy_op);
		pm8001_ha->phy[phy_id].reset_success = true;
	}
	/* wake anyone waiting for this phy operation, then drop the tag */
	if (pm8001_ha->phy[phy_id].enable_completion) {
		complete(pm8001_ha->phy[phy_id].enable_completion);
		pm8001_ha->phy[phy_id].enable_completion = NULL;
	}
	pm8001_tag_free(pm8001_ha, tag);
	return 0;
}

/**
 * pm8001_bytes_dmaed - one of the interface functions communicating with libsas
 * @pm8001_ha: our hba card information
 * @i: which phy that received the event.
 *
 * When the HBA driver receives the identify-done event or an initial FIS
 * received event (for SATA), it invokes this function to notify the sas
 * layer that the sas topology has formed, so it can discover the whole
 * sas domain; on a broadcast(change) primitive, only the changed domain
 * is rediscovered rather than the whole domain.
 */
void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
{
	struct pm8001_phy *phy = &pm8001_ha->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	if (!phy->phy_attached)
		return;

	/* mirror the negotiated/min/max rates into the transport-class phy */
	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;
		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		/* patch up the received IDENTIFY frame before handing it
		 * to libsas
		 */
		struct sas_identify_frame *id;
		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/*Nothing*/
	}
	pm8001_dbg(pm8001_ha, MSG, "phy %d byte dmaded.\n", i);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
}

/* Get the link rate speed */
void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
{
	struct sas_phy *sas_phy = phy->sas_phy.phy;

	/* translate the firmware rate code into libsas link rates */
	switch (link_rate) {
	case PHY_SPEED_120:
		phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
		break;
	case PHY_SPEED_60:
		phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
		break;
	case PHY_SPEED_30:
		phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
		break;
	case PHY_SPEED_15:
		phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
		break;
	}
	/* NOTE(review): this overwrites negotiated_linkrate set in the switch
	 * above and caps the advertised range at 6.0 Gbps - confirm intended
	 * for 12G-capable parts.
	 */
	sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
	sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
	sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
	sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
	sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
}

/**
 * pm8001_get_attached_sas_addr - extract/generate attached SAS address
 * @phy: pointer to asd_phy
 * @sas_addr: pointer to buffer where the SAS address is to be written
 *
 * This function extracts the SAS address from an IDENTIFY frame
 * received. If OOB is SATA, then a SAS address is generated from the
 * HA tables.
 *
 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
 * buffer.
 */
void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
	u8 *sas_addr)
{
	if (phy->sas_phy.frame_rcvd[0] == 0x34
		&& phy->sas_phy.oob_mode == SATA_OOB_MODE) {
		struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha;
		/* FIS device-to-host: synthesize a per-phy address from the
		 * adapter's own SAS address
		 */
		u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr);
		addr += phy->sas_phy.id;
		*(__be64 *)sas_addr = cpu_to_be64(addr);
	} else {
		struct sas_identify_frame *idframe =
			(void *) phy->sas_phy.frame_rcvd;
		memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
	}
}

/**
 * pm8001_hw_event_ack_req - For PM8001, some events need to be acknowledged
 * to FW.
 * @pm8001_ha: our hba card information
 * @Qnum: the outbound queue message number.
 * @SEA: source of event to ack
 * @port_id: port id.
 * @phyId: phy id.
 * @param0: parameter 0.
 * @param1: parameter 1.
 */
static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
	u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
{
	struct hw_event_ack_req payload;
	u32 opc = OPC_INB_SAS_HW_EVENT_ACK;

	struct inbound_queue_table *circularQ;

	memset((u8 *)&payload, 0, sizeof(payload));
	circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
	payload.tag = cpu_to_le32(1);
	/* pack event source (bits 8-23), phy id (bits 4-7), port (bits 0-3) */
	payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
		((phyId & 0x0F) << 4) | (port_id & 0x0F));
	payload.param0 = cpu_to_le32(param0);
	payload.param1 = cpu_to_le32(param1);
	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
			sizeof(payload), 0);
}

static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
	u32 phyId, u32 phy_op);

/**
 * hw_event_sas_phy_up - FW tells me a SAS phy up event.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
static void
hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	u32 lr_evt_status_phyid_portid =
		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
	/* unpack rate (bits 28-31), phy id (bits 4-7), port id (bits 0-3) */
	u8 link_rate =
		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
	u8 phy_id =
		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
	u8 portstate = (u8)(npip_portstate & 0x0000000F);
	struct pm8001_port *port = &pm8001_ha->port[port_id];
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	unsigned long flags;
	u8 deviceType = pPayload->sas_identify.dev_type;
	phy->port = port;
	port->port_id = port_id;
	port->port_state = portstate;
	phy->phy_state = PHY_STATE_LINK_UP_SPC;
	pm8001_dbg(pm8001_ha, MSG,
		   "HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
		   port_id, phy_id);

	switch (deviceType) {
	case SAS_PHY_UNUSED:
		pm8001_dbg(pm8001_ha, MSG, "device type no device.\n");
		break;
	case SAS_END_DEVICE:
		pm8001_dbg(pm8001_ha, MSG, "end device.\n");
		/* end devices additionally get a notify-spinup request */
		pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
			PHY_NOTIFY_ENABLE_SPINUP);
		port->port_attached = 1;
		pm8001_get_lrate_mode(phy, link_rate);
		break;
	case SAS_EDGE_EXPANDER_DEVICE:
		pm8001_dbg(pm8001_ha, MSG, "expander device.\n");
		port->port_attached = 1;
		pm8001_get_lrate_mode(phy, link_rate);
		break;
	case SAS_FANOUT_EXPANDER_DEVICE:
		pm8001_dbg(pm8001_ha, MSG, "fanout expander device.\n");
		port->port_attached = 1;
		pm8001_get_lrate_mode(phy, link_rate);
		break;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "unknown device type(%x)\n",
			   deviceType);
		break;
	}
	phy->phy_type |= PORT_TYPE_SAS;
	phy->identify.device_type = deviceType;
	phy->phy_attached = 1;
	if (phy->identify.device_type == SAS_END_DEVICE)
		phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
	else if (phy->identify.device_type != SAS_PHY_UNUSED)
		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
	phy->sas_phy.oob_mode = SAS_OOB_MODE;
	sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
	/* identify frame from the IOMB minus the 4-byte CRC */
	memcpy(phy->frame_rcvd, &pPayload->sas_identify,
		sizeof(struct sas_identify_frame)-4);
	phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
	if (pm8001_ha->flags == PM8001F_RUN_TIME)
		mdelay(200);/*delay a moment to wait disk to spinup*/
	pm8001_bytes_dmaed(pm8001_ha, phy_id);
}

/**
 * hw_event_sata_phy_up - FW tells me a SATA phy up event.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
static void
hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	u32 lr_evt_status_phyid_portid =
		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
	/* unpack rate (bits 28-31), phy id (bits 4-7), port id (bits 0-3) */
	u8 link_rate =
		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
	u8 phy_id =
		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
	u8 portstate = (u8)(npip_portstate & 0x0000000F);
	struct pm8001_port *port = &pm8001_ha->port[port_id];
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	unsigned long flags;
	pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
		   port_id, phy_id);
	phy->port = port;
	port->port_id = port_id;
	port->port_state = portstate;
	phy->phy_state = PHY_STATE_LINK_UP_SPC;
	port->port_attached = 1;
	pm8001_get_lrate_mode(phy, link_rate);
	phy->phy_type |= PORT_TYPE_SATA;
	phy->phy_attached = 1;
	phy->sas_phy.oob_mode = SATA_OOB_MODE;
	sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
	/* the initial D2H FIS starts 4 bytes before the sata_fis field */
	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
		sizeof(struct dev_to_host_fis));
	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	phy->identify.device_type = SAS_SATA_DEV;
	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
	pm8001_bytes_dmaed(pm8001_ha, phy_id);
}

/**
 * hw_event_phy_down - we should notify the libsas the phy is down.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
static void
hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	u32 lr_evt_status_phyid_portid =
		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
	u8 phy_id =
		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
	u8 portstate = (u8)(npip_portstate & 0x0000000F);
	struct pm8001_port *port = &pm8001_ha->port[port_id];
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	port->port_state = portstate;
	/* forget everything we learned about the attached device */
	phy->phy_type = 0;
	phy->identify.device_type = 0;
	phy->phy_attached = 0;
	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
	switch (portstate) {
	case PORT_VALID:
		break;
	case PORT_INVALID:
		pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
			   port_id);
		pm8001_dbg(pm8001_ha, MSG,
			   " Last phy Down and port invalid\n");
		port->port_attached = 0;
		/* this state requires an explicit ack to the firmware */
		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
			port_id, phy_id, 0, 0);
		break;
	case PORT_IN_RESET:
		pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
			   port_id);
		break;
	case PORT_NOT_ESTABLISHED:
		pm8001_dbg(pm8001_ha, MSG,
			   " phy Down and PORT_NOT_ESTABLISHED\n");
		port->port_attached = 0;
		break;
	case PORT_LOSTCOMM:
		pm8001_dbg(pm8001_ha, MSG, " phy Down and PORT_LOSTCOMM\n");
		pm8001_dbg(pm8001_ha, MSG,
			   " Last phy Down and port invalid\n");
		port->port_attached = 0;
		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
			port_id, phy_id, 0, 0);
		break;
	default:
		port->port_attached = 0;
		pm8001_dbg(pm8001_ha, DEVIO, " phy Down and(default) = %x\n",
			   portstate);
		break;

	}
}
/**
 * pm8001_mpi_reg_resp - process register device ID response.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * when sas layer find a device it will notify LLDD, then the driver register
 * the domain device to FW, this event is the return device ID which the FW
 * has assigned, from now, inter-communication with FW is no longer using the
 * SAS address, use device ID which FW assigned.
 */
int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	u32 status;
	u32 device_id;
	u32 htag;
	struct pm8001_ccb_info *ccb;
	struct pm8001_device *pm8001_dev;
	struct dev_reg_resp *registerRespPayload =
		(struct dev_reg_resp *)(piomb + 4);

	htag = le32_to_cpu(registerRespPayload->tag);
	ccb = &pm8001_ha->ccb_info[htag];
	pm8001_dev = ccb->device;
	status = le32_to_cpu(registerRespPayload->status);
	device_id = le32_to_cpu(registerRespPayload->device_id);
	pm8001_dbg(pm8001_ha, MSG, " register device is status = %d\n",
		   status);
	switch (status) {
	case DEVREG_SUCCESS:
		pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n");
		/* only on success does the device learn its FW-assigned id */
		pm8001_dev->device_id = device_id;
		break;
	case DEVREG_FAILURE_OUT_OF_RESOURCE:
		pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_OUT_OF_RESOURCE\n");
		break;
	case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:
		pm8001_dbg(pm8001_ha, MSG,
			   "DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n");
		break;
	case DEVREG_FAILURE_INVALID_PHY_ID:
		pm8001_dbg(pm8001_ha, MSG, "DEVREG_FAILURE_INVALID_PHY_ID\n");
		break;
	case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:
		pm8001_dbg(pm8001_ha, MSG,
			   "DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n");
		break;
	case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:
		pm8001_dbg(pm8001_ha, MSG,
			   "DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n");
		break;
	case DEVREG_FAILURE_PORT_NOT_VALID_STATE:
		pm8001_dbg(pm8001_ha, MSG,
			   "DEVREG_FAILURE_PORT_NOT_VALID_STATE\n");
		break;
	case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID:
		pm8001_dbg(pm8001_ha, MSG,
			   "DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n");
		break;
	default:
		pm8001_dbg(pm8001_ha, MSG,
			   "DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n");
		break;
	}
	/* wake the registration waiter regardless of status; the waiter
	 * inspects device_id/state to learn the outcome
	 */
	complete(pm8001_dev->dcompletion);
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	pm8001_tag_free(pm8001_ha, htag);
	return 0;
}

/**
 * pm8001_mpi_dereg_resp - process deregister device response; log-only,
 * always returns 0.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	u32 status;
	u32 device_id;
	struct dev_reg_resp *registerRespPayload =
		(struct dev_reg_resp *)(piomb + 4);

	status = le32_to_cpu(registerRespPayload->status);
	device_id = le32_to_cpu(registerRespPayload->device_id);
	if (status != 0)
		pm8001_dbg(pm8001_ha, MSG,
			   " deregister device failed ,status = %x, device_id = %x\n",
			   status, device_id);
	return 0;
}

/**
 * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
	void *piomb)
{
	u32 status;
	struct fw_flash_Update_resp *ppayload =
		(struct fw_flash_Update_resp *)(piomb + 4);
	u32 tag = le32_to_cpu(ppayload->tag);
	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
	status = le32_to_cpu(ppayload->status);
	/* status values are logged only; the waiter checks its own state */
	switch (status) {
	case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
		pm8001_dbg(pm8001_ha, MSG,
			   ": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n");
		break;
	case FLASH_UPDATE_IN_PROGRESS:
		pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_IN_PROGRESS\n");
		break;
	case FLASH_UPDATE_HDR_ERR:
		pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HDR_ERR\n");
		break;
	case FLASH_UPDATE_OFFSET_ERR:
		pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_OFFSET_ERR\n");
		break;
	case FLASH_UPDATE_CRC_ERR:
		pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_CRC_ERR\n");
		break;
	case FLASH_UPDATE_LENGTH_ERR:
		pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_LENGTH_ERR\n");
		break;
	case FLASH_UPDATE_HW_ERR:
		pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_HW_ERR\n");
		break;
	case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
		pm8001_dbg(pm8001_ha, MSG,
			   ": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n");
		break;
	case FLASH_UPDATE_DISABLED:
		pm8001_dbg(pm8001_ha, MSG, ": FLASH_UPDATE_DISABLED\n");
		break;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "No matched status = %d\n",
			   status);
		break;
	}
	kfree(ccb->fw_control_context);
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	pm8001_tag_free(pm8001_ha, tag);
	/* flash update shares the nvmd completion with the NVMD paths */
	complete(pm8001_ha->nvmd_completion);
	return 0;
}

/**
 * pm8001_mpi_general_event - log a general event IOMB and its payload words.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	u32 status;
	int i;
	struct general_event_resp *pPayload =
		(struct
general_event_resp *)(piomb + 4);
	status = le32_to_cpu(pPayload->status);
	pm8001_dbg(pm8001_ha, MSG, " status = 0x%x\n", status);
	/* Dump the inbound IOMB payload words that triggered the event. */
	for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
		pm8001_dbg(pm8001_ha, MSG, "inb_IOMB_payload[0x%x] 0x%x,\n",
			   i,
			   pPayload->inb_IOMB_payload[i]);
	return 0;
}

/**
 * pm8001_mpi_task_abort_resp - process the response of a task abort request
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Marks the aborted sas_task done and releases its ccb.  Returns 0 on
 * success, -1 when the response carries no tag or the ccb has no task.
 */
int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct sas_task *t;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 status ;
	u32 tag, scp;
	struct task_status_struct *ts;
	struct pm8001_device *pm8001_dev;

	struct task_abort_resp *pPayload =
		(struct task_abort_resp *)(piomb + 4);

	status = le32_to_cpu(pPayload->status);
	tag = le32_to_cpu(pPayload->tag);
	if (!tag) {
		pm8001_dbg(pm8001_ha, FAIL, " TAG NULL. RETURNING !!!\n");
		return -1;
	}

	scp = le32_to_cpu(pPayload->scp);
	ccb = &pm8001_ha->ccb_info[tag];
	t = ccb->task;
	pm8001_dev = ccb->device; /* retrieve device */

	if (!t) {
		pm8001_dbg(pm8001_ha, FAIL, " TASK NULL. RETURNING !!!\n");
		return -1;
	}
	ts = &t->task_status;
	if (status != 0)
		pm8001_dbg(pm8001_ha, FAIL, "task abort failed status 0x%x ,tag = 0x%x, scp= 0x%x\n",
			   status, tag, scp);
	switch (status) {
	case IO_SUCCESS:
		pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n");
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_SAM_STAT_GOOD;
		break;
	case IO_NOT_VALID:
		pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n");
		ts->resp = TMF_RESP_FUNC_FAILED;
		break;
	}
	/* Transition the task to DONE before freeing the ccb. */
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&t->task_state_lock, flags);
	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	mb();

	/* Tasks issued internally for an NCQ abort-all are owned by the
	 * driver: free them here instead of calling task_done(). */
	if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
		pm8001_tag_free(pm8001_ha, tag);
		sas_free_task(t);
		/* clear the flag */
		pm8001_dev->id &= 0xBFFFFFFF;
	} else
		t->task_done(t);

	return 0;
}

/**
 * mpi_hw_event - The hw event has come.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Decodes the port id, phy id, event type and status packed into the first
 * payload dword and dispatches each hardware event to libsas and/or the
 * matching helper.  Link-error events are additionally acknowledged to the
 * controller via pm8001_hw_event_ack_req().
 */
static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	unsigned long flags;
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	/* lr_evt_status_phyid_portid layout:
	 * [3:0] port id, [7:4] phy id, [23:8] event type, [27:24] status */
	u32 lr_evt_status_phyid_portid =
		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
	u8 phy_id =
		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
	u16 eventType =
		(u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8);
	u8 status =
		(u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24);
	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
	pm8001_dbg(pm8001_ha, DEVIO,
		   "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
		   port_id, phy_id, eventType, status);
	switch (eventType) {
	case HW_EVENT_PHY_START_STATUS:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n",
			   status);
		if (status == 0)
			phy->phy_state = 1;

		/* At run time a caller may be blocked on enable_completion
		 * waiting for this phy to start. */
		if (pm8001_ha->flags == PM8001F_RUN_TIME &&
		    phy->enable_completion != NULL) {
			complete(phy->enable_completion);
			phy->enable_completion = NULL;
		}
		break;
	case HW_EVENT_SAS_PHY_UP:
		/* NOTE(review): message text says PHY_START_STATUS but this
		 * is the SAS_PHY_UP case — log string kept as-is. */
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n");
		hw_event_sas_phy_up(pm8001_ha, piomb);
		break;
	case HW_EVENT_SATA_PHY_UP:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n");
		hw_event_sata_phy_up(pm8001_ha, piomb);
		break;
	case HW_EVENT_PHY_STOP_STATUS:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_STOP_STATUS status = %x\n",
			   status);
		if (status == 0)
			phy->phy_state = 0;
		break;
	case HW_EVENT_SATA_SPINUP_HOLD:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
		sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
				     GFP_ATOMIC);
		break;
	case HW_EVENT_PHY_DOWN:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
		sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL,
				     GFP_ATOMIC);
		phy->phy_attached = 0;
		phy->phy_state = 0;
		hw_event_phy_down(pm8001_ha, piomb);
		break;
	case HW_EVENT_PORT_INVALID:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
				      GFP_ATOMIC);
		break;
	/* the broadcast change primitive received, tell the LIBSAS this event
	to revalidate the sas domain*/
	case HW_EVENT_BROADCAST_CHANGE:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_CHANGE\n");
		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
					port_id, phy_id, 1, 0);
		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
		sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_PHY_ERROR:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
		sas_phy_disconnected(&phy->sas_phy);
		phy->phy_attached = 0;
		sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
		break;
	case HW_EVENT_BROADCAST_EXP:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
		sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_LINK_ERR_INVALID_DWORD:
		pm8001_dbg(pm8001_ha, MSG,
			   "HW_EVENT_LINK_ERR_INVALID_DWORD\n");
		pm8001_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
		pm8001_dbg(pm8001_ha, MSG,
			   "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n");
		pm8001_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_DISPARITY_ERROR,
			port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_LINK_ERR_CODE_VIOLATION:
		pm8001_dbg(pm8001_ha, MSG,
			   "HW_EVENT_LINK_ERR_CODE_VIOLATION\n");
		pm8001_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_CODE_VIOLATION,
			port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
		pm8001_dbg(pm8001_ha, MSG,
			   "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n");
		pm8001_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
			port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_MALFUNCTION:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
		break;
	case HW_EVENT_BROADCAST_SES:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n");
		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
		sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_INBOUND_CRC_ERROR:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
		pm8001_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_INBOUND_CRC_ERROR,
			port_id, phy_id, 0, 0);
		break;
	case HW_EVENT_HARD_RESET_RECEIVED:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
		sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC);
		break;
	case HW_EVENT_ID_FRAME_TIMEOUT:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
		pm8001_dbg(pm8001_ha, MSG,
			   "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n");
		pm8001_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
			port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_PORT_RESET_TIMER_TMO:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
		pm8001_dbg(pm8001_ha, MSG,
			   "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
				      GFP_ATOMIC);
		break;
	case HW_EVENT_PORT_RECOVER:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
		break;
	case HW_EVENT_PORT_RESET_COMPLETE:
		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n");
		break;
	case EVENT_BROADCAST_ASYNCH_EVENT:
		pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n");
		break;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type = %x\n",
			   eventType);
		break;
	}
	return 0;
}

/**
 * process_one_iomb - process one outbound Queue memory block
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
static void process_one_iomb(struct
pm8001_hba_info *pm8001_ha, void *piomb)
{
	/* Outbound IOMB opcode lives in the low 12 bits of the header. */
	__le32 pHeader = *(__le32 *)piomb;
	u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);

	pm8001_dbg(pm8001_ha, MSG, "process_one_iomb:\n");

	/* Dispatch on the opcode; opcodes with no handler are log-only. */
	switch (opc) {
	case OPC_OUB_ECHO:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_ECHO\n");
		break;
	case OPC_OUB_HW_EVENT:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_HW_EVENT\n");
		mpi_hw_event(pm8001_ha, piomb);
		break;
	case OPC_OUB_SSP_COMP:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_COMP\n");
		mpi_ssp_completion(pm8001_ha, piomb);
		break;
	case OPC_OUB_SMP_COMP:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_COMP\n");
		mpi_smp_completion(pm8001_ha, piomb);
		break;
	case OPC_OUB_LOCAL_PHY_CNTRL:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_LOCAL_PHY_CNTRL\n");
		pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
		break;
	case OPC_OUB_DEV_REGIST:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_REGIST\n");
		pm8001_mpi_reg_resp(pm8001_ha, piomb);
		break;
	case OPC_OUB_DEREG_DEV:
		pm8001_dbg(pm8001_ha, MSG, "unregister the device\n");
		pm8001_mpi_dereg_resp(pm8001_ha, piomb);
		break;
	case OPC_OUB_GET_DEV_HANDLE:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEV_HANDLE\n");
		break;
	case OPC_OUB_SATA_COMP:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n");
		mpi_sata_completion(pm8001_ha, piomb);
		break;
	case OPC_OUB_SATA_EVENT:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n");
		mpi_sata_event(pm8001_ha, piomb);
		break;
	case OPC_OUB_SSP_EVENT:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n");
		mpi_ssp_event(pm8001_ha, piomb);
		break;
	case OPC_OUB_DEV_HANDLE_ARRIV:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_HANDLE_ARRIV\n");
		/*This is for target*/
		break;
	case OPC_OUB_SSP_RECV_EVENT:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_RECV_EVENT\n");
		/*This is for target*/
		break;
	case OPC_OUB_DEV_INFO:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEV_INFO\n");
		break;
	case OPC_OUB_FW_FLASH_UPDATE:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_FW_FLASH_UPDATE\n");
		pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
		break;
	case OPC_OUB_GPIO_RESPONSE:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_RESPONSE\n");
		break;
	case OPC_OUB_GPIO_EVENT:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GPIO_EVENT\n");
		break;
	case OPC_OUB_GENERAL_EVENT:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GENERAL_EVENT\n");
		pm8001_mpi_general_event(pm8001_ha, piomb);
		break;
	case OPC_OUB_SSP_ABORT_RSP:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_ABORT_RSP\n");
		pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
		break;
	case OPC_OUB_SATA_ABORT_RSP:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_ABORT_RSP\n");
		pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
		break;
	case OPC_OUB_SAS_DIAG_MODE_START_END:
		pm8001_dbg(pm8001_ha, MSG,
			   "OPC_OUB_SAS_DIAG_MODE_START_END\n");
		break;
	case OPC_OUB_SAS_DIAG_EXECUTE:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_DIAG_EXECUTE\n");
		break;
	case OPC_OUB_GET_TIME_STAMP:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_TIME_STAMP\n");
		break;
	case OPC_OUB_SAS_HW_EVENT_ACK:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_HW_EVENT_ACK\n");
		break;
	case OPC_OUB_PORT_CONTROL:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PORT_CONTROL\n");
		break;
	case OPC_OUB_SMP_ABORT_RSP:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SMP_ABORT_RSP\n");
		pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
		break;
	case OPC_OUB_GET_NVMD_DATA:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_NVMD_DATA\n");
		pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
		break;
	case OPC_OUB_SET_NVMD_DATA:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_NVMD_DATA\n");
		pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
		break;
	case OPC_OUB_DEVICE_HANDLE_REMOVAL:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_DEVICE_HANDLE_REMOVAL\n");
		break;
	case OPC_OUB_SET_DEVICE_STATE:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEVICE_STATE\n");
		pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
		break;
	case OPC_OUB_GET_DEVICE_STATE:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_GET_DEVICE_STATE\n");
		break;
	case OPC_OUB_SET_DEV_INFO:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n");
		break;
	case OPC_OUB_SAS_RE_INITIALIZE:
		pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SAS_RE_INITIALIZE\n");
		break;
	default:
		pm8001_dbg(pm8001_ha, DEVIO,
			   "Unknown outbound Queue IOMB OPC = %x\n",
			   opc);
		break;
	}
}

/**
 * process_oq - drain one outbound queue of the controller
 * @pm8001_ha: our hba card information
 * @vec: index of the outbound queue (interrupt vector) to service
 *
 * Consumes messages until the queue is empty, processing each IOMB and
 * returning the slot to the circular buffer.  Runs under pm8001_ha->lock.
 */
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
	struct outbound_queue_table *circularQ;
	void *pMsg1 = NULL;
	u8 bc;
	u32 ret = MPI_IO_STATUS_FAIL;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->lock, flags);
	circularQ = &pm8001_ha->outbnd_q_tbl[vec];
	do {
		ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
		if (MPI_IO_STATUS_SUCCESS == ret) {
			/* process the outbound message */
			process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
			/* free the message from the outbound circular buffer */
			pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
						circularQ, bc);
		}
		if (MPI_IO_STATUS_BUSY == ret) {
			/* Update the producer index from SPC */
			circularQ->producer_index =
				cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
			if (le32_to_cpu(circularQ->producer_index) ==
				circularQ->consumer_idx)
				/* OQ is empty */
				break;
		}
	} while (1);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return ret;
}

/* DMA_... to our direction translation.
*/ 4120 static const u8 data_dir_flags[] = { 4121 [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ 4122 [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */ 4123 [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */ 4124 [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ 4125 }; 4126 void 4127 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) 4128 { 4129 int i; 4130 struct scatterlist *sg; 4131 struct pm8001_prd *buf_prd = prd; 4132 4133 for_each_sg(scatter, sg, nr, i) { 4134 buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 4135 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); 4136 buf_prd->im_len.e = 0; 4137 buf_prd++; 4138 } 4139 } 4140 4141 static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd) 4142 { 4143 psmp_cmd->tag = hTag; 4144 psmp_cmd->device_id = cpu_to_le32(deviceID); 4145 psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); 4146 } 4147 4148 /** 4149 * pm8001_chip_smp_req - send a SMP task to FW 4150 * @pm8001_ha: our hba card information. 4151 * @ccb: the ccb information this request used. 
 */
static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	int elem, rc;
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len;
	struct smp_req smp_cmd;
	u32 opc;
	struct inbound_queue_table *circularQ;

	memset(&smp_cmd, 0, sizeof(smp_cmd));
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);
	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	opc = OPC_INB_SMP_REQUEST;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
	/* Sizes exclude the 4-byte CRC appended/stripped by the hardware. */
	smp_cmd.long_smp_req.long_req_addr =
		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
	smp_cmd.long_smp_req.long_req_size =
		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
	smp_cmd.long_smp_req.long_resp_addr =
		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
	smp_cmd.long_smp_req.long_resp_size =
		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
	build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
				  &smp_cmd, sizeof(smp_cmd), 0);
	if (rc)
		goto err_out_2;

	return 0;

err_out_2:
	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out:
	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}

/**
 * pm8001_chip_ssp_io_req - send a SSP task to FW
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
 */
static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct ssp_ini_io_start_req ssp_cmd;
	u32 tag = ccb->ccb_tag;
	int ret;
	u64 phys_addr;
	struct inbound_queue_table *circularQ;
	u32 opc = OPC_INB_SSPINIIOSTART;
	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
	ssp_cmd.dir_m_tlr =
		cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
	SAS 1.1 compatible TLR*/
	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
	ssp_cmd.tag = cpu_to_le32(tag);
	if (task->ssp_task.enable_first_burst)
		ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
	       task->ssp_task.cmd->cmd_len);
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	/* fill in PRD (scatter/gather) table, if any */
	if (task->num_scatter > 1) {
		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
		phys_addr = ccb->ccb_dma_handle;
		ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
		ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
		ssp_cmd.esgl = cpu_to_le32(1<<31);
	} else if (task->num_scatter == 1) {
		u64 dma_addr = sg_dma_address(task->scatter);
		ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
		ssp_cmd.addr_high =
cpu_to_le32(upper_32_bits(dma_addr));
		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
		ssp_cmd.esgl = 0;
	} else if (task->num_scatter == 0) {
		ssp_cmd.addr_low = 0;
		ssp_cmd.addr_high = 0;
		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
		ssp_cmd.esgl = 0;
	}
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
				   sizeof(ssp_cmd), 0);
	return ret;
}

/**
 * pm8001_chip_sata_req - send a SATA task to FW
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
 *
 * Builds a SATA_HOST_IO_START IOMB from the sas_task's FIS and scatterlist.
 * A read-log-ext (0x2f) issued while an internal NCQ recovery flag is set
 * is completed immediately with GOOD status instead of being sent.
 */
static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
	u32 tag = ccb->ccb_tag;
	int ret;
	struct sata_start_req sata_cmd;
	u32 hdr_tag, ncg_tag = 0;
	u64 phys_addr;
	u32 ATAP = 0x0;
	u32 dir;
	struct inbound_queue_table *circularQ;
	unsigned long flags;
	u32 opc = OPC_INB_SATA_HOST_OPSTART;
	memset(&sata_cmd, 0, sizeof(sata_cmd));
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	/* Select the ATA protocol (ATAP) encoding for the firmware. */
	if (task->data_dir == DMA_NONE) {
		ATAP = 0x04;  /* no data*/
		pm8001_dbg(pm8001_ha, IO, "no data\n");
	} else if (likely(!task->ata_task.device_control_reg_update)) {
		if (task->ata_task.dma_xfer) {
			ATAP = 0x06; /* DMA */
			pm8001_dbg(pm8001_ha, IO, "DMA\n");
		} else {
			ATAP = 0x05; /* PIO*/
			pm8001_dbg(pm8001_ha, IO, "PIO\n");
		}
		if (task->ata_task.use_ncq &&
		    dev->sata_dev.class != ATA_DEV_ATAPI) {
			ATAP = 0x07; /* FPDMA */
			pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
		}
	}
	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
		/* NCQ tag is carried in bits 7:3 of the FIS sector count. */
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
		ncg_tag = hdr_tag;
	}
	dir = data_dir_flags[task->data_dir] << 8;
	sata_cmd.tag = cpu_to_le32(tag);
	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
	sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
	sata_cmd.ncqtag_atap_dir_m =
		cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir);
	sata_cmd.sata_fis = task->ata_task.fis;
	if (likely(!task->ata_task.device_control_reg_update))
		sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
	sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
	/* fill in PRD (scatter/gather) table, if any */
	if (task->num_scatter > 1) {
		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
		phys_addr = ccb->ccb_dma_handle;
		sata_cmd.addr_low = lower_32_bits(phys_addr);
		sata_cmd.addr_high = upper_32_bits(phys_addr);
		sata_cmd.esgl = cpu_to_le32(1 << 31);
	} else if (task->num_scatter == 1) {
		u64 dma_addr = sg_dma_address(task->scatter);
		sata_cmd.addr_low = lower_32_bits(dma_addr);
		sata_cmd.addr_high = upper_32_bits(dma_addr);
		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
		sata_cmd.esgl = 0;
	} else if (task->num_scatter == 0) {
		sata_cmd.addr_low = 0;
		sata_cmd.addr_high = 0;
		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
		sata_cmd.esgl = 0;
	}

	/* Check for read log for failed drive and return */
	if (sata_cmd.sata_fis.command == 0x2f) {
		if (((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
			(pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
			(pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
			struct task_status_struct *ts;

			pm8001_ha_dev->id &= 0xDFFFFFFF;
			ts = &task->task_status;

			spin_lock_irqsave(&task->task_state_lock, flags);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_SAM_STAT_GOOD;
			task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
			task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
			task->task_state_flags |= SAS_TASK_STATE_DONE;
			if (unlikely((task->task_state_flags &
					SAS_TASK_STATE_ABORTED))) {
				spin_unlock_irqrestore(&task->task_state_lock,
						       flags);
				pm8001_dbg(pm8001_ha, FAIL,
					   "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
					   task, ts->resp,
					   ts->stat);
				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
			} else {
				spin_unlock_irqrestore(&task->task_state_lock,
						       flags);
				pm8001_ccb_task_free_done(pm8001_ha, task,
							  ccb, tag);
				return 0;
			}
		}
	}

	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
				   sizeof(sata_cmd), 0);
	return ret;
}

/**
 * pm8001_chip_phy_start_req - start phy via PHY_START COMMAND
 * @pm8001_ha: our hba card information.
 * @phy_id: the phy id which we wanted to start up.
 */
static int
pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
	struct phy_start_req payload;
	struct inbound_queue_table *circularQ;
	int ret;
	u32 tag = 0x01;
	u32 opcode = OPC_INB_PHYSTART;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	memset(&payload, 0, sizeof(payload));
	payload.tag = cpu_to_le32(tag);
	/*
	 ** [0:7]   PHY Identifier
	 ** [8:11]  link rate 1.5G, 3G, 6G
	 ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both
	 ** [14]    0b disable spin up hold; 1b enable spin up hold
	 */
	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
		LINKMODE_AUTO | LINKRATE_15 |
		LINKRATE_30 | LINKRATE_60 | phy_id);
	payload.sas_identify.dev_type = SAS_END_DEVICE;
	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
	memcpy(payload.sas_identify.sas_addr,
	       pm8001_ha->sas_addr, SAS_ADDR_SIZE);
	payload.sas_identify.phy_id = phy_id;
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
				   sizeof(payload), 0);
	return ret;
}

/**
 * pm8001_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
 * @pm8001_ha: our hba card information.
 * @phy_id: the phy id which we want to stop.
 */
static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
	u8 phy_id)
{
	struct phy_stop_req payload;
	struct inbound_queue_table *circularQ;
	int ret;
	u32 tag = 0x01;
	u32 opcode = OPC_INB_PHYSTOP;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	memset(&payload, 0, sizeof(payload));
	payload.tag = cpu_to_le32(tag);
	payload.phy_id = cpu_to_le32(phy_id);
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
				   sizeof(payload), 0);
	return ret;
}

/*
 * Issue a REGISTER DEVICE request for @pm8001_dev; flag == 1 marks a
 * direct-attached SATA device.  The response is handled in
 * pm8001_mpi_reg_resp — see comments there.
 */
static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, u32 flag)
{
	struct reg_dev_req payload;
	u32 opc;
	u32 stp_sspsmp_sata = 0x4;
	struct inbound_queue_table *circularQ;
	u32 linkrate, phy_id;
	int rc, tag = 0xdeadbeef;
	struct pm8001_ccb_info *ccb;
	u8 retryFlag = 0x1;
	u16 firstBurstSize = 0;
	u16 ITNT = 2000;
	struct domain_device *dev = pm8001_dev->sas_device;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_port *port = dev->port->lldd_port;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	memset(&payload, 0, sizeof(payload));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return rc;
	ccb = &pm8001_ha->ccb_info[tag];
	ccb->device = pm8001_dev;
	ccb->ccb_tag = tag;
	payload.tag = cpu_to_le32(tag);
	if (flag == 1)
		stp_sspsmp_sata = 0x02; /*direct attached sata */
	else {
		if (pm8001_dev->dev_type == SAS_SATA_DEV)
			stp_sspsmp_sata = 0x00; /* stp*/
		else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
			dev_is_expander(pm8001_dev->dev_type))
			stp_sspsmp_sata = 0x01; /*ssp or smp*/
	}
	/* Behind an expander, register against the expander's phy. */
	if (parent_dev && dev_is_expander(parent_dev->dev_type))
		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
	else
		phy_id = pm8001_dev->attached_phy;
	opc = OPC_INB_REG_DEV;
	/* Register at the slower of the device and port link rates. */
	linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
		pm8001_dev->sas_device->linkrate : dev->port->linkrate;
	payload.phyid_portid =
		cpu_to_le32(((port->port_id) & 0x0F) |
		((phy_id & 0x0F) << 4));
	payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
		((linkrate & 0x0F) * 0x1000000) |
		((stp_sspsmp_sata & 0x03) * 0x10000000));
	payload.firstburstsize_ITNexustimeout =
		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
	       SAS_ADDR_SIZE);
	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
				  sizeof(payload), 0);
	return rc;
}

/*
 * see comments on pm8001_mpi_reg_resp.
 */
int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
	u32 device_id)
{
	struct dereg_dev_req payload;
	u32 opc = OPC_INB_DEREG_DEV_HANDLE;
	int ret;
	struct inbound_queue_table *circularQ;

	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	memset(&payload, 0, sizeof(payload));
	payload.tag = cpu_to_le32(1);
	payload.device_id = cpu_to_le32(device_id);
	pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n",
		   device_id);
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
				   sizeof(payload), 0);
	return ret;
}

/**
 * pm8001_chip_phy_ctl_req - support the local phy operation
 * @pm8001_ha: our hba card information.
4525 * @phyId: the phy id which we wanted to operate 4526 * @phy_op: the phy operation to request 4527 */ 4528 static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, 4529 u32 phyId, u32 phy_op) 4530 { 4531 struct local_phy_ctl_req payload; 4532 struct inbound_queue_table *circularQ; 4533 int ret; 4534 u32 opc = OPC_INB_LOCAL_PHY_CONTROL; 4535 memset(&payload, 0, sizeof(payload)); 4536 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4537 payload.tag = cpu_to_le32(1); 4538 payload.phyop_phyid = 4539 cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); 4540 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 4541 sizeof(payload), 0); 4542 return ret; 4543 } 4544 4545 static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha) 4546 { 4547 #ifdef PM8001_USE_MSIX 4548 return 1; 4549 #else 4550 u32 value; 4551 4552 value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); 4553 if (value) 4554 return 1; 4555 return 0; 4556 #endif 4557 } 4558 4559 /** 4560 * pm8001_chip_isr - PM8001 isr handler. 4561 * @pm8001_ha: our hba card information. 
4562 * @vec: IRQ number 4563 */ 4564 static irqreturn_t 4565 pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) 4566 { 4567 pm8001_chip_interrupt_disable(pm8001_ha, vec); 4568 pm8001_dbg(pm8001_ha, DEVIO, 4569 "irq vec %d, ODMR:0x%x\n", 4570 vec, pm8001_cr32(pm8001_ha, 0, 0x30)); 4571 process_oq(pm8001_ha, vec); 4572 pm8001_chip_interrupt_enable(pm8001_ha, vec); 4573 return IRQ_HANDLED; 4574 } 4575 4576 static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, 4577 u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag) 4578 { 4579 struct task_abort_req task_abort; 4580 struct inbound_queue_table *circularQ; 4581 int ret; 4582 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4583 memset(&task_abort, 0, sizeof(task_abort)); 4584 if (ABORT_SINGLE == (flag & ABORT_MASK)) { 4585 task_abort.abort_all = 0; 4586 task_abort.device_id = cpu_to_le32(dev_id); 4587 task_abort.tag_to_abort = cpu_to_le32(task_tag); 4588 task_abort.tag = cpu_to_le32(cmd_tag); 4589 } else if (ABORT_ALL == (flag & ABORT_MASK)) { 4590 task_abort.abort_all = cpu_to_le32(1); 4591 task_abort.device_id = cpu_to_le32(dev_id); 4592 task_abort.tag = cpu_to_le32(cmd_tag); 4593 } 4594 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 4595 sizeof(task_abort), 0); 4596 return ret; 4597 } 4598 4599 /* 4600 * pm8001_chip_abort_task - SAS abort task when error or exception happened. 
 */
int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
{
	u32 opc, device_id;
	int rc = TMF_RESP_FUNC_FAILED;
	pm8001_dbg(pm8001_ha, EH, "cmd_tag = %x, abort task tag = 0x%x\n",
		   cmd_tag, task_tag);
	/* Pick the abort opcode matching the device's protocol. */
	if (pm8001_dev->dev_type == SAS_END_DEVICE)
		opc = OPC_INB_SSP_ABORT;
	else if (pm8001_dev->dev_type == SAS_SATA_DEV)
		opc = OPC_INB_SATA_ABORT;
	else
		opc = OPC_INB_SMP_ABORT;/* SMP */
	device_id = pm8001_dev->device_id;
	rc = send_task_abort(pm8001_ha, opc, device_id, flag,
		task_tag, cmd_tag);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_dbg(pm8001_ha, EH, "rc= %d\n", rc);
	return rc;
}

/**
 * pm8001_chip_ssp_tm_req - built the task management command.
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information.
 * @tmf: task management function.
 */
int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
{
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	u32 opc = OPC_INB_SSPINITMSTART;
	struct inbound_queue_table *circularQ;
	struct ssp_ini_tm_start_req sspTMCmd;
	int ret;

	memset(&sspTMCmd, 0, sizeof(sspTMCmd));
	sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
	/* relate_tag identifies the I/O the TMF acts upon. */
	sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
	sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
	memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
	sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
	/* Chips newer than the 8001 additionally need ds_ads_m set. */
	if (pm8001_ha->chip_id != chip_8001)
		sspTMCmd.ds_ads_m = 0x08;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
		sizeof(sspTMCmd), 0);
	return ret;
}

/*
 * pm8001_chip_get_nvmd_req - issue a GET_NVMD_DATA request for the NVM
 * area selected by the ioctl payload's minor_function.  The firmware
 * DMAs the response into the pre-allocated NVMD memory region; the
 * caller's buffer address and length are stashed in the ccb's
 * fw_control_context so the completion path can copy the data out.
 */
int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
	void *payload)
{
	u32 opc = OPC_INB_GET_NVMD_DATA;
	u32 nvmd_type;
	int rc;
	u32 tag;
	struct pm8001_ccb_info *ccb;
	struct inbound_queue_table *circularQ;
	struct get_nvm_data_req nvmd_req;
	struct fw_control_ex *fw_control_context;
	struct pm8001_ioctl_payload *ioctl_payload = payload;

	nvmd_type = ioctl_payload->minor_function;
	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
	if (!fw_control_context)
		return -ENOMEM;
	fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
	fw_control_context->len = ioctl_payload->rd_length;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	memset(&nvmd_req, 0, sizeof(nvmd_req));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc) {
		kfree(fw_control_context);
		return rc;
	}
	ccb = &pm8001_ha->ccb_info[tag];
	ccb->ccb_tag = tag;
	ccb->fw_control_context = fw_control_context;
	nvmd_req.tag = cpu_to_le32(tag);

	switch (nvmd_type) {
	case TWI_DEVICE: {
		/* TWI (two-wire/I2C) seeprom: the device address and page
		 * size are encoded into len_ir_vpdd alongside the type. */
		u32 twi_addr, twi_page_size;
		twi_addr = 0xa8;
		twi_page_size = 2;

		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
			twi_page_size << 8 | TWI_DEVICE);
		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
		nvmd_req.resp_addr_hi =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
		nvmd_req.resp_addr_lo =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
		break;
	}
	case C_SEEPROM: {
		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
		nvmd_req.resp_addr_hi =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
		nvmd_req.resp_addr_lo =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
		break;
	}
	case VPD_FLASH: {
		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
		nvmd_req.resp_addr_hi =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
		nvmd_req.resp_addr_lo =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
		break;
	}
	case EXPAN_ROM: {
		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
		nvmd_req.resp_addr_hi =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
		nvmd_req.resp_addr_lo =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
		break;
	}
	case IOP_RDUMP: {
		/* Register dump reads additionally carry the read offset. */
		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
		nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
		nvmd_req.resp_addr_hi =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
		nvmd_req.resp_addr_lo =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
		break;
	}
	default:
		break;
	}
	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
		sizeof(nvmd_req), 0);
	if (rc) {
		/* Posting failed: no completion will run, so release the
		 * context and the tag here. */
		kfree(fw_control_context);
		pm8001_tag_free(pm8001_ha, tag);
	}
	return rc;
}

/*
 * pm8001_chip_set_nvmd_req - issue a SET_NVMD_DATA request.  The data to
 * be written is first copied from the ioctl payload into the NVMD DMA
 * region, then the command referencing that region is posted.
 */
int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
	void *payload)
{
	u32 opc = OPC_INB_SET_NVMD_DATA;
	u32 nvmd_type;
	int rc;
	u32 tag;
	struct pm8001_ccb_info *ccb;
	struct inbound_queue_table *circularQ;
	struct set_nvm_data_req nvmd_req;
	struct fw_control_ex *fw_control_context;
	struct pm8001_ioctl_payload *ioctl_payload = payload;

	nvmd_type = ioctl_payload->minor_function;
	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
	if (!fw_control_context)
		return -ENOMEM;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
		&ioctl_payload->func_specific,
		ioctl_payload->wr_length);
	memset(&nvmd_req, 0, sizeof(nvmd_req));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc) {
		kfree(fw_control_context);
		return -EBUSY;
	}
	ccb = &pm8001_ha->ccb_info[tag];
	ccb->fw_control_context = fw_control_context;
	ccb->ccb_tag = tag;
	nvmd_req.tag = cpu_to_le32(tag);
	switch (nvmd_type) {
	case TWI_DEVICE: {
		/* TWI (two-wire/I2C) seeprom: the device address and page
		 * size are encoded into len_ir_vpdd alongside the type. */
		u32 twi_addr, twi_page_size;
		twi_addr = 0xa8;
		twi_page_size = 2;
		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
			twi_page_size << 8 | TWI_DEVICE);
		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
		nvmd_req.resp_addr_hi =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
		nvmd_req.resp_addr_lo =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
		break;
	}
	case C_SEEPROM:
		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
		nvmd_req.resp_addr_hi =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
		nvmd_req.resp_addr_lo =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
		break;
	case VPD_FLASH:
		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
		nvmd_req.resp_addr_hi =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
		nvmd_req.resp_addr_lo =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
		break;
	case EXPAN_ROM:
		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
		nvmd_req.resp_addr_hi =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
		nvmd_req.resp_addr_lo =
		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
		break;
	default:
		break;
	}
	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
		sizeof(nvmd_req), 0);
	if (rc) {
		/* Posting failed: no completion will run, so release the
		 * context and the tag here. */
		kfree(fw_control_context);
		pm8001_tag_free(pm8001_ha, tag);
	}
	return rc;
}

/**
 * pm8001_chip_fw_flash_update_build - support the firmware update operation
 * @pm8001_ha: our hba card information.
 * @fw_flash_updata_info: firmware flash update param
 * @tag: Tag to apply to the payload
 */
int
pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
	void *fw_flash_updata_info, u32 tag)
{
	struct fw_flash_Update_req payload;
	struct fw_flash_updata_info *info;
	struct inbound_queue_table *circularQ;
	int ret;
	u32 opc = OPC_INB_FW_FLASH_UPDATE;

	memset(&payload, 0, sizeof(struct fw_flash_Update_req));
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	info = fw_flash_updata_info;
	payload.tag = cpu_to_le32(tag);
	payload.cur_image_len = cpu_to_le32(info->cur_image_len);
	payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
	payload.total_image_len = cpu_to_le32(info->total_image_len);
	/* sgl.im_len.len is already little-endian (set via cpu_to_le32 by
	 * the caller), so it is copied through unchanged. */
	payload.len = info->sgl.im_len.len ;
	payload.sgl_addr_lo =
		cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
	payload.sgl_addr_hi =
		cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
		sizeof(payload), 0);
	return ret;
}

/*
 * pm8001_chip_fw_flash_update_req - stage one firmware image chunk in
 * the FW_FLASH DMA region and post the flash-update command referencing
 * it.  The fw_control pointer and buffer details are kept in the ccb's
 * fw_control_context for the completion handler.
 */
int
pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
	void *payload)
{
	struct fw_flash_updata_info flash_update_info;
	struct fw_control_info *fw_control;
	struct fw_control_ex *fw_control_context;
	int rc;
	u32 tag;
	struct pm8001_ccb_info
*ccb; 4877 void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr; 4878 dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr; 4879 struct pm8001_ioctl_payload *ioctl_payload = payload; 4880 4881 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4882 if (!fw_control_context) 4883 return -ENOMEM; 4884 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific; 4885 pm8001_dbg(pm8001_ha, DEVIO, 4886 "dma fw_control context input length :%x\n", 4887 fw_control->len); 4888 memcpy(buffer, fw_control->buffer, fw_control->len); 4889 flash_update_info.sgl.addr = cpu_to_le64(phys_addr); 4890 flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); 4891 flash_update_info.sgl.im_len.e = 0; 4892 flash_update_info.cur_image_offset = fw_control->offset; 4893 flash_update_info.cur_image_len = fw_control->len; 4894 flash_update_info.total_image_len = fw_control->size; 4895 fw_control_context->fw_control = fw_control; 4896 fw_control_context->virtAddr = buffer; 4897 fw_control_context->phys_addr = phys_addr; 4898 fw_control_context->len = fw_control->len; 4899 rc = pm8001_tag_alloc(pm8001_ha, &tag); 4900 if (rc) { 4901 kfree(fw_control_context); 4902 return -EBUSY; 4903 } 4904 ccb = &pm8001_ha->ccb_info[tag]; 4905 ccb->fw_control_context = fw_control_context; 4906 ccb->ccb_tag = tag; 4907 rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, 4908 tag); 4909 return rc; 4910 } 4911 4912 ssize_t 4913 pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf) 4914 { 4915 u32 value, rem, offset = 0, bar = 0; 4916 u32 index, work_offset, dw_length; 4917 u32 shift_value, gsm_base, gsm_dump_offset; 4918 char *direct_data; 4919 struct Scsi_Host *shost = class_to_shost(cdev); 4920 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 4921 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 4922 4923 direct_data = buf; 4924 gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset; 4925 4926 /* check max is 1 
Mbytes */ 4927 if ((length > 0x100000) || (gsm_dump_offset & 3) || 4928 ((gsm_dump_offset + length) > 0x1000000)) 4929 return -EINVAL; 4930 4931 if (pm8001_ha->chip_id == chip_8001) 4932 bar = 2; 4933 else 4934 bar = 1; 4935 4936 work_offset = gsm_dump_offset & 0xFFFF0000; 4937 offset = gsm_dump_offset & 0x0000FFFF; 4938 gsm_dump_offset = work_offset; 4939 /* adjust length to dword boundary */ 4940 rem = length & 3; 4941 dw_length = length >> 2; 4942 4943 for (index = 0; index < dw_length; index++) { 4944 if ((work_offset + offset) & 0xFFFF0000) { 4945 if (pm8001_ha->chip_id == chip_8001) 4946 shift_value = ((gsm_dump_offset + offset) & 4947 SHIFT_REG_64K_MASK); 4948 else 4949 shift_value = (((gsm_dump_offset + offset) & 4950 SHIFT_REG_64K_MASK) >> 4951 SHIFT_REG_BIT_SHIFT); 4952 4953 if (pm8001_ha->chip_id == chip_8001) { 4954 gsm_base = GSM_BASE; 4955 if (-1 == pm8001_bar4_shift(pm8001_ha, 4956 (gsm_base + shift_value))) 4957 return -EIO; 4958 } else { 4959 gsm_base = 0; 4960 if (-1 == pm80xx_bar4_shift(pm8001_ha, 4961 (gsm_base + shift_value))) 4962 return -EIO; 4963 } 4964 gsm_dump_offset = (gsm_dump_offset + offset) & 4965 0xFFFF0000; 4966 work_offset = 0; 4967 offset = offset & 0x0000FFFF; 4968 } 4969 value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & 4970 0x0000FFFF); 4971 direct_data += sprintf(direct_data, "%08x ", value); 4972 offset += 4; 4973 } 4974 if (rem != 0) { 4975 value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & 4976 0x0000FFFF); 4977 /* xfr for non_dw */ 4978 direct_data += sprintf(direct_data, "%08x ", value); 4979 } 4980 /* Shift back to BAR4 original address */ 4981 if (-1 == pm8001_bar4_shift(pm8001_ha, 0)) 4982 return -EIO; 4983 pm8001_ha->fatal_forensic_shift_offset += 1024; 4984 4985 if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000) 4986 pm8001_ha->fatal_forensic_shift_offset = 0; 4987 return direct_data - buf; 4988 } 4989 4990 int 4991 pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, 4992 struct 
pm8001_device *pm8001_dev, u32 state) 4993 { 4994 struct set_dev_state_req payload; 4995 struct inbound_queue_table *circularQ; 4996 struct pm8001_ccb_info *ccb; 4997 int rc; 4998 u32 tag; 4999 u32 opc = OPC_INB_SET_DEVICE_STATE; 5000 memset(&payload, 0, sizeof(payload)); 5001 rc = pm8001_tag_alloc(pm8001_ha, &tag); 5002 if (rc) 5003 return -1; 5004 ccb = &pm8001_ha->ccb_info[tag]; 5005 ccb->ccb_tag = tag; 5006 ccb->device = pm8001_dev; 5007 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 5008 payload.tag = cpu_to_le32(tag); 5009 payload.device_id = cpu_to_le32(pm8001_dev->device_id); 5010 payload.nds = cpu_to_le32(state); 5011 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 5012 sizeof(payload), 0); 5013 return rc; 5014 5015 } 5016 5017 static int 5018 pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) 5019 { 5020 struct sas_re_initialization_req payload; 5021 struct inbound_queue_table *circularQ; 5022 struct pm8001_ccb_info *ccb; 5023 int rc; 5024 u32 tag; 5025 u32 opc = OPC_INB_SAS_RE_INITIALIZE; 5026 memset(&payload, 0, sizeof(payload)); 5027 rc = pm8001_tag_alloc(pm8001_ha, &tag); 5028 if (rc) 5029 return -ENOMEM; 5030 ccb = &pm8001_ha->ccb_info[tag]; 5031 ccb->ccb_tag = tag; 5032 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 5033 payload.tag = cpu_to_le32(tag); 5034 payload.SSAHOLT = cpu_to_le32(0xd << 25); 5035 payload.sata_hol_tmo = cpu_to_le32(80); 5036 payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); 5037 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 5038 sizeof(payload), 0); 5039 if (rc) 5040 pm8001_tag_free(pm8001_ha, tag); 5041 return rc; 5042 5043 } 5044 5045 const struct pm8001_dispatch pm8001_8001_dispatch = { 5046 .name = "pmc8001", 5047 .chip_init = pm8001_chip_init, 5048 .chip_soft_rst = pm8001_chip_soft_rst, 5049 .chip_rst = pm8001_hw_chip_rst, 5050 .chip_iounmap = pm8001_chip_iounmap, 5051 .isr = pm8001_chip_isr, 5052 .is_our_interrupt = pm8001_chip_is_our_interrupt, 5053 .isr_process_oq 
= process_oq, 5054 .interrupt_enable = pm8001_chip_interrupt_enable, 5055 .interrupt_disable = pm8001_chip_interrupt_disable, 5056 .make_prd = pm8001_chip_make_sg, 5057 .smp_req = pm8001_chip_smp_req, 5058 .ssp_io_req = pm8001_chip_ssp_io_req, 5059 .sata_req = pm8001_chip_sata_req, 5060 .phy_start_req = pm8001_chip_phy_start_req, 5061 .phy_stop_req = pm8001_chip_phy_stop_req, 5062 .reg_dev_req = pm8001_chip_reg_dev_req, 5063 .dereg_dev_req = pm8001_chip_dereg_dev_req, 5064 .phy_ctl_req = pm8001_chip_phy_ctl_req, 5065 .task_abort = pm8001_chip_abort_task, 5066 .ssp_tm_req = pm8001_chip_ssp_tm_req, 5067 .get_nvmd_req = pm8001_chip_get_nvmd_req, 5068 .set_nvmd_req = pm8001_chip_set_nvmd_req, 5069 .fw_flash_update_req = pm8001_chip_fw_flash_update_req, 5070 .set_dev_state_req = pm8001_chip_set_dev_state_req, 5071 .sas_re_init_req = pm8001_chip_sas_re_initialization, 5072 .fatal_errors = pm80xx_fatal_errors, 5073 }; 5074