/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_mb.h"
#include "csio_wr.h"

#define csio_mb_is_host_owner(__owner)	((__owner) == CSIO_MBOWNER_PL)

/* MB Command/Response Helpers */
/*
 * csio_mb_fw_retval - FW return value from a mailbox response.
 * @mbp: Mailbox structure
 *
 */
enum fw_retval
csio_mb_fw_retval(struct csio_mb *mbp)
{
	struct fw_cmd_hdr *hdr;

	hdr = (struct fw_cmd_hdr *)(mbp->mb);

	return FW_CMD_RETVAL_G(ntohl(hdr->lo));
}

/*
 * csio_mb_hello - FW HELLO command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @m_mbox: Master mailbox number, if any.
 * @a_mbox: Mailbox number for async notifications.
 * @master: Device mastership.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	      uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
	      void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_HELLO_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
	cmdp->err_to_clearinit = htonl(
		FW_HELLO_CMD_MASTERDIS_V(master == CSIO_MASTER_CANT)	|
		FW_HELLO_CMD_MASTERFORCE_V(master == CSIO_MASTER_MUST)	|
		FW_HELLO_CMD_MBMASTER_V(master == CSIO_MASTER_MUST ?
				m_mbox : FW_HELLO_CMD_MBMASTER_M)	|
		FW_HELLO_CMD_MBASYNCNOT_V(a_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

}

/*
 * csio_mb_process_hello_rsp - FW HELLO response processing helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @retval: Mailbox return value from Firmware
 * @state: State that the function is in.
 * @mpfn: Master pfn
 *
 */
void
csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
			  enum fw_retval *retval, enum csio_dev_state *state,
			  uint8_t *mpfn)
{
	struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
	uint32_t value;

	*retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));

	if (*retval == FW_SUCCESS) {
		hw->fwrev = ntohl(rsp->fwrev);

		value = ntohl(rsp->err_to_clearinit);
		*mpfn = FW_HELLO_CMD_MBMASTER_G(value);

		if (value & FW_HELLO_CMD_INIT_F)
			*state = CSIO_DEV_STATE_INIT;
		else if (value & FW_HELLO_CMD_ERR_F)
			*state = CSIO_DEV_STATE_ERR;
		else
			*state = CSIO_DEV_STATE_UNINIT;
	}
}
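
/*
 * Illustrative sketch (not part of the original driver): issuing HELLO
 * synchronously and decoding the response.  It assumes the caller already
 * owns an initialized struct csio_mb and the locking that csio_mb_issue()
 * expects; CSIO_MB_DEFAULT_TMO (csio_mb.h) and CSIO_MASTER_MAY (csio_hw.h)
 * are assumed here, and retry/error handling is omitted.
 *
 *	enum csio_dev_state state;
 *	enum fw_retval retval;
 *	uint8_t mpfn;
 *
 *	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, hw->pfn,
 *		      CSIO_MASTER_MAY, NULL);
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *	csio_mb_process_hello_rsp(hw, mbp, &retval, &state, &mpfn);
 */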

/*
 * csio_mb_bye - FW BYE command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	    void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

}

/*
 * csio_mb_reset - FW RESET command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @reset: Type of reset.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	      int reset, int halt,
	      void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
	cmdp->val = htonl(reset);
	cmdp->halt_pkd = htonl(halt);

}

/*
 * csio_mb_params - FW PARAMS command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @pf: PF number.
 * @vf: VF number.
 * @nparams: Number of parameters
 * @params: Parameter mnemonic array.
 * @val: Parameter value array.
 * @wr: Write/Read PARAMS.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	       unsigned int pf, unsigned int vf, unsigned int nparams,
	       const u32 *params, u32 *val, bool wr,
	       void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	uint32_t i;
	uint32_t temp_params = 0, temp_val = 0;
	struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
	__be32 *p = &cmdp->param[0].mnem;

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD)		|
				FW_CMD_REQUEST_F			|
				(wr ? FW_CMD_WRITE_F : FW_CMD_READ_F)	|
				FW_PARAMS_CMD_PFN_V(pf)			|
				FW_PARAMS_CMD_VFN_V(vf));
	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

	/* Write Params */
	if (wr) {
		while (nparams--) {
			temp_params = *params++;
			temp_val = *val++;

			*p++ = htonl(temp_params);
			*p++ = htonl(temp_val);
		}
	} else {
		for (i = 0; i < nparams; i++, p += 2) {
			temp_params = *params++;
			*p = htonl(temp_params);
		}
	}

}

/*
 * csio_mb_process_read_params_rsp - FW PARAMS response processing helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @retval: Mailbox return value from Firmware
 * @nparams: Number of parameters
 * @val: Parameter value array.
 *
 */
void
csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
			   enum fw_retval *retval, unsigned int nparams,
			   u32 *val)
{
	struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
	uint32_t i;
	__be32 *p = &rsp->param[0].val;

	*retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));

	if (*retval == FW_SUCCESS)
		for (i = 0; i < nparams; i++, p += 2)
			*val++ = ntohl(*p);
}
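
/*
 * Illustrative sketch (not part of the original driver): reading one
 * firmware parameter synchronously with the helpers above.  The parameter
 * mnemonic, the pre-allocated mailbox @mbp and the locking around
 * csio_mb_issue() are the caller's responsibility; CSIO_MB_DEFAULT_TMO is
 * assumed to come from csio_mb.h.  On FW_SUCCESS, val[0] holds the value.
 *
 *	u32 param[1] = { 0 };	// fill in a FW_PARAMS_* mnemonic
 *	u32 val[1] = { 0 };
 *	enum fw_retval retval;
 *
 *	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
 *		       1, param, val, false, NULL);
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *	csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, val);
 */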

/*
 * csio_mb_ldst - FW LDST command
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: timeout
 * @reg: register
 *
 */
void
csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
{
	struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);

	/*
	 * Construct and send the Firmware LDST Command to retrieve the
	 * specified PCI-E Configuration Space register.
	 */
	ldst_cmd->op_to_addrspace =
			htonl(FW_CMD_OP_V(FW_LDST_CMD)	|
			FW_CMD_REQUEST_F		|
			FW_CMD_READ_F			|
			FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
	ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
	ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
	ldst_cmd->u.pcie.ctrl_to_fn =
		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn));
	ldst_cmd->u.pcie.r = (uint8_t)reg;
}

/*
 *
 * csio_mb_caps_config - FW Read/Write Capabilities command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @wr: Write if 1, Read if 0
 * @init: Turn on initiator mode.
 * @tgt: Turn on target mode.
 * @cofld: If 1, Control Offload for FCoE
 * @cbfn: Callback, if any.
 *
 * This helper assumes that cmdp has MB payload from a previous CAPS
 * read command.
 */
void
csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
		    bool wr, bool init, bool tgt, bool cofld,
		    void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_caps_config_cmd *cmdp =
		(struct fw_caps_config_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);

	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD)	|
				  FW_CMD_REQUEST_F			|
				  (wr ? FW_CMD_WRITE_F : FW_CMD_READ_F));
	cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

	/* Read config */
	if (!wr)
		return;

	/* Write config */
	cmdp->fcoecaps = 0;

	if (cofld)
		cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
	if (init)
		cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
	if (tgt)
		cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
}

#define CSIO_ADVERT_MASK     (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
			      FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G |\
			      FW_PORT_CAP_ANEG)

/*
 * csio_mb_port - FW PORT command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout
 * @portid: Port ID to get/set info
 * @wr: Write/Read PORT information.
 * @fc: Flow control
 * @caps: Port capabilities to set.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	     uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
	     void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
	unsigned int lfc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD)		|
				   FW_CMD_REQUEST_F			|
				   (wr ? FW_CMD_EXEC_F : FW_CMD_READ_F)	|
				   FW_PORT_CMD_PORTID_V(portid));
	if (!wr) {
		cmdp->action_to_len16 = htonl(
			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
		return;
	}

	/* Set port */
	cmdp->action_to_len16 = htonl(
			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
			FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

	if (fc & PAUSE_RX)
		lfc |= FW_PORT_CAP_FC_RX;
	if (fc & PAUSE_TX)
		lfc |= FW_PORT_CAP_FC_TX;

	if (!(caps & FW_PORT_CAP_ANEG))
		cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
	else
		cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
					   lfc | mdi);
}

/*
 * csio_mb_process_read_port_rsp - FW PORT command response processing helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @retval: Mailbox return value from Firmware
 * @caps: port capabilities
 *
 */
void
csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
			 enum fw_retval *retval, uint16_t *caps)
{
	struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);

	*retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16));

	if (*retval == FW_SUCCESS)
		*caps = ntohs(rsp->u.info.pcap);
}
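
/*
 * Illustrative sketch (not part of the original driver): fetching the
 * capabilities of a port synchronously.  @mbp is assumed to be an already
 * allocated mailbox and CSIO_MB_DEFAULT_TMO the default timeout from
 * csio_mb.h; locking around csio_mb_issue() is the caller's responsibility.
 * On FW_SUCCESS, caps holds the FW_PORT_CAP_* bits reported by firmware.
 *
 *	uint16_t caps = 0;
 *	enum fw_retval retval;
 *
 *	csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, false, 0, 0, NULL);
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *	csio_mb_process_read_port_rsp(hw, mbp, &retval, &caps);
 */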

/*
 * csio_mb_initialize - FW INITIALIZE command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_write = htonl(FW_CMD_OP_V(FW_INITIALIZE_CMD)	|
				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

}

/*
 * csio_mb_iq_alloc - Initializes the mailbox to allocate an
 *				Ingress DMA queue in the firmware.
 *
 * @hw: The hw structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private object
 * @mb_tmo: Mailbox time-out period (in ms).
 * @iq_params: Ingress queue params needed for allocation.
 * @cbfn: The call-back function
 *
 *
 */
static void
csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		 uint32_t mb_tmo, struct csio_iq_params *iq_params,
		 void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD)			|
				FW_CMD_REQUEST_F | FW_CMD_EXEC_F	|
				FW_IQ_CMD_PFN_V(iq_params->pfn)		|
				FW_IQ_CMD_VFN_V(iq_params->vfn));

	cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F			|
				     FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

	cmdp->type_to_iqandstindex = htonl(
				FW_IQ_CMD_VIID_V(iq_params->viid)	|
				FW_IQ_CMD_TYPE_V(iq_params->type)	|
				FW_IQ_CMD_IQASYNCH_V(iq_params->iqasynch));

	cmdp->fl0size = htons(iq_params->fl0size);
	cmdp->fl1size = htons(iq_params->fl1size);

} /* csio_mb_iq_alloc */

/*
 * csio_mb_iq_write - Initializes the mailbox for writing into an
 *				Ingress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private object
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with iq-alloc request.
 * @iq_params: Ingress queue params needed for writing.
 * @cbfn: The call-back function
 *
 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
 * because this IQ write request can be cascaded with a previous
 * IQ alloc request, and we don't want to over-write the bits set by
 * that request. This logic will work even in a non-cascaded case, since the
 * cmdp structure is zeroed out by CSIO_INIT_MBP.
 */
static void
csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		 uint32_t mb_tmo, bool cascaded_req,
		 struct csio_iq_params *iq_params,
		 void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);

	uint32_t iq_start_stop = (iq_params->iq_start)	?
					FW_IQ_CMD_IQSTART_F :
					FW_IQ_CMD_IQSTOP_F;

	/*
	 * If this IQ write is cascaded with IQ alloc request, do not
	 * re-initialize with 0's.
	 *
	 */
	if (!cascaded_req)
		CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_IQ_CMD)			|
				 FW_CMD_REQUEST_F | FW_CMD_WRITE_F	|
				 FW_IQ_CMD_PFN_V(iq_params->pfn)	|
				 FW_IQ_CMD_VFN_V(iq_params->vfn));
	cmdp->alloc_to_len16 |= htonl(iq_start_stop |
				      FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
	cmdp->iqid |= htons(iq_params->iqid);
	cmdp->fl0id |= htons(iq_params->fl0id);
	cmdp->fl1id |= htons(iq_params->fl1id);
	cmdp->type_to_iqandstindex |= htonl(
		FW_IQ_CMD_IQANDST_V(iq_params->iqandst)		|
		FW_IQ_CMD_IQANUS_V(iq_params->iqanus)		|
		FW_IQ_CMD_IQANUD_V(iq_params->iqanud)		|
		FW_IQ_CMD_IQANDSTINDEX_V(iq_params->iqandstindex));
	cmdp->iqdroprss_to_iqesize |= htons(
		FW_IQ_CMD_IQPCIECH_V(iq_params->iqpciech)	|
		FW_IQ_CMD_IQDCAEN_V(iq_params->iqdcaen)		|
		FW_IQ_CMD_IQDCACPU_V(iq_params->iqdcacpu)	|
		FW_IQ_CMD_IQINTCNTTHRESH_V(iq_params->iqintcntthresh) |
		FW_IQ_CMD_IQCPRIO_V(iq_params->iqcprio)		|
		FW_IQ_CMD_IQESIZE_V(iq_params->iqesize));

	cmdp->iqsize |= htons(iq_params->iqsize);
	cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);

	if (iq_params->type == 0) {
		cmdp->iqns_to_fl0congen |= htonl(
			FW_IQ_CMD_IQFLINTIQHSEN_V(iq_params->iqflintiqhsen)|
			FW_IQ_CMD_IQFLINTCONGEN_V(iq_params->iqflintcongen));
	}

	if (iq_params->fl0size && iq_params->fl0addr &&
	    (iq_params->fl0id != 0xFFFF)) {

		cmdp->iqns_to_fl0congen |= htonl(
			FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)|
			FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio)	|
			FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden)	|
			FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen));
		cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
			FW_IQ_CMD_FL0DCAEN_V(iq_params->fl0dcaen)	|
			FW_IQ_CMD_FL0DCACPU_V(iq_params->fl0dcacpu)	|
			FW_IQ_CMD_FL0FBMIN_V(iq_params->fl0fbmin)	|
			FW_IQ_CMD_FL0FBMAX_V(iq_params->fl0fbmax)	|
			FW_IQ_CMD_FL0CIDXFTHRESH_V(iq_params->fl0cidxfthresh));
		cmdp->fl0size |= htons(iq_params->fl0size);
		cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
	}
} /* csio_mb_iq_write */

/*
 * csio_mb_iq_alloc_write - Initializes the mailbox for allocating and
 *				writing into an Ingress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @iq_params: Ingress queue params needed for allocation & writing.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		       uint32_t mb_tmo, struct csio_iq_params *iq_params,
		       void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
	csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
} /* csio_mb_iq_alloc_write */

/*
 * csio_mb_iq_alloc_write_rsp - Process the allocation & writing
 *				of ingress DMA queue mailbox's response.
 *
 * @hw: The HW structure.
 * @mbp: Mailbox structure to initialize.
 * @retval: Firmware return value.
 * @iq_params: Ingress queue parameters, after allocation and write.
 *
 */
void
csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
			   enum fw_retval *ret_val,
			   struct csio_iq_params *iq_params)
{
	struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);

	*ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
	if (*ret_val == FW_SUCCESS) {
		iq_params->physiqid = ntohs(rsp->physiqid);
		iq_params->iqid = ntohs(rsp->iqid);
		iq_params->fl0id = ntohs(rsp->fl0id);
		iq_params->fl1id = ntohs(rsp->fl1id);
	} else {
		iq_params->physiqid = iq_params->iqid =
			iq_params->fl0id = iq_params->fl1id = 0;
	}
} /* csio_mb_iq_alloc_write_rsp */
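
/*
 * Illustrative sketch (not part of the original driver): allocating and
 * starting an ingress queue with the cascaded helper above, then reading
 * back the firmware-assigned IDs.  A synchronous (no-callback) issue is
 * shown; the populated struct csio_iq_params, the mailbox @mbp and the
 * locking around csio_mb_issue() come from the caller.  On FW_SUCCESS,
 * iq_params.iqid/physiqid/fl0id/fl1id hold the allocated IDs.
 *
 *	enum fw_retval retval;
 *
 *	iq_params.iq_start = 1;
 *	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
 *			       &iq_params, NULL);
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iq_params);
 */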

/*
 * csio_mb_iq_free - Initializes the mailbox for freeing a
 *				specified Ingress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @iq_params: Parameters of ingress queue, that is to be freed.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		uint32_t mb_tmo, struct csio_iq_params *iq_params,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD)			|
				FW_CMD_REQUEST_F | FW_CMD_EXEC_F	|
				FW_IQ_CMD_PFN_V(iq_params->pfn)		|
				FW_IQ_CMD_VFN_V(iq_params->vfn));
	cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F			|
				     FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
	cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iq_params->type));

	cmdp->iqid = htons(iq_params->iqid);
	cmdp->fl0id = htons(iq_params->fl0id);
	cmdp->fl1id = htons(iq_params->fl1id);

} /* csio_mb_iq_free */

/*
 * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
 *				an offload-egress queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 *
 */
static void
csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD)		|
				FW_CMD_REQUEST_F | FW_CMD_EXEC_F	|
				FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
				FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F		|
				     FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

} /* csio_mb_eq_ofld_alloc */

/*
 * csio_mb_eq_ofld_write - Initializes the mailbox for writing
 *				an allocated offload-egress queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with Eq-alloc request.
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 *
 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
 * because this EQ write request can be cascaded with a previous
 * EQ alloc request, and we don't want to over-write the bits set by
 * that request. This logic will work even in a non-cascaded case, since the
 * cmdp structure is zeroed out by CSIO_INIT_MBP.
 */
static void
csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		      uint32_t mb_tmo, bool cascaded_req,
		      struct csio_eq_params *eq_ofld_params,
		      void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	uint32_t eq_start_stop = (eq_ofld_params->eqstart)	?
				 FW_EQ_OFLD_CMD_EQSTART_F :
				 FW_EQ_OFLD_CMD_EQSTOP_F;

	/*
	 * If this EQ write is cascaded with EQ alloc request, do not
	 * re-initialize with 0's.
	 *
	 */
	if (!cascaded_req)
		CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn |= htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD)		|
				 FW_CMD_REQUEST_F | FW_CMD_WRITE_F	|
				 FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
				 FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
	cmdp->alloc_to_len16 |= htonl(eq_start_stop |
				      FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

	cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));

	cmdp->fetchszm_to_iqid |= htonl(
		FW_EQ_OFLD_CMD_HOSTFCMODE_V(eq_ofld_params->hostfcmode)	|
		FW_EQ_OFLD_CMD_CPRIO_V(eq_ofld_params->cprio)		|
		FW_EQ_OFLD_CMD_PCIECHN_V(eq_ofld_params->pciechn)	|
		FW_EQ_OFLD_CMD_IQID_V(eq_ofld_params->iqid));

	cmdp->dcaen_to_eqsize |= htonl(
		FW_EQ_OFLD_CMD_DCAEN_V(eq_ofld_params->dcaen)		|
		FW_EQ_OFLD_CMD_DCACPU_V(eq_ofld_params->dcacpu)		|
		FW_EQ_OFLD_CMD_FBMIN_V(eq_ofld_params->fbmin)		|
		FW_EQ_OFLD_CMD_FBMAX_V(eq_ofld_params->fbmax)		|
		FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(eq_ofld_params->cidxfthresho) |
		FW_EQ_OFLD_CMD_CIDXFTHRESH_V(eq_ofld_params->cidxfthresh) |
		FW_EQ_OFLD_CMD_EQSIZE_V(eq_ofld_params->eqsize));

	cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);

} /* csio_mb_eq_ofld_write */

/*
 * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
 *				writing into an Egress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
			    void *priv, uint32_t mb_tmo,
			    struct csio_eq_params *eq_ofld_params,
			    void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
	csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
			      eq_ofld_params, cbfn);
} /* csio_mb_eq_ofld_alloc_write */

/*
 * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation
 *				& write egress DMA queue mailbox's response.
 *
 * @hw: The HW structure.
 * @mbp: Mailbox structure to initialize.
 * @retval: Firmware return value.
 * @eq_ofld_params: (Offload) Egress queue parameters.
 *
 */
void
csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
				struct csio_mb *mbp, enum fw_retval *ret_val,
				struct csio_eq_params *eq_ofld_params)
{
	struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	*ret_val = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));

	if (*ret_val == FW_SUCCESS) {
		eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_G(
						ntohl(rsp->eqid_pkd));
		eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_G(
						ntohl(rsp->physeqid_pkd));
	} else
		eq_ofld_params->eqid = 0;

} /* csio_mb_eq_ofld_alloc_write_rsp */

/*
 * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
 *				specified Egress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data area.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters, that is to be freed.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		     uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
		     void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD)		|
				FW_CMD_REQUEST_F | FW_CMD_EXEC_F	|
				FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
				FW_EQ_OFLD_CMD_VFN_V(eq_ofld_params->vfn));
	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F |
				     FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
	cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eq_ofld_params->eqid));

} /* csio_mb_eq_ofld_free */

/*
 * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
 *				 condition.
 *
 * @ln: The Lnode structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cbfn: The call back function.
 *
 *
 */
void
csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
			uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
			uint8_t cos, bool link_status, uint32_t fcfi,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_link_cmd *cmdp =
				(struct fw_fcoe_link_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_portid = htonl((
			FW_CMD_OP_V(FW_FCOE_LINK_CMD)		|
			FW_CMD_REQUEST_F			|
			FW_CMD_WRITE_F				|
			FW_FCOE_LINK_CMD_PORTID(port_id)));
	cmdp->sub_opcode_fcfi = htonl(
			FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode)	|
			FW_FCOE_LINK_CMD_FCFI(fcfi));
	cmdp->lstatus = link_status;
	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

} /* csio_write_fcoe_link_cond_init_mb */

/*
 * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
 *				resource information (FW_GET_RES_INFO_CMD).
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cbfn: The call-back function
 *
 *
 */
void
csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
			uint32_t mb_tmo,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_res_info_cmd *cmdp =
			(struct fw_fcoe_res_info_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);

	cmdp->op_to_read = htonl((FW_CMD_OP_V(FW_FCOE_RES_INFO_CMD)	|
				  FW_CMD_REQUEST_F			|
				  FW_CMD_READ_F));

	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

} /* csio_fcoe_read_res_info_init_mb */

/*
 * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
 *				in the firmware (FW_FCOE_VNP_CMD).
 *
 * @ln: The Lnode structure.
 * @mbp: Mailbox structure to initialize.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcfi: FCF Index.
 * @vnpi: vnpi
 * @iqid: iqid
 * @vnport_wwnn: vnport WWNN
 * @vnport_wwpn: vnport WWPN
 * @cbfn: The call-back function.
 *
 *
 */
void
csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
		uint8_t vnport_wwnn[8],	uint8_t vnport_wwpn[8],
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_vnp_cmd *cmdp =
			(struct fw_fcoe_vnp_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_fcfi = htonl((FW_CMD_OP_V(FW_FCOE_VNP_CMD)		|
				  FW_CMD_REQUEST_F			|
				  FW_CMD_EXEC_F				|
				  FW_FCOE_VNP_CMD_FCFI(fcfi)));

	cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC		|
				     FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));

	cmdp->iqid = htons(iqid);

	if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
		cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);

	if (vnport_wwnn)
		memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
	if (vnport_wwpn)
		memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);

} /* csio_fcoe_vnp_alloc_init_mb */

/*
 * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
 * @ln: The Lnode structure.
 * @mbp: Mailbox structure to initialize.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcfi: FCF Index.
 * @vnpi: vnpi
 * @cbfn: The call-back handler.
 */
void
csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
			uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_vnp_cmd *cmdp =
			(struct fw_fcoe_vnp_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
	cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD)	|
				 FW_CMD_REQUEST_F		|
				 FW_CMD_READ_F			|
				 FW_FCOE_VNP_CMD_FCFI(fcfi));
	cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}

/*
 * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
 *			allocated VNP in the firmware (FW_FCOE_VNP_CMD).
 *
 * @ln: The Lnode structure.
 * @mbp: Mailbox structure to initialize.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcfi: FCF flow id
 * @vnpi: VNP flow id
 * @cbfn: The call-back function.
 * Return: None
 */
void
csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
			uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_vnp_cmd *cmdp =
			(struct fw_fcoe_vnp_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_VNP_CMD)	|
				 FW_CMD_REQUEST_F		|
				 FW_CMD_EXEC_F			|
				 FW_FCOE_VNP_CMD_FCFI(fcfi));
	cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE	|
				     FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}

/*
 * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
 *				FCF records.
 *
 * @ln: The Lnode structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcf_params: FC-Forwarder parameters.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_fcf_cmd *cmdp =
			(struct fw_fcoe_fcf_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_fcfi = htonl(FW_CMD_OP_V(FW_FCOE_FCF_CMD)	|
				 FW_CMD_REQUEST_F		|
				 FW_CMD_READ_F			|
				 FW_FCOE_FCF_CMD_FCFI(fcfi));
	cmdp->retval_len16 = htonl(FW_CMD_LEN16_V(sizeof(*cmdp) / 16));

} /* csio_fcoe_read_fcf_init_mb */

void
csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
				uint32_t mb_tmo,
				struct fw_fcoe_port_cmd_params *portparams,
				void (*cbfn)(struct csio_hw *,
					     struct csio_mb *))
{
	struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
	mbp->mb_size = 64;

	cmdp->op_to_flowid = htonl(FW_CMD_OP_V(FW_FCOE_STATS_CMD)	|
				   FW_CMD_REQUEST_F | FW_CMD_READ_F);
	cmdp->free_to_len16 = htonl(FW_CMD_LEN16_V(CSIO_MAX_MB_SIZE/16));

	cmdp->u.ctl.nstats_port =
		FW_FCOE_STATS_CMD_NSTATS(portparams->nstats)	|
		FW_FCOE_STATS_CMD_PORT(portparams->portid);

	cmdp->u.ctl.port_valid_ix =
		FW_FCOE_STATS_CMD_IX(portparams->idx)		|
		FW_FCOE_STATS_CMD_PORT_VALID;

} /* csio_fcoe_read_portparams_init_mb */

void
csio_mb_process_portparams_rsp(struct csio_hw *hw,
				struct csio_mb *mbp,
				enum fw_retval *retval,
				struct fw_fcoe_port_cmd_params *portparams,
				struct fw_fcoe_port_stats *portstats)
{
	struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
	struct fw_fcoe_port_stats stats;
	uint8_t *src;
	uint8_t *dst;

	*retval = FW_CMD_RETVAL_G(ntohl(rsp->free_to_len16));

	memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));

	if (*retval == FW_SUCCESS) {
		dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
		src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
		memcpy(dst, src, (portparams->nstats * 8));
		if (portparams->idx == 1) {
			/* Get the first 6 flits from the Mailbox */
			portstats->tx_bcast_bytes = stats.tx_bcast_bytes;
			portstats->tx_bcast_frames = stats.tx_bcast_frames;
			portstats->tx_mcast_bytes = stats.tx_mcast_bytes;
			portstats->tx_mcast_frames = stats.tx_mcast_frames;
			portstats->tx_ucast_bytes = stats.tx_ucast_bytes;
			portstats->tx_ucast_frames = stats.tx_ucast_frames;
		}
		if (portparams->idx == 7) {
			/* Get the second 6 flits from the Mailbox */
			portstats->tx_drop_frames = stats.tx_drop_frames;
			portstats->tx_offload_bytes = stats.tx_offload_bytes;
			portstats->tx_offload_frames = stats.tx_offload_frames;
#if 0
			portstats->rx_pf_bytes = stats.rx_pf_bytes;
			portstats->rx_pf_frames = stats.rx_pf_frames;
#endif
			portstats->rx_bcast_bytes = stats.rx_bcast_bytes;
			portstats->rx_bcast_frames = stats.rx_bcast_frames;
			portstats->rx_mcast_bytes = stats.rx_mcast_bytes;
		}
		if (portparams->idx == 13) {
			/* Get the last 4 flits from the Mailbox */
			portstats->rx_mcast_frames = stats.rx_mcast_frames;
			portstats->rx_ucast_bytes = stats.rx_ucast_bytes;
			portstats->rx_ucast_frames = stats.rx_ucast_frames;
			portstats->rx_err_frames = stats.rx_err_frames;
		}
	}
}
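
/*
 * Illustrative sketch (not part of the original driver): gathering the FCoE
 * statistics of one port.  Firmware returns at most six 64-bit flits per
 * FW_FCOE_STATS_CMD, so the read is done in three passes (idx 1, 7 and 13),
 * matching the chunking handled above.  @mbp, portid, locking and error
 * handling are the caller's responsibility; CSIO_MB_DEFAULT_TMO is assumed
 * to come from csio_mb.h.
 *
 *	struct fw_fcoe_port_cmd_params portparams;
 *	struct fw_fcoe_port_stats portstats;
 *	enum fw_retval retval;
 *	int idx;
 *
 *	memset(&portstats, 0, sizeof(portstats));
 *	portparams.portid = portid;
 *	for (idx = 1; idx <= 13; idx += 6) {
 *		portparams.idx = idx;
 *		portparams.nstats = (idx == 13) ? 4 : 6;
 *		csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
 *						  &portparams, NULL);
 *		if (csio_mb_issue(hw, mbp))
 *			return;
 *		csio_mb_process_portparams_rsp(hw, mbp, &retval, &portparams,
 *					       &portstats);
 *	}
 */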

/* Entry points/APIs for MB module */
/*
 * csio_mb_intr_enable - Enable Interrupts from mailboxes.
 * @hw: The HW structure
 *
 * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
 */
void
csio_mb_intr_enable(struct csio_hw *hw)
{
	csio_wr_reg32(hw, MBMSGRDYINTEN_F, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
	csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
}

/*
 * csio_mb_intr_disable - Disable Interrupts from mailboxes.
 * @hw: The HW structure
 *
 * Disable bit in HostInterruptEnable CIM register.
 */
void
csio_mb_intr_disable(struct csio_hw *hw)
{
	csio_wr_reg32(hw, MBMSGRDYINTEN_V(0),
		      MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
	csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE_A));
}

static void
csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
{
	struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;

	if ((FW_DEBUG_CMD_TYPE_G(ntohl(dbg->op_type))) == 1) {
		csio_info(hw, "FW print message:\n");
		csio_info(hw, "\tdebug->dprtstridx = %d\n",
			  ntohs(dbg->u.prt.dprtstridx));
		csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
			  ntohl(dbg->u.prt.dprtstrparam0));
		csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
			  ntohl(dbg->u.prt.dprtstrparam1));
		csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
			  ntohl(dbg->u.prt.dprtstrparam2));
		csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
			  ntohl(dbg->u.prt.dprtstrparam3));
	} else {
		/* This is a FW assertion */
		csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
			   dbg->u.assert.filename_0_7,
			   ntohl(dbg->u.assert.line),
			   ntohl(dbg->u.assert.x),
			   ntohl(dbg->u.assert.y));
	}
}

static void
csio_mb_debug_cmd_handler(struct csio_hw *hw)
{
	int i;
	__be64 cmd[CSIO_MB_MAX_REGS];
	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
	int size = sizeof(struct fw_debug_cmd);

	/* Copy mailbox data */
	for (i = 0; i < size; i += 8)
		cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));

	csio_mb_dump_fw_dbg(hw, cmd);

	/* Notify FW of mailbox by setting owner as UP */
	csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
		      MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);

	csio_rd_reg32(hw, ctl_reg);
	wmb();
}

/*
 * csio_mb_issue - generic routine for issuing Mailbox commands.
 * @hw: The HW structure
 * @mbp: Mailbox command to issue
 *
 * Caller should hold hw lock across this call.
 */
int
csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
{
	uint32_t owner, ctl;
	int i;
	uint32_t ii;
	__be64 *cmd = mbp->mb;
	__be64 hdr;
	struct csio_mbm *mbm = &hw->mbm;
	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
	int size = mbp->mb_size;
	int rv = -EINVAL;
	struct fw_cmd_hdr *fw_hdr;

	/* Determine mode */
	if (mbp->mb_cbfn == NULL) {
		/* Need to issue/get results in the same context */
		if (mbp->tmo < CSIO_MB_POLL_FREQ) {
			csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
			goto error_out;
		}
	} else if (!csio_is_host_intr_enabled(hw) ||
		   !csio_is_hw_intr_enabled(hw)) {
		csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
			 *((uint8_t *)mbp->mb));
		goto error_out;
	}

	if (mbm->mcurrent != NULL) {
		/* Queue mbox cmd, if another mbox cmd is active */
		if (mbp->mb_cbfn == NULL) {
			rv = -EBUSY;
			csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
				 hw->pfn, *((uint8_t *)mbp->mb));

			goto error_out;
		} else {
			list_add_tail(&mbp->list, &mbm->req_q);
			CSIO_INC_STATS(mbm, n_activeq);

			return 0;
		}
	}

	/* Now get ownership of mailbox */
	owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));

	if (!csio_mb_is_host_owner(owner)) {

		for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
			owner = MBOWNER_G(csio_rd_reg32(hw, ctl_reg));
		/*
		 * Mailbox unavailable. In immediate mode, fail the command.
		 * In other modes, enqueue the request.
		 */
		if (!csio_mb_is_host_owner(owner)) {
			if (mbp->mb_cbfn == NULL) {
				rv = owner ? -EBUSY : -ETIMEDOUT;

				csio_dbg(hw,
					 "Couldn't own Mailbox %x op:0x%x "
					 "owner:%x\n",
					 hw->pfn, *((uint8_t *)mbp->mb), owner);
				goto error_out;
			} else {
				if (mbm->mcurrent == NULL) {
					csio_err(hw,
						 "Couldn't own Mailbox %x "
						 "op:0x%x owner:%x\n",
						 hw->pfn, *((uint8_t *)mbp->mb),
						 owner);
					csio_err(hw,
						 "No outstanding driver"
						 " mailbox as well\n");
					goto error_out;
				}
			}
		}
	}

	/* Mailbox is available, copy mailbox data into it */
	for (i = 0; i < size; i += 8) {
		csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
		cmd++;
	}

	CSIO_DUMP_MB(hw, hw->pfn, data_reg);

	/* Start completion timers in non-immediate modes and notify FW */
	if (mbp->mb_cbfn != NULL) {
		mbm->mcurrent = mbp;
		mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
		csio_wr_reg32(hw, MBMSGVALID_F | MBINTREQ_F |
			      MBOWNER_V(CSIO_MBOWNER_FW), ctl_reg);
	} else
		csio_wr_reg32(hw, MBMSGVALID_F | MBOWNER_V(CSIO_MBOWNER_FW),
			      ctl_reg);

	/* Flush posted writes */
	csio_rd_reg32(hw, ctl_reg);
	wmb();

	CSIO_INC_STATS(mbm, n_req);

	if (mbp->mb_cbfn)
		return 0;

	/* Poll for completion in immediate mode */
	cmd = mbp->mb;

	for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
		mdelay(CSIO_MB_POLL_FREQ);

		/* Check for response */
		ctl = csio_rd_reg32(hw, ctl_reg);
		if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {

			if (!(ctl & MBMSGVALID_F)) {
				csio_wr_reg32(hw, 0, ctl_reg);
				continue;
			}

			CSIO_DUMP_MB(hw, hw->pfn, data_reg);

			hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
			fw_hdr = (struct fw_cmd_hdr *)&hdr;

			switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
			case FW_DEBUG_CMD:
				csio_mb_debug_cmd_handler(hw);
				continue;
			}

			/* Copy response */
			for (i = 0; i < size; i += 8)
				*cmd++ = cpu_to_be64(csio_rd_reg64
							(hw, data_reg + i));
			csio_wr_reg32(hw, 0, ctl_reg);

			if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
				CSIO_INC_STATS(mbm, n_err);

			CSIO_INC_STATS(mbm, n_rsp);
			return 0;
		}
	}

	CSIO_INC_STATS(mbm, n_tmo);

	csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
		 hw->pfn, *((uint8_t *)cmd));

	return -ETIMEDOUT;

error_out:
	CSIO_INC_STATS(mbm, n_err);
	return rv;
}

/*
 * csio_mb_completions - Completion handler for Mailbox commands
 * @hw: The HW structure
 * @cbfn_q: Completion queue.
 *
 */
void
csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
{
	struct csio_mb *mbp;
	struct csio_mbm *mbm = &hw->mbm;
	enum fw_retval rv;

	while (!list_empty(cbfn_q)) {
		mbp = list_first_entry(cbfn_q, struct csio_mb, list);
		list_del_init(&mbp->list);

		rv = csio_mb_fw_retval(mbp);
		if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
			CSIO_INC_STATS(mbm, n_err);
		else if (rv != FW_HOSTERROR)
			CSIO_INC_STATS(mbm, n_rsp);

		if (mbp->mb_cbfn)
			mbp->mb_cbfn(hw, mbp);
	}
}

static void
csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
{
	static char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	struct csio_pport *port = &hw->pport[port_id];

	if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
		csio_info(hw, "Port:%d - port module unplugged\n", port_id);
	else if (port->mod_type < ARRAY_SIZE(mod_str))
		csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
			  mod_str[port->mod_type]);
	else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		csio_info(hw,
			  "Port:%d - unsupported optical port module "
			  "inserted\n", port_id);
	else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		csio_info(hw,
			  "Port:%d - unknown port module inserted, forcing "
			  "TWINAX\n", port_id);
	else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
		csio_info(hw, "Port:%d - transceiver module error\n", port_id);
	else
		csio_info(hw, "Port:%d - unknown module type %d inserted\n",
			  port_id, port->mod_type);
}

int
csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
{
	uint8_t opcode = *(uint8_t *)cmd;
	struct fw_port_cmd *pcmd;
	uint8_t port_id;
	uint32_t link_status;
	uint16_t action;
	uint8_t mod_type;

	if (opcode == FW_PORT_CMD) {
		pcmd = (struct fw_port_cmd *)cmd;
		port_id = FW_PORT_CMD_PORTID_G(
				ntohl(pcmd->op_to_portid));
		action = FW_PORT_CMD_ACTION_G(
				ntohl(pcmd->action_to_len16));
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
				 action);
			return -EINVAL;
		}

		link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
		mod_type = FW_PORT_CMD_MODTYPE_G(link_status);

		hw->pport[port_id].link_status =
			FW_PORT_CMD_LSTATUS_G(link_status);
		hw->pport[port_id].link_speed =
			FW_PORT_CMD_LSPEED_G(link_status);

		csio_info(hw, "Port:%x - LINK %s\n", port_id,
			  FW_PORT_CMD_LSTATUS_G(link_status) ? "UP" : "DOWN");

		if (mod_type != hw->pport[port_id].mod_type) {
			hw->pport[port_id].mod_type = mod_type;
			csio_mb_portmod_changed(hw, port_id);
		}
	} else if (opcode == FW_DEBUG_CMD) {
		csio_mb_dump_fw_dbg(hw, cmd);
	} else {
		csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_mb_isr_handler - Handle mailboxes related interrupts.
 * @hw: The HW structure
 *
 * Called from the ISR to handle Mailbox related interrupts.
 * HW Lock should be held across this call.
 */
int
csio_mb_isr_handler(struct csio_hw *hw)
{
	struct csio_mbm	*mbm = &hw->mbm;
	struct csio_mb *mbp = mbm->mcurrent;
	__be64 *cmd;
	uint32_t ctl, cim_cause, pl_cause;
	int i;
	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
	int size;
	__be64 hdr;
	struct fw_cmd_hdr *fw_hdr;

	pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE_A));
	cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));

	if (!(pl_cause & PFCIM_F) || !(cim_cause & MBMSGRDYINT_F)) {
		CSIO_INC_STATS(hw, n_mbint_unexp);
		return -EINVAL;
	}

	/*
	 * The cause registers below HAVE to be cleared in the SAME
	 * order as below: The low level cause register followed by
	 * the upper level cause register. In other words, CIM-cause
	 * first followed by PL-Cause next.
	 */
	csio_wr_reg32(hw, MBMSGRDYINT_F, MYPF_REG(CIM_PF_HOST_INT_CAUSE_A));
	csio_wr_reg32(hw, PFCIM_F, MYPF_REG(PL_PF_INT_CAUSE_A));

	ctl = csio_rd_reg32(hw, ctl_reg);

	if (csio_mb_is_host_owner(MBOWNER_G(ctl))) {

		CSIO_DUMP_MB(hw, hw->pfn, data_reg);

		if (!(ctl & MBMSGVALID_F)) {
			csio_warn(hw,
				  "Stray mailbox interrupt recvd,"
				  " mailbox data not valid\n");
			csio_wr_reg32(hw, 0, ctl_reg);
			/* Flush */
			csio_rd_reg32(hw, ctl_reg);
			return -EINVAL;
		}

		hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
		fw_hdr = (struct fw_cmd_hdr *)&hdr;

		switch (FW_CMD_OP_G(ntohl(fw_hdr->hi))) {
		case FW_DEBUG_CMD:
			csio_mb_debug_cmd_handler(hw);
			return -EINVAL;
#if 0
		case FW_ERROR_CMD:
		case FW_INITIALIZE_CMD: /* When we are not master */
#endif
		}

		CSIO_ASSERT(mbp != NULL);

		cmd = mbp->mb;
		size = mbp->mb_size;
		/* Get response */
		for (i = 0; i < size; i += 8)
			*cmd++ = cpu_to_be64(csio_rd_reg64
						(hw, data_reg + i));

		csio_wr_reg32(hw, 0, ctl_reg);
		/* Flush */
		csio_rd_reg32(hw, ctl_reg);

		mbm->mcurrent = NULL;

		/* Add completion to tail of cbfn queue */
		list_add_tail(&mbp->list, &mbm->cbfn_q);
		CSIO_INC_STATS(mbm, n_cbfnq);

		/*
		 * Enqueue event to EventQ. Events processing happens
		 * in Event worker thread context
		 */
		if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
			CSIO_INC_STATS(hw, n_evt_drop);

		return 0;

	} else {
		/*
		 * We can get here if mailbox MSIX vector is shared,
		 * or in INTx case. Or a stray interrupt.
		 */
		csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}
}

/*
 * csio_mb_tmo_handler - Timeout handler
 * @hw: The HW structure
 *
 */
struct csio_mb *
csio_mb_tmo_handler(struct csio_hw *hw)
{
	struct csio_mbm *mbm = &hw->mbm;
	struct csio_mb *mbp = mbm->mcurrent;
	struct fw_cmd_hdr *fw_hdr;

	/*
	 * Could be a race b/w the completion handler and the timer
	 * and the completion handler won that race.
	 */
	if (mbp == NULL) {
		CSIO_DB_ASSERT(0);
		return NULL;
	}

	fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);

	csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
		 FW_CMD_OP_G(ntohl(fw_hdr->hi)));

	mbm->mcurrent = NULL;
	CSIO_INC_STATS(mbm, n_tmo);
	fw_hdr->lo = htonl(FW_CMD_RETVAL_V(FW_ETIMEDOUT));

	return mbp;
}

/*
 * csio_mb_cancel_all - Cancel all waiting commands.
 * @hw: The HW structure
 * @cbfn_q: The callback queue.
 *
 * Caller should hold hw lock across this call.
 */
void
csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
{
	struct csio_mb *mbp;
	struct csio_mbm *mbm = &hw->mbm;
	struct fw_cmd_hdr *hdr;
	struct list_head *tmp;

	if (mbm->mcurrent) {
		mbp = mbm->mcurrent;

		/* Stop mailbox completion timer */
		del_timer_sync(&mbm->timer);

		/* Add completion to tail of cbfn queue */
		list_add_tail(&mbp->list, cbfn_q);
		mbm->mcurrent = NULL;
	}

	if (!list_empty(&mbm->req_q)) {
		list_splice_tail_init(&mbm->req_q, cbfn_q);
		mbm->stats.n_activeq = 0;
	}

	if (!list_empty(&mbm->cbfn_q)) {
		list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
		mbm->stats.n_cbfnq = 0;
	}

	if (list_empty(cbfn_q))
		return;

	list_for_each(tmp, cbfn_q) {
		mbp = (struct csio_mb *)tmp;
		hdr = (struct fw_cmd_hdr *)(mbp->mb);

		csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
			 hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi)));

		CSIO_INC_STATS(mbm, n_cancel);
		hdr->lo = htonl(FW_CMD_RETVAL_V(FW_HOSTERROR));
	}
}

/*
 * csio_mbm_init - Initialize Mailbox module
 * @mbm: Mailbox module
 * @hw: The HW structure
 * @timer: Timing function for interrupting mailboxes
 *
 * Initialize timer and the request/response queues.
 */
int
csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
	      void (*timer_fn)(uintptr_t))
{
	struct timer_list *timer = &mbm->timer;

	init_timer(timer);
	timer->function = timer_fn;
	timer->data = (unsigned long)hw;

	INIT_LIST_HEAD(&mbm->req_q);
	INIT_LIST_HEAD(&mbm->cbfn_q);
	csio_set_mb_intr_idx(mbm, -1);

	return 0;
}

/*
 * csio_mbm_exit - Uninitialize mailbox module
 * @mbm: Mailbox module
 *
 * Stop timer.
 */
void
csio_mbm_exit(struct csio_mbm *mbm)
{
	del_timer_sync(&mbm->timer);

	CSIO_DB_ASSERT(mbm->mcurrent == NULL);
	CSIO_DB_ASSERT(list_empty(&mbm->req_q));
	CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
}