// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/**
 * All common (i.e. transport-independent) SLI-4 functions are implemented
 * in this file.
 */
#include "sli4.h"

static struct sli4_asic_entry_t sli4_asic_table[] = {
	{ SLI4_ASIC_REV_B0, SLI4_ASIC_GEN_5},
	{ SLI4_ASIC_REV_D0, SLI4_ASIC_GEN_5},
	{ SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
	{ SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_6},
	{ SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_6},
	{ SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
	{ SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_7},
	{ SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_7},
};

/* Convert queue type enum (SLI_QTYPE_*) into a string */
static char *SLI4_QNAME[] = {
	"Event Queue",
	"Completion Queue",
	"Mailbox Queue",
	"Work Queue",
	"Receive Queue",
	"Undefined"
};

/**
 * sli_config_cmd_init() - Write a SLI_CONFIG command to the provided buffer.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the command.
 * @length: Length in bytes of attached command.
 * @dma: DMA buffer for non-embedded commands.
 * Return: Command payload buffer.
 */
static void *
sli_config_cmd_init(struct sli4 *sli4, void *buf, u32 length,
		    struct efc_dma *dma)
{
	struct sli4_cmd_sli_config *config;
	u32 flags;

	if (length > sizeof(config->payload.embed) && !dma) {
		efc_log_err(sli4, "Too big for an embedded cmd with len(%d)\n",
			    length);
		return NULL;
	}

	memset(buf, 0, SLI4_BMBX_SIZE);

	config = buf;

	config->hdr.command = SLI4_MBX_CMD_SLI_CONFIG;
	if (!dma) {
		flags = SLI4_SLICONF_EMB;
		config->dw1_flags = cpu_to_le32(flags);
		config->payload_len = cpu_to_le32(length);
		return config->payload.embed;
	}

	flags = SLI4_SLICONF_PMDCMD_VAL_1;
	flags &= ~SLI4_SLICONF_EMB;
	config->dw1_flags = cpu_to_le32(flags);

	config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys));
	config->payload.mem.addr.high = cpu_to_le32(upper_32_bits(dma->phys));
	config->payload.mem.length =
		cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN);
	config->payload_len = cpu_to_le32(dma->size);
	/* save pointer to DMA for BMBX dumping purposes */
	sli4->bmbx_non_emb_pmd = dma;
	return dma->virt;
}

/**
 * sli_cmd_common_create_cq() - Write a COMMON_CREATE_CQ V2 command.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the command.
 * @qmem: DMA memory for queue.
 * @eq_id: EQ id associated with this cq.
 * Return: status -EIO/0.
90 */ 91 static int 92 sli_cmd_common_create_cq(struct sli4 *sli4, void *buf, struct efc_dma *qmem, 93 u16 eq_id) 94 { 95 struct sli4_rqst_cmn_create_cq_v2 *cqv2 = NULL; 96 u32 p; 97 uintptr_t addr; 98 u32 num_pages = 0; 99 size_t cmd_size = 0; 100 u32 page_size = 0; 101 u32 n_cqe = 0; 102 u32 dw5_flags = 0; 103 u16 dw6w1_arm = 0; 104 __le32 len; 105 106 /* First calculate number of pages and the mailbox cmd length */ 107 n_cqe = qmem->size / SLI4_CQE_BYTES; 108 switch (n_cqe) { 109 case 256: 110 case 512: 111 case 1024: 112 case 2048: 113 page_size = SZ_4K; 114 break; 115 case 4096: 116 page_size = SZ_8K; 117 break; 118 default: 119 return -EIO; 120 } 121 num_pages = sli_page_count(qmem->size, page_size); 122 123 cmd_size = SLI4_RQST_CMDSZ(cmn_create_cq_v2) 124 + SZ_DMAADDR * num_pages; 125 126 cqv2 = sli_config_cmd_init(sli4, buf, cmd_size, NULL); 127 if (!cqv2) 128 return -EIO; 129 130 len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, SZ_DMAADDR * num_pages); 131 sli_cmd_fill_hdr(&cqv2->hdr, SLI4_CMN_CREATE_CQ, SLI4_SUBSYSTEM_COMMON, 132 CMD_V2, len); 133 cqv2->page_size = page_size / SLI_PAGE_SIZE; 134 135 /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */ 136 cqv2->num_pages = cpu_to_le16(num_pages); 137 if (!num_pages || num_pages > SLI4_CREATE_CQV2_MAX_PAGES) 138 return -EIO; 139 140 switch (num_pages) { 141 case 1: 142 dw5_flags |= SLI4_CQ_CNT_VAL(256); 143 break; 144 case 2: 145 dw5_flags |= SLI4_CQ_CNT_VAL(512); 146 break; 147 case 4: 148 dw5_flags |= SLI4_CQ_CNT_VAL(1024); 149 break; 150 case 8: 151 dw5_flags |= SLI4_CQ_CNT_VAL(LARGE); 152 cqv2->cqe_count = cpu_to_le16(n_cqe); 153 break; 154 default: 155 efc_log_err(sli4, "num_pages %d not valid\n", num_pages); 156 return -EIO; 157 } 158 159 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 160 dw5_flags |= SLI4_CREATE_CQV2_AUTOVALID; 161 162 dw5_flags |= SLI4_CREATE_CQV2_EVT; 163 dw5_flags |= SLI4_CREATE_CQV2_VALID; 164 165 cqv2->dw5_flags = cpu_to_le32(dw5_flags); 166 cqv2->dw6w1_arm = cpu_to_le16(dw6w1_arm); 167 cqv2->eq_id = cpu_to_le16(eq_id); 168 169 for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) { 170 cqv2->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); 171 cqv2->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); 172 } 173 174 return 0; 175 } 176 177 static int 178 sli_cmd_common_create_eq(struct sli4 *sli4, void *buf, struct efc_dma *qmem) 179 { 180 struct sli4_rqst_cmn_create_eq *eq; 181 u32 p; 182 uintptr_t addr; 183 u16 num_pages; 184 u32 dw5_flags = 0; 185 u32 dw6_flags = 0, ver; 186 187 eq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_eq), 188 NULL); 189 if (!eq) 190 return -EIO; 191 192 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 193 ver = CMD_V2; 194 else 195 ver = CMD_V0; 196 197 sli_cmd_fill_hdr(&eq->hdr, SLI4_CMN_CREATE_EQ, SLI4_SUBSYSTEM_COMMON, 198 ver, SLI4_RQST_PYLD_LEN(cmn_create_eq)); 199 200 /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */ 201 num_pages = qmem->size / SLI_PAGE_SIZE; 202 eq->num_pages = cpu_to_le16(num_pages); 203 204 switch (num_pages) { 205 case 1: 206 dw5_flags |= SLI4_EQE_SIZE_4; 207 dw6_flags |= SLI4_EQ_CNT_VAL(1024); 208 break; 209 case 2: 210 dw5_flags |= SLI4_EQE_SIZE_4; 211 dw6_flags |= SLI4_EQ_CNT_VAL(2048); 212 break; 213 case 4: 214 dw5_flags |= SLI4_EQE_SIZE_4; 215 dw6_flags |= SLI4_EQ_CNT_VAL(4096); 216 break; 217 default: 218 efc_log_err(sli4, "num_pages %d not valid\n", num_pages); 219 return -EIO; 220 } 221 222 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 223 dw5_flags |= SLI4_CREATE_EQ_AUTOVALID; 224 225 
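	/*
	 * Sizing note (assuming the 4 KiB SLI_PAGE_SIZE used throughout
	 * this file): EQ entries are 4 bytes (SLI4_EQE_SIZE_4), so one
	 * page holds 1024 entries, which is why 1/2/4 pages map to
	 * EQ_CNT_VAL(1024/2048/4096) above. On if_type 6 hardware the
	 * AUTOVALID bit set here works together with the phase tracking
	 * in __sli_queue_init()/sli_eq_read(): instead of clearing the
	 * valid bit of each consumed entry, the driver toggles q->phase
	 * after every full sweep of the queue.
	 */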
dw5_flags |= SLI4_CREATE_EQ_VALID; 226 dw6_flags &= (~SLI4_CREATE_EQ_ARM); 227 eq->dw5_flags = cpu_to_le32(dw5_flags); 228 eq->dw6_flags = cpu_to_le32(dw6_flags); 229 eq->dw7_delaymulti = cpu_to_le32(SLI4_CREATE_EQ_DELAYMULTI); 230 231 for (p = 0, addr = qmem->phys; p < num_pages; 232 p++, addr += SLI_PAGE_SIZE) { 233 eq->page_address[p].low = cpu_to_le32(lower_32_bits(addr)); 234 eq->page_address[p].high = cpu_to_le32(upper_32_bits(addr)); 235 } 236 237 return 0; 238 } 239 240 static int 241 sli_cmd_common_create_mq_ext(struct sli4 *sli4, void *buf, struct efc_dma *qmem, 242 u16 cq_id) 243 { 244 struct sli4_rqst_cmn_create_mq_ext *mq; 245 u32 p; 246 uintptr_t addr; 247 u32 num_pages; 248 u16 dw6w1_flags = 0; 249 250 mq = sli_config_cmd_init(sli4, buf, 251 SLI4_CFG_PYLD_LENGTH(cmn_create_mq_ext), NULL); 252 if (!mq) 253 return -EIO; 254 255 sli_cmd_fill_hdr(&mq->hdr, SLI4_CMN_CREATE_MQ_EXT, 256 SLI4_SUBSYSTEM_COMMON, CMD_V0, 257 SLI4_RQST_PYLD_LEN(cmn_create_mq_ext)); 258 259 /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */ 260 num_pages = qmem->size / SLI_PAGE_SIZE; 261 mq->num_pages = cpu_to_le16(num_pages); 262 switch (num_pages) { 263 case 1: 264 dw6w1_flags |= SLI4_MQE_SIZE_16; 265 break; 266 case 2: 267 dw6w1_flags |= SLI4_MQE_SIZE_32; 268 break; 269 case 4: 270 dw6w1_flags |= SLI4_MQE_SIZE_64; 271 break; 272 case 8: 273 dw6w1_flags |= SLI4_MQE_SIZE_128; 274 break; 275 default: 276 efc_log_info(sli4, "num_pages %d not valid\n", num_pages); 277 return -EIO; 278 } 279 280 mq->async_event_bitmap = cpu_to_le32(SLI4_ASYNC_EVT_FC_ALL); 281 282 if (sli4->params.mq_create_version) { 283 mq->cq_id_v1 = cpu_to_le16(cq_id); 284 mq->hdr.dw3_version = cpu_to_le32(CMD_V1); 285 } else { 286 dw6w1_flags |= (cq_id << SLI4_CREATE_MQEXT_CQID_SHIFT); 287 } 288 mq->dw7_val = cpu_to_le32(SLI4_CREATE_MQEXT_VAL); 289 290 mq->dw6w1_flags = cpu_to_le16(dw6w1_flags); 291 for (p = 0, addr = qmem->phys; p < num_pages; 292 p++, addr += SLI_PAGE_SIZE) { 293 mq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); 294 mq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); 295 } 296 297 return 0; 298 } 299 300 int 301 sli_cmd_wq_create(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id) 302 { 303 struct sli4_rqst_wq_create *wq; 304 u32 p; 305 uintptr_t addr; 306 u32 page_size = 0; 307 u32 n_wqe = 0; 308 u16 num_pages; 309 310 wq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(wq_create), 311 NULL); 312 if (!wq) 313 return -EIO; 314 315 sli_cmd_fill_hdr(&wq->hdr, SLI4_OPC_WQ_CREATE, SLI4_SUBSYSTEM_FC, 316 CMD_V1, SLI4_RQST_PYLD_LEN(wq_create)); 317 n_wqe = qmem->size / sli4->wqe_size; 318 319 switch (qmem->size) { 320 case 4096: 321 case 8192: 322 case 16384: 323 case 32768: 324 page_size = SZ_4K; 325 break; 326 case 65536: 327 page_size = SZ_8K; 328 break; 329 case 131072: 330 page_size = SZ_16K; 331 break; 332 case 262144: 333 page_size = SZ_32K; 334 break; 335 case 524288: 336 page_size = SZ_64K; 337 break; 338 default: 339 return -EIO; 340 } 341 342 /* valid values for number of pages(num_pages): 1-8 */ 343 num_pages = sli_page_count(qmem->size, page_size); 344 wq->num_pages = cpu_to_le16(num_pages); 345 if (!num_pages || num_pages > SLI4_WQ_CREATE_MAX_PAGES) 346 return -EIO; 347 348 wq->cq_id = cpu_to_le16(cq_id); 349 350 wq->page_size = page_size / SLI_PAGE_SIZE; 351 352 if (sli4->wqe_size == SLI4_WQE_EXT_BYTES) 353 wq->wqe_size_byte |= SLI4_WQE_EXT_SIZE; 354 else 355 wq->wqe_size_byte |= SLI4_WQE_SIZE; 356 357 wq->wqe_count = cpu_to_le16(n_wqe); 358 359 for (p = 0, 
addr = qmem->phys; p < num_pages; p++, addr += page_size) { 360 wq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); 361 wq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); 362 } 363 364 return 0; 365 } 366 367 static int 368 sli_cmd_rq_create_v1(struct sli4 *sli4, void *buf, struct efc_dma *qmem, 369 u16 cq_id, u16 buffer_size) 370 { 371 struct sli4_rqst_rq_create_v1 *rq; 372 u32 p; 373 uintptr_t addr; 374 u32 num_pages; 375 376 rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1), 377 NULL); 378 if (!rq) 379 return -EIO; 380 381 sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC, 382 CMD_V1, SLI4_RQST_PYLD_LEN(rq_create_v1)); 383 /* Disable "no buffer warnings" to avoid Lancer bug */ 384 rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB; 385 386 /* valid values for number of pages: 1-8 (sec 4.5.6) */ 387 num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE); 388 rq->num_pages = cpu_to_le16(num_pages); 389 if (!num_pages || 390 num_pages > SLI4_RQ_CREATE_V1_MAX_PAGES) { 391 efc_log_info(sli4, "num_pages %d not valid, max %d\n", 392 num_pages, SLI4_RQ_CREATE_V1_MAX_PAGES); 393 return -EIO; 394 } 395 396 /* 397 * RQE count is the total number of entries (note not lg2(# entries)) 398 */ 399 rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE); 400 401 rq->rqe_size_byte |= SLI4_RQE_SIZE_8; 402 403 rq->page_size = SLI4_RQ_PAGE_SIZE_4096; 404 405 if (buffer_size < sli4->rq_min_buf_size || 406 buffer_size > sli4->rq_max_buf_size) { 407 efc_log_err(sli4, "buffer_size %d out of range (%d-%d)\n", 408 buffer_size, sli4->rq_min_buf_size, 409 sli4->rq_max_buf_size); 410 return -EIO; 411 } 412 rq->buffer_size = cpu_to_le32(buffer_size); 413 414 rq->cq_id = cpu_to_le16(cq_id); 415 416 for (p = 0, addr = qmem->phys; 417 p < num_pages; 418 p++, addr += SLI_PAGE_SIZE) { 419 rq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr)); 420 rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr)); 421 } 422 423 return 0; 424 } 425 426 static int 427 sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs, 428 struct sli4_queue *qs[], u32 base_cq_id, 429 u32 header_buffer_size, 430 u32 payload_buffer_size, struct efc_dma *dma) 431 { 432 struct sli4_rqst_rq_create_v2 *req = NULL; 433 u32 i, p, offset = 0; 434 u32 payload_size, page_count; 435 uintptr_t addr; 436 u32 num_pages; 437 __le32 len; 438 439 page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs; 440 441 /* Payload length must accommodate both request and response */ 442 payload_size = max(SLI4_RQST_CMDSZ(rq_create_v2) + 443 SZ_DMAADDR * page_count, 444 sizeof(struct sli4_rsp_cmn_create_queue_set)); 445 446 dma->size = payload_size; 447 dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size, 448 &dma->phys, GFP_DMA); 449 if (!dma->virt) 450 return -EIO; 451 452 memset(dma->virt, 0, payload_size); 453 454 req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma); 455 if (!req) 456 return -EIO; 457 458 len = SLI4_RQST_PYLD_LEN_VAR(rq_create_v2, SZ_DMAADDR * page_count); 459 sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC, 460 CMD_V2, len); 461 /* Fill Payload fields */ 462 req->dim_dfd_dnb |= SLI4_RQCREATEV2_DNB; 463 num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE); 464 req->num_pages = cpu_to_le16(num_pages); 465 req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE); 466 req->rqe_size_byte |= SLI4_RQE_SIZE_8; 467 req->page_size = SLI4_RQ_PAGE_SIZE_4096; 468 req->rq_count = num_rqs; 469 req->base_cq_id = cpu_to_le16(base_cq_id); 470 
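	/*
	 * The two buffer sizes below apply to the header RQs and the
	 * payload RQs of each RQ pair; the caller, sli_fc_rq_set_alloc(),
	 * always creates num_rq_pairs * 2 queues and later flags the
	 * even queue IDs as header RQs. The page address array that
	 * follows is filled queue by queue, num_pages entries per RQ.
	 */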
	req->hdr_buffer_size = cpu_to_le16(header_buffer_size);
	req->payload_buffer_size = cpu_to_le16(payload_buffer_size);

	for (i = 0; i < num_rqs; i++) {
		for (p = 0, addr = qs[i]->dma.phys; p < num_pages;
		     p++, addr += SLI_PAGE_SIZE) {
			req->page_phys_addr[offset].low =
				cpu_to_le32(lower_32_bits(addr));
			req->page_phys_addr[offset].high =
				cpu_to_le32(upper_32_bits(addr));
			offset++;
		}
	}

	return 0;
}

static void
__sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
{
	if (!q->dma.size)
		return;

	dma_free_coherent(&sli4->pci->dev, q->dma.size,
			  q->dma.virt, q->dma.phys);
	memset(&q->dma, 0, sizeof(struct efc_dma));
}

int
__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
		 size_t size, u32 n_entries, u32 align)
{
	if (q->dma.virt) {
		efc_log_err(sli4, "%s failed\n", __func__);
		return -EIO;
	}

	memset(q, 0, sizeof(struct sli4_queue));

	q->dma.size = size * n_entries;
	q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
					 &q->dma.phys, GFP_DMA);
	if (!q->dma.virt) {
		memset(&q->dma, 0, sizeof(struct efc_dma));
		efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
		return -EIO;
	}

	memset(q->dma.virt, 0, size * n_entries);

	spin_lock_init(&q->lock);

	q->type = qtype;
	q->size = size;
	q->length = n_entries;

	if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) {
		/* For prism, phase will be flipped after
		 * a sweep through eq and cq
		 */
		q->phase = 1;
	}

	/* Limit to half the queue size per interrupt */
	q->proc_limit = n_entries / 2;

	if (q->type == SLI4_QTYPE_EQ)
		q->posted_limit = q->length / 2;
	else
		q->posted_limit = 64;

	return 0;
}

int
sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
		u32 n_entries, u32 buffer_size,
		struct sli4_queue *cq, bool is_hdr)
{
	if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE,
			     n_entries, SLI_PAGE_SIZE))
		return -EIO;

	if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id,
				 buffer_size))
		goto error;

	if (__sli_create_queue(sli4, q))
		goto error;

	if (is_hdr && q->id & 1) {
		efc_log_info(sli4, "bad header RQ_ID %d\n", q->id);
		goto error;
	} else if (!is_hdr && (q->id & 1) == 0) {
		efc_log_info(sli4, "bad data RQ_ID %d\n", q->id);
		goto error;
	}

	if (is_hdr)
		q->u.flag |= SLI4_QUEUE_FLAG_HDR;
	else
		q->u.flag &= ~SLI4_QUEUE_FLAG_HDR;

	return 0;

error:
	__sli_queue_destroy(sli4, q);
	return -EIO;
}

int
sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs,
		    struct sli4_queue *qs[], u32 base_cq_id,
		    u32 n_entries, u32 header_buffer_size,
		    u32 payload_buffer_size)
{
	u32 i;
	struct efc_dma dma = {0};
	struct sli4_rsp_cmn_create_queue_set *rsp = NULL;
	void __iomem *db_regaddr = NULL;
	u32 num_rqs = num_rq_pairs * 2;

	for (i = 0; i < num_rqs; i++) {
		if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_RQ,
				     SLI4_RQE_SIZE, n_entries,
				     SLI_PAGE_SIZE)) {
			goto error;
		}
	}

	if (sli_cmd_rq_create_v2(sli4, num_rqs, qs, base_cq_id,
				 header_buffer_size, payload_buffer_size,
				 &dma)) {
		goto error;
	}

	if (sli_bmbx_command(sli4)) {
		efc_log_err(sli4, "bootstrap mailbox write failed RQSet\n");
		goto error;
	}

	if (sli4->if_type ==
SLI4_INTF_IF_TYPE_6) 612 db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG; 613 else 614 db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG; 615 616 rsp = dma.virt; 617 if (rsp->hdr.status) { 618 efc_log_err(sli4, "bad create RQSet status=%#x addl=%#x\n", 619 rsp->hdr.status, rsp->hdr.additional_status); 620 goto error; 621 } 622 623 for (i = 0; i < num_rqs; i++) { 624 qs[i]->id = i + le16_to_cpu(rsp->q_id); 625 if ((qs[i]->id & 1) == 0) 626 qs[i]->u.flag |= SLI4_QUEUE_FLAG_HDR; 627 else 628 qs[i]->u.flag &= ~SLI4_QUEUE_FLAG_HDR; 629 630 qs[i]->db_regaddr = db_regaddr; 631 } 632 633 dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys); 634 635 return 0; 636 637 error: 638 for (i = 0; i < num_rqs; i++) 639 __sli_queue_destroy(sli4, qs[i]); 640 641 if (dma.virt) 642 dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, 643 dma.phys); 644 645 return -EIO; 646 } 647 648 static int 649 sli_res_sli_config(struct sli4 *sli4, void *buf) 650 { 651 struct sli4_cmd_sli_config *sli_config = buf; 652 653 /* sanity check */ 654 if (!buf || sli_config->hdr.command != 655 SLI4_MBX_CMD_SLI_CONFIG) { 656 efc_log_err(sli4, "bad parameter buf=%p cmd=%#x\n", buf, 657 buf ? sli_config->hdr.command : -1); 658 return -EIO; 659 } 660 661 if (le16_to_cpu(sli_config->hdr.status)) 662 return le16_to_cpu(sli_config->hdr.status); 663 664 if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB) 665 return sli_config->payload.embed[4]; 666 667 efc_log_info(sli4, "external buffers not supported\n"); 668 return -EIO; 669 } 670 671 int 672 __sli_create_queue(struct sli4 *sli4, struct sli4_queue *q) 673 { 674 struct sli4_rsp_cmn_create_queue *res_q = NULL; 675 676 if (sli_bmbx_command(sli4)) { 677 efc_log_crit(sli4, "bootstrap mailbox write fail %s\n", 678 SLI4_QNAME[q->type]); 679 return -EIO; 680 } 681 if (sli_res_sli_config(sli4, sli4->bmbx.virt)) { 682 efc_log_err(sli4, "bad status create %s\n", 683 SLI4_QNAME[q->type]); 684 return -EIO; 685 } 686 res_q = (void *)((u8 *)sli4->bmbx.virt + 687 offsetof(struct sli4_cmd_sli_config, payload)); 688 689 if (res_q->hdr.status) { 690 efc_log_err(sli4, "bad create %s status=%#x addl=%#x\n", 691 SLI4_QNAME[q->type], res_q->hdr.status, 692 res_q->hdr.additional_status); 693 return -EIO; 694 } 695 q->id = le16_to_cpu(res_q->q_id); 696 switch (q->type) { 697 case SLI4_QTYPE_EQ: 698 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 699 q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG; 700 else 701 q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG; 702 break; 703 case SLI4_QTYPE_CQ: 704 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 705 q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG; 706 else 707 q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG; 708 break; 709 case SLI4_QTYPE_MQ: 710 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 711 q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG; 712 else 713 q->db_regaddr = sli4->reg[0] + SLI4_MQ_DB_REG; 714 break; 715 case SLI4_QTYPE_RQ: 716 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 717 q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG; 718 else 719 q->db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG; 720 break; 721 case SLI4_QTYPE_WQ: 722 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 723 q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG; 724 else 725 q->db_regaddr = sli4->reg[0] + SLI4_IO_WQ_DB_REG; 726 break; 727 default: 728 break; 729 } 730 731 return 0; 732 } 733 734 int 735 sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype) 736 { 737 u32 size = 0; 738 739 switch (qtype) { 740 case SLI4_QTYPE_EQ: 741 size = sizeof(u32); 742 break; 743 case SLI4_QTYPE_CQ: 744 
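		/* CQE size is 16 bytes (SLI4_CQE_BYTES elsewhere in this file) */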
size = 16; 745 break; 746 case SLI4_QTYPE_MQ: 747 size = 256; 748 break; 749 case SLI4_QTYPE_WQ: 750 size = sli4->wqe_size; 751 break; 752 case SLI4_QTYPE_RQ: 753 size = SLI4_RQE_SIZE; 754 break; 755 default: 756 efc_log_info(sli4, "unknown queue type %d\n", qtype); 757 return -1; 758 } 759 return size; 760 } 761 762 int 763 sli_queue_alloc(struct sli4 *sli4, u32 qtype, 764 struct sli4_queue *q, u32 n_entries, 765 struct sli4_queue *assoc) 766 { 767 int size; 768 u32 align = 0; 769 770 /* get queue size */ 771 size = sli_get_queue_entry_size(sli4, qtype); 772 if (size < 0) 773 return -EIO; 774 align = SLI_PAGE_SIZE; 775 776 if (__sli_queue_init(sli4, q, qtype, size, n_entries, align)) 777 return -EIO; 778 779 switch (qtype) { 780 case SLI4_QTYPE_EQ: 781 if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) && 782 !__sli_create_queue(sli4, q)) 783 return 0; 784 785 break; 786 case SLI4_QTYPE_CQ: 787 if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma, 788 assoc ? assoc->id : 0) && 789 !__sli_create_queue(sli4, q)) 790 return 0; 791 792 break; 793 case SLI4_QTYPE_MQ: 794 assoc->u.flag |= SLI4_QUEUE_FLAG_MQ; 795 if (!sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt, 796 &q->dma, assoc->id) && 797 !__sli_create_queue(sli4, q)) 798 return 0; 799 800 break; 801 case SLI4_QTYPE_WQ: 802 if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma, 803 assoc ? assoc->id : 0) && 804 !__sli_create_queue(sli4, q)) 805 return 0; 806 807 break; 808 default: 809 efc_log_info(sli4, "unknown queue type %d\n", qtype); 810 } 811 812 __sli_queue_destroy(sli4, q); 813 return -EIO; 814 } 815 816 static int sli_cmd_cq_set_create(struct sli4 *sli4, 817 struct sli4_queue *qs[], u32 num_cqs, 818 struct sli4_queue *eqs[], 819 struct efc_dma *dma) 820 { 821 struct sli4_rqst_cmn_create_cq_set_v0 *req = NULL; 822 uintptr_t addr; 823 u32 i, offset = 0, page_bytes = 0, payload_size; 824 u32 p = 0, page_size = 0, n_cqe = 0, num_pages_cq; 825 u32 dw5_flags = 0; 826 u16 dw6w1_flags = 0; 827 __le32 req_len; 828 829 n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES; 830 switch (n_cqe) { 831 case 256: 832 case 512: 833 case 1024: 834 case 2048: 835 page_size = 1; 836 break; 837 case 4096: 838 page_size = 2; 839 break; 840 default: 841 return -EIO; 842 } 843 844 page_bytes = page_size * SLI_PAGE_SIZE; 845 num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes); 846 payload_size = max(SLI4_RQST_CMDSZ(cmn_create_cq_set_v0) + 847 (SZ_DMAADDR * num_pages_cq * num_cqs), 848 sizeof(struct sli4_rsp_cmn_create_queue_set)); 849 850 dma->size = payload_size; 851 dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size, 852 &dma->phys, GFP_DMA); 853 if (!dma->virt) 854 return -EIO; 855 856 memset(dma->virt, 0, payload_size); 857 858 req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma); 859 if (!req) 860 return -EIO; 861 862 req_len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_set_v0, 863 SZ_DMAADDR * num_pages_cq * num_cqs); 864 sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_CREATE_CQ_SET, SLI4_SUBSYSTEM_FC, 865 CMD_V0, req_len); 866 req->page_size = page_size; 867 868 req->num_pages = cpu_to_le16(num_pages_cq); 869 switch (num_pages_cq) { 870 case 1: 871 dw5_flags |= SLI4_CQ_CNT_VAL(256); 872 break; 873 case 2: 874 dw5_flags |= SLI4_CQ_CNT_VAL(512); 875 break; 876 case 4: 877 dw5_flags |= SLI4_CQ_CNT_VAL(1024); 878 break; 879 case 8: 880 dw5_flags |= SLI4_CQ_CNT_VAL(LARGE); 881 dw6w1_flags |= (n_cqe & SLI4_CREATE_CQSETV0_CQE_COUNT); 882 break; 883 default: 884 efc_log_info(sli4, "num_pages %d not valid\n", num_pages_cq); 885 return 
-EIO; 886 } 887 888 dw5_flags |= SLI4_CREATE_CQSETV0_EVT; 889 dw5_flags |= SLI4_CREATE_CQSETV0_VALID; 890 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 891 dw5_flags |= SLI4_CREATE_CQSETV0_AUTOVALID; 892 893 dw6w1_flags &= ~SLI4_CREATE_CQSETV0_ARM; 894 895 req->dw5_flags = cpu_to_le32(dw5_flags); 896 req->dw6w1_flags = cpu_to_le16(dw6w1_flags); 897 898 req->num_cq_req = cpu_to_le16(num_cqs); 899 900 /* Fill page addresses of all the CQs. */ 901 for (i = 0; i < num_cqs; i++) { 902 req->eq_id[i] = cpu_to_le16(eqs[i]->id); 903 for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq; 904 p++, addr += page_bytes) { 905 req->page_phys_addr[offset].low = 906 cpu_to_le32(lower_32_bits(addr)); 907 req->page_phys_addr[offset].high = 908 cpu_to_le32(upper_32_bits(addr)); 909 offset++; 910 } 911 } 912 913 return 0; 914 } 915 916 int 917 sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[], 918 u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[]) 919 { 920 u32 i; 921 struct efc_dma dma = {0}; 922 struct sli4_rsp_cmn_create_queue_set *res; 923 void __iomem *db_regaddr; 924 925 /* Align the queue DMA memory */ 926 for (i = 0; i < num_cqs; i++) { 927 if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_CQ, SLI4_CQE_BYTES, 928 n_entries, SLI_PAGE_SIZE)) 929 goto error; 930 } 931 932 if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma)) 933 goto error; 934 935 if (sli_bmbx_command(sli4)) 936 goto error; 937 938 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 939 db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG; 940 else 941 db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG; 942 943 res = dma.virt; 944 if (res->hdr.status) { 945 efc_log_err(sli4, "bad create CQSet status=%#x addl=%#x\n", 946 res->hdr.status, res->hdr.additional_status); 947 goto error; 948 } 949 950 /* Check if we got all requested CQs. */ 951 if (le16_to_cpu(res->num_q_allocated) != num_cqs) { 952 efc_log_crit(sli4, "Requested count CQs doesn't match.\n"); 953 goto error; 954 } 955 /* Fill the resp cq ids. 
*/ 956 for (i = 0; i < num_cqs; i++) { 957 qs[i]->id = le16_to_cpu(res->q_id) + i; 958 qs[i]->db_regaddr = db_regaddr; 959 } 960 961 dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys); 962 963 return 0; 964 965 error: 966 for (i = 0; i < num_cqs; i++) 967 __sli_queue_destroy(sli4, qs[i]); 968 969 if (dma.virt) 970 dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, 971 dma.phys); 972 973 return -EIO; 974 } 975 976 static int 977 sli_cmd_common_destroy_q(struct sli4 *sli4, u8 opc, u8 subsystem, u16 q_id) 978 { 979 struct sli4_rqst_cmn_destroy_q *req; 980 981 /* Payload length must accommodate both request and response */ 982 req = sli_config_cmd_init(sli4, sli4->bmbx.virt, 983 SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL); 984 if (!req) 985 return -EIO; 986 987 sli_cmd_fill_hdr(&req->hdr, opc, subsystem, 988 CMD_V0, SLI4_RQST_PYLD_LEN(cmn_destroy_q)); 989 req->q_id = cpu_to_le16(q_id); 990 991 return 0; 992 } 993 994 int 995 sli_queue_free(struct sli4 *sli4, struct sli4_queue *q, 996 u32 destroy_queues, u32 free_memory) 997 { 998 int rc = 0; 999 u8 opcode, subsystem; 1000 struct sli4_rsp_hdr *res; 1001 1002 if (!q) { 1003 efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q); 1004 return -EIO; 1005 } 1006 1007 if (!destroy_queues) 1008 goto free_mem; 1009 1010 switch (q->type) { 1011 case SLI4_QTYPE_EQ: 1012 opcode = SLI4_CMN_DESTROY_EQ; 1013 subsystem = SLI4_SUBSYSTEM_COMMON; 1014 break; 1015 case SLI4_QTYPE_CQ: 1016 opcode = SLI4_CMN_DESTROY_CQ; 1017 subsystem = SLI4_SUBSYSTEM_COMMON; 1018 break; 1019 case SLI4_QTYPE_MQ: 1020 opcode = SLI4_CMN_DESTROY_MQ; 1021 subsystem = SLI4_SUBSYSTEM_COMMON; 1022 break; 1023 case SLI4_QTYPE_WQ: 1024 opcode = SLI4_OPC_WQ_DESTROY; 1025 subsystem = SLI4_SUBSYSTEM_FC; 1026 break; 1027 case SLI4_QTYPE_RQ: 1028 opcode = SLI4_OPC_RQ_DESTROY; 1029 subsystem = SLI4_SUBSYSTEM_FC; 1030 break; 1031 default: 1032 efc_log_info(sli4, "bad queue type %d\n", q->type); 1033 rc = -EIO; 1034 goto free_mem; 1035 } 1036 1037 rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id); 1038 if (rc) 1039 goto free_mem; 1040 1041 rc = sli_bmbx_command(sli4); 1042 if (rc) 1043 goto free_mem; 1044 1045 rc = sli_res_sli_config(sli4, sli4->bmbx.virt); 1046 if (rc) 1047 goto free_mem; 1048 1049 res = (void *)((u8 *)sli4->bmbx.virt + 1050 offsetof(struct sli4_cmd_sli_config, payload)); 1051 if (res->status) { 1052 efc_log_err(sli4, "destroy %s st=%#x addl=%#x\n", 1053 SLI4_QNAME[q->type], res->status, 1054 res->additional_status); 1055 rc = -EIO; 1056 goto free_mem; 1057 } 1058 1059 free_mem: 1060 if (free_memory) 1061 __sli_queue_destroy(sli4, q); 1062 1063 return rc; 1064 } 1065 1066 int 1067 sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm) 1068 { 1069 u32 val; 1070 unsigned long flags = 0; 1071 u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM; 1072 1073 spin_lock_irqsave(&q->lock, flags); 1074 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 1075 val = sli_format_if6_eq_db_data(q->n_posted, q->id, a); 1076 else 1077 val = sli_format_eq_db_data(q->n_posted, q->id, a); 1078 1079 writel(val, q->db_regaddr); 1080 q->n_posted = 0; 1081 spin_unlock_irqrestore(&q->lock, flags); 1082 1083 return 0; 1084 } 1085 1086 int 1087 sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm) 1088 { 1089 u32 val = 0; 1090 unsigned long flags = 0; 1091 u32 a = arm ? 
SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM; 1092 1093 spin_lock_irqsave(&q->lock, flags); 1094 1095 switch (q->type) { 1096 case SLI4_QTYPE_EQ: 1097 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 1098 val = sli_format_if6_eq_db_data(q->n_posted, q->id, a); 1099 else 1100 val = sli_format_eq_db_data(q->n_posted, q->id, a); 1101 1102 writel(val, q->db_regaddr); 1103 q->n_posted = 0; 1104 break; 1105 case SLI4_QTYPE_CQ: 1106 if (sli4->if_type == SLI4_INTF_IF_TYPE_6) 1107 val = sli_format_if6_cq_db_data(q->n_posted, q->id, a); 1108 else 1109 val = sli_format_cq_db_data(q->n_posted, q->id, a); 1110 1111 writel(val, q->db_regaddr); 1112 q->n_posted = 0; 1113 break; 1114 default: 1115 efc_log_info(sli4, "should only be used for EQ/CQ, not %s\n", 1116 SLI4_QNAME[q->type]); 1117 } 1118 1119 spin_unlock_irqrestore(&q->lock, flags); 1120 1121 return 0; 1122 } 1123 1124 int 1125 sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) 1126 { 1127 u8 *qe = q->dma.virt; 1128 u32 qindex; 1129 u32 val = 0; 1130 1131 qindex = q->index; 1132 qe += q->index * q->size; 1133 1134 if (sli4->params.perf_wq_id_association) 1135 sli_set_wq_id_association(entry, q->id); 1136 1137 memcpy(qe, entry, q->size); 1138 val = sli_format_wq_db_data(q->id); 1139 1140 writel(val, q->db_regaddr); 1141 q->index = (q->index + 1) & (q->length - 1); 1142 1143 return qindex; 1144 } 1145 1146 int 1147 sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) 1148 { 1149 u8 *qe = q->dma.virt; 1150 u32 qindex; 1151 u32 val = 0; 1152 unsigned long flags; 1153 1154 spin_lock_irqsave(&q->lock, flags); 1155 qindex = q->index; 1156 qe += q->index * q->size; 1157 1158 memcpy(qe, entry, q->size); 1159 val = sli_format_mq_db_data(q->id); 1160 writel(val, q->db_regaddr); 1161 q->index = (q->index + 1) & (q->length - 1); 1162 spin_unlock_irqrestore(&q->lock, flags); 1163 1164 return qindex; 1165 } 1166 1167 int 1168 sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) 1169 { 1170 u8 *qe = q->dma.virt; 1171 u32 qindex; 1172 u32 val = 0; 1173 1174 qindex = q->index; 1175 qe += q->index * q->size; 1176 1177 memcpy(qe, entry, q->size); 1178 1179 /* 1180 * In RQ-pair, an RQ either contains the FC header 1181 * (i.e. is_hdr == TRUE) or the payload. 1182 * 1183 * Don't ring doorbell for payload RQ 1184 */ 1185 if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR)) 1186 goto skip; 1187 1188 val = sli_format_rq_db_data(q->id); 1189 writel(val, q->db_regaddr); 1190 skip: 1191 q->index = (q->index + 1) & (q->length - 1); 1192 1193 return qindex; 1194 } 1195 1196 int 1197 sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) 1198 { 1199 u8 *qe = q->dma.virt; 1200 unsigned long flags = 0; 1201 u16 wflags = 0; 1202 1203 spin_lock_irqsave(&q->lock, flags); 1204 1205 qe += q->index * q->size; 1206 1207 /* Check if eqe is valid */ 1208 wflags = le16_to_cpu(((struct sli4_eqe *)qe)->dw0w0_flags); 1209 1210 if ((wflags & SLI4_EQE_VALID) != q->phase) { 1211 spin_unlock_irqrestore(&q->lock, flags); 1212 return -EIO; 1213 } 1214 1215 if (sli4->if_type != SLI4_INTF_IF_TYPE_6) { 1216 wflags &= ~SLI4_EQE_VALID; 1217 ((struct sli4_eqe *)qe)->dw0w0_flags = cpu_to_le16(wflags); 1218 } 1219 1220 memcpy(entry, qe, q->size); 1221 q->index = (q->index + 1) & (q->length - 1); 1222 q->n_posted++; 1223 /* 1224 * For prism, the phase value will be used 1225 * to check the validity of eq/cq entries. 1226 * The value toggles after a complete sweep 1227 * through the queue. 
1228 */ 1229 1230 if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0) 1231 q->phase ^= (u16)0x1; 1232 1233 spin_unlock_irqrestore(&q->lock, flags); 1234 1235 return 0; 1236 } 1237 1238 int 1239 sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) 1240 { 1241 u8 *qe = q->dma.virt; 1242 unsigned long flags = 0; 1243 u32 dwflags = 0; 1244 bool valid_bit_set; 1245 1246 spin_lock_irqsave(&q->lock, flags); 1247 1248 qe += q->index * q->size; 1249 1250 /* Check if cqe is valid */ 1251 dwflags = le32_to_cpu(((struct sli4_mcqe *)qe)->dw3_flags); 1252 valid_bit_set = (dwflags & SLI4_MCQE_VALID) != 0; 1253 1254 if (valid_bit_set != q->phase) { 1255 spin_unlock_irqrestore(&q->lock, flags); 1256 return -EIO; 1257 } 1258 1259 if (sli4->if_type != SLI4_INTF_IF_TYPE_6) { 1260 dwflags &= ~SLI4_MCQE_VALID; 1261 ((struct sli4_mcqe *)qe)->dw3_flags = cpu_to_le32(dwflags); 1262 } 1263 1264 memcpy(entry, qe, q->size); 1265 q->index = (q->index + 1) & (q->length - 1); 1266 q->n_posted++; 1267 /* 1268 * For prism, the phase value will be used 1269 * to check the validity of eq/cq entries. 1270 * The value toggles after a complete sweep 1271 * through the queue. 1272 */ 1273 1274 if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0) 1275 q->phase ^= (u16)0x1; 1276 1277 spin_unlock_irqrestore(&q->lock, flags); 1278 1279 return 0; 1280 } 1281 1282 int 1283 sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry) 1284 { 1285 u8 *qe = q->dma.virt; 1286 unsigned long flags = 0; 1287 1288 spin_lock_irqsave(&q->lock, flags); 1289 1290 qe += q->u.r_idx * q->size; 1291 1292 /* Check if mqe is valid */ 1293 if (q->index == q->u.r_idx) { 1294 spin_unlock_irqrestore(&q->lock, flags); 1295 return -EIO; 1296 } 1297 1298 memcpy(entry, qe, q->size); 1299 q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1); 1300 1301 spin_unlock_irqrestore(&q->lock, flags); 1302 1303 return 0; 1304 } 1305 1306 int 1307 sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id) 1308 { 1309 struct sli4_eqe *eqe = (void *)buf; 1310 int rc = 0; 1311 u16 flags = 0; 1312 u16 majorcode; 1313 u16 minorcode; 1314 1315 if (!buf || !cq_id) { 1316 efc_log_err(sli4, "bad parameters sli4=%p buf=%p cq_id=%p\n", 1317 sli4, buf, cq_id); 1318 return -EIO; 1319 } 1320 1321 flags = le16_to_cpu(eqe->dw0w0_flags); 1322 majorcode = (flags & SLI4_EQE_MJCODE) >> 1; 1323 minorcode = (flags & SLI4_EQE_MNCODE) >> 4; 1324 switch (majorcode) { 1325 case SLI4_MAJOR_CODE_STANDARD: 1326 *cq_id = le16_to_cpu(eqe->resource_id); 1327 break; 1328 case SLI4_MAJOR_CODE_SENTINEL: 1329 efc_log_info(sli4, "sentinel EQE\n"); 1330 rc = SLI4_EQE_STATUS_EQ_FULL; 1331 break; 1332 default: 1333 efc_log_info(sli4, "Unsupported EQE: major %x minor %x\n", 1334 majorcode, minorcode); 1335 rc = -EIO; 1336 } 1337 1338 return rc; 1339 } 1340 1341 int 1342 sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe, 1343 enum sli4_qentry *etype, u16 *q_id) 1344 { 1345 int rc = 0; 1346 1347 if (!cq || !cqe || !etype) { 1348 efc_log_err(sli4, "bad params sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n", 1349 sli4, cq, cqe, etype, q_id); 1350 return -EINVAL; 1351 } 1352 1353 /* Parse a CQ entry to retrieve the event type and the queue id */ 1354 if (cq->u.flag & SLI4_QUEUE_FLAG_MQ) { 1355 struct sli4_mcqe *mcqe = (void *)cqe; 1356 1357 if (le32_to_cpu(mcqe->dw3_flags) & SLI4_MCQE_AE) { 1358 *etype = SLI4_QENTRY_ASYNC; 1359 } else { 1360 *etype = SLI4_QENTRY_MQ; 1361 rc = sli_cqe_mq(sli4, mcqe); 1362 } 1363 *q_id = -1; 1364 } else { 1365 rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id); 
1366 } 1367 1368 return rc; 1369 } 1370 1371 int 1372 sli_abort_wqe(struct sli4 *sli, void *buf, enum sli4_abort_type type, 1373 bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id) 1374 { 1375 struct sli4_abort_wqe *abort = buf; 1376 1377 memset(buf, 0, sli->wqe_size); 1378 1379 switch (type) { 1380 case SLI4_ABORT_XRI: 1381 abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG; 1382 if (mask) { 1383 efc_log_warn(sli, "%#x aborting XRI %#x warning non-zero mask", 1384 mask, ids); 1385 mask = 0; 1386 } 1387 break; 1388 case SLI4_ABORT_ABORT_ID: 1389 abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG; 1390 break; 1391 case SLI4_ABORT_REQUEST_ID: 1392 abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG; 1393 break; 1394 default: 1395 efc_log_info(sli, "unsupported type %#x\n", type); 1396 return -EIO; 1397 } 1398 1399 abort->ia_ir_byte |= send_abts ? 0 : 1; 1400 1401 /* Suppress ABTS retries */ 1402 abort->ia_ir_byte |= SLI4_ABRT_WQE_IR; 1403 1404 abort->t_mask = cpu_to_le32(mask); 1405 abort->t_tag = cpu_to_le32(ids); 1406 abort->command = SLI4_WQE_ABORT; 1407 abort->request_tag = cpu_to_le16(tag); 1408 1409 abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD); 1410 1411 abort->cq_id = cpu_to_le16(cq_id); 1412 abort->cmdtype_wqec_byte |= SLI4_CMD_ABORT_WQE; 1413 1414 return 0; 1415 } 1416 1417 int 1418 sli_els_request64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, 1419 struct sli_els_params *params) 1420 { 1421 struct sli4_els_request64_wqe *els = buf; 1422 struct sli4_sge *sge = sgl->virt; 1423 bool is_fabric = false; 1424 struct sli4_bde *bptr; 1425 1426 memset(buf, 0, sli->wqe_size); 1427 1428 bptr = &els->els_request_payload; 1429 if (sli->params.sgl_pre_registered) { 1430 els->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_REQ_WQE_XBL; 1431 1432 els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_DBDE; 1433 bptr->bde_type_buflen = 1434 cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1435 (params->xmit_len & SLI4_BDE_LEN_MASK)); 1436 1437 bptr->u.data.low = sge[0].buffer_address_low; 1438 bptr->u.data.high = sge[0].buffer_address_high; 1439 } else { 1440 els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_XBL; 1441 1442 bptr->bde_type_buflen = 1443 cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | 1444 ((2 * sizeof(struct sli4_sge)) & 1445 SLI4_BDE_LEN_MASK)); 1446 bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); 1447 bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); 1448 } 1449 1450 els->els_request_payload_length = cpu_to_le32(params->xmit_len); 1451 els->max_response_payload_length = cpu_to_le32(params->rsp_len); 1452 1453 els->xri_tag = cpu_to_le16(params->xri); 1454 els->timer = params->timeout; 1455 els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3; 1456 1457 els->command = SLI4_WQE_ELS_REQUEST64; 1458 1459 els->request_tag = cpu_to_le16(params->tag); 1460 1461 els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_IOD; 1462 1463 els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_QOSD; 1464 1465 /* figure out the ELS_ID value from the request buffer */ 1466 1467 switch (params->cmd) { 1468 case ELS_LOGO: 1469 els->cmdtype_elsid_byte |= 1470 SLI4_ELS_REQUEST64_LOGO << SLI4_REQ_WQE_ELSID_SHFT; 1471 if (params->rpi_registered) { 1472 els->ct_byte |= 1473 SLI4_GENERIC_CONTEXT_RPI << SLI4_REQ_WQE_CT_SHFT; 1474 els->context_tag = cpu_to_le16(params->rpi); 1475 } else { 1476 els->ct_byte |= 1477 SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; 1478 els->context_tag = cpu_to_le16(params->vpi); 1479 } 1480 if (params->d_id == FC_FID_FLOGI) 1481 is_fabric = true; 1482 break; 1483 case ELS_FDISC: 1484 if (params->d_id == 
FC_FID_FLOGI) 1485 is_fabric = true; 1486 if (params->s_id == 0) { 1487 els->cmdtype_elsid_byte |= 1488 SLI4_ELS_REQUEST64_FDISC << SLI4_REQ_WQE_ELSID_SHFT; 1489 is_fabric = true; 1490 } else { 1491 els->cmdtype_elsid_byte |= 1492 SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT; 1493 } 1494 els->ct_byte |= 1495 SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; 1496 els->context_tag = cpu_to_le16(params->vpi); 1497 els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT); 1498 break; 1499 case ELS_FLOGI: 1500 els->ct_byte |= 1501 SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; 1502 els->context_tag = cpu_to_le16(params->vpi); 1503 /* 1504 * Set SP here ... we haven't done a REG_VPI yet 1505 * need to maybe not set this when we have 1506 * completed VFI/VPI registrations ... 1507 * 1508 * Use the FC_ID of the SPORT if it has been allocated, 1509 * otherwise use an S_ID of zero. 1510 */ 1511 els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT); 1512 if (params->s_id != U32_MAX) 1513 els->sid_sp_dword |= cpu_to_le32(params->s_id); 1514 break; 1515 case ELS_PLOGI: 1516 els->cmdtype_elsid_byte |= 1517 SLI4_ELS_REQUEST64_PLOGI << SLI4_REQ_WQE_ELSID_SHFT; 1518 els->ct_byte |= 1519 SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; 1520 els->context_tag = cpu_to_le16(params->vpi); 1521 break; 1522 case ELS_SCR: 1523 els->cmdtype_elsid_byte |= 1524 SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT; 1525 els->ct_byte |= 1526 SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; 1527 els->context_tag = cpu_to_le16(params->vpi); 1528 break; 1529 default: 1530 els->cmdtype_elsid_byte |= 1531 SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT; 1532 if (params->rpi_registered) { 1533 els->ct_byte |= (SLI4_GENERIC_CONTEXT_RPI << 1534 SLI4_REQ_WQE_CT_SHFT); 1535 els->context_tag = cpu_to_le16(params->vpi); 1536 } else { 1537 els->ct_byte |= 1538 SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT; 1539 els->context_tag = cpu_to_le16(params->vpi); 1540 } 1541 break; 1542 } 1543 1544 if (is_fabric) 1545 els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_FABRIC; 1546 else 1547 els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_NON_FABRIC; 1548 1549 els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); 1550 1551 if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) != 1552 SLI4_GENERIC_CONTEXT_RPI) 1553 els->remote_id_dword = cpu_to_le32(params->d_id); 1554 1555 if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) == 1556 SLI4_GENERIC_CONTEXT_VPI) 1557 els->temporary_rpi = cpu_to_le16(params->rpi); 1558 1559 return 0; 1560 } 1561 1562 int 1563 sli_fcp_icmnd64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u16 xri, 1564 u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout) 1565 { 1566 struct sli4_fcp_icmnd64_wqe *icmnd = buf; 1567 struct sli4_sge *sge = NULL; 1568 struct sli4_bde *bptr; 1569 u32 len; 1570 1571 memset(buf, 0, sli->wqe_size); 1572 1573 if (!sgl || !sgl->virt) { 1574 efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", 1575 sgl, sgl ? 
sgl->virt : NULL); 1576 return -EIO; 1577 } 1578 sge = sgl->virt; 1579 bptr = &icmnd->bde; 1580 if (sli->params.sgl_pre_registered) { 1581 icmnd->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_ICMD_WQE_XBL; 1582 1583 icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_DBDE; 1584 bptr->bde_type_buflen = 1585 cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1586 (le32_to_cpu(sge[0].buffer_length) & 1587 SLI4_BDE_LEN_MASK)); 1588 1589 bptr->u.data.low = sge[0].buffer_address_low; 1590 bptr->u.data.high = sge[0].buffer_address_high; 1591 } else { 1592 icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_XBL; 1593 1594 bptr->bde_type_buflen = 1595 cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | 1596 (sgl->size & SLI4_BDE_LEN_MASK)); 1597 1598 bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); 1599 bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); 1600 } 1601 1602 len = le32_to_cpu(sge[0].buffer_length) + 1603 le32_to_cpu(sge[1].buffer_length); 1604 icmnd->payload_offset_length = cpu_to_le16(len); 1605 icmnd->xri_tag = cpu_to_le16(xri); 1606 icmnd->context_tag = cpu_to_le16(rpi); 1607 icmnd->timer = timeout; 1608 1609 /* WQE word 4 contains read transfer length */ 1610 icmnd->class_pu_byte |= 2 << SLI4_ICMD_WQE_PU_SHFT; 1611 icmnd->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; 1612 icmnd->command = SLI4_WQE_FCP_ICMND64; 1613 icmnd->dif_ct_bs_byte |= 1614 SLI4_GENERIC_CONTEXT_RPI << SLI4_ICMD_WQE_CT_SHFT; 1615 1616 icmnd->abort_tag = cpu_to_le32(xri); 1617 1618 icmnd->request_tag = cpu_to_le16(tag); 1619 icmnd->len_loc1_byte |= SLI4_ICMD_WQE_LEN_LOC_BIT1; 1620 icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_LEN_LOC_BIT2; 1621 icmnd->cmd_type_byte |= SLI4_CMD_FCP_ICMND64_WQE; 1622 icmnd->cq_id = cpu_to_le16(cq_id); 1623 1624 return 0; 1625 } 1626 1627 int 1628 sli_fcp_iread64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, 1629 u32 first_data_sge, u32 xfer_len, u16 xri, u16 tag, 1630 u16 cq_id, u32 rpi, u32 rnode_fcid, 1631 u8 dif, u8 bs, u8 timeout) 1632 { 1633 struct sli4_fcp_iread64_wqe *iread = buf; 1634 struct sli4_sge *sge = NULL; 1635 struct sli4_bde *bptr; 1636 u32 sge_flags, len; 1637 1638 memset(buf, 0, sli->wqe_size); 1639 1640 if (!sgl || !sgl->virt) { 1641 efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", 1642 sgl, sgl ? 
sgl->virt : NULL); 1643 return -EIO; 1644 } 1645 1646 sge = sgl->virt; 1647 bptr = &iread->bde; 1648 if (sli->params.sgl_pre_registered) { 1649 iread->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IR_WQE_XBL; 1650 1651 iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_DBDE; 1652 1653 bptr->bde_type_buflen = 1654 cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1655 (le32_to_cpu(sge[0].buffer_length) & 1656 SLI4_BDE_LEN_MASK)); 1657 1658 bptr->u.blp.low = sge[0].buffer_address_low; 1659 bptr->u.blp.high = sge[0].buffer_address_high; 1660 } else { 1661 iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_XBL; 1662 1663 bptr->bde_type_buflen = 1664 cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | 1665 (sgl->size & SLI4_BDE_LEN_MASK)); 1666 1667 bptr->u.blp.low = 1668 cpu_to_le32(lower_32_bits(sgl->phys)); 1669 bptr->u.blp.high = 1670 cpu_to_le32(upper_32_bits(sgl->phys)); 1671 1672 /* 1673 * fill out fcp_cmnd buffer len and change resp buffer to be of 1674 * type "skip" (note: response will still be written to sge[1] 1675 * if necessary) 1676 */ 1677 len = le32_to_cpu(sge[0].buffer_length); 1678 iread->fcp_cmd_buffer_length = cpu_to_le16(len); 1679 1680 sge_flags = le32_to_cpu(sge[1].dw2_flags); 1681 sge_flags &= (~SLI4_SGE_TYPE_MASK); 1682 sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT); 1683 sge[1].dw2_flags = cpu_to_le32(sge_flags); 1684 } 1685 1686 len = le32_to_cpu(sge[0].buffer_length) + 1687 le32_to_cpu(sge[1].buffer_length); 1688 iread->payload_offset_length = cpu_to_le16(len); 1689 iread->total_transfer_length = cpu_to_le32(xfer_len); 1690 1691 iread->xri_tag = cpu_to_le16(xri); 1692 iread->context_tag = cpu_to_le16(rpi); 1693 1694 iread->timer = timeout; 1695 1696 /* WQE word 4 contains read transfer length */ 1697 iread->class_pu_byte |= 2 << SLI4_IR_WQE_PU_SHFT; 1698 iread->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; 1699 iread->command = SLI4_WQE_FCP_IREAD64; 1700 iread->dif_ct_bs_byte |= 1701 SLI4_GENERIC_CONTEXT_RPI << SLI4_IR_WQE_CT_SHFT; 1702 iread->dif_ct_bs_byte |= dif; 1703 iread->dif_ct_bs_byte |= bs << SLI4_IR_WQE_BS_SHFT; 1704 1705 iread->abort_tag = cpu_to_le32(xri); 1706 1707 iread->request_tag = cpu_to_le16(tag); 1708 iread->len_loc1_byte |= SLI4_IR_WQE_LEN_LOC_BIT1; 1709 iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_LEN_LOC_BIT2; 1710 iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_IOD; 1711 iread->cmd_type_byte |= SLI4_CMD_FCP_IREAD64_WQE; 1712 iread->cq_id = cpu_to_le16(cq_id); 1713 1714 if (sli->params.perf_hint) { 1715 bptr = &iread->first_data_bde; 1716 bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1717 (le32_to_cpu(sge[first_data_sge].buffer_length) & 1718 SLI4_BDE_LEN_MASK)); 1719 bptr->u.data.low = 1720 sge[first_data_sge].buffer_address_low; 1721 bptr->u.data.high = 1722 sge[first_data_sge].buffer_address_high; 1723 } 1724 1725 return 0; 1726 } 1727 1728 int 1729 sli_fcp_iwrite64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, 1730 u32 first_data_sge, u32 xfer_len, 1731 u32 first_burst, u16 xri, u16 tag, 1732 u16 cq_id, u32 rpi, 1733 u32 rnode_fcid, 1734 u8 dif, u8 bs, u8 timeout) 1735 { 1736 struct sli4_fcp_iwrite64_wqe *iwrite = buf; 1737 struct sli4_sge *sge = NULL; 1738 struct sli4_bde *bptr; 1739 u32 sge_flags, min, len; 1740 1741 memset(buf, 0, sli->wqe_size); 1742 1743 if (!sgl || !sgl->virt) { 1744 efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", 1745 sgl, sgl ? 
sgl->virt : NULL); 1746 return -EIO; 1747 } 1748 sge = sgl->virt; 1749 bptr = &iwrite->bde; 1750 if (sli->params.sgl_pre_registered) { 1751 iwrite->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IWR_WQE_XBL; 1752 1753 iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_DBDE; 1754 bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1755 (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK)); 1756 bptr->u.data.low = sge[0].buffer_address_low; 1757 bptr->u.data.high = sge[0].buffer_address_high; 1758 } else { 1759 iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_XBL; 1760 1761 bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1762 (sgl->size & SLI4_BDE_LEN_MASK)); 1763 1764 bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); 1765 bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys)); 1766 1767 /* 1768 * fill out fcp_cmnd buffer len and change resp buffer to be of 1769 * type "skip" (note: response will still be written to sge[1] 1770 * if necessary) 1771 */ 1772 len = le32_to_cpu(sge[0].buffer_length); 1773 iwrite->fcp_cmd_buffer_length = cpu_to_le16(len); 1774 sge_flags = le32_to_cpu(sge[1].dw2_flags); 1775 sge_flags &= ~SLI4_SGE_TYPE_MASK; 1776 sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT); 1777 sge[1].dw2_flags = cpu_to_le32(sge_flags); 1778 } 1779 1780 len = le32_to_cpu(sge[0].buffer_length) + 1781 le32_to_cpu(sge[1].buffer_length); 1782 iwrite->payload_offset_length = cpu_to_le16(len); 1783 iwrite->total_transfer_length = cpu_to_le16(xfer_len); 1784 min = (xfer_len < first_burst) ? xfer_len : first_burst; 1785 iwrite->initial_transfer_length = cpu_to_le16(min); 1786 1787 iwrite->xri_tag = cpu_to_le16(xri); 1788 iwrite->context_tag = cpu_to_le16(rpi); 1789 1790 iwrite->timer = timeout; 1791 /* WQE word 4 contains read transfer length */ 1792 iwrite->class_pu_byte |= 2 << SLI4_IWR_WQE_PU_SHFT; 1793 iwrite->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; 1794 iwrite->command = SLI4_WQE_FCP_IWRITE64; 1795 iwrite->dif_ct_bs_byte |= 1796 SLI4_GENERIC_CONTEXT_RPI << SLI4_IWR_WQE_CT_SHFT; 1797 iwrite->dif_ct_bs_byte |= dif; 1798 iwrite->dif_ct_bs_byte |= bs << SLI4_IWR_WQE_BS_SHFT; 1799 1800 iwrite->abort_tag = cpu_to_le32(xri); 1801 1802 iwrite->request_tag = cpu_to_le16(tag); 1803 iwrite->len_loc1_byte |= SLI4_IWR_WQE_LEN_LOC_BIT1; 1804 iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_LEN_LOC_BIT2; 1805 iwrite->cmd_type_byte |= SLI4_CMD_FCP_IWRITE64_WQE; 1806 iwrite->cq_id = cpu_to_le16(cq_id); 1807 1808 if (sli->params.perf_hint) { 1809 bptr = &iwrite->first_data_bde; 1810 1811 bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1812 (le32_to_cpu(sge[first_data_sge].buffer_length) & 1813 SLI4_BDE_LEN_MASK)); 1814 1815 bptr->u.data.low = sge[first_data_sge].buffer_address_low; 1816 bptr->u.data.high = sge[first_data_sge].buffer_address_high; 1817 } 1818 1819 return 0; 1820 } 1821 1822 int 1823 sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, 1824 u32 first_data_sge, u16 cq_id, u8 dif, u8 bs, 1825 struct sli_fcp_tgt_params *params) 1826 { 1827 struct sli4_fcp_treceive64_wqe *trecv = buf; 1828 struct sli4_fcp_128byte_wqe *trecv_128 = buf; 1829 struct sli4_sge *sge = NULL; 1830 struct sli4_bde *bptr; 1831 1832 memset(buf, 0, sli->wqe_size); 1833 1834 if (!sgl || !sgl->virt) { 1835 efc_log_err(sli, "bad parameter sgl=%p virt=%p\n", 1836 sgl, sgl ? 
sgl->virt : NULL); 1837 return -EIO; 1838 } 1839 sge = sgl->virt; 1840 bptr = &trecv->bde; 1841 if (sli->params.sgl_pre_registered) { 1842 trecv->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_TRCV_WQE_XBL; 1843 1844 trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE; 1845 1846 bptr->bde_type_buflen = 1847 cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1848 (le32_to_cpu(sge[0].buffer_length) 1849 & SLI4_BDE_LEN_MASK)); 1850 1851 bptr->u.data.low = sge[0].buffer_address_low; 1852 bptr->u.data.high = sge[0].buffer_address_high; 1853 1854 trecv->payload_offset_length = sge[0].buffer_length; 1855 } else { 1856 trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_XBL; 1857 1858 /* if data is a single physical address, use a BDE */ 1859 if (!dif && 1860 params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) { 1861 trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE; 1862 bptr->bde_type_buflen = 1863 cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1864 (le32_to_cpu(sge[2].buffer_length) 1865 & SLI4_BDE_LEN_MASK)); 1866 1867 bptr->u.data.low = sge[2].buffer_address_low; 1868 bptr->u.data.high = sge[2].buffer_address_high; 1869 } else { 1870 bptr->bde_type_buflen = 1871 cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) | 1872 (sgl->size & SLI4_BDE_LEN_MASK)); 1873 bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys)); 1874 bptr->u.blp.high = 1875 cpu_to_le32(upper_32_bits(sgl->phys)); 1876 } 1877 } 1878 1879 trecv->relative_offset = cpu_to_le32(params->offset); 1880 1881 if (params->flags & SLI4_IO_CONTINUATION) 1882 trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_XC; 1883 1884 trecv->xri_tag = cpu_to_le16(params->xri); 1885 1886 trecv->context_tag = cpu_to_le16(params->rpi); 1887 1888 /* WQE uses relative offset */ 1889 trecv->class_ar_pu_byte |= 1 << SLI4_TRCV_WQE_PU_SHFT; 1890 1891 if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) 1892 trecv->class_ar_pu_byte |= SLI4_TRCV_WQE_AR; 1893 1894 trecv->command = SLI4_WQE_FCP_TRECEIVE64; 1895 trecv->class_ar_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3; 1896 trecv->dif_ct_bs_byte |= 1897 SLI4_GENERIC_CONTEXT_RPI << SLI4_TRCV_WQE_CT_SHFT; 1898 trecv->dif_ct_bs_byte |= bs << SLI4_TRCV_WQE_BS_SHFT; 1899 1900 trecv->remote_xid = cpu_to_le16(params->ox_id); 1901 1902 trecv->request_tag = cpu_to_le16(params->tag); 1903 1904 trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_IOD; 1905 1906 trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_LEN_LOC_BIT2; 1907 1908 trecv->cmd_type_byte |= SLI4_CMD_FCP_TRECEIVE64_WQE; 1909 1910 trecv->cq_id = cpu_to_le16(cq_id); 1911 1912 trecv->fcp_data_receive_length = cpu_to_le32(params->xmit_len); 1913 1914 if (sli->params.perf_hint) { 1915 bptr = &trecv->first_data_bde; 1916 1917 bptr->bde_type_buflen = 1918 cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1919 (le32_to_cpu(sge[first_data_sge].buffer_length) & 1920 SLI4_BDE_LEN_MASK)); 1921 bptr->u.data.low = sge[first_data_sge].buffer_address_low; 1922 bptr->u.data.high = sge[first_data_sge].buffer_address_high; 1923 } 1924 1925 /* The upper 7 bits of csctl is the priority */ 1926 if (params->cs_ctl & SLI4_MASK_CCP) { 1927 trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_CCPE; 1928 trecv->ccp = (params->cs_ctl & SLI4_MASK_CCP); 1929 } 1930 1931 if (params->app_id && sli->wqe_size == SLI4_WQE_EXT_BYTES && 1932 !(trecv->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) { 1933 trecv->lloc1_appid |= SLI4_TRCV_WQE_APPID; 1934 trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_WQES; 1935 trecv_128->dw[31] = params->app_id; 1936 } 1937 return 0; 1938 } 1939 1940 int 1941 sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf, 1942 struct efc_dma *sgl, u32 first_data_sge, 
1943 u16 sec_xri, u16 cq_id, u8 dif, u8 bs, 1944 struct sli_fcp_tgt_params *params) 1945 { 1946 int rc; 1947 1948 rc = sli_fcp_treceive64_wqe(sli, buf, sgl, first_data_sge, 1949 cq_id, dif, bs, params); 1950 if (!rc) { 1951 struct sli4_fcp_treceive64_wqe *trecv = buf; 1952 1953 trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64; 1954 trecv->dword5.sec_xri_tag = cpu_to_le16(sec_xri); 1955 } 1956 return rc; 1957 } 1958 1959 int 1960 sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, 1961 u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params) 1962 { 1963 struct sli4_fcp_trsp64_wqe *trsp = buf; 1964 struct sli4_fcp_128byte_wqe *trsp_128 = buf; 1965 1966 memset(buf, 0, sli4->wqe_size); 1967 1968 if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) { 1969 trsp->class_ag_byte |= SLI4_TRSP_WQE_AG; 1970 } else { 1971 struct sli4_sge *sge = sgl->virt; 1972 struct sli4_bde *bptr; 1973 1974 if (sli4->params.sgl_pre_registered || port_owned) 1975 trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_DBDE; 1976 else 1977 trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_XBL; 1978 bptr = &trsp->bde; 1979 1980 bptr->bde_type_buflen = 1981 cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) | 1982 (le32_to_cpu(sge[0].buffer_length) & 1983 SLI4_BDE_LEN_MASK)); 1984 bptr->u.data.low = sge[0].buffer_address_low; 1985 bptr->u.data.high = sge[0].buffer_address_high; 1986 1987 trsp->fcp_response_length = cpu_to_le32(params->xmit_len); 1988 } 1989 1990 if (params->flags & SLI4_IO_CONTINUATION) 1991 trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_XC; 1992 1993 trsp->xri_tag = cpu_to_le16(params->xri); 1994 trsp->rpi = cpu_to_le16(params->rpi); 1995 1996 trsp->command = SLI4_WQE_FCP_TRSP64; 1997 trsp->class_ag_byte |= SLI4_GENERIC_CLASS_CLASS_3; 1998 1999 trsp->remote_xid = cpu_to_le16(params->ox_id); 2000 trsp->request_tag = cpu_to_le16(params->tag); 2001 if (params->flags & SLI4_IO_DNRX) 2002 trsp->ct_dnrx_byte |= SLI4_TRSP_WQE_DNRX; 2003 else 2004 trsp->ct_dnrx_byte &= ~SLI4_TRSP_WQE_DNRX; 2005 2006 trsp->lloc1_appid |= 0x1; 2007 trsp->cq_id = cpu_to_le16(cq_id); 2008 trsp->cmd_type_byte = SLI4_CMD_FCP_TRSP64_WQE; 2009 2010 /* The upper 7 bits of csctl is the priority */ 2011 if (params->cs_ctl & SLI4_MASK_CCP) { 2012 trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_CCPE; 2013 trsp->ccp = (params->cs_ctl & SLI4_MASK_CCP); 2014 } 2015 2016 if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES && 2017 !(trsp->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) { 2018 trsp->lloc1_appid |= SLI4_TRSP_WQE_APPID; 2019 trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_WQES; 2020 trsp_128->dw[31] = params->app_id; 2021 } 2022 return 0; 2023 } 2024 2025 int 2026 sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, 2027 u32 first_data_sge, u16 cq_id, u8 dif, u8 bs, 2028 struct sli_fcp_tgt_params *params) 2029 { 2030 struct sli4_fcp_tsend64_wqe *tsend = buf; 2031 struct sli4_fcp_128byte_wqe *tsend_128 = buf; 2032 struct sli4_sge *sge = NULL; 2033 struct sli4_bde *bptr; 2034 2035 memset(buf, 0, sli4->wqe_size); 2036 2037 if (!sgl || !sgl->virt) { 2038 efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n", 2039 sgl, sgl ? 
int
sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
		    u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
		    struct sli_fcp_tgt_params *params)
{
	struct sli4_fcp_tsend64_wqe *tsend = buf;
	struct sli4_fcp_128byte_wqe *tsend_128 = buf;
	struct sli4_sge *sge = NULL;
	struct sli4_bde *bptr;

	memset(buf, 0, sli4->wqe_size);

	if (!sgl || !sgl->virt) {
		efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
			    sgl, sgl ? sgl->virt : NULL);
		return -EIO;
	}
	sge = sgl->virt;

	bptr = &tsend->bde;
	if (sli4->params.sgl_pre_registered) {
		tsend->ll_qd_xbl_hlm_iod_dbde &= ~SLI4_TSEND_WQE_XBL;

		tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;

		bptr->bde_type_buflen =
			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
				    (le32_to_cpu(sge[2].buffer_length) &
				     SLI4_BDE_LEN_MASK));

		/* TSEND64_WQE specifies first two SGE are skipped (3rd is
		 * valid)
		 */
		bptr->u.data.low = sge[2].buffer_address_low;
		bptr->u.data.high = sge[2].buffer_address_high;
	} else {
		tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_XBL;

		/* if data is a single physical address, use a BDE */
		if (!dif &&
		    params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
			tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;

			bptr->bde_type_buflen =
				cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
					    (le32_to_cpu(sge[2].buffer_length) &
					     SLI4_BDE_LEN_MASK));
			/*
			 * TSEND64_WQE specifies first two SGE are skipped
			 * (i.e. 3rd is valid)
			 */
			bptr->u.data.low =
				sge[2].buffer_address_low;
			bptr->u.data.high =
				sge[2].buffer_address_high;
		} else {
			bptr->bde_type_buflen =
				cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
					    (sgl->size &
					     SLI4_BDE_LEN_MASK));
			bptr->u.blp.low =
				cpu_to_le32(lower_32_bits(sgl->phys));
			bptr->u.blp.high =
				cpu_to_le32(upper_32_bits(sgl->phys));
		}
	}

	tsend->relative_offset = cpu_to_le32(params->offset);

	if (params->flags & SLI4_IO_CONTINUATION)
		tsend->dw10byte2 |= SLI4_TSEND_XC;

	tsend->xri_tag = cpu_to_le16(params->xri);

	tsend->rpi = cpu_to_le16(params->rpi);
	/* WQE uses relative offset */
	tsend->class_pu_ar_byte |= 1 << SLI4_TSEND_WQE_PU_SHFT;

	if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
		tsend->class_pu_ar_byte |= SLI4_TSEND_WQE_AR;

	tsend->command = SLI4_WQE_FCP_TSEND64;
	tsend->class_pu_ar_byte |= SLI4_GENERIC_CLASS_CLASS_3;
	tsend->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_TSEND_CT_SHFT;
	tsend->ct_byte |= dif;
	tsend->ct_byte |= bs << SLI4_TSEND_BS_SHFT;

	tsend->remote_xid = cpu_to_le16(params->ox_id);

	tsend->request_tag = cpu_to_le16(params->tag);

	tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_LEN_LOC_BIT2;

	tsend->cq_id = cpu_to_le16(cq_id);

	tsend->cmd_type_byte |= SLI4_CMD_FCP_TSEND64_WQE;

	tsend->fcp_data_transmit_length = cpu_to_le32(params->xmit_len);

	if (sli4->params.perf_hint) {
		bptr = &tsend->first_data_bde;
		bptr->bde_type_buflen =
			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
				    (le32_to_cpu(sge[first_data_sge].buffer_length) &
				     SLI4_BDE_LEN_MASK));
		bptr->u.data.low =
			sge[first_data_sge].buffer_address_low;
		bptr->u.data.high =
			sge[first_data_sge].buffer_address_high;
	}

	/* The upper 7 bits of csctl are the priority */
	if (params->cs_ctl & SLI4_MASK_CCP) {
		tsend->dw10byte2 |= SLI4_TSEND_CCPE;
		tsend->ccp = (params->cs_ctl & SLI4_MASK_CCP);
	}

	if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
	    !(tsend->dw10byte2 & SLI4_TSEND_EAT)) {
		tsend->dw10byte0 |= SLI4_TSEND_APPID_VALID;
		tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQES;
		tsend_128->dw[31] = params->app_id;
	}
	return 0;
}

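/**
 * sli_gen_request64_wqe() - Write a GEN_REQUEST64 work queue entry.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @sgl: DMA memory for the request and response buffers.
 * @params: CT request parameters.
 * Return: 0 on success, -EIO on failure.
 */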
int
sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
		      struct sli_ct_params *params)
{
	struct sli4_gen_request64_wqe *gen = buf;
	struct sli4_sge *sge = NULL;
	struct sli4_bde *bptr;

	memset(buf, 0, sli4->wqe_size);

	if (!sgl || !sgl->virt) {
		efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
			    sgl, sgl ? sgl->virt : NULL);
		return -EIO;
	}
	sge = sgl->virt;
	bptr = &gen->bde;

	if (sli4->params.sgl_pre_registered) {
		gen->dw10flags1 &= ~SLI4_GEN_REQ64_WQE_XBL;

		gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_DBDE;
		bptr->bde_type_buflen =
			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
				    (params->xmit_len & SLI4_BDE_LEN_MASK));

		bptr->u.data.low = sge[0].buffer_address_low;
		bptr->u.data.high = sge[0].buffer_address_high;
	} else {
		gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_XBL;

		bptr->bde_type_buflen =
			cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
				    ((2 * sizeof(struct sli4_sge)) &
				     SLI4_BDE_LEN_MASK));

		bptr->u.blp.low =
			cpu_to_le32(lower_32_bits(sgl->phys));
		bptr->u.blp.high =
			cpu_to_le32(upper_32_bits(sgl->phys));
	}

	gen->request_payload_length = cpu_to_le32(params->xmit_len);
	gen->max_response_payload_length = cpu_to_le32(params->rsp_len);

	gen->df_ctl = params->df_ctl;
	gen->type = params->type;
	gen->r_ctl = params->r_ctl;

	gen->xri_tag = cpu_to_le16(params->xri);

	gen->ct_byte = SLI4_GENERIC_CONTEXT_RPI << SLI4_GEN_REQ64_CT_SHFT;
	gen->context_tag = cpu_to_le16(params->rpi);

	gen->class_byte = SLI4_GENERIC_CLASS_CLASS_3;

	gen->command = SLI4_WQE_GEN_REQUEST64;

	gen->timer = params->timeout;

	gen->request_tag = cpu_to_le16(params->tag);

	gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_IOD;

	gen->dw10flags0 |= SLI4_GEN_REQ64_WQE_QOSD;

	gen->cmd_type_byte = SLI4_CMD_GEN_REQUEST64_WQE;

	gen->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);

	return 0;
}

int
sli_send_frame_wqe(struct sli4 *sli, void *buf, u8 sof, u8 eof, u32 *hdr,
		   struct efc_dma *payload, u32 req_len, u8 timeout, u16 xri,
		   u16 req_tag)
{
	struct sli4_send_frame_wqe *sf = buf;

	memset(buf, 0, sli->wqe_size);

	sf->dw10flags1 |= SLI4_SF_WQE_DBDE;
	sf->bde.bde_type_buflen = cpu_to_le32(req_len &
					      SLI4_BDE_LEN_MASK);
	sf->bde.u.data.low = cpu_to_le32(lower_32_bits(payload->phys));
	sf->bde.u.data.high = cpu_to_le32(upper_32_bits(payload->phys));

	/* Copy FC header */
	sf->fc_header_0_1[0] = cpu_to_le32(hdr[0]);
	sf->fc_header_0_1[1] = cpu_to_le32(hdr[1]);
	sf->fc_header_2_5[0] = cpu_to_le32(hdr[2]);
	sf->fc_header_2_5[1] = cpu_to_le32(hdr[3]);
	sf->fc_header_2_5[2] = cpu_to_le32(hdr[4]);
	sf->fc_header_2_5[3] = cpu_to_le32(hdr[5]);

	sf->frame_length = cpu_to_le32(req_len);

	sf->xri_tag = cpu_to_le16(xri);
	sf->dw7flags0 &= ~SLI4_SF_PU;
	sf->context_tag = 0;

	sf->ct_byte &= ~SLI4_SF_CT;
	sf->command = SLI4_WQE_SEND_FRAME;
	sf->dw7flags0 |= SLI4_GENERIC_CLASS_CLASS_3;
	sf->timer = timeout;

	sf->request_tag = cpu_to_le16(req_tag);
	sf->eof = eof;
	sf->sof = sof;

	sf->dw10flags1 &= ~SLI4_SF_QOSD;
	sf->dw10flags0 |= SLI4_SF_LEN_LOC_BIT1;
	sf->dw10flags2 &= ~SLI4_SF_XC;

	sf->dw10flags1 |= SLI4_SF_XBL;

	sf->cmd_type_byte |= SLI4_CMD_SEND_FRAME_WQE;
	sf->cq_id = cpu_to_le16(0xffff);

	return 0;
}

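/**
 * sli_xmit_bls_rsp64_wqe() - Write an XMIT_BLS_RSP64 work queue entry.
 *
 * @sli: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @payload: BLS accept or reject payload.
 * @params: BLS response parameters.
 * Return: 0 on success, -EIO on failure.
 */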
int
sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf,
		       struct sli_bls_payload *payload,
		       struct sli_bls_params *params)
{
	struct sli4_xmit_bls_rsp_wqe *bls = buf;
	u32 dw_ridflags = 0;

	/*
	 * Callers can either specify RPI or S_ID, but not both
	 */
	if (params->rpi_registered && params->s_id != U32_MAX) {
		efc_log_info(sli, "S_ID specified for attached remote node %d\n",
			     params->rpi);
		return -EIO;
	}

	memset(buf, 0, sli->wqe_size);

	if (payload->type == SLI4_SLI_BLS_ACC) {
		bls->payload_word0 =
			cpu_to_le32((payload->u.acc.seq_id_last << 16) |
				    (payload->u.acc.seq_id_validity << 24));
		bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
		bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
	} else if (payload->type == SLI4_SLI_BLS_RJT) {
		bls->payload_word0 =
			cpu_to_le32(*((u32 *)&payload->u.rjt));
		dw_ridflags |= SLI4_BLS_RSP_WQE_AR;
	} else {
		efc_log_info(sli, "bad BLS type %#x\n", payload->type);
		return -EIO;
	}

	bls->ox_id = payload->ox_id;
	bls->rx_id = payload->rx_id;

	if (params->rpi_registered) {
		bls->dw8flags0 |=
			SLI4_GENERIC_CONTEXT_RPI << SLI4_BLS_RSP_WQE_CT_SHFT;
		bls->context_tag = cpu_to_le16(params->rpi);
	} else {
		bls->dw8flags0 |=
			SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT;
		bls->context_tag = cpu_to_le16(params->vpi);

		if (params->s_id != U32_MAX)
			bls->local_n_port_id_dword |=
				cpu_to_le32(params->s_id & 0x00ffffff);
		else
			bls->local_n_port_id_dword |=
				cpu_to_le32(params->s_id & 0x00ffffff);

		dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) |
			      (params->d_id & SLI4_BLS_RSP_RID);

		bls->temporary_rpi = cpu_to_le16(params->rpi);
	}

	bls->xri_tag = cpu_to_le16(params->xri);

	bls->dw8flags1 |= SLI4_GENERIC_CLASS_CLASS_3;

	bls->command = SLI4_WQE_XMIT_BLS_RSP;

	bls->request_tag = cpu_to_le16(params->tag);

	bls->dw11flags1 |= SLI4_BLS_RSP_WQE_QOSD;

	bls->remote_id_dword = cpu_to_le32(dw_ridflags);
	bls->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);

	bls->dw12flags0 |= SLI4_CMD_XMIT_BLS_RSP64_WQE;

	return 0;
}

int
sli_xmit_els_rsp64_wqe(struct sli4 *sli, void *buf, struct efc_dma *rsp,
		       struct sli_els_params *params)
{
	struct sli4_xmit_els_rsp64_wqe *els = buf;

	memset(buf, 0, sli->wqe_size);

	if (sli->params.sgl_pre_registered)
		els->flags2 |= SLI4_ELS_DBDE;
	else
		els->flags2 |= SLI4_ELS_XBL;

	els->els_response_payload.bde_type_buflen =
		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
			    (params->rsp_len & SLI4_BDE_LEN_MASK));
	els->els_response_payload.u.data.low =
		cpu_to_le32(lower_32_bits(rsp->phys));
	els->els_response_payload.u.data.high =
		cpu_to_le32(upper_32_bits(rsp->phys));

	els->els_response_payload_length = cpu_to_le32(params->rsp_len);

	els->xri_tag = cpu_to_le16(params->xri);

	els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;

	els->command = SLI4_WQE_ELS_RSP64;

	els->request_tag = cpu_to_le16(params->tag);

	els->ox_id = cpu_to_le16(params->ox_id);

	els->flags2 |= SLI4_ELS_IOD & SLI4_ELS_REQUEST64_DIR_WRITE;

	els->flags2 |= SLI4_ELS_QOSD;

	els->cmd_type_wqec = SLI4_ELS_REQUEST64_CMD_GEN;

	els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);

	if (params->rpi_registered) {
		els->ct_byte |=
			SLI4_GENERIC_CONTEXT_RPI << SLI4_ELS_CT_OFFSET;
		els->context_tag = cpu_to_le16(params->rpi);
		return 0;
	}

	els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_ELS_CT_OFFSET;
	els->context_tag = cpu_to_le16(params->vpi);
	els->rid_dw = cpu_to_le32(params->d_id & SLI4_ELS_RID);
	els->temporary_rpi = cpu_to_le16(params->rpi);
	if (params->s_id != U32_MAX) {
		els->sid_dw |=
			cpu_to_le32(SLI4_ELS_SP | (params->s_id & SLI4_ELS_SID));
	}

	return 0;
}

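/**
 * sli_xmit_sequence64_wqe() - Write an XMIT_SEQUENCE64 work queue entry.
 *
 * @sli4: SLI context pointer.
 * @buf: Destination buffer for the WQE.
 * @payload: DMA memory for the sequence payload.
 * @params: CT parameters for the sequence.
 * Return: 0 on success, -EIO on failure.
 */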
int
sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload,
			struct sli_ct_params *params)
{
	struct sli4_xmit_sequence64_wqe *xmit = buf;

	memset(buf, 0, sli4->wqe_size);

	if (!payload || !payload->virt) {
		efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
			    payload, payload ? payload->virt : NULL);
		return -EIO;
	}

	if (sli4->params.sgl_pre_registered)
		xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_DBDE);
	else
		xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_XBL);

	xmit->bde.bde_type_buflen =
		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
			    (params->rsp_len & SLI4_BDE_LEN_MASK));
	xmit->bde.u.data.low =
		cpu_to_le32(lower_32_bits(payload->phys));
	xmit->bde.u.data.high =
		cpu_to_le32(upper_32_bits(payload->phys));
	xmit->sequence_payload_len = cpu_to_le32(params->rsp_len);

	xmit->remote_n_port_id_dword |= cpu_to_le32(params->d_id & 0x00ffffff);

	xmit->relative_offset = 0;

	/* sequence initiative - this matches what is seen from
	 * FC switches in response to FCGS commands
	 */
	xmit->dw5flags0 &= (~SLI4_SEQ_WQE_SI);
	xmit->dw5flags0 &= (~SLI4_SEQ_WQE_FT);/* force transmit */
	xmit->dw5flags0 &= (~SLI4_SEQ_WQE_XO);/* exchange responder */
	xmit->dw5flags0 |= SLI4_SEQ_WQE_LS;/* last in sequence */
	xmit->df_ctl = params->df_ctl;
	xmit->type = params->type;
	xmit->r_ctl = params->r_ctl;

	xmit->xri_tag = cpu_to_le16(params->xri);
	xmit->context_tag = cpu_to_le16(params->rpi);

	xmit->dw7flags0 &= ~SLI4_SEQ_WQE_DIF;
	xmit->dw7flags0 |=
		SLI4_GENERIC_CONTEXT_RPI << SLI4_SEQ_WQE_CT_SHIFT;
	xmit->dw7flags0 &= ~SLI4_SEQ_WQE_BS;

	xmit->command = SLI4_WQE_XMIT_SEQUENCE64;
	xmit->dw7flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
	xmit->dw7flags1 &= ~SLI4_SEQ_WQE_PU;
	xmit->timer = params->timeout;

	xmit->abort_tag = 0;
	xmit->request_tag = cpu_to_le16(params->tag);
	xmit->remote_xid = cpu_to_le16(params->ox_id);

	xmit->dw10w0 |=
		cpu_to_le16(SLI4_ELS_REQUEST64_DIR_READ << SLI4_SEQ_WQE_IOD_SHIFT);

	xmit->cmd_type_wqec_byte |= SLI4_CMD_XMIT_SEQUENCE64_WQE;

	xmit->dw10w0 |= cpu_to_le16(2 << SLI4_SEQ_WQE_LEN_LOC_SHIFT);

	xmit->cq_id = cpu_to_le16(0xFFFF);

	return 0;
}

int
sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id)
{
	struct sli4_requeue_xri_wqe *requeue = buf;

	memset(buf, 0, sli4->wqe_size);

	requeue->command = SLI4_WQE_REQUEUE_XRI;
	requeue->xri_tag = cpu_to_le16(xri);
	requeue->request_tag = cpu_to_le16(tag);
	requeue->flags2 |= cpu_to_le16(SLI4_REQU_XRI_WQE_XC);
	requeue->flags1 |= cpu_to_le16(SLI4_REQU_XRI_WQE_QOSD);
	requeue->cq_id = cpu_to_le16(cq_id);
	requeue->cmd_type_wqec_byte = SLI4_CMD_REQUEUE_XRI_WQE;
	return 0;
}

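/**
 * sli_fc_process_link_attention() - Process an FC link attention ACQE.
 *
 * @sli4: SLI context pointer.
 * @acqe: Asynchronous completion queue entry to process.
 *
 * Translates the link attention entry into a struct sli4_link_event and
 * passes it to the registered link callback.
 *
 * Return: 0 on success, -EIO if no link callback is registered.
 */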
int
sli_fc_process_link_attention(struct sli4 *sli4, void *acqe)
{
	struct sli4_link_attention *link_attn = acqe;
	struct sli4_link_event event = { 0 };

	efc_log_info(sli4, "link=%d attn_type=%#x top=%#x speed=%#x pfault=%#x\n",
		     link_attn->link_number, link_attn->attn_type,
		     link_attn->topology, link_attn->port_speed,
		     link_attn->port_fault);
	efc_log_info(sli4, "shared_lnk_status=%#x logl_lnk_speed=%#x evttag=%#x\n",
		     link_attn->shared_link_status,
		     le16_to_cpu(link_attn->logical_link_speed),
		     le32_to_cpu(link_attn->event_tag));

	if (!sli4->link)
		return -EIO;

	event.medium = SLI4_LINK_MEDIUM_FC;

	switch (link_attn->attn_type) {
	case SLI4_LNK_ATTN_TYPE_LINK_UP:
		event.status = SLI4_LINK_STATUS_UP;
		break;
	case SLI4_LNK_ATTN_TYPE_LINK_DOWN:
		event.status = SLI4_LINK_STATUS_DOWN;
		break;
	case SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA:
		efc_log_info(sli4, "attn_type: no hard alpa\n");
		event.status = SLI4_LINK_STATUS_NO_ALPA;
		break;
	default:
		efc_log_info(sli4, "attn_type: unknown\n");
		break;
	}

	switch (link_attn->event_type) {
	case SLI4_EVENT_LINK_ATTENTION:
		break;
	case SLI4_EVENT_SHARED_LINK_ATTENTION:
		efc_log_info(sli4, "event_type: FC shared link event\n");
		break;
	default:
		efc_log_info(sli4, "event_type: unknown\n");
		break;
	}

	switch (link_attn->topology) {
	case SLI4_LNK_ATTN_P2P:
		event.topology = SLI4_LINK_TOPO_NON_FC_AL;
		break;
	case SLI4_LNK_ATTN_FC_AL:
		event.topology = SLI4_LINK_TOPO_FC_AL;
		break;
	case SLI4_LNK_ATTN_INTERNAL_LOOPBACK:
		efc_log_info(sli4, "topology Internal loopback\n");
		event.topology = SLI4_LINK_TOPO_LOOPBACK_INTERNAL;
		break;
	case SLI4_LNK_ATTN_SERDES_LOOPBACK:
		efc_log_info(sli4, "topology serdes loopback\n");
		event.topology = SLI4_LINK_TOPO_LOOPBACK_EXTERNAL;
		break;
	default:
		efc_log_info(sli4, "topology: unknown\n");
		break;
	}

	event.speed = link_attn->port_speed * 1000;

	sli4->link(sli4->link_arg, (void *)&event);

	return 0;
}

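/**
 * sli_fc_cqe_parse() - Parse an FC completion queue entry.
 *
 * @sli4: SLI context pointer.
 * @cq: Completion queue the entry was read from.
 * @cqe: Completion queue entry to parse.
 * @etype: Returned queue entry type.
 * @r_id: Returned resource id (request tag, RQ id or XRI, depending on type).
 * Return: CQE status, 0 on success, or -EINVAL for unhandled CQE codes.
 */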
int
sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq,
		 u8 *cqe, enum sli4_qentry *etype, u16 *r_id)
{
	u8 code = cqe[SLI4_CQE_CODE_OFFSET];
	int rc;

	switch (code) {
	case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
	{
		struct sli4_fc_wcqe *wcqe = (void *)cqe;

		*etype = SLI4_QENTRY_WQ;
		*r_id = le16_to_cpu(wcqe->request_tag);
		rc = wcqe->status;

		/* Flag errors except for FCP_RSP_FAILURE */
		if (rc && rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE) {
			efc_log_info(sli4, "WCQE: status=%#x hw_status=%#x tag=%#x\n",
				     wcqe->status, wcqe->hw_status,
				     le16_to_cpu(wcqe->request_tag));
			efc_log_info(sli4, "w1=%#x w2=%#x xb=%d\n",
				     le32_to_cpu(wcqe->wqe_specific_1),
				     le32_to_cpu(wcqe->wqe_specific_2),
				     (wcqe->flags & SLI4_WCQE_XB));
			efc_log_info(sli4, "      %08X %08X %08X %08X\n",
				     ((u32 *)cqe)[0], ((u32 *)cqe)[1],
				     ((u32 *)cqe)[2], ((u32 *)cqe)[3]);
		}

		break;
	}
	case SLI4_CQE_CODE_RQ_ASYNC:
	{
		struct sli4_fc_async_rcqe *rcqe = (void *)cqe;

		*etype = SLI4_QENTRY_RQ;
		*r_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
		rc = rcqe->status;
		break;
	}
	case SLI4_CQE_CODE_RQ_ASYNC_V1:
	{
		struct sli4_fc_async_rcqe_v1 *rcqe = (void *)cqe;

		*etype = SLI4_QENTRY_RQ;
		*r_id = le16_to_cpu(rcqe->rq_id);
		rc = rcqe->status;
		break;
	}
	case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
	{
		struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;

		*etype = SLI4_QENTRY_OPT_WRITE_CMD;
		*r_id = le16_to_cpu(optcqe->rq_id);
		rc = optcqe->status;
		break;
	}
	case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
	{
		struct sli4_fc_optimized_write_data_cqe *dcqe = (void *)cqe;

		*etype = SLI4_QENTRY_OPT_WRITE_DATA;
		*r_id = le16_to_cpu(dcqe->xri);
		rc = dcqe->status;

		/* Flag errors */
		if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
			efc_log_info(sli4, "Optimized DATA CQE: status=%#x\n",
				     dcqe->status);
			efc_log_info(sli4, "hstat=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
				     dcqe->hw_status, le16_to_cpu(dcqe->xri),
				     le32_to_cpu(dcqe->total_data_placed),
				     ((u32 *)cqe)[3],
				     (dcqe->flags & SLI4_OCQE_XB));
		}
		break;
	}
	case SLI4_CQE_CODE_RQ_COALESCING:
	{
		struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;

		*etype = SLI4_QENTRY_RQ;
		*r_id = le16_to_cpu(rcqe->rq_id);
		rc = rcqe->status;
		break;
	}
	case SLI4_CQE_CODE_XRI_ABORTED:
	{
		struct sli4_fc_xri_aborted_cqe *xa = (void *)cqe;

		*etype = SLI4_QENTRY_XABT;
		*r_id = le16_to_cpu(xa->xri);
		rc = 0;
		break;
	}
	case SLI4_CQE_CODE_RELEASE_WQE:
	{
		struct sli4_fc_wqec *wqec = (void *)cqe;

		*etype = SLI4_QENTRY_WQ_RELEASE;
		*r_id = le16_to_cpu(wqec->wq_id);
		rc = 0;
		break;
	}
	default:
		efc_log_info(sli4, "CQE completion code %d not handled\n",
			     code);
		*etype = SLI4_QENTRY_MAX;
		*r_id = U16_MAX;
		rc = -EINVAL;
	}

	return rc;
}

u32
sli_fc_response_length(struct sli4 *sli4, u8 *cqe)
{
	struct sli4_fc_wcqe *wcqe = (void *)cqe;

	return le32_to_cpu(wcqe->wqe_specific_1);
}

u32
sli_fc_io_length(struct sli4 *sli4, u8 *cqe)
{
	struct sli4_fc_wcqe *wcqe = (void *)cqe;

	return le32_to_cpu(wcqe->wqe_specific_1);
}

int
sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id)
{
	struct sli4_fc_wcqe *wcqe = (void *)cqe;

	*d_id = 0;

	if (wcqe->status)
		return -EIO;
	*d_id = le32_to_cpu(wcqe->wqe_specific_2) & 0x00ffffff;
	return 0;
}

u32
sli_fc_ext_status(struct sli4 *sli4, u8 *cqe)
{
	struct sli4_fc_wcqe *wcqe = (void *)cqe;
	u32 mask;

	switch (wcqe->status) {
	case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
		mask = U32_MAX;
		break;
	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
	case SLI4_FC_WCQE_STATUS_CMD_REJECT:
		mask = 0xff;
		break;
	case SLI4_FC_WCQE_STATUS_NPORT_RJT:
	case SLI4_FC_WCQE_STATUS_FABRIC_RJT:
	case SLI4_FC_WCQE_STATUS_NPORT_BSY:
	case SLI4_FC_WCQE_STATUS_FABRIC_BSY:
	case SLI4_FC_WCQE_STATUS_LS_RJT:
		mask = U32_MAX;
		break;
	case SLI4_FC_WCQE_STATUS_DI_ERROR:
		mask = U32_MAX;
		break;
	default:
		mask = 0;
	}

	return le32_to_cpu(wcqe->wqe_specific_2) & mask;
}

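/**
 * sli_fc_rqe_rqid_and_index() - Extract the RQ id and element index from an RQE.
 *
 * @sli4: SLI context pointer.
 * @cqe: Receive queue completion entry.
 * @rq_id: Returned receive queue id.
 * @index: Returned receive queue element index (U32_MAX on failure).
 * Return: 0 on success, otherwise the RQE status or -EIO.
 */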
int
sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index)
{
	int rc = -EIO;
	u8 code = 0;
	u16 rq_element_index;

	*rq_id = 0;
	*index = U32_MAX;

	code = cqe[SLI4_CQE_CODE_OFFSET];

	/* Retrieve the RQ index from the completion */
	if (code == SLI4_CQE_CODE_RQ_ASYNC) {
		struct sli4_fc_async_rcqe *rcqe = (void *)cqe;

		*rq_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
		rq_element_index =
			le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX;
		*index = rq_element_index;
		if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
			rc = 0;
		} else {
			rc = rcqe->status;
			efc_log_info(sli4, "status=%02x (%s) rq_id=%d\n",
				     rcqe->status,
				     sli_fc_get_status_string(rcqe->status),
				     le16_to_cpu(rcqe->fcfi_rq_id_word) &
				     SLI4_RACQE_RQ_ID);

			efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
				     le16_to_cpu(rcqe->data_placement_length),
				     rcqe->sof_byte, rcqe->eof_byte,
				     rcqe->hdpl_byte & SLI4_RACQE_HDPL);
		}
	} else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
		struct sli4_fc_async_rcqe_v1 *rcqe_v1 = (void *)cqe;

		*rq_id = le16_to_cpu(rcqe_v1->rq_id);
		rq_element_index =
			(le16_to_cpu(rcqe_v1->rq_elmt_indx_word) &
			 SLI4_RACQE_RQ_EL_INDX);
		*index = rq_element_index;
		if (rcqe_v1->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
			rc = 0;
		} else {
			rc = rcqe_v1->status;
			efc_log_info(sli4, "status=%02x (%s) rq_id=%d, index=%x\n",
				     rcqe_v1->status,
				     sli_fc_get_status_string(rcqe_v1->status),
				     le16_to_cpu(rcqe_v1->rq_id), rq_element_index);

			efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
				     le16_to_cpu(rcqe_v1->data_placement_length),
				     rcqe_v1->sof_byte, rcqe_v1->eof_byte,
				     rcqe_v1->hdpl_byte & SLI4_RACQE_HDPL);
		}
	} else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
		struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;

		*rq_id = le16_to_cpu(optcqe->rq_id);
		*index = le16_to_cpu(optcqe->w1) & SLI4_OCQE_RQ_EL_INDX;
		if (optcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
			rc = 0;
		} else {
			rc = optcqe->status;
			efc_log_info(sli4, "stat=%02x (%s) rqid=%d, idx=%x pdpl=%x\n",
				     optcqe->status,
				     sli_fc_get_status_string(optcqe->status),
				     le16_to_cpu(optcqe->rq_id), *index,
				     le16_to_cpu(optcqe->data_placement_length));

			efc_log_info(sli4, "hdpl=%x oox=%d agxr=%d xri=0x%x rpi=%x\n",
				     (optcqe->hdpl_vld & SLI4_OCQE_HDPL),
				     (optcqe->flags1 & SLI4_OCQE_OOX),
				     (optcqe->flags1 & SLI4_OCQE_AGXR),
				     optcqe->xri, le16_to_cpu(optcqe->rpi));
		}
	} else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
		struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;

		rq_element_index = (le16_to_cpu(rcqe->rq_elmt_indx_word) &
				    SLI4_RCQE_RQ_EL_INDX);

		*rq_id = le16_to_cpu(rcqe->rq_id);
		if (rcqe->status == SLI4_FC_COALESCE_RQ_SUCCESS) {
			*index = rq_element_index;
			rc = 0;
		} else {
			*index = U32_MAX;
			rc = rcqe->status;

			efc_log_info(sli4, "stat=%02x (%s) rq_id=%d, idx=%x\n",
				     rcqe->status,
				     sli_fc_get_status_string(rcqe->status),
				     le16_to_cpu(rcqe->rq_id), rq_element_index);
			efc_log_info(sli4, "rq_id=%#x sdpl=%x\n",
				     le16_to_cpu(rcqe->rq_id),
				     le16_to_cpu(rcqe->seq_placement_length));
		}
	} else {
		struct sli4_fc_async_rcqe *rcqe = (void *)cqe;

		*index = U32_MAX;
		rc = rcqe->status;

		efc_log_info(sli4, "status=%02x rq_id=%d, index=%x pdpl=%x\n",
			     rcqe->status,
			     le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID,
			     (le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX),
			     le16_to_cpu(rcqe->data_placement_length));
		efc_log_info(sli4, "sof=%02x eof=%02x hdpl=%x\n",
			     rcqe->sof_byte, rcqe->eof_byte,
			     rcqe->hdpl_byte & SLI4_RACQE_HDPL);
	}

	return rc;
}
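/*
 * Usage sketch (illustration only, not part of the driver): one way a
 * completion handler might combine sli_fc_cqe_parse() and
 * sli_fc_rqe_rqid_and_index() when draining a CQ. The function name below
 * is a hypothetical placeholder for the caller's own handler.
 *
 *	static void example_process_cqe(struct sli4 *sli4,
 *					struct sli4_queue *cq, u8 *cqe)
 *	{
 *		enum sli4_qentry etype;
 *		u16 rid, rq_id;
 *		u32 rq_index;
 *		int status;
 *
 *		status = sli_fc_cqe_parse(sli4, cq, cqe, &etype, &rid);
 *		if (etype == SLI4_QENTRY_RQ &&
 *		    !sli_fc_rqe_rqid_and_index(sli4, cqe, &rq_id, &rq_index)) {
 *			// hand the RQ buffer at rq_index back to the caller
 *		}
 *	}
 */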