/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET	(cpu_to_le32(-1))

struct qed_ptt {
	struct list_head	list_entry;
	unsigned int		idx;
	struct pxp_ptt_entry	pxp;
};

struct qed_ptt_pool {
	struct list_head	free_list;
	spinlock_t		lock; /* ptt synchronized access */
	struct qed_ptt		ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
					      GFP_KERNEL);
	int i;

	if (!p_pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		if (i >= RESERVED_PTT_MAX)
			list_add(&p_pool->ptts[i].list_entry,
				 &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	spin_lock_init(&p_pool->lock);

	return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
	}
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = NULL;
}

struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
						 struct qed_ptt, list_entry);
			list_del(&p_ptt->list_entry);

			spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);
			return p_ptt;
		}

		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
		usleep_range(1000, 2000);
	}

	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
	return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt)
{
	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}

u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}
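
/* Illustrative sketch, not part of the driver flow: the canonical pattern
 * for accessing a run-time GRC register through a PTT window (grc_addr is
 * a stand-in for any GRC address; qed_rd()/qed_wr() are defined further
 * down in this file):
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return -EAGAIN;
 *	val = qed_rd(p_hwfn, p_ptt, grc_addr);
 *	qed_wr(p_hwfn, p_ptt, grc_addr, val);
 *	qed_ptt_release(p_hwfn, p_ptt);
 *
 * The acquire/release pair is what makes concurrent users safe: each holder
 * owns a distinct BAR window, so re-aiming it cannot race another context.
 */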

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, offset),
	       le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 hw_addr)
{
	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return qed_ptt_get_bar_addr(p_ptt) + offset;
}

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
				     enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
	    struct qed_ptt *p_ptt,
	    u32 hw_addr, u32 val)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
	   struct qed_ptt *p_ptt,
	   u32 hw_addr)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
	u32 val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  void *addr,
			  u32 hw_addr,
			  size_t n,
			  bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	size_t quota, done = 0;
	u32 __iomem *reg_addr;

	while (done < n) {
		quota = min_t(size_t, n - done,
			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
		hw_offset = qed_ptt_get_bar_addr(p_ptt);

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(reg_addr++);

		done += quota;
	}
}

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     void *dest, u32 hw_addr, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
		   hw_addr, dest, hw_addr, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}
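
/* Worked example of the qed_set_ptt() arithmetic (a sketch assuming, for
 * illustration, a 0x1000-byte window; the real size is
 * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE): with a PTT window aimed at
 * hw_addr 0x50000, qed_set_ptt(p_hwfn, p_ptt, 0x50010) is a hit and
 * returns qed_ptt_get_bar_addr(p_ptt) + 0x10, while 0x51000 falls outside
 * [0x50000, 0x50FFF], re-aims the window via qed_ptt_set_win() and
 * returns the window base with offset 0.
 */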

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u32 hw_addr, void *src, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
		   hw_addr, hw_addr, src, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void qed_fid_pretend(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}
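
/* Illustrative pretend sequence (a sketch; vf_fid and own_fid are
 * hypothetical stand-ins, not values defined in this file):
 *
 *	qed_fid_pretend(p_hwfn, p_ptt, vf_fid);
 *	val = qed_rd(p_hwfn, p_ptt, grc_addr);
 *	qed_fid_pretend(p_hwfn, p_ptt, own_fid);
 *
 * The read in the middle is issued by the hardware as if the VF had made
 * it. Since every pretend write replaces the previous one, the PF restores
 * itself by pretending back to its own FID; only the port pretend has a
 * dedicated undo helper, qed_port_unpretend().
 */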

/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
			    const u8 is_src_type_grc,
			    const u8 is_dst_type_grc,
			    struct qed_dmae_params *p_params)
{
	u32 opcode = 0;
	u16 opcodeB = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
				   : DMAE_CMD_SRC_MASK_PCIE) <<
		  DMAE_CMD_SRC_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
		   DMAE_CMD_SRC_PF_ID_SHIFT);

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
				   : DMAE_CMD_DST_MASK_PCIE) <<
		  DMAE_CMD_DST_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
		   DMAE_CMD_DST_PF_ID_SHIFT);

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

	if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
		opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);

	opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);

	opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);

	/* reset source address in next go */
	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

	/* reset dest address in next go */
	opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
		   DMAE_CMD_DST_ADDR_RESET_SHIFT);

	opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
		    DMAE_CMD_SRC_VF_ID_SHIFT);

	opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
		    DMAE_CMD_DST_VF_ID_SHIFT);

	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}
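
/* For example (assuming consecutive 4-byte registers, which is what the
 * idx << 2 above encodes): channel 0 kicks DMAE_REG_GO_C0, while channel 3
 * kicks DMAE_REG_GO_C0 + 12, i.e. the fourth register of the 'go' array.
 */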

static int
qed_dmae_post_command(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt)
{
	struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	int qed_status = 0;

	/* verify address is not NULL */
	if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
	     ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
		DP_NOTICE(p_hwfn,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  le32_to_cpu(command->opcode),
			  le16_to_cpu(command->opcode_b),
			  le16_to_cpu(command->length),
			  le32_to_cpu(command->src_addr_hi),
			  le32_to_cpu(command->src_addr_lo),
			  le32_to_cpu(command->dst_addr_hi),
			  le32_to_cpu(command->dst_addr_lo));

		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   le32_to_cpu(command->opcode),
		   le16_to_cpu(command->opcode_b),
		   le16_to_cpu(command->length),
		   le32_to_cpu(command->src_addr_hi),
		   le32_to_cpu(command->src_addr_lo),
		   le32_to_cpu(command->dst_addr_hi),
		   le32_to_cpu(command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * for source/dest address no reset.
	 * The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers
	 * (which are read only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)command) + i) : 0;

		qed_wr(p_hwfn, p_ptt,
		       DMAE_REG_CMD_MEM +
		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
		       (i * sizeof(u32)), data);
	}

	qed_wr(p_hwfn, p_ptt,
	       qed_dmae_idx_to_go_cmd(idx_cmd),
	       DMAE_GO_VALUE);

	return qed_status;
}

int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32),
				     p_addr,
				     GFP_KERNEL);
	if (!*p_comp) {
		DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    sizeof(struct dmae_cmd),
				    p_addr, GFP_KERNEL);
	if (!*p_cmd) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32) * DMAE_MAX_RW_SIZE,
				     p_addr, GFP_KERNEL);
	if (!*p_buff) {
		DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
		goto err;
	}

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return 0;
err:
	qed_dmae_info_free(p_hwfn);
	return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	mutex_lock(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32),
				  p_hwfn->dmae_info.p_completion_word,
				  p_phys);
		p_hwfn->dmae_info.p_completion_word = NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(struct dmae_cmd),
				  p_hwfn->dmae_info.p_dmae_cmd,
				  p_phys);
		p_hwfn->dmae_info.p_dmae_cmd = NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32) * DMAE_MAX_RW_SIZE,
				  p_hwfn->dmae_info.p_intermediate_buffer,
				  p_phys);
		p_hwfn->dmae_info.p_intermediate_buffer = NULL;
	}

	mutex_unlock(&p_hwfn->dmae_info.mutex);
}
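
/* Sketch of the intended lifetime of the buffers above (inferred from this
 * file; the actual init/teardown callers live elsewhere in the driver):
 *
 *	qed_dmae_info_alloc(p_hwfn);	(once, at hwfn init)
 *	... mutex-protected qed_dmae_host2grc() calls at run time ...
 *	qed_dmae_info_free(p_hwfn);	(once, at hwfn teardown)
 *
 * All three coherent buffers are shared by every DMAE transaction of the
 * hw-function, which is why qed_dmae_host2grc() below serializes on
 * dmae_info.mutex.
 */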

static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
	u32 wait_cnt = 0;
	u32 wait_cnt_limit = 10000;

	int qed_status = 0;

	barrier();
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		udelay(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->cdev,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			qed_status = -EBUSY;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		barrier();
	}

	if (qed_status == 0)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return qed_status;
}
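
/* Back-of-the-envelope bound for the poll loop above: the timeout is
 * roughly wait_cnt_limit * DMAE_MIN_WAIT_TIME of busy-waiting. Assuming,
 * for illustration, DMAE_MIN_WAIT_TIME is 2 microseconds (its value lives
 * in a header, not here), the 10000-iteration limit declares a stuck DMAE
 * engine -EBUSY after about 20 ms.
 */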

static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u64 src_addr,
					  u64 dst_addr,
					  u8 src_type,
					  u8 dst_type,
					  u32 length)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	int qed_status = 0;

	switch (src_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
		       (void *)(uintptr_t)src_addr,
		       length * sizeof(u32));
		break;
	default:
		return -EINVAL;
	}

	switch (dst_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
		break;
	default:
		return -EINVAL;
	}

	cmd->length = cpu_to_le16((u16)length);

	qed_dmae_post_command(p_hwfn, p_ptt);

	qed_status = qed_dmae_operation_wait(p_hwfn);

	if (qed_status) {
		DP_NOTICE(p_hwfn,
			  "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
			  src_addr,
			  dst_addr,
			  length);
		return qed_status;
	}

	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
		memcpy((void *)(uintptr_t)(dst_addr),
		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
		       length * sizeof(u32));

	return 0;
}
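
/* The command executor below splits a transaction into DMAE_MAX_RW_SIZE
 * chunks. A worked example, assuming for illustration that DMAE_MAX_RW_SIZE
 * is 0x2000 dwords: size_in_dwords = 0x4100 gives cnt_split = 2 and
 * length_mod = 0x100, so the loop issues three sub-operations of 0x2000,
 * 0x2000 and 0x100 dwords. GRC addresses advance in dwords while host
 * addresses advance in bytes (offset * 4), and QED_DMAE_FLAG_RW_REPL_SRC
 * keeps the source fixed so one source value can be replicated.
 */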

static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u64 src_addr, u64 dst_addr,
				    u8 src_type, u8 dst_type,
				    u32 size_in_dwords,
				    struct qed_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	int qed_status = 0;
	u32 offset = 0;

	qed_dmae_opcode(p_hwfn,
			(src_type == QED_DMAE_ADDRESS_GRC),
			(dst_type == QED_DMAE_ADDRESS_GRC),
			p_params);

	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

	/* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == QED_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == QED_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
							    p_ptt,
							    src_addr_split,
							    dst_addr_split,
							    src_type,
							    dst_type,
							    length_cur);
		if (qed_status) {
			DP_NOTICE(p_hwfn,
				  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
				  qed_status,
				  src_addr,
				  dst_addr,
				  length_cur);
			break;
		}
	}

	return qed_status;
}

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u64 source_addr,
		      u32 grc_addr,
		      u32 size_in_dwords,
		      u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = flags;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      grc_addr_in_dw,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      QED_DMAE_ADDRESS_GRC,
				      size_in_dwords, &params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
		  enum protocol_type proto,
		  union qed_qm_pq_params *p_params)
{
	u16 pq_id = 0;

	if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
	    !p_params) {
		DP_NOTICE(p_hwfn,
			  "Protocol %d received NULL PQ params\n",
			  proto);
		return 0;
	}

	switch (proto) {
	case PROTOCOLID_CORE:
		if (p_params->core.tc == LB_TC)
			pq_id = p_hwfn->qm_info.pure_lb_pq;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	case PROTOCOLID_ETH:
		pq_id = p_params->eth.tc;
		break;
	default:
		pq_id = 0;
	}

	pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

	return pq_id;
}
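
/* Example of the PQ mapping above (the numbers are illustrative; the real
 * base comes from RESC_START(p_hwfn, QED_PQ)): for PROTOCOLID_ETH with
 * p_params->eth.tc = 3 and a PQ resource range starting at 16, the
 * returned physical queue id is CM_TX_PQ_BASE + 3 + 16.
 */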