/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET	(cpu_to_le32(-1))

struct qed_ptt {
	struct list_head list_entry;
	unsigned int idx;
	struct pxp_ptt_entry pxp;
};

struct qed_ptt_pool {
	struct list_head free_list;
	spinlock_t lock; /* ptt synchronized access */
	struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
	int i;

	if (!p_pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		if (i >= RESERVED_PTT_MAX)
			list_add(&p_pool->ptts[i].list_entry,
				 &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	spin_lock_init(&p_pool->lock);

	return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
	}
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = NULL;
}

struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
						 struct qed_ptt, list_entry);
			list_del(&p_ptt->list_entry);

			spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);
			return p_ptt;
		}

		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
		usleep_range(1000, 2000);
	}

	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
	return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}
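/* Typical PTT usage, as a minimal sketch (the calling context and error
 * value are illustrative, not taken from this file): a flow acquires a
 * free window, performs its GRC accesses through it and returns it to
 * the pool:
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return -EAGAIN;
 *	qed_wr(p_hwfn, p_ptt, hw_addr, val);
 *	val = qed_rd(p_hwfn, p_ptt, hw_addr);
 *	qed_ptt_release(p_hwfn, p_ptt);
 */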
u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, offset),
	       le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 hw_addr)
{
	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return qed_ptt_get_bar_addr(p_ptt) + offset;
}

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
				     enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
	    struct qed_ptt *p_ptt,
	    u32 hw_addr, u32 val)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
	   struct qed_ptt *p_ptt,
	   u32 hw_addr)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
	u32 val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  void *addr, u32 hw_addr, size_t n, bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	size_t quota, done = 0;
	u32 __iomem *reg_addr;

	while (done < n) {
		quota = min_t(size_t, n - done,
			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->cdev)) {
			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = qed_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(reg_addr++);

		done += quota;
	}
}
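/* qed_memcpy_from()/qed_memcpy_to() below wrap qed_memcpy_hw(). As a worked
 * example of the chunking above: copying n = 3/2 of
 * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE bytes takes two iterations - a
 * full window-sized quota and then the remaining half - with the PTT
 * window re-aimed at hw_addr + done before each one (PF only; VFs access
 * the address directly).
 */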
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
		   hw_addr, dest, hw_addr, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
		   hw_addr, hw_addr, src, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}

/* DMAE */
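/* The DMAE engine copies between host memory and GRC addresses using a
 * per-hwfn command buffer, completion word and intermediate buffer (all
 * allocated in qed_dmae_info_alloc() below). A minimal usage sketch,
 * assuming a caller that already holds a PTT and a DMA-safe buffer buf:
 *
 *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
 *			       grc_addr, size_in_dwords, 0);
 *
 * The last argument (0 here) is a mask of QED_DMAE_FLAG_* values.
 */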
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
			    const u8 is_src_type_grc,
			    const u8 is_dst_type_grc,
			    struct qed_dmae_params *p_params)
{
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
				   : DMAE_CMD_SRC_MASK_PCIE) <<
		  DMAE_CMD_SRC_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
		   DMAE_CMD_SRC_PF_ID_SHIFT);

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
				   : DMAE_CMD_DST_MASK_PCIE) <<
		  DMAE_CMD_DST_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
		   DMAE_CMD_DST_PF_ID_SHIFT);

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

	if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
		opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);

	opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);

	opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);

	/* reset source address in next go */
	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

	/* reset dest address in next go */
	opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
		   DMAE_CMD_DST_ADDR_RESET_SHIFT);

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
		opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
			    DMAE_CMD_SRC_VF_ID_SHIFT;
	}

	if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}
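/* For example, the hwfn that owns DMAE channel 2 rings the doorbell at
 * DMAE_REG_GO_C0 + (2 << 2), i.e. the third 32-bit 'go' register in the
 * array.
 */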
static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	int qed_status = 0;

	/* verify address is not NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  le32_to_cpu(p_command->opcode),
			  le16_to_cpu(p_command->opcode_b),
			  le16_to_cpu(p_command->length_dw),
			  le32_to_cpu(p_command->src_addr_hi),
			  le32_to_cpu(p_command->src_addr_lo),
			  le32_to_cpu(p_command->dst_addr_hi),
			  le32_to_cpu(p_command->dst_addr_lo));

		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   le32_to_cpu(p_command->opcode),
		   le16_to_cpu(p_command->opcode_b),
		   le16_to_cpu(p_command->length_dw),
		   le32_to_cpu(p_command->src_addr_hi),
		   le32_to_cpu(p_command->src_addr_lo),
		   le32_to_cpu(p_command->dst_addr_hi),
		   le32_to_cpu(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * since the source/dest addresses are reset in the next go.
	 * The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers
	 * (which are read only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)p_command) + i) : 0;

		qed_wr(p_hwfn, p_ptt,
		       DMAE_REG_CMD_MEM +
		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
		       (i * sizeof(u32)), data);
	}

	qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

	return qed_status;
}

int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32), p_addr, GFP_KERNEL);
	if (!*p_comp)
		goto err;

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    sizeof(struct dmae_cmd),
				    p_addr, GFP_KERNEL);
	if (!*p_cmd)
		goto err;

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32) * DMAE_MAX_RW_SIZE,
				     p_addr, GFP_KERNEL);
	if (!*p_buff)
		goto err;

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return 0;
err:
	qed_dmae_info_free(p_hwfn);
	return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	mutex_lock(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32),
				  p_hwfn->dmae_info.p_completion_word, p_phys);
		p_hwfn->dmae_info.p_completion_word = NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(struct dmae_cmd),
				  p_hwfn->dmae_info.p_dmae_cmd, p_phys);
		p_hwfn->dmae_info.p_dmae_cmd = NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32) * DMAE_MAX_RW_SIZE,
				  p_hwfn->dmae_info.p_intermediate_buffer,
				  p_phys);
		p_hwfn->dmae_info.p_intermediate_buffer = NULL;
	}

	mutex_unlock(&p_hwfn->dmae_info.mutex);
}
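/* Poll the completion word the engine writes back to host memory. The
 * worst-case wait before returning -EBUSY is wait_cnt_limit (10000)
 * iterations of udelay(DMAE_MIN_WAIT_TIME) each.
 */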
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	int qed_status = 0;

	barrier();
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		udelay(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->cdev,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			qed_status = -EBUSY;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		barrier();
	}

	if (qed_status == 0)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return qed_status;
}
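/* Execute a single DMAE command. Host virtual addresses are staged through
 * the pre-mapped intermediate buffer: copied into it before posting when
 * they are the source, and copied out of it after completion when they are
 * the destination. length_dw must not exceed DMAE_MAX_RW_SIZE, the size
 * of that buffer in dwords.
 */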
static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u64 src_addr,
					  u64 dst_addr,
					  u8 src_type,
					  u8 dst_type,
					  u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	int qed_status = 0;

	switch (src_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
		       (void *)(uintptr_t)src_addr,
		       length_dw * sizeof(u32));
		break;
	default:
		return -EINVAL;
	}

	switch (dst_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
		break;
	default:
		return -EINVAL;
	}

	cmd->length_dw = cpu_to_le16((u16)length_dw);

	qed_dmae_post_command(p_hwfn, p_ptt);

	qed_status = qed_dmae_operation_wait(p_hwfn);

	if (qed_status) {
		DP_NOTICE(p_hwfn,
			  "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
			  src_addr, dst_addr, length_dw);
		return qed_status;
	}

	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
		memcpy((void *)(uintptr_t)(dst_addr),
		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
		       length_dw * sizeof(u32));

	return 0;
}
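/* Split a transfer into DMAE_MAX_RW_SIZE-dword sub-operations. For example,
 * a request of (2 * DMAE_MAX_RW_SIZE + 10) dwords issues two full-sized
 * sub-operations followed by a 10-dword remainder; GRC offsets advance in
 * dwords while host addresses advance in bytes (offset * 4).
 */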
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u64 src_addr, u64 dst_addr,
				    u8 src_type, u8 dst_type,
				    u32 size_in_dwords,
				    struct qed_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	int qed_status = 0;
	u32 offset = 0;

	qed_dmae_opcode(p_hwfn,
			(src_type == QED_DMAE_ADDRESS_GRC),
			(dst_type == QED_DMAE_ADDRESS_GRC),
			p_params);

	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

	/* Compute how many full length_limit chunks are needed, plus a
	 * possible remainder for the last sub-operation.
	 */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == QED_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == QED_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
							    p_ptt,
							    src_addr_split,
							    dst_addr_split,
							    src_type,
							    dst_type,
							    length_cur);
		if (qed_status) {
			DP_NOTICE(p_hwfn,
				  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
				  qed_status, src_addr, dst_addr, length_cur);
			break;
		}
	}

	return qed_status;
}

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = flags;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      grc_addr_in_dw,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      QED_DMAE_ADDRESS_GRC,
				      size_in_dwords, &params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u32 grc_addr,
		      dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = flags;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
				      dest_addr, QED_DMAE_ADDRESS_GRC,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      size_in_dwords, &params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       dma_addr_t source_addr,
		       dma_addr_t dest_addr,
		       u32 size_in_dwords, struct qed_dmae_params *p_params)
{
	int rc;

	mutex_lock(&(p_hwfn->dmae_info.mutex));

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      dest_addr,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      size_in_dwords, p_params);

	mutex_unlock(&(p_hwfn->dmae_info.mutex));

	return rc;
}

u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
		  enum protocol_type proto, union qed_qm_pq_params *p_params)
{
	u16 pq_id = 0;

	if ((proto == PROTOCOLID_CORE ||
	     proto == PROTOCOLID_ETH ||
	     proto == PROTOCOLID_ISCSI ||
	     proto == PROTOCOLID_ROCE) && !p_params) {
		DP_NOTICE(p_hwfn,
			  "Protocol %d received NULL PQ params\n", proto);
		return 0;
	}

	switch (proto) {
	case PROTOCOLID_CORE:
		if (p_params->core.tc == LB_TC)
			pq_id = p_hwfn->qm_info.pure_lb_pq;
		else if (p_params->core.tc == OOO_LB_TC)
			pq_id = p_hwfn->qm_info.ooo_pq;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	case PROTOCOLID_ETH:
		pq_id = p_params->eth.tc;
		if (p_params->eth.is_vf)
			pq_id += p_hwfn->qm_info.vf_queues_offset +
				 p_params->eth.vf_id;
		break;
	case PROTOCOLID_ISCSI:
		if (p_params->iscsi.q_idx == 1)
			pq_id = p_hwfn->qm_info.pure_ack_pq;
		break;
	case PROTOCOLID_ROCE:
		if (p_params->roce.dcqcn)
			pq_id = p_params->roce.qpid;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		if (pq_id > p_hwfn->qm_info.num_pf_rls)
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	default:
		pq_id = 0;
	}

	pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

	return pq_id;
}