1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 /* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2020 Marvell International Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <asm/byteorder.h> 9 #include <linux/dma-mapping.h> 10 #include <linux/if_vlan.h> 11 #include <linux/kernel.h> 12 #include <linux/pci.h> 13 #include <linux/slab.h> 14 #include <linux/stddef.h> 15 #include <linux/workqueue.h> 16 #include <net/ipv6.h> 17 #include <linux/bitops.h> 18 #include <linux/delay.h> 19 #include <linux/errno.h> 20 #include <linux/etherdevice.h> 21 #include <linux/io.h> 22 #include <linux/list.h> 23 #include <linux/mutex.h> 24 #include <linux/spinlock.h> 25 #include <linux/string.h> 26 #include <linux/qed/qed_ll2_if.h> 27 #include "qed.h" 28 #include "qed_cxt.h" 29 #include "qed_dev_api.h" 30 #include "qed_hsi.h" 31 #include "qed_hw.h" 32 #include "qed_int.h" 33 #include "qed_ll2.h" 34 #include "qed_mcp.h" 35 #include "qed_ooo.h" 36 #include "qed_reg_addr.h" 37 #include "qed_sp.h" 38 #include "qed_rdma.h" 39 40 #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered) 41 #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered) 42 43 #define QED_LL2_TX_SIZE (256) 44 #define QED_LL2_RX_SIZE (4096) 45 46 struct qed_cb_ll2_info { 47 int rx_cnt; 48 u32 rx_size; 49 u8 handle; 50 51 /* Lock protecting LL2 buffer lists in sleepless context */ 52 spinlock_t lock; 53 struct list_head list; 54 55 const struct qed_ll2_cb_ops *cbs; 56 void *cb_cookie; 57 }; 58 59 struct qed_ll2_buffer { 60 struct list_head list; 61 void *data; 62 dma_addr_t phys_addr; 63 }; 64 65 static void qed_ll2b_complete_tx_packet(void *cxt, 66 u8 connection_handle, 67 void *cookie, 68 dma_addr_t first_frag_addr, 69 bool b_last_fragment, 70 bool b_last_packet) 71 { 72 struct qed_hwfn *p_hwfn = cxt; 73 struct qed_dev *cdev = p_hwfn->cdev; 74 struct sk_buff *skb = cookie; 75 76 /* All we need to do is release the mapping */ 77 dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr, 78 skb_headlen(skb), DMA_TO_DEVICE); 79 80 if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb) 81 cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb, 82 b_last_fragment); 83 84 dev_kfree_skb_any(skb); 85 } 86 87 static int qed_ll2_alloc_buffer(struct qed_dev *cdev, 88 u8 **data, dma_addr_t *phys_addr) 89 { 90 *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC); 91 if (!(*data)) { 92 DP_INFO(cdev, "Failed to allocate LL2 buffer data\n"); 93 return -ENOMEM; 94 } 95 96 *phys_addr = dma_map_single(&cdev->pdev->dev, 97 ((*data) + NET_SKB_PAD), 98 cdev->ll2->rx_size, DMA_FROM_DEVICE); 99 if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) { 100 DP_INFO(cdev, "Failed to map LL2 buffer data\n"); 101 kfree((*data)); 102 return -ENOMEM; 103 } 104 105 return 0; 106 } 107 108 static int qed_ll2_dealloc_buffer(struct qed_dev *cdev, 109 struct qed_ll2_buffer *buffer) 110 { 111 spin_lock_bh(&cdev->ll2->lock); 112 113 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, 114 cdev->ll2->rx_size, DMA_FROM_DEVICE); 115 kfree(buffer->data); 116 list_del(&buffer->list); 117 118 cdev->ll2->rx_cnt--; 119 if (!cdev->ll2->rx_cnt) 120 DP_INFO(cdev, "All LL2 entries were removed\n"); 121 122 spin_unlock_bh(&cdev->ll2->lock); 123 124 return 0; 125 } 126 127 static void qed_ll2_kill_buffers(struct qed_dev *cdev) 128 { 129 struct qed_ll2_buffer *buffer, *tmp_buffer; 130 131 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) 132 qed_ll2_dealloc_buffer(cdev, buffer); 133 } 134 135 static void qed_ll2b_complete_rx_packet(void *cxt, 136 struct qed_ll2_comp_rx_data *data) 137 { 138 struct qed_hwfn *p_hwfn = cxt; 139 struct qed_ll2_buffer *buffer = data->cookie; 140 struct qed_dev *cdev = p_hwfn->cdev; 141 dma_addr_t new_phys_addr; 142 struct sk_buff *skb; 143 bool reuse = false; 144 int rc = -EINVAL; 145 u8 *new_data; 146 147 DP_VERBOSE(p_hwfn, 148 (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA), 149 "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n", 150 (u64)data->rx_buf_addr, 151 data->u.placement_offset, 152 data->length.packet_length, 153 data->parse_flags, 154 data->vlan, data->opaque_data_0, data->opaque_data_1); 155 156 if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) { 157 print_hex_dump(KERN_INFO, "", 158 DUMP_PREFIX_OFFSET, 16, 1, 159 buffer->data, data->length.packet_length, false); 160 } 161 162 /* Determine if data is valid */ 163 if (data->length.packet_length < ETH_HLEN) 164 reuse = true; 165 166 /* Allocate a replacement for buffer; Reuse upon failure */ 167 if (!reuse) 168 rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data, 169 &new_phys_addr); 170 171 /* If need to reuse or there's no replacement buffer, repost this */ 172 if (rc) 173 goto out_post; 174 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, 175 cdev->ll2->rx_size, DMA_FROM_DEVICE); 176 177 skb = build_skb(buffer->data, 0); 178 if (!skb) { 179 DP_INFO(cdev, "Failed to build SKB\n"); 180 kfree(buffer->data); 181 goto out_post1; 182 } 183 184 data->u.placement_offset += NET_SKB_PAD; 185 skb_reserve(skb, data->u.placement_offset); 186 skb_put(skb, data->length.packet_length); 187 skb_checksum_none_assert(skb); 188 189 /* Get parital ethernet information instead of eth_type_trans(), 190 * Since we don't have an associated net_device. 191 */ 192 skb_reset_mac_header(skb); 193 skb->protocol = eth_hdr(skb)->h_proto; 194 195 /* Pass SKB onward */ 196 if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) { 197 if (data->vlan) 198 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 199 data->vlan); 200 cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, 201 data->opaque_data_0, 202 data->opaque_data_1); 203 } else { 204 DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA | 205 QED_MSG_LL2 | QED_MSG_STORAGE), 206 "Dropping the packet\n"); 207 kfree(buffer->data); 208 } 209 210 out_post1: 211 /* Update Buffer information and update FW producer */ 212 buffer->data = new_data; 213 buffer->phys_addr = new_phys_addr; 214 215 out_post: 216 rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle, 217 buffer->phys_addr, 0, buffer, 1); 218 if (rc) 219 qed_ll2_dealloc_buffer(cdev, buffer); 220 } 221 222 static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, 223 u8 connection_handle, 224 bool b_lock, 225 bool b_only_active) 226 { 227 struct qed_ll2_info *p_ll2_conn, *p_ret = NULL; 228 229 if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) 230 return NULL; 231 232 if (!p_hwfn->p_ll2_info) 233 return NULL; 234 235 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; 236 237 if (b_only_active) { 238 if (b_lock) 239 mutex_lock(&p_ll2_conn->mutex); 240 if (p_ll2_conn->b_active) 241 p_ret = p_ll2_conn; 242 if (b_lock) 243 mutex_unlock(&p_ll2_conn->mutex); 244 } else { 245 p_ret = p_ll2_conn; 246 } 247 248 return p_ret; 249 } 250 251 static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, 252 u8 connection_handle) 253 { 254 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true); 255 } 256 257 static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn, 258 u8 connection_handle) 259 { 260 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true); 261 } 262 263 static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn 264 *p_hwfn, 265 u8 connection_handle) 266 { 267 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false); 268 } 269 270 static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) 271 { 272 bool b_last_packet = false, b_last_frag = false; 273 struct qed_ll2_tx_packet *p_pkt = NULL; 274 struct qed_ll2_info *p_ll2_conn; 275 struct qed_ll2_tx_queue *p_tx; 276 unsigned long flags = 0; 277 dma_addr_t tx_frag; 278 279 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); 280 if (!p_ll2_conn) 281 return; 282 283 p_tx = &p_ll2_conn->tx_queue; 284 285 spin_lock_irqsave(&p_tx->lock, flags); 286 while (!list_empty(&p_tx->active_descq)) { 287 p_pkt = list_first_entry(&p_tx->active_descq, 288 struct qed_ll2_tx_packet, list_entry); 289 if (!p_pkt) 290 break; 291 292 list_del(&p_pkt->list_entry); 293 b_last_packet = list_empty(&p_tx->active_descq); 294 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); 295 spin_unlock_irqrestore(&p_tx->lock, flags); 296 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { 297 struct qed_ooo_buffer *p_buffer; 298 299 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; 300 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, 301 p_buffer); 302 } else { 303 p_tx->cur_completing_packet = *p_pkt; 304 p_tx->cur_completing_bd_idx = 1; 305 b_last_frag = 306 p_tx->cur_completing_bd_idx == p_pkt->bd_used; 307 tx_frag = p_pkt->bds_set[0].tx_frag; 308 p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie, 309 p_ll2_conn->my_id, 310 p_pkt->cookie, 311 tx_frag, 312 b_last_frag, 313 b_last_packet); 314 } 315 spin_lock_irqsave(&p_tx->lock, flags); 316 } 317 spin_unlock_irqrestore(&p_tx->lock, flags); 318 } 319 320 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) 321 { 322 struct qed_ll2_info *p_ll2_conn = p_cookie; 323 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 324 u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0; 325 struct qed_ll2_tx_packet *p_pkt; 326 bool b_last_frag = false; 327 unsigned long flags; 328 int rc = -EINVAL; 329 330 spin_lock_irqsave(&p_tx->lock, flags); 331 if (p_tx->b_completing_packet) { 332 rc = -EBUSY; 333 goto out; 334 } 335 336 new_idx = le16_to_cpu(*p_tx->p_fw_cons); 337 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); 338 while (num_bds) { 339 if (list_empty(&p_tx->active_descq)) 340 goto out; 341 342 p_pkt = list_first_entry(&p_tx->active_descq, 343 struct qed_ll2_tx_packet, list_entry); 344 if (!p_pkt) 345 goto out; 346 347 p_tx->b_completing_packet = true; 348 p_tx->cur_completing_packet = *p_pkt; 349 num_bds_in_packet = p_pkt->bd_used; 350 list_del(&p_pkt->list_entry); 351 352 if (num_bds < num_bds_in_packet) { 353 DP_NOTICE(p_hwfn, 354 "Rest of BDs does not cover whole packet\n"); 355 goto out; 356 } 357 358 num_bds -= num_bds_in_packet; 359 p_tx->bds_idx += num_bds_in_packet; 360 while (num_bds_in_packet--) 361 qed_chain_consume(&p_tx->txq_chain); 362 363 p_tx->cur_completing_bd_idx = 1; 364 b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; 365 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); 366 367 spin_unlock_irqrestore(&p_tx->lock, flags); 368 369 p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie, 370 p_ll2_conn->my_id, 371 p_pkt->cookie, 372 p_pkt->bds_set[0].tx_frag, 373 b_last_frag, !num_bds); 374 375 spin_lock_irqsave(&p_tx->lock, flags); 376 } 377 378 p_tx->b_completing_packet = false; 379 rc = 0; 380 out: 381 spin_unlock_irqrestore(&p_tx->lock, flags); 382 return rc; 383 } 384 385 static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn, 386 union core_rx_cqe_union *p_cqe, 387 struct qed_ll2_comp_rx_data *data) 388 { 389 data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags); 390 data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length); 391 data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan); 392 data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); 393 data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); 394 data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error; 395 data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id); 396 397 data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp); 398 } 399 400 static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, 401 union core_rx_cqe_union *p_cqe, 402 struct qed_ll2_comp_rx_data *data) 403 { 404 data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags); 405 data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags); 406 data->length.packet_length = 407 le16_to_cpu(p_cqe->rx_cqe_fp.packet_length); 408 data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan); 409 data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]); 410 data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]); 411 data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset; 412 } 413 414 static int 415 qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn, 416 struct qed_ll2_info *p_ll2_conn, 417 union core_rx_cqe_union *p_cqe, 418 unsigned long *p_lock_flags) 419 { 420 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 421 struct core_rx_slow_path_cqe *sp_cqe; 422 423 sp_cqe = &p_cqe->rx_cqe_sp; 424 if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) { 425 DP_NOTICE(p_hwfn, 426 "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n", 427 sp_cqe->ramrod_cmd_id); 428 return -EINVAL; 429 } 430 431 if (!p_ll2_conn->cbs.slowpath_cb) { 432 DP_NOTICE(p_hwfn, 433 "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n"); 434 return -EINVAL; 435 } 436 437 spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); 438 439 p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie, 440 p_ll2_conn->my_id, 441 le32_to_cpu(sp_cqe->opaque_data.data[0]), 442 le32_to_cpu(sp_cqe->opaque_data.data[1])); 443 444 spin_lock_irqsave(&p_rx->lock, *p_lock_flags); 445 446 return 0; 447 } 448 449 static int 450 qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, 451 struct qed_ll2_info *p_ll2_conn, 452 union core_rx_cqe_union *p_cqe, 453 unsigned long *p_lock_flags, bool b_last_cqe) 454 { 455 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 456 struct qed_ll2_rx_packet *p_pkt = NULL; 457 struct qed_ll2_comp_rx_data data; 458 459 if (!list_empty(&p_rx->active_descq)) 460 p_pkt = list_first_entry(&p_rx->active_descq, 461 struct qed_ll2_rx_packet, list_entry); 462 if (!p_pkt) { 463 DP_NOTICE(p_hwfn, 464 "[%d] LL2 Rx completion but active_descq is empty\n", 465 p_ll2_conn->input.conn_type); 466 467 return -EIO; 468 } 469 list_del(&p_pkt->list_entry); 470 471 if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR) 472 qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data); 473 else 474 qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data); 475 if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) 476 DP_NOTICE(p_hwfn, 477 "Mismatch between active_descq and the LL2 Rx chain\n"); 478 479 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); 480 481 data.connection_handle = p_ll2_conn->my_id; 482 data.cookie = p_pkt->cookie; 483 data.rx_buf_addr = p_pkt->rx_buf_addr; 484 data.b_last_packet = b_last_cqe; 485 486 spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); 487 p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data); 488 489 spin_lock_irqsave(&p_rx->lock, *p_lock_flags); 490 491 return 0; 492 } 493 494 static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) 495 { 496 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie; 497 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 498 union core_rx_cqe_union *cqe = NULL; 499 u16 cq_new_idx = 0, cq_old_idx = 0; 500 unsigned long flags = 0; 501 int rc = 0; 502 503 spin_lock_irqsave(&p_rx->lock, flags); 504 cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); 505 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); 506 507 while (cq_new_idx != cq_old_idx) { 508 bool b_last_cqe = (cq_new_idx == cq_old_idx); 509 510 cqe = 511 (union core_rx_cqe_union *) 512 qed_chain_consume(&p_rx->rcq_chain); 513 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); 514 515 DP_VERBOSE(p_hwfn, 516 QED_MSG_LL2, 517 "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n", 518 cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type); 519 520 switch (cqe->rx_cqe_sp.type) { 521 case CORE_RX_CQE_TYPE_SLOW_PATH: 522 rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn, 523 cqe, &flags); 524 break; 525 case CORE_RX_CQE_TYPE_GSI_OFFLOAD: 526 case CORE_RX_CQE_TYPE_REGULAR: 527 rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn, 528 cqe, &flags, 529 b_last_cqe); 530 break; 531 default: 532 rc = -EIO; 533 } 534 } 535 536 spin_unlock_irqrestore(&p_rx->lock, flags); 537 return rc; 538 } 539 540 static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) 541 { 542 struct qed_ll2_info *p_ll2_conn = NULL; 543 struct qed_ll2_rx_packet *p_pkt = NULL; 544 struct qed_ll2_rx_queue *p_rx; 545 unsigned long flags = 0; 546 547 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); 548 if (!p_ll2_conn) 549 return; 550 551 p_rx = &p_ll2_conn->rx_queue; 552 553 spin_lock_irqsave(&p_rx->lock, flags); 554 while (!list_empty(&p_rx->active_descq)) { 555 p_pkt = list_first_entry(&p_rx->active_descq, 556 struct qed_ll2_rx_packet, list_entry); 557 if (!p_pkt) 558 break; 559 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); 560 spin_unlock_irqrestore(&p_rx->lock, flags); 561 562 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { 563 struct qed_ooo_buffer *p_buffer; 564 565 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; 566 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, 567 p_buffer); 568 } else { 569 dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr; 570 void *cookie = p_pkt->cookie; 571 bool b_last; 572 573 b_last = list_empty(&p_rx->active_descq); 574 p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie, 575 p_ll2_conn->my_id, 576 cookie, 577 rx_buf_addr, b_last); 578 } 579 spin_lock_irqsave(&p_rx->lock, flags); 580 } 581 spin_unlock_irqrestore(&p_rx->lock, flags); 582 } 583 584 static bool 585 qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, 586 struct core_rx_slow_path_cqe *p_cqe) 587 { 588 struct ooo_opaque *iscsi_ooo; 589 u32 cid; 590 591 if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) 592 return false; 593 594 iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data; 595 if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES) 596 return false; 597 598 /* Need to make a flush */ 599 cid = le32_to_cpu(iscsi_ooo->cid); 600 qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid); 601 602 return true; 603 } 604 605 static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, 606 struct qed_ll2_info *p_ll2_conn) 607 { 608 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 609 u16 packet_length = 0, parse_flags = 0, vlan = 0; 610 struct qed_ll2_rx_packet *p_pkt = NULL; 611 u32 num_ooo_add_to_peninsula = 0, cid; 612 union core_rx_cqe_union *cqe = NULL; 613 u16 cq_new_idx = 0, cq_old_idx = 0; 614 struct qed_ooo_buffer *p_buffer; 615 struct ooo_opaque *iscsi_ooo; 616 u8 placement_offset = 0; 617 u8 cqe_type; 618 619 cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); 620 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); 621 if (cq_new_idx == cq_old_idx) 622 return 0; 623 624 while (cq_new_idx != cq_old_idx) { 625 struct core_rx_fast_path_cqe *p_cqe_fp; 626 627 cqe = qed_chain_consume(&p_rx->rcq_chain); 628 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); 629 cqe_type = cqe->rx_cqe_sp.type; 630 631 if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH) 632 if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn, 633 &cqe->rx_cqe_sp)) 634 continue; 635 636 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { 637 DP_NOTICE(p_hwfn, 638 "Got a non-regular LB LL2 completion [type 0x%02x]\n", 639 cqe_type); 640 return -EINVAL; 641 } 642 p_cqe_fp = &cqe->rx_cqe_fp; 643 644 placement_offset = p_cqe_fp->placement_offset; 645 parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags); 646 packet_length = le16_to_cpu(p_cqe_fp->packet_length); 647 vlan = le16_to_cpu(p_cqe_fp->vlan); 648 iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data; 649 qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, 650 iscsi_ooo); 651 cid = le32_to_cpu(iscsi_ooo->cid); 652 653 /* Process delete isle first */ 654 if (iscsi_ooo->drop_size) 655 qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid, 656 iscsi_ooo->drop_isle, 657 iscsi_ooo->drop_size); 658 659 if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP) 660 continue; 661 662 /* Now process create/add/join isles */ 663 if (list_empty(&p_rx->active_descq)) { 664 DP_NOTICE(p_hwfn, 665 "LL2 OOO RX chain has no submitted buffers\n" 666 ); 667 return -EIO; 668 } 669 670 p_pkt = list_first_entry(&p_rx->active_descq, 671 struct qed_ll2_rx_packet, list_entry); 672 673 if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) || 674 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) || 675 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) || 676 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) || 677 (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) { 678 if (!p_pkt) { 679 DP_NOTICE(p_hwfn, 680 "LL2 OOO RX packet is not valid\n"); 681 return -EIO; 682 } 683 list_del(&p_pkt->list_entry); 684 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; 685 p_buffer->packet_length = packet_length; 686 p_buffer->parse_flags = parse_flags; 687 p_buffer->vlan = vlan; 688 p_buffer->placement_offset = placement_offset; 689 qed_chain_consume(&p_rx->rxq_chain); 690 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); 691 692 switch (iscsi_ooo->ooo_opcode) { 693 case TCP_EVENT_ADD_NEW_ISLE: 694 qed_ooo_add_new_isle(p_hwfn, 695 p_hwfn->p_ooo_info, 696 cid, 697 iscsi_ooo->ooo_isle, 698 p_buffer); 699 break; 700 case TCP_EVENT_ADD_ISLE_RIGHT: 701 qed_ooo_add_new_buffer(p_hwfn, 702 p_hwfn->p_ooo_info, 703 cid, 704 iscsi_ooo->ooo_isle, 705 p_buffer, 706 QED_OOO_RIGHT_BUF); 707 break; 708 case TCP_EVENT_ADD_ISLE_LEFT: 709 qed_ooo_add_new_buffer(p_hwfn, 710 p_hwfn->p_ooo_info, 711 cid, 712 iscsi_ooo->ooo_isle, 713 p_buffer, 714 QED_OOO_LEFT_BUF); 715 break; 716 case TCP_EVENT_JOIN: 717 qed_ooo_add_new_buffer(p_hwfn, 718 p_hwfn->p_ooo_info, 719 cid, 720 iscsi_ooo->ooo_isle + 721 1, 722 p_buffer, 723 QED_OOO_LEFT_BUF); 724 qed_ooo_join_isles(p_hwfn, 725 p_hwfn->p_ooo_info, 726 cid, iscsi_ooo->ooo_isle); 727 break; 728 case TCP_EVENT_ADD_PEN: 729 num_ooo_add_to_peninsula++; 730 qed_ooo_put_ready_buffer(p_hwfn, 731 p_hwfn->p_ooo_info, 732 p_buffer, true); 733 break; 734 } 735 } else { 736 DP_NOTICE(p_hwfn, 737 "Unexpected event (%d) TX OOO completion\n", 738 iscsi_ooo->ooo_opcode); 739 } 740 } 741 742 return 0; 743 } 744 745 static void 746 qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, 747 struct qed_ll2_info *p_ll2_conn) 748 { 749 struct qed_ll2_tx_pkt_info tx_pkt; 750 struct qed_ooo_buffer *p_buffer; 751 u16 l4_hdr_offset_w; 752 dma_addr_t first_frag; 753 u8 bd_flags; 754 int rc; 755 756 /* Submit Tx buffers here */ 757 while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, 758 p_hwfn->p_ooo_info))) { 759 l4_hdr_offset_w = 0; 760 bd_flags = 0; 761 762 first_frag = p_buffer->rx_buffer_phys_addr + 763 p_buffer->placement_offset; 764 SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); 765 SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); 766 767 memset(&tx_pkt, 0, sizeof(tx_pkt)); 768 tx_pkt.num_of_bds = 1; 769 tx_pkt.vlan = p_buffer->vlan; 770 tx_pkt.bd_flags = bd_flags; 771 tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; 772 switch (p_ll2_conn->tx_dest) { 773 case CORE_TX_DEST_NW: 774 tx_pkt.tx_dest = QED_LL2_TX_DEST_NW; 775 break; 776 case CORE_TX_DEST_LB: 777 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; 778 break; 779 case CORE_TX_DEST_DROP: 780 default: 781 tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; 782 break; 783 } 784 tx_pkt.first_frag = first_frag; 785 tx_pkt.first_frag_len = p_buffer->packet_length; 786 tx_pkt.cookie = p_buffer; 787 788 rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 789 &tx_pkt, true); 790 if (rc) { 791 qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, 792 p_buffer, false); 793 break; 794 } 795 } 796 } 797 798 static void 799 qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn, 800 struct qed_ll2_info *p_ll2_conn) 801 { 802 struct qed_ooo_buffer *p_buffer; 803 int rc; 804 805 while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, 806 p_hwfn->p_ooo_info))) { 807 rc = qed_ll2_post_rx_buffer(p_hwfn, 808 p_ll2_conn->my_id, 809 p_buffer->rx_buffer_phys_addr, 810 0, p_buffer, true); 811 if (rc) { 812 qed_ooo_put_free_buffer(p_hwfn, 813 p_hwfn->p_ooo_info, p_buffer); 814 break; 815 } 816 } 817 } 818 819 static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) 820 { 821 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; 822 int rc; 823 824 if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) 825 return 0; 826 827 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); 828 if (rc) 829 return rc; 830 831 qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); 832 qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); 833 834 return 0; 835 } 836 837 static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) 838 { 839 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; 840 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 841 struct qed_ll2_tx_packet *p_pkt = NULL; 842 struct qed_ooo_buffer *p_buffer; 843 bool b_dont_submit_rx = false; 844 u16 new_idx = 0, num_bds = 0; 845 int rc; 846 847 if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) 848 return 0; 849 850 new_idx = le16_to_cpu(*p_tx->p_fw_cons); 851 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); 852 853 if (!num_bds) 854 return 0; 855 856 while (num_bds) { 857 if (list_empty(&p_tx->active_descq)) 858 return -EINVAL; 859 860 p_pkt = list_first_entry(&p_tx->active_descq, 861 struct qed_ll2_tx_packet, list_entry); 862 if (!p_pkt) 863 return -EINVAL; 864 865 if (p_pkt->bd_used != 1) { 866 DP_NOTICE(p_hwfn, 867 "Unexpectedly many BDs(%d) in TX OOO completion\n", 868 p_pkt->bd_used); 869 return -EINVAL; 870 } 871 872 list_del(&p_pkt->list_entry); 873 874 num_bds--; 875 p_tx->bds_idx++; 876 qed_chain_consume(&p_tx->txq_chain); 877 878 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; 879 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); 880 881 if (b_dont_submit_rx) { 882 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, 883 p_buffer); 884 continue; 885 } 886 887 rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id, 888 p_buffer->rx_buffer_phys_addr, 0, 889 p_buffer, true); 890 if (rc != 0) { 891 qed_ooo_put_free_buffer(p_hwfn, 892 p_hwfn->p_ooo_info, p_buffer); 893 b_dont_submit_rx = true; 894 } 895 } 896 897 qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); 898 899 return 0; 900 } 901 902 static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn) 903 { 904 u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; 905 906 DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2), 907 "Stopping LL2 OOO queue [%02x]\n", *handle); 908 909 qed_ll2_terminate_connection(p_hwfn, *handle); 910 qed_ll2_release_connection(p_hwfn, *handle); 911 *handle = QED_LL2_UNUSED_HANDLE; 912 } 913 914 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, 915 struct qed_ll2_info *p_ll2_conn, 916 u8 action_on_error) 917 { 918 enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; 919 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 920 struct core_rx_start_ramrod_data *p_ramrod = NULL; 921 struct qed_spq_entry *p_ent = NULL; 922 struct qed_sp_init_data init_data; 923 u16 cqe_pbl_size; 924 int rc = 0; 925 926 /* Get SPQ entry */ 927 memset(&init_data, 0, sizeof(init_data)); 928 init_data.cid = p_ll2_conn->cid; 929 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 930 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 931 932 rc = qed_sp_init_request(p_hwfn, &p_ent, 933 CORE_RAMROD_RX_QUEUE_START, 934 PROTOCOLID_CORE, &init_data); 935 if (rc) 936 return rc; 937 938 p_ramrod = &p_ent->ramrod.core_rx_queue_start; 939 memset(p_ramrod, 0, sizeof(*p_ramrod)); 940 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); 941 p_ramrod->sb_index = p_rx->rx_sb_index; 942 p_ramrod->complete_event_flg = 1; 943 944 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); 945 DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr); 946 cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); 947 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); 948 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, 949 qed_chain_get_pbl_phys(&p_rx->rcq_chain)); 950 951 p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; 952 p_ramrod->inner_vlan_stripping_en = 953 p_ll2_conn->input.rx_vlan_removal_en; 954 955 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && 956 p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) 957 p_ramrod->report_outer_vlan = 1; 958 p_ramrod->queue_id = p_ll2_conn->queue_id; 959 p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; 960 961 if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) && 962 p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE && 963 conn_type != QED_LL2_TYPE_IWARP && 964 (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) { 965 p_ramrod->mf_si_bcast_accept_all = 1; 966 p_ramrod->mf_si_mcast_accept_all = 1; 967 } else { 968 p_ramrod->mf_si_bcast_accept_all = 0; 969 p_ramrod->mf_si_mcast_accept_all = 0; 970 } 971 972 p_ramrod->action_on_error.error_type = action_on_error; 973 p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; 974 p_ramrod->zero_prod_flg = 1; 975 976 return qed_spq_post(p_hwfn, p_ent, NULL); 977 } 978 979 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, 980 struct qed_ll2_info *p_ll2_conn) 981 { 982 enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; 983 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 984 struct core_tx_start_ramrod_data *p_ramrod = NULL; 985 struct qed_spq_entry *p_ent = NULL; 986 struct qed_sp_init_data init_data; 987 u16 pq_id = 0, pbl_size; 988 int rc = -EINVAL; 989 990 if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) 991 return 0; 992 993 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) 994 p_ll2_conn->tx_stats_en = 0; 995 else 996 p_ll2_conn->tx_stats_en = 1; 997 998 /* Get SPQ entry */ 999 memset(&init_data, 0, sizeof(init_data)); 1000 init_data.cid = p_ll2_conn->cid; 1001 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1002 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1003 1004 rc = qed_sp_init_request(p_hwfn, &p_ent, 1005 CORE_RAMROD_TX_QUEUE_START, 1006 PROTOCOLID_CORE, &init_data); 1007 if (rc) 1008 return rc; 1009 1010 p_ramrod = &p_ent->ramrod.core_tx_queue_start; 1011 1012 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); 1013 p_ramrod->sb_index = p_tx->tx_sb_index; 1014 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); 1015 p_ramrod->stats_en = p_ll2_conn->tx_stats_en; 1016 p_ramrod->stats_id = p_ll2_conn->tx_stats_id; 1017 1018 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, 1019 qed_chain_get_pbl_phys(&p_tx->txq_chain)); 1020 pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain); 1021 p_ramrod->pbl_size = cpu_to_le16(pbl_size); 1022 1023 switch (p_ll2_conn->input.tx_tc) { 1024 case PURE_LB_TC: 1025 pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); 1026 break; 1027 case PKT_LB_TC: 1028 pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO); 1029 break; 1030 default: 1031 pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 1032 break; 1033 } 1034 1035 p_ramrod->qm_pq_id = cpu_to_le16(pq_id); 1036 1037 switch (conn_type) { 1038 case QED_LL2_TYPE_FCOE: 1039 p_ramrod->conn_type = PROTOCOLID_FCOE; 1040 break; 1041 case QED_LL2_TYPE_TCP_ULP: 1042 p_ramrod->conn_type = PROTOCOLID_TCP_ULP; 1043 break; 1044 case QED_LL2_TYPE_ROCE: 1045 p_ramrod->conn_type = PROTOCOLID_ROCE; 1046 break; 1047 case QED_LL2_TYPE_IWARP: 1048 p_ramrod->conn_type = PROTOCOLID_IWARP; 1049 break; 1050 case QED_LL2_TYPE_OOO: 1051 if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || 1052 p_hwfn->hw_info.personality == QED_PCI_NVMETCP) 1053 p_ramrod->conn_type = PROTOCOLID_TCP_ULP; 1054 else 1055 p_ramrod->conn_type = PROTOCOLID_IWARP; 1056 break; 1057 default: 1058 p_ramrod->conn_type = PROTOCOLID_ETH; 1059 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); 1060 } 1061 1062 p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; 1063 1064 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1065 if (rc) 1066 return rc; 1067 1068 rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr, 1069 &p_tx->db_msg, DB_REC_WIDTH_32B, 1070 DB_REC_KERNEL); 1071 return rc; 1072 } 1073 1074 static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, 1075 struct qed_ll2_info *p_ll2_conn) 1076 { 1077 struct core_rx_stop_ramrod_data *p_ramrod = NULL; 1078 struct qed_spq_entry *p_ent = NULL; 1079 struct qed_sp_init_data init_data; 1080 int rc = -EINVAL; 1081 1082 /* Get SPQ entry */ 1083 memset(&init_data, 0, sizeof(init_data)); 1084 init_data.cid = p_ll2_conn->cid; 1085 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1086 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1087 1088 rc = qed_sp_init_request(p_hwfn, &p_ent, 1089 CORE_RAMROD_RX_QUEUE_STOP, 1090 PROTOCOLID_CORE, &init_data); 1091 if (rc) 1092 return rc; 1093 1094 p_ramrod = &p_ent->ramrod.core_rx_queue_stop; 1095 1096 p_ramrod->complete_event_flg = 1; 1097 p_ramrod->queue_id = p_ll2_conn->queue_id; 1098 1099 return qed_spq_post(p_hwfn, p_ent, NULL); 1100 } 1101 1102 static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, 1103 struct qed_ll2_info *p_ll2_conn) 1104 { 1105 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 1106 struct qed_spq_entry *p_ent = NULL; 1107 struct qed_sp_init_data init_data; 1108 int rc = -EINVAL; 1109 qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg); 1110 1111 /* Get SPQ entry */ 1112 memset(&init_data, 0, sizeof(init_data)); 1113 init_data.cid = p_ll2_conn->cid; 1114 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1115 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1116 1117 rc = qed_sp_init_request(p_hwfn, &p_ent, 1118 CORE_RAMROD_TX_QUEUE_STOP, 1119 PROTOCOLID_CORE, &init_data); 1120 if (rc) 1121 return rc; 1122 1123 return qed_spq_post(p_hwfn, p_ent, NULL); 1124 } 1125 1126 static int 1127 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, 1128 struct qed_ll2_info *p_ll2_info) 1129 { 1130 struct qed_chain_init_params params = { 1131 .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, 1132 .cnt_type = QED_CHAIN_CNT_TYPE_U16, 1133 .num_elems = p_ll2_info->input.rx_num_desc, 1134 }; 1135 struct qed_dev *cdev = p_hwfn->cdev; 1136 struct qed_ll2_rx_packet *p_descq; 1137 u32 capacity; 1138 int rc = 0; 1139 1140 if (!p_ll2_info->input.rx_num_desc) 1141 goto out; 1142 1143 params.mode = QED_CHAIN_MODE_NEXT_PTR; 1144 params.elem_size = sizeof(struct core_rx_bd); 1145 1146 rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, ¶ms); 1147 if (rc) { 1148 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n"); 1149 goto out; 1150 } 1151 1152 capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain); 1153 p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet), 1154 GFP_KERNEL); 1155 if (!p_descq) { 1156 rc = -ENOMEM; 1157 DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n"); 1158 goto out; 1159 } 1160 p_ll2_info->rx_queue.descq_array = p_descq; 1161 1162 params.mode = QED_CHAIN_MODE_PBL; 1163 params.elem_size = sizeof(struct core_rx_fast_path_cqe); 1164 1165 rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, ¶ms); 1166 if (rc) { 1167 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n"); 1168 goto out; 1169 } 1170 1171 DP_VERBOSE(p_hwfn, QED_MSG_LL2, 1172 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", 1173 p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc); 1174 1175 out: 1176 return rc; 1177 } 1178 1179 static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, 1180 struct qed_ll2_info *p_ll2_info) 1181 { 1182 struct qed_chain_init_params params = { 1183 .mode = QED_CHAIN_MODE_PBL, 1184 .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, 1185 .cnt_type = QED_CHAIN_CNT_TYPE_U16, 1186 .num_elems = p_ll2_info->input.tx_num_desc, 1187 .elem_size = sizeof(struct core_tx_bd), 1188 }; 1189 struct qed_ll2_tx_packet *p_descq; 1190 size_t desc_size; 1191 u32 capacity; 1192 int rc = 0; 1193 1194 if (!p_ll2_info->input.tx_num_desc) 1195 goto out; 1196 1197 rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain, 1198 ¶ms); 1199 if (rc) 1200 goto out; 1201 1202 capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain); 1203 /* All bds_set elements are flexibily added. */ 1204 desc_size = struct_size(p_descq, bds_set, 1205 p_ll2_info->input.tx_max_bds_per_packet); 1206 1207 p_descq = kcalloc(capacity, desc_size, GFP_KERNEL); 1208 if (!p_descq) { 1209 rc = -ENOMEM; 1210 goto out; 1211 } 1212 p_ll2_info->tx_queue.descq_mem = p_descq; 1213 1214 DP_VERBOSE(p_hwfn, QED_MSG_LL2, 1215 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", 1216 p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc); 1217 1218 out: 1219 if (rc) 1220 DP_NOTICE(p_hwfn, 1221 "Can't allocate memory for Tx LL2 with 0x%08x buffers\n", 1222 p_ll2_info->input.tx_num_desc); 1223 return rc; 1224 } 1225 1226 static int 1227 qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, 1228 struct qed_ll2_info *p_ll2_info, u16 mtu) 1229 { 1230 struct qed_ooo_buffer *p_buf = NULL; 1231 void *p_virt; 1232 u16 buf_idx; 1233 int rc = 0; 1234 1235 if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO) 1236 return rc; 1237 1238 /* Correct number of requested OOO buffers if needed */ 1239 if (!p_ll2_info->input.rx_num_ooo_buffers) { 1240 u16 num_desc = p_ll2_info->input.rx_num_desc; 1241 1242 if (!num_desc) 1243 return -EINVAL; 1244 p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2; 1245 } 1246 1247 for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers; 1248 buf_idx++) { 1249 p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); 1250 if (!p_buf) { 1251 rc = -ENOMEM; 1252 goto out; 1253 } 1254 1255 p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; 1256 p_buf->rx_buffer_size = (p_buf->rx_buffer_size + 1257 ETH_CACHE_LINE_SIZE - 1) & 1258 ~(ETH_CACHE_LINE_SIZE - 1); 1259 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1260 p_buf->rx_buffer_size, 1261 &p_buf->rx_buffer_phys_addr, 1262 GFP_KERNEL); 1263 if (!p_virt) { 1264 kfree(p_buf); 1265 rc = -ENOMEM; 1266 goto out; 1267 } 1268 1269 p_buf->rx_buffer_virt_addr = p_virt; 1270 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); 1271 } 1272 1273 DP_VERBOSE(p_hwfn, QED_MSG_LL2, 1274 "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", 1275 p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size); 1276 1277 out: 1278 return rc; 1279 } 1280 1281 static int 1282 qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs) 1283 { 1284 if (!cbs || (!cbs->rx_comp_cb || 1285 !cbs->rx_release_cb || 1286 !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie)) 1287 return -EINVAL; 1288 1289 p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb; 1290 p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb; 1291 p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb; 1292 p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb; 1293 p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb; 1294 p_ll2_info->cbs.cookie = cbs->cookie; 1295 1296 return 0; 1297 } 1298 1299 static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn, 1300 struct qed_ll2_acquire_data *data, 1301 u8 *start_idx, u8 *last_idx) 1302 { 1303 /* LL2 queues handles will be split as follows: 1304 * First will be the legacy queues, and then the ctx based. 1305 */ 1306 if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { 1307 *start_idx = QED_LL2_LEGACY_CONN_BASE_PF; 1308 *last_idx = *start_idx + 1309 QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF; 1310 } else { 1311 /* QED_LL2_RX_TYPE_CTX */ 1312 *start_idx = QED_LL2_CTX_CONN_BASE_PF; 1313 *last_idx = *start_idx + 1314 QED_MAX_NUM_OF_CTX_LL2_CONNS_PF; 1315 } 1316 } 1317 1318 static enum core_error_handle 1319 qed_ll2_get_error_choice(enum qed_ll2_error_handle err) 1320 { 1321 switch (err) { 1322 case QED_LL2_DROP_PACKET: 1323 return LL2_DROP_PACKET; 1324 case QED_LL2_DO_NOTHING: 1325 return LL2_DO_NOTHING; 1326 case QED_LL2_ASSERT: 1327 return LL2_ASSERT; 1328 default: 1329 return LL2_DO_NOTHING; 1330 } 1331 } 1332 1333 int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) 1334 { 1335 struct qed_hwfn *p_hwfn = cxt; 1336 qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; 1337 struct qed_ll2_info *p_ll2_info = NULL; 1338 u8 i, first_idx, last_idx, *p_tx_max; 1339 int rc; 1340 1341 if (!data->p_connection_handle || !p_hwfn->p_ll2_info) 1342 return -EINVAL; 1343 1344 _qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx); 1345 1346 /* Find a free connection to be used */ 1347 for (i = first_idx; i < last_idx; i++) { 1348 mutex_lock(&p_hwfn->p_ll2_info[i].mutex); 1349 if (p_hwfn->p_ll2_info[i].b_active) { 1350 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex); 1351 continue; 1352 } 1353 1354 p_hwfn->p_ll2_info[i].b_active = true; 1355 p_ll2_info = &p_hwfn->p_ll2_info[i]; 1356 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex); 1357 break; 1358 } 1359 if (!p_ll2_info) 1360 return -EBUSY; 1361 1362 memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input)); 1363 1364 switch (data->input.tx_dest) { 1365 case QED_LL2_TX_DEST_NW: 1366 p_ll2_info->tx_dest = CORE_TX_DEST_NW; 1367 break; 1368 case QED_LL2_TX_DEST_LB: 1369 p_ll2_info->tx_dest = CORE_TX_DEST_LB; 1370 break; 1371 case QED_LL2_TX_DEST_DROP: 1372 p_ll2_info->tx_dest = CORE_TX_DEST_DROP; 1373 break; 1374 default: 1375 return -EINVAL; 1376 } 1377 1378 if (data->input.conn_type == QED_LL2_TYPE_OOO || 1379 data->input.secondary_queue) 1380 p_ll2_info->main_func_queue = false; 1381 else 1382 p_ll2_info->main_func_queue = true; 1383 1384 /* Correct maximum number of Tx BDs */ 1385 p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet; 1386 if (*p_tx_max == 0) 1387 *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET; 1388 else 1389 *p_tx_max = min_t(u8, *p_tx_max, 1390 CORE_LL2_TX_MAX_BDS_PER_PACKET); 1391 1392 rc = qed_ll2_set_cbs(p_ll2_info, data->cbs); 1393 if (rc) { 1394 DP_NOTICE(p_hwfn, "Invalid callback functions\n"); 1395 goto q_allocate_fail; 1396 } 1397 1398 rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info); 1399 if (rc) 1400 goto q_allocate_fail; 1401 1402 rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info); 1403 if (rc) 1404 goto q_allocate_fail; 1405 1406 rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, 1407 data->input.mtu); 1408 if (rc) 1409 goto q_allocate_fail; 1410 1411 /* Register callbacks for the Rx/Tx queues */ 1412 if (data->input.conn_type == QED_LL2_TYPE_OOO) { 1413 comp_rx_cb = qed_ll2_lb_rxq_completion; 1414 comp_tx_cb = qed_ll2_lb_txq_completion; 1415 } else { 1416 comp_rx_cb = qed_ll2_rxq_completion; 1417 comp_tx_cb = qed_ll2_txq_completion; 1418 } 1419 1420 if (data->input.rx_num_desc) { 1421 qed_int_register_cb(p_hwfn, comp_rx_cb, 1422 &p_hwfn->p_ll2_info[i], 1423 &p_ll2_info->rx_queue.rx_sb_index, 1424 &p_ll2_info->rx_queue.p_fw_cons); 1425 p_ll2_info->rx_queue.b_cb_registered = true; 1426 } 1427 1428 if (data->input.tx_num_desc) { 1429 qed_int_register_cb(p_hwfn, 1430 comp_tx_cb, 1431 &p_hwfn->p_ll2_info[i], 1432 &p_ll2_info->tx_queue.tx_sb_index, 1433 &p_ll2_info->tx_queue.p_fw_cons); 1434 p_ll2_info->tx_queue.b_cb_registered = true; 1435 } 1436 1437 *data->p_connection_handle = i; 1438 return rc; 1439 1440 q_allocate_fail: 1441 qed_ll2_release_connection(p_hwfn, i); 1442 return -ENOMEM; 1443 } 1444 1445 static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, 1446 struct qed_ll2_info *p_ll2_conn) 1447 { 1448 enum qed_ll2_error_handle error_input; 1449 enum core_error_handle error_mode; 1450 u8 action_on_error = 0; 1451 int rc; 1452 1453 if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) 1454 return 0; 1455 1456 DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0); 1457 error_input = p_ll2_conn->input.ai_err_packet_too_big; 1458 error_mode = qed_ll2_get_error_choice(error_input); 1459 SET_FIELD(action_on_error, 1460 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode); 1461 error_input = p_ll2_conn->input.ai_err_no_buf; 1462 error_mode = qed_ll2_get_error_choice(error_input); 1463 SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode); 1464 1465 rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); 1466 if (rc) 1467 return rc; 1468 1469 if (p_ll2_conn->rx_queue.ctx_based) { 1470 rc = qed_db_recovery_add(p_hwfn->cdev, 1471 p_ll2_conn->rx_queue.set_prod_addr, 1472 &p_ll2_conn->rx_queue.db_data, 1473 DB_REC_WIDTH_64B, DB_REC_KERNEL); 1474 } 1475 1476 return rc; 1477 } 1478 1479 static void 1480 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, 1481 struct qed_ll2_info *p_ll2_conn) 1482 { 1483 if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) 1484 return; 1485 1486 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 1487 qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); 1488 } 1489 1490 static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn, 1491 u8 handle, 1492 u8 ll2_queue_type) 1493 { 1494 u8 qid; 1495 1496 if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) 1497 return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle; 1498 1499 /* QED_LL2_RX_TYPE_CTX 1500 * FW distinguishes between the legacy queues (ram based) and the 1501 * ctx based queues by the queue_id. 1502 * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy 1503 * and the queue ids above that are ctx base. 1504 */ 1505 qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] + 1506 MAX_NUM_LL2_RX_RAM_QUEUES; 1507 1508 /* See comment on the acquire connection for how the ll2 1509 * queues handles are divided. 1510 */ 1511 qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF); 1512 1513 return qid; 1514 } 1515 1516 int qed_ll2_establish_connection(void *cxt, u8 connection_handle) 1517 { 1518 struct e4_core_conn_context *p_cxt; 1519 struct qed_ll2_tx_packet *p_pkt; 1520 struct qed_ll2_info *p_ll2_conn; 1521 struct qed_hwfn *p_hwfn = cxt; 1522 struct qed_ll2_rx_queue *p_rx; 1523 struct qed_ll2_tx_queue *p_tx; 1524 struct qed_cxt_info cxt_info; 1525 struct qed_ptt *p_ptt; 1526 int rc = -EINVAL; 1527 u32 i, capacity; 1528 size_t desc_size; 1529 u8 qid; 1530 1531 p_ptt = qed_ptt_acquire(p_hwfn); 1532 if (!p_ptt) 1533 return -EAGAIN; 1534 1535 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); 1536 if (!p_ll2_conn) { 1537 rc = -EINVAL; 1538 goto out; 1539 } 1540 1541 p_rx = &p_ll2_conn->rx_queue; 1542 p_tx = &p_ll2_conn->tx_queue; 1543 1544 qed_chain_reset(&p_rx->rxq_chain); 1545 qed_chain_reset(&p_rx->rcq_chain); 1546 INIT_LIST_HEAD(&p_rx->active_descq); 1547 INIT_LIST_HEAD(&p_rx->free_descq); 1548 INIT_LIST_HEAD(&p_rx->posting_descq); 1549 spin_lock_init(&p_rx->lock); 1550 capacity = qed_chain_get_capacity(&p_rx->rxq_chain); 1551 for (i = 0; i < capacity; i++) 1552 list_add_tail(&p_rx->descq_array[i].list_entry, 1553 &p_rx->free_descq); 1554 *p_rx->p_fw_cons = 0; 1555 1556 qed_chain_reset(&p_tx->txq_chain); 1557 INIT_LIST_HEAD(&p_tx->active_descq); 1558 INIT_LIST_HEAD(&p_tx->free_descq); 1559 INIT_LIST_HEAD(&p_tx->sending_descq); 1560 spin_lock_init(&p_tx->lock); 1561 capacity = qed_chain_get_capacity(&p_tx->txq_chain); 1562 /* All bds_set elements are flexibily added. */ 1563 desc_size = struct_size(p_pkt, bds_set, 1564 p_ll2_conn->input.tx_max_bds_per_packet); 1565 1566 for (i = 0; i < capacity; i++) { 1567 p_pkt = p_tx->descq_mem + desc_size * i; 1568 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); 1569 } 1570 p_tx->cur_completing_bd_idx = 0; 1571 p_tx->bds_idx = 0; 1572 p_tx->b_completing_packet = false; 1573 p_tx->cur_send_packet = NULL; 1574 p_tx->cur_send_frag_num = 0; 1575 p_tx->cur_completing_frag_num = 0; 1576 *p_tx->p_fw_cons = 0; 1577 1578 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid); 1579 if (rc) 1580 goto out; 1581 cxt_info.iid = p_ll2_conn->cid; 1582 rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); 1583 if (rc) { 1584 DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n", 1585 p_ll2_conn->cid); 1586 goto out; 1587 } 1588 1589 p_cxt = cxt_info.p_cxt; 1590 1591 memset(p_cxt, 0, sizeof(*p_cxt)); 1592 1593 qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle, 1594 p_ll2_conn->input.rx_conn_type); 1595 p_ll2_conn->queue_id = qid; 1596 p_ll2_conn->tx_stats_id = qid; 1597 1598 DP_VERBOSE(p_hwfn, QED_MSG_LL2, 1599 "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n", 1600 p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid); 1601 1602 if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { 1603 p_rx->set_prod_addr = p_hwfn->regview + 1604 GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid); 1605 } else { 1606 /* QED_LL2_RX_TYPE_CTX - using doorbell */ 1607 p_rx->ctx_based = 1; 1608 1609 p_rx->set_prod_addr = p_hwfn->doorbells + 1610 p_hwfn->dpi_start_offset + 1611 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE); 1612 1613 /* prepare db data */ 1614 p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid); 1615 SET_FIELD(p_rx->db_data.params, 1616 CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET); 1617 SET_FIELD(p_rx->db_data.params, 1618 CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0); 1619 } 1620 1621 p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells + 1622 qed_db_addr(p_ll2_conn->cid, 1623 DQ_DEMS_LEGACY); 1624 /* prepare db data */ 1625 SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); 1626 SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); 1627 SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, 1628 DQ_XCM_CORE_TX_BD_PROD_CMD); 1629 p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; 1630 1631 rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); 1632 if (rc) 1633 goto out; 1634 1635 rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn); 1636 if (rc) 1637 goto out; 1638 1639 if (!QED_IS_RDMA_PERSONALITY(p_hwfn) && 1640 !QED_IS_NVMETCP_PERSONALITY(p_hwfn)) 1641 qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1); 1642 1643 qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); 1644 1645 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { 1646 if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) 1647 qed_llh_add_protocol_filter(p_hwfn->cdev, 0, 1648 QED_LLH_FILTER_ETHERTYPE, 1649 ETH_P_FCOE, 0); 1650 qed_llh_add_protocol_filter(p_hwfn->cdev, 0, 1651 QED_LLH_FILTER_ETHERTYPE, 1652 ETH_P_FIP, 0); 1653 } 1654 1655 out: 1656 qed_ptt_release(p_hwfn, p_ptt); 1657 return rc; 1658 } 1659 1660 static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, 1661 struct qed_ll2_rx_queue *p_rx, 1662 struct qed_ll2_rx_packet *p_curp) 1663 { 1664 struct qed_ll2_rx_packet *p_posting_packet = NULL; 1665 struct core_ll2_rx_prod rx_prod = { 0, 0 }; 1666 bool b_notify_fw = false; 1667 u16 bd_prod, cq_prod; 1668 1669 /* This handles the flushing of already posted buffers */ 1670 while (!list_empty(&p_rx->posting_descq)) { 1671 p_posting_packet = list_first_entry(&p_rx->posting_descq, 1672 struct qed_ll2_rx_packet, 1673 list_entry); 1674 list_move_tail(&p_posting_packet->list_entry, 1675 &p_rx->active_descq); 1676 b_notify_fw = true; 1677 } 1678 1679 /* This handles the supplied packet [if there is one] */ 1680 if (p_curp) { 1681 list_add_tail(&p_curp->list_entry, &p_rx->active_descq); 1682 b_notify_fw = true; 1683 } 1684 1685 if (!b_notify_fw) 1686 return; 1687 1688 bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain); 1689 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); 1690 if (p_rx->ctx_based) { 1691 /* update producer by giving a doorbell */ 1692 p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod); 1693 p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod); 1694 /* Make sure chain element is updated before ringing the 1695 * doorbell 1696 */ 1697 dma_wmb(); 1698 DIRECT_REG_WR64(p_rx->set_prod_addr, 1699 *((u64 *)&p_rx->db_data)); 1700 } else { 1701 rx_prod.bd_prod = cpu_to_le16(bd_prod); 1702 rx_prod.cqe_prod = cpu_to_le16(cq_prod); 1703 1704 /* Make sure chain element is updated before ringing the 1705 * doorbell 1706 */ 1707 dma_wmb(); 1708 1709 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); 1710 } 1711 } 1712 1713 int qed_ll2_post_rx_buffer(void *cxt, 1714 u8 connection_handle, 1715 dma_addr_t addr, 1716 u16 buf_len, void *cookie, u8 notify_fw) 1717 { 1718 struct qed_hwfn *p_hwfn = cxt; 1719 struct core_rx_bd_with_buff_len *p_curb = NULL; 1720 struct qed_ll2_rx_packet *p_curp = NULL; 1721 struct qed_ll2_info *p_ll2_conn; 1722 struct qed_ll2_rx_queue *p_rx; 1723 unsigned long flags; 1724 void *p_data; 1725 int rc = 0; 1726 1727 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); 1728 if (!p_ll2_conn) 1729 return -EINVAL; 1730 p_rx = &p_ll2_conn->rx_queue; 1731 1732 spin_lock_irqsave(&p_rx->lock, flags); 1733 if (!list_empty(&p_rx->free_descq)) 1734 p_curp = list_first_entry(&p_rx->free_descq, 1735 struct qed_ll2_rx_packet, list_entry); 1736 if (p_curp) { 1737 if (qed_chain_get_elem_left(&p_rx->rxq_chain) && 1738 qed_chain_get_elem_left(&p_rx->rcq_chain)) { 1739 p_data = qed_chain_produce(&p_rx->rxq_chain); 1740 p_curb = (struct core_rx_bd_with_buff_len *)p_data; 1741 qed_chain_produce(&p_rx->rcq_chain); 1742 } 1743 } 1744 1745 /* If we're lacking entires, let's try to flush buffers to FW */ 1746 if (!p_curp || !p_curb) { 1747 rc = -EBUSY; 1748 p_curp = NULL; 1749 goto out_notify; 1750 } 1751 1752 /* We have an Rx packet we can fill */ 1753 DMA_REGPAIR_LE(p_curb->addr, addr); 1754 p_curb->buff_length = cpu_to_le16(buf_len); 1755 p_curp->rx_buf_addr = addr; 1756 p_curp->cookie = cookie; 1757 p_curp->rxq_bd = p_curb; 1758 p_curp->buf_length = buf_len; 1759 list_del(&p_curp->list_entry); 1760 1761 /* Check if we only want to enqueue this packet without informing FW */ 1762 if (!notify_fw) { 1763 list_add_tail(&p_curp->list_entry, &p_rx->posting_descq); 1764 goto out; 1765 } 1766 1767 out_notify: 1768 qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp); 1769 out: 1770 spin_unlock_irqrestore(&p_rx->lock, flags); 1771 return rc; 1772 } 1773 1774 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn, 1775 struct qed_ll2_tx_queue *p_tx, 1776 struct qed_ll2_tx_packet *p_curp, 1777 struct qed_ll2_tx_pkt_info *pkt, 1778 u8 notify_fw) 1779 { 1780 list_del(&p_curp->list_entry); 1781 p_curp->cookie = pkt->cookie; 1782 p_curp->bd_used = pkt->num_of_bds; 1783 p_curp->notify_fw = notify_fw; 1784 p_tx->cur_send_packet = p_curp; 1785 p_tx->cur_send_frag_num = 0; 1786 1787 p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag; 1788 p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len; 1789 p_tx->cur_send_frag_num++; 1790 } 1791 1792 static void 1793 qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, 1794 struct qed_ll2_info *p_ll2, 1795 struct qed_ll2_tx_packet *p_curp, 1796 struct qed_ll2_tx_pkt_info *pkt) 1797 { 1798 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; 1799 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain); 1800 struct core_tx_bd *start_bd = NULL; 1801 enum core_roce_flavor_type roce_flavor; 1802 enum core_tx_dest tx_dest; 1803 u16 bd_data = 0, frag_idx; 1804 u16 bitfield1; 1805 1806 roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE 1807 : CORE_RROCE; 1808 1809 switch (pkt->tx_dest) { 1810 case QED_LL2_TX_DEST_NW: 1811 tx_dest = CORE_TX_DEST_NW; 1812 break; 1813 case QED_LL2_TX_DEST_LB: 1814 tx_dest = CORE_TX_DEST_LB; 1815 break; 1816 case QED_LL2_TX_DEST_DROP: 1817 tx_dest = CORE_TX_DEST_DROP; 1818 break; 1819 default: 1820 tx_dest = CORE_TX_DEST_LB; 1821 break; 1822 } 1823 1824 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); 1825 if (QED_IS_IWARP_PERSONALITY(p_hwfn) && 1826 p_ll2->input.conn_type == QED_LL2_TYPE_OOO) { 1827 start_bd->nw_vlan_or_lb_echo = 1828 cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); 1829 } else { 1830 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); 1831 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && 1832 p_ll2->input.conn_type == QED_LL2_TYPE_FCOE) 1833 pkt->remove_stag = true; 1834 } 1835 1836 bitfield1 = le16_to_cpu(start_bd->bitfield1); 1837 SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w); 1838 SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest); 1839 start_bd->bitfield1 = cpu_to_le16(bitfield1); 1840 1841 bd_data |= pkt->bd_flags; 1842 SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); 1843 SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); 1844 SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); 1845 SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); 1846 SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); 1847 SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); 1848 SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION, 1849 !!(pkt->remove_stag)); 1850 1851 start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); 1852 DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); 1853 start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); 1854 1855 DP_VERBOSE(p_hwfn, 1856 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), 1857 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", 1858 p_ll2->queue_id, 1859 p_ll2->cid, 1860 p_ll2->input.conn_type, 1861 prod_idx, 1862 pkt->first_frag_len, 1863 pkt->num_of_bds, 1864 le32_to_cpu(start_bd->addr.hi), 1865 le32_to_cpu(start_bd->addr.lo)); 1866 1867 if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds) 1868 return; 1869 1870 /* Need to provide the packet with additional BDs for frags */ 1871 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num; 1872 frag_idx < pkt->num_of_bds; frag_idx++) { 1873 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; 1874 1875 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); 1876 (*p_bd)->bd_data.as_bitfield = 0; 1877 (*p_bd)->bitfield1 = 0; 1878 p_curp->bds_set[frag_idx].tx_frag = 0; 1879 p_curp->bds_set[frag_idx].frag_len = 0; 1880 } 1881 } 1882 1883 /* This should be called while the Txq spinlock is being held */ 1884 static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, 1885 struct qed_ll2_info *p_ll2_conn) 1886 { 1887 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw; 1888 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 1889 struct qed_ll2_tx_packet *p_pkt = NULL; 1890 u16 bd_prod; 1891 1892 /* If there are missing BDs, don't do anything now */ 1893 if (p_ll2_conn->tx_queue.cur_send_frag_num != 1894 p_ll2_conn->tx_queue.cur_send_packet->bd_used) 1895 return; 1896 1897 /* Push the current packet to the list and clean after it */ 1898 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry, 1899 &p_ll2_conn->tx_queue.sending_descq); 1900 p_ll2_conn->tx_queue.cur_send_packet = NULL; 1901 p_ll2_conn->tx_queue.cur_send_frag_num = 0; 1902 1903 /* Notify FW of packet only if requested to */ 1904 if (!b_notify) 1905 return; 1906 1907 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain); 1908 1909 while (!list_empty(&p_tx->sending_descq)) { 1910 p_pkt = list_first_entry(&p_tx->sending_descq, 1911 struct qed_ll2_tx_packet, list_entry); 1912 if (!p_pkt) 1913 break; 1914 1915 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq); 1916 } 1917 1918 p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod); 1919 1920 /* Make sure the BDs data is updated before ringing the doorbell */ 1921 wmb(); 1922 1923 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg)); 1924 1925 DP_VERBOSE(p_hwfn, 1926 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), 1927 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", 1928 p_ll2_conn->queue_id, 1929 p_ll2_conn->cid, 1930 p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod); 1931 } 1932 1933 int qed_ll2_prepare_tx_packet(void *cxt, 1934 u8 connection_handle, 1935 struct qed_ll2_tx_pkt_info *pkt, 1936 bool notify_fw) 1937 { 1938 struct qed_hwfn *p_hwfn = cxt; 1939 struct qed_ll2_tx_packet *p_curp = NULL; 1940 struct qed_ll2_info *p_ll2_conn = NULL; 1941 struct qed_ll2_tx_queue *p_tx; 1942 struct qed_chain *p_tx_chain; 1943 unsigned long flags; 1944 int rc = 0; 1945 1946 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); 1947 if (!p_ll2_conn) 1948 return -EINVAL; 1949 p_tx = &p_ll2_conn->tx_queue; 1950 p_tx_chain = &p_tx->txq_chain; 1951 1952 if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet) 1953 return -EIO; 1954 1955 spin_lock_irqsave(&p_tx->lock, flags); 1956 if (p_tx->cur_send_packet) { 1957 rc = -EEXIST; 1958 goto out; 1959 } 1960 1961 /* Get entry, but only if we have tx elements for it */ 1962 if (!list_empty(&p_tx->free_descq)) 1963 p_curp = list_first_entry(&p_tx->free_descq, 1964 struct qed_ll2_tx_packet, list_entry); 1965 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds) 1966 p_curp = NULL; 1967 1968 if (!p_curp) { 1969 rc = -EBUSY; 1970 goto out; 1971 } 1972 1973 /* Prepare packet and BD, and perhaps send a doorbell to FW */ 1974 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw); 1975 1976 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt); 1977 1978 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); 1979 1980 out: 1981 spin_unlock_irqrestore(&p_tx->lock, flags); 1982 return rc; 1983 } 1984 1985 int qed_ll2_set_fragment_of_tx_packet(void *cxt, 1986 u8 connection_handle, 1987 dma_addr_t addr, u16 nbytes) 1988 { 1989 struct qed_ll2_tx_packet *p_cur_send_packet = NULL; 1990 struct qed_hwfn *p_hwfn = cxt; 1991 struct qed_ll2_info *p_ll2_conn = NULL; 1992 u16 cur_send_frag_num = 0; 1993 struct core_tx_bd *p_bd; 1994 unsigned long flags; 1995 1996 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); 1997 if (!p_ll2_conn) 1998 return -EINVAL; 1999 2000 if (!p_ll2_conn->tx_queue.cur_send_packet) 2001 return -EINVAL; 2002 2003 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet; 2004 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num; 2005 2006 if (cur_send_frag_num >= p_cur_send_packet->bd_used) 2007 return -EINVAL; 2008 2009 /* Fill the BD information, and possibly notify FW */ 2010 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd; 2011 DMA_REGPAIR_LE(p_bd->addr, addr); 2012 p_bd->nbytes = cpu_to_le16(nbytes); 2013 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr; 2014 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes; 2015 2016 p_ll2_conn->tx_queue.cur_send_frag_num++; 2017 2018 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags); 2019 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); 2020 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags); 2021 2022 return 0; 2023 } 2024 2025 int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) 2026 { 2027 struct qed_hwfn *p_hwfn = cxt; 2028 struct qed_ll2_info *p_ll2_conn = NULL; 2029 int rc = -EINVAL; 2030 struct qed_ptt *p_ptt; 2031 2032 p_ptt = qed_ptt_acquire(p_hwfn); 2033 if (!p_ptt) 2034 return -EAGAIN; 2035 2036 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); 2037 if (!p_ll2_conn) { 2038 rc = -EINVAL; 2039 goto out; 2040 } 2041 2042 /* Stop Tx & Rx of connection, if needed */ 2043 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { 2044 p_ll2_conn->tx_queue.b_cb_registered = false; 2045 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ 2046 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); 2047 if (rc) 2048 goto out; 2049 2050 qed_ll2_txq_flush(p_hwfn, connection_handle); 2051 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); 2052 } 2053 2054 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { 2055 p_ll2_conn->rx_queue.b_cb_registered = false; 2056 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ 2057 2058 if (p_ll2_conn->rx_queue.ctx_based) 2059 qed_db_recovery_del(p_hwfn->cdev, 2060 p_ll2_conn->rx_queue.set_prod_addr, 2061 &p_ll2_conn->rx_queue.db_data); 2062 2063 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); 2064 if (rc) 2065 goto out; 2066 2067 qed_ll2_rxq_flush(p_hwfn, connection_handle); 2068 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); 2069 } 2070 2071 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) 2072 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 2073 2074 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { 2075 if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) 2076 qed_llh_remove_protocol_filter(p_hwfn->cdev, 0, 2077 QED_LLH_FILTER_ETHERTYPE, 2078 ETH_P_FCOE, 0); 2079 qed_llh_remove_protocol_filter(p_hwfn->cdev, 0, 2080 QED_LLH_FILTER_ETHERTYPE, 2081 ETH_P_FIP, 0); 2082 } 2083 2084 out: 2085 qed_ptt_release(p_hwfn, p_ptt); 2086 return rc; 2087 } 2088 2089 static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, 2090 struct qed_ll2_info *p_ll2_conn) 2091 { 2092 struct qed_ooo_buffer *p_buffer; 2093 2094 if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) 2095 return; 2096 2097 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 2098 while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, 2099 p_hwfn->p_ooo_info))) { 2100 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2101 p_buffer->rx_buffer_size, 2102 p_buffer->rx_buffer_virt_addr, 2103 p_buffer->rx_buffer_phys_addr); 2104 kfree(p_buffer); 2105 } 2106 } 2107 2108 void qed_ll2_release_connection(void *cxt, u8 connection_handle) 2109 { 2110 struct qed_hwfn *p_hwfn = cxt; 2111 struct qed_ll2_info *p_ll2_conn = NULL; 2112 2113 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); 2114 if (!p_ll2_conn) 2115 return; 2116 2117 kfree(p_ll2_conn->tx_queue.descq_mem); 2118 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); 2119 2120 kfree(p_ll2_conn->rx_queue.descq_array); 2121 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain); 2122 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain); 2123 2124 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid); 2125 2126 qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn); 2127 2128 mutex_lock(&p_ll2_conn->mutex); 2129 p_ll2_conn->b_active = false; 2130 mutex_unlock(&p_ll2_conn->mutex); 2131 } 2132 2133 int qed_ll2_alloc(struct qed_hwfn *p_hwfn) 2134 { 2135 struct qed_ll2_info *p_ll2_connections; 2136 u8 i; 2137 2138 /* Allocate LL2's set struct */ 2139 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS, 2140 sizeof(struct qed_ll2_info), GFP_KERNEL); 2141 if (!p_ll2_connections) { 2142 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n"); 2143 return -ENOMEM; 2144 } 2145 2146 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) 2147 p_ll2_connections[i].my_id = i; 2148 2149 p_hwfn->p_ll2_info = p_ll2_connections; 2150 return 0; 2151 } 2152 2153 void qed_ll2_setup(struct qed_hwfn *p_hwfn) 2154 { 2155 int i; 2156 2157 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) 2158 mutex_init(&p_hwfn->p_ll2_info[i].mutex); 2159 } 2160 2161 void qed_ll2_free(struct qed_hwfn *p_hwfn) 2162 { 2163 if (!p_hwfn->p_ll2_info) 2164 return; 2165 2166 kfree(p_hwfn->p_ll2_info); 2167 p_hwfn->p_ll2_info = NULL; 2168 } 2169 2170 static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn, 2171 struct qed_ptt *p_ptt, 2172 struct qed_ll2_stats *p_stats) 2173 { 2174 struct core_ll2_port_stats port_stats; 2175 2176 memset(&port_stats, 0, sizeof(port_stats)); 2177 qed_memcpy_from(p_hwfn, p_ptt, &port_stats, 2178 BAR0_MAP_REG_TSDM_RAM + 2179 TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)), 2180 sizeof(port_stats)); 2181 2182 p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr); 2183 p_stats->gsi_invalid_pkt_length += 2184 HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length); 2185 p_stats->gsi_unsupported_pkt_typ += 2186 HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ); 2187 p_stats->gsi_crcchksm_error += 2188 HILO_64_REGPAIR(port_stats.gsi_crcchksm_error); 2189 } 2190 2191 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn, 2192 struct qed_ptt *p_ptt, 2193 struct qed_ll2_info *p_ll2_conn, 2194 struct qed_ll2_stats *p_stats) 2195 { 2196 struct core_ll2_tstorm_per_queue_stat tstats; 2197 u8 qid = p_ll2_conn->queue_id; 2198 u32 tstats_addr; 2199 2200 memset(&tstats, 0, sizeof(tstats)); 2201 tstats_addr = BAR0_MAP_REG_TSDM_RAM + 2202 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid); 2203 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); 2204 2205 p_stats->packet_too_big_discard += 2206 HILO_64_REGPAIR(tstats.packet_too_big_discard); 2207 p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard); 2208 } 2209 2210 static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn, 2211 struct qed_ptt *p_ptt, 2212 struct qed_ll2_info *p_ll2_conn, 2213 struct qed_ll2_stats *p_stats) 2214 { 2215 struct core_ll2_ustorm_per_queue_stat ustats; 2216 u8 qid = p_ll2_conn->queue_id; 2217 u32 ustats_addr; 2218 2219 memset(&ustats, 0, sizeof(ustats)); 2220 ustats_addr = BAR0_MAP_REG_USDM_RAM + 2221 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid); 2222 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats)); 2223 2224 p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 2225 p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 2226 p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 2227 p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 2228 p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 2229 p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 2230 } 2231 2232 static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn, 2233 struct qed_ptt *p_ptt, 2234 struct qed_ll2_info *p_ll2_conn, 2235 struct qed_ll2_stats *p_stats) 2236 { 2237 struct core_ll2_pstorm_per_queue_stat pstats; 2238 u8 stats_id = p_ll2_conn->tx_stats_id; 2239 u32 pstats_addr; 2240 2241 memset(&pstats, 0, sizeof(pstats)); 2242 pstats_addr = BAR0_MAP_REG_PSDM_RAM + 2243 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id); 2244 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); 2245 2246 p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); 2247 p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); 2248 p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); 2249 p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); 2250 p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); 2251 p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); 2252 } 2253 2254 static int __qed_ll2_get_stats(void *cxt, u8 connection_handle, 2255 struct qed_ll2_stats *p_stats) 2256 { 2257 struct qed_hwfn *p_hwfn = cxt; 2258 struct qed_ll2_info *p_ll2_conn = NULL; 2259 struct qed_ptt *p_ptt; 2260 2261 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) || 2262 !p_hwfn->p_ll2_info) 2263 return -EINVAL; 2264 2265 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; 2266 2267 p_ptt = qed_ptt_acquire(p_hwfn); 2268 if (!p_ptt) { 2269 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 2270 return -EINVAL; 2271 } 2272 2273 if (p_ll2_conn->input.gsi_enable) 2274 _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats); 2275 2276 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); 2277 2278 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats); 2279 2280 if (p_ll2_conn->tx_stats_en) 2281 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); 2282 2283 qed_ptt_release(p_hwfn, p_ptt); 2284 2285 return 0; 2286 } 2287 2288 int qed_ll2_get_stats(void *cxt, 2289 u8 connection_handle, struct qed_ll2_stats *p_stats) 2290 { 2291 memset(p_stats, 0, sizeof(*p_stats)); 2292 return __qed_ll2_get_stats(cxt, connection_handle, p_stats); 2293 } 2294 2295 static void qed_ll2b_release_rx_packet(void *cxt, 2296 u8 connection_handle, 2297 void *cookie, 2298 dma_addr_t rx_buf_addr, 2299 bool b_last_packet) 2300 { 2301 struct qed_hwfn *p_hwfn = cxt; 2302 2303 qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie); 2304 } 2305 2306 static void qed_ll2_register_cb_ops(struct qed_dev *cdev, 2307 const struct qed_ll2_cb_ops *ops, 2308 void *cookie) 2309 { 2310 cdev->ll2->cbs = ops; 2311 cdev->ll2->cb_cookie = cookie; 2312 } 2313 2314 static struct qed_ll2_cbs ll2_cbs = { 2315 .rx_comp_cb = &qed_ll2b_complete_rx_packet, 2316 .rx_release_cb = &qed_ll2b_release_rx_packet, 2317 .tx_comp_cb = &qed_ll2b_complete_tx_packet, 2318 .tx_release_cb = &qed_ll2b_complete_tx_packet, 2319 }; 2320 2321 static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn, 2322 struct qed_ll2_acquire_data *data, 2323 struct qed_ll2_params *params, 2324 enum qed_ll2_conn_type conn_type, 2325 u8 *handle, bool lb) 2326 { 2327 memset(data, 0, sizeof(*data)); 2328 2329 data->input.conn_type = conn_type; 2330 data->input.mtu = params->mtu; 2331 data->input.rx_num_desc = QED_LL2_RX_SIZE; 2332 data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets; 2333 data->input.rx_vlan_removal_en = params->rx_vlan_stripping; 2334 data->input.tx_num_desc = QED_LL2_TX_SIZE; 2335 data->p_connection_handle = handle; 2336 data->cbs = &ll2_cbs; 2337 ll2_cbs.cookie = p_hwfn; 2338 2339 if (lb) { 2340 data->input.tx_tc = PKT_LB_TC; 2341 data->input.tx_dest = QED_LL2_TX_DEST_LB; 2342 } else { 2343 data->input.tx_tc = 0; 2344 data->input.tx_dest = QED_LL2_TX_DEST_NW; 2345 } 2346 } 2347 2348 static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn, 2349 struct qed_ll2_params *params) 2350 { 2351 u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; 2352 struct qed_ll2_acquire_data data; 2353 int rc; 2354 2355 qed_ll2_set_conn_data(p_hwfn, &data, params, 2356 QED_LL2_TYPE_OOO, handle, true); 2357 2358 rc = qed_ll2_acquire_connection(p_hwfn, &data); 2359 if (rc) { 2360 DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n"); 2361 goto out; 2362 } 2363 2364 rc = qed_ll2_establish_connection(p_hwfn, *handle); 2365 if (rc) { 2366 DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n"); 2367 goto fail; 2368 } 2369 2370 return 0; 2371 2372 fail: 2373 qed_ll2_release_connection(p_hwfn, *handle); 2374 out: 2375 *handle = QED_LL2_UNUSED_HANDLE; 2376 return rc; 2377 } 2378 2379 static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev) 2380 { 2381 return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) || 2382 QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) || 2383 QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) && 2384 (QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev)); 2385 } 2386 2387 static int __qed_ll2_stop(struct qed_hwfn *p_hwfn) 2388 { 2389 struct qed_dev *cdev = p_hwfn->cdev; 2390 int rc; 2391 2392 rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle); 2393 if (rc) 2394 DP_INFO(cdev, "Failed to terminate LL2 connection\n"); 2395 2396 qed_ll2_release_connection(p_hwfn, cdev->ll2->handle); 2397 2398 return rc; 2399 } 2400 2401 static int qed_ll2_stop(struct qed_dev *cdev) 2402 { 2403 bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); 2404 struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); 2405 int rc = 0, rc2 = 0; 2406 2407 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE) 2408 return 0; 2409 if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) 2410 qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address); 2411 2412 qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address); 2413 eth_zero_addr(cdev->ll2_mac_address); 2414 2415 if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) 2416 qed_ll2_stop_ooo(p_hwfn); 2417 2418 /* In CMT mode, LL2 is always started on engine 0 for a storage PF */ 2419 if (b_is_storage_eng1) { 2420 rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev)); 2421 if (rc2) 2422 DP_NOTICE(QED_LEADING_HWFN(cdev), 2423 "Failed to stop LL2 on engine 0\n"); 2424 } 2425 2426 rc = __qed_ll2_stop(p_hwfn); 2427 if (rc) 2428 DP_NOTICE(p_hwfn, "Failed to stop LL2\n"); 2429 2430 qed_ll2_kill_buffers(cdev); 2431 2432 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; 2433 2434 return rc | rc2; 2435 } 2436 2437 static int __qed_ll2_start(struct qed_hwfn *p_hwfn, 2438 struct qed_ll2_params *params) 2439 { 2440 struct qed_ll2_buffer *buffer, *tmp_buffer; 2441 struct qed_dev *cdev = p_hwfn->cdev; 2442 enum qed_ll2_conn_type conn_type; 2443 struct qed_ll2_acquire_data data; 2444 int rc, rx_cnt; 2445 2446 switch (p_hwfn->hw_info.personality) { 2447 case QED_PCI_FCOE: 2448 conn_type = QED_LL2_TYPE_FCOE; 2449 break; 2450 case QED_PCI_ISCSI: 2451 case QED_PCI_NVMETCP: 2452 conn_type = QED_LL2_TYPE_TCP_ULP; 2453 break; 2454 case QED_PCI_ETH_ROCE: 2455 conn_type = QED_LL2_TYPE_ROCE; 2456 break; 2457 default: 2458 2459 conn_type = QED_LL2_TYPE_TEST; 2460 } 2461 2462 qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type, 2463 &cdev->ll2->handle, false); 2464 2465 rc = qed_ll2_acquire_connection(p_hwfn, &data); 2466 if (rc) { 2467 DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n"); 2468 return rc; 2469 } 2470 2471 rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle); 2472 if (rc) { 2473 DP_INFO(p_hwfn, "Failed to establish LL2 connection\n"); 2474 goto release_conn; 2475 } 2476 2477 /* Post all Rx buffers to FW */ 2478 spin_lock_bh(&cdev->ll2->lock); 2479 rx_cnt = cdev->ll2->rx_cnt; 2480 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) { 2481 rc = qed_ll2_post_rx_buffer(p_hwfn, 2482 cdev->ll2->handle, 2483 buffer->phys_addr, 0, buffer, 1); 2484 if (rc) { 2485 DP_INFO(p_hwfn, 2486 "Failed to post an Rx buffer; Deleting it\n"); 2487 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, 2488 cdev->ll2->rx_size, DMA_FROM_DEVICE); 2489 kfree(buffer->data); 2490 list_del(&buffer->list); 2491 kfree(buffer); 2492 } else { 2493 rx_cnt++; 2494 } 2495 } 2496 spin_unlock_bh(&cdev->ll2->lock); 2497 2498 if (rx_cnt == cdev->ll2->rx_cnt) { 2499 DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n"); 2500 goto terminate_conn; 2501 } 2502 cdev->ll2->rx_cnt = rx_cnt; 2503 2504 return 0; 2505 2506 terminate_conn: 2507 qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle); 2508 release_conn: 2509 qed_ll2_release_connection(p_hwfn, cdev->ll2->handle); 2510 return rc; 2511 } 2512 2513 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) 2514 { 2515 bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); 2516 struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); 2517 struct qed_ll2_buffer *buffer; 2518 int rx_num_desc, i, rc; 2519 2520 if (!is_valid_ether_addr(params->ll2_mac_address)) { 2521 DP_NOTICE(cdev, "Invalid Ethernet address\n"); 2522 return -EINVAL; 2523 } 2524 2525 WARN_ON(!cdev->ll2->cbs); 2526 2527 /* Initialize LL2 locks & lists */ 2528 INIT_LIST_HEAD(&cdev->ll2->list); 2529 spin_lock_init(&cdev->ll2->lock); 2530 2531 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN + 2532 L1_CACHE_BYTES + params->mtu; 2533 2534 /* Allocate memory for LL2. 2535 * In CMT mode, in case of a storage PF which is affintized to engine 1, 2536 * LL2 is started also on engine 0 and thus we need twofold buffers. 2537 */ 2538 rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1); 2539 DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n", 2540 rx_num_desc, cdev->ll2->rx_size); 2541 for (i = 0; i < rx_num_desc; i++) { 2542 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 2543 if (!buffer) { 2544 DP_INFO(cdev, "Failed to allocate LL2 buffers\n"); 2545 rc = -ENOMEM; 2546 goto err0; 2547 } 2548 2549 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data, 2550 &buffer->phys_addr); 2551 if (rc) { 2552 kfree(buffer); 2553 goto err0; 2554 } 2555 2556 list_add_tail(&buffer->list, &cdev->ll2->list); 2557 } 2558 2559 rc = __qed_ll2_start(p_hwfn, params); 2560 if (rc) { 2561 DP_NOTICE(cdev, "Failed to start LL2\n"); 2562 goto err0; 2563 } 2564 2565 /* In CMT mode, always need to start LL2 on engine 0 for a storage PF, 2566 * since broadcast/mutlicast packets are routed to engine 0. 2567 */ 2568 if (b_is_storage_eng1) { 2569 rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params); 2570 if (rc) { 2571 DP_NOTICE(QED_LEADING_HWFN(cdev), 2572 "Failed to start LL2 on engine 0\n"); 2573 goto err1; 2574 } 2575 } 2576 2577 if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) { 2578 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n"); 2579 rc = qed_ll2_start_ooo(p_hwfn, params); 2580 if (rc) { 2581 DP_NOTICE(cdev, "Failed to start OOO LL2\n"); 2582 goto err2; 2583 } 2584 } 2585 2586 if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) { 2587 rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address); 2588 if (rc) { 2589 DP_NOTICE(cdev, "Failed to add an LLH filter\n"); 2590 goto err3; 2591 } 2592 2593 } 2594 2595 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); 2596 2597 return 0; 2598 2599 err3: 2600 if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) 2601 qed_ll2_stop_ooo(p_hwfn); 2602 err2: 2603 if (b_is_storage_eng1) 2604 __qed_ll2_stop(QED_LEADING_HWFN(cdev)); 2605 err1: 2606 __qed_ll2_stop(p_hwfn); 2607 err0: 2608 qed_ll2_kill_buffers(cdev); 2609 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; 2610 return rc; 2611 } 2612 2613 static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, 2614 unsigned long xmit_flags) 2615 { 2616 struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); 2617 struct qed_ll2_tx_pkt_info pkt; 2618 const skb_frag_t *frag; 2619 u8 flags = 0, nr_frags; 2620 int rc = -EINVAL, i; 2621 dma_addr_t mapping; 2622 u16 vlan = 0; 2623 2624 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { 2625 DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); 2626 return -EINVAL; 2627 } 2628 2629 /* Cache number of fragments from SKB since SKB may be freed by 2630 * the completion routine after calling qed_ll2_prepare_tx_packet() 2631 */ 2632 nr_frags = skb_shinfo(skb)->nr_frags; 2633 2634 if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { 2635 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", 2636 1 + nr_frags); 2637 return -EINVAL; 2638 } 2639 2640 mapping = dma_map_single(&cdev->pdev->dev, skb->data, 2641 skb->len, DMA_TO_DEVICE); 2642 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { 2643 DP_NOTICE(cdev, "SKB mapping failed\n"); 2644 return -EINVAL; 2645 } 2646 2647 /* Request HW to calculate IP csum */ 2648 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) && 2649 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) 2650 flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT); 2651 2652 if (skb_vlan_tag_present(skb)) { 2653 vlan = skb_vlan_tag_get(skb); 2654 flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT); 2655 } 2656 2657 memset(&pkt, 0, sizeof(pkt)); 2658 pkt.num_of_bds = 1 + nr_frags; 2659 pkt.vlan = vlan; 2660 pkt.bd_flags = flags; 2661 pkt.tx_dest = QED_LL2_TX_DEST_NW; 2662 pkt.first_frag = mapping; 2663 pkt.first_frag_len = skb->len; 2664 pkt.cookie = skb; 2665 if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) && 2666 test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) 2667 pkt.remove_stag = true; 2668 2669 /* qed_ll2_prepare_tx_packet() may actually send the packet if 2670 * there are no fragments in the skb and subsequently the completion 2671 * routine may run and free the SKB, so no dereferencing the SKB 2672 * beyond this point unless skb has any fragments. 2673 */ 2674 rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle, 2675 &pkt, 1); 2676 if (rc) 2677 goto err; 2678 2679 for (i = 0; i < nr_frags; i++) { 2680 frag = &skb_shinfo(skb)->frags[i]; 2681 2682 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, 2683 skb_frag_size(frag), DMA_TO_DEVICE); 2684 2685 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { 2686 DP_NOTICE(cdev, 2687 "Unable to map frag - dropping packet\n"); 2688 rc = -ENOMEM; 2689 goto err; 2690 } 2691 2692 rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, 2693 cdev->ll2->handle, 2694 mapping, 2695 skb_frag_size(frag)); 2696 2697 /* if failed not much to do here, partial packet has been posted 2698 * we can't free memory, will need to wait for completion 2699 */ 2700 if (rc) 2701 goto err2; 2702 } 2703 2704 return 0; 2705 2706 err: 2707 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE); 2708 err2: 2709 return rc; 2710 } 2711 2712 static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats) 2713 { 2714 bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); 2715 struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); 2716 int rc; 2717 2718 if (!cdev->ll2) 2719 return -EINVAL; 2720 2721 rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats); 2722 if (rc) { 2723 DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n"); 2724 return rc; 2725 } 2726 2727 /* In CMT mode, LL2 is always started on engine 0 for a storage PF */ 2728 if (b_is_storage_eng1) { 2729 rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev), 2730 cdev->ll2->handle, stats); 2731 if (rc) { 2732 DP_NOTICE(QED_LEADING_HWFN(cdev), 2733 "Failed to get LL2 stats on engine 0\n"); 2734 return rc; 2735 } 2736 } 2737 2738 return 0; 2739 } 2740 2741 const struct qed_ll2_ops qed_ll2_ops_pass = { 2742 .start = &qed_ll2_start, 2743 .stop = &qed_ll2_stop, 2744 .start_xmit = &qed_ll2_start_xmit, 2745 .register_cb_ops = &qed_ll2_register_cb_ops, 2746 .get_stats = &qed_ll2_stats, 2747 }; 2748 2749 int qed_ll2_alloc_if(struct qed_dev *cdev) 2750 { 2751 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL); 2752 return cdev->ll2 ? 0 : -ENOMEM; 2753 } 2754 2755 void qed_ll2_dealloc_if(struct qed_dev *cdev) 2756 { 2757 kfree(cdev->ll2); 2758 cdev->ll2 = NULL; 2759 } 2760