/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
3151ff1725SRam Amrani */ 3251ff1725SRam Amrani #include <linux/types.h> 3351ff1725SRam Amrani #include <asm/byteorder.h> 3451ff1725SRam Amrani #include <linux/bitops.h> 3551ff1725SRam Amrani #include <linux/delay.h> 3651ff1725SRam Amrani #include <linux/dma-mapping.h> 3751ff1725SRam Amrani #include <linux/errno.h> 3851ff1725SRam Amrani #include <linux/if_ether.h> 3951ff1725SRam Amrani #include <linux/if_vlan.h> 4051ff1725SRam Amrani #include <linux/io.h> 4151ff1725SRam Amrani #include <linux/ip.h> 4251ff1725SRam Amrani #include <linux/ipv6.h> 4351ff1725SRam Amrani #include <linux/kernel.h> 4451ff1725SRam Amrani #include <linux/list.h> 4551ff1725SRam Amrani #include <linux/module.h> 4651ff1725SRam Amrani #include <linux/mutex.h> 4751ff1725SRam Amrani #include <linux/pci.h> 4851ff1725SRam Amrani #include <linux/slab.h> 4951ff1725SRam Amrani #include <linux/spinlock.h> 5051ff1725SRam Amrani #include <linux/string.h> 5151ff1725SRam Amrani #include <linux/tcp.h> 5251ff1725SRam Amrani #include <linux/bitops.h> 5351ff1725SRam Amrani #include <linux/qed/qed_roce_if.h> 5451ff1725SRam Amrani #include <linux/qed/qed_roce_if.h> 5551ff1725SRam Amrani #include "qed.h" 5651ff1725SRam Amrani #include "qed_cxt.h" 5751ff1725SRam Amrani #include "qed_hsi.h" 5851ff1725SRam Amrani #include "qed_hw.h" 5951ff1725SRam Amrani #include "qed_init_ops.h" 6051ff1725SRam Amrani #include "qed_int.h" 6151ff1725SRam Amrani #include "qed_ll2.h" 6251ff1725SRam Amrani #include "qed_mcp.h" 6351ff1725SRam Amrani #include "qed_reg_addr.h" 6451ff1725SRam Amrani #include "qed_sp.h" 6551ff1725SRam Amrani #include "qed_roce.h" 66abd49676SRam Amrani #include "qed_ll2.h" 670518c12fSMichal Kalderon #include <linux/qed/qed_ll2_if.h> 6851ff1725SRam Amrani 69be086e7cSMintz, Yuval static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); 7051ff1725SRam Amrani 71be086e7cSMintz, Yuval void qed_roce_async_event(struct qed_hwfn *p_hwfn, 72be086e7cSMintz, Yuval u8 fw_event_code, union rdma_eqe_data 
*rdma_data) 73be086e7cSMintz, Yuval { 74be086e7cSMintz, Yuval if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { 75be086e7cSMintz, Yuval u16 icid = 76be086e7cSMintz, Yuval (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid); 77be086e7cSMintz, Yuval 78be086e7cSMintz, Yuval /* icid release in this async event can occur only if the icid 79be086e7cSMintz, Yuval * was offloaded to the FW. In case it wasn't offloaded this is 80be086e7cSMintz, Yuval * handled in qed_roce_sp_destroy_qp. 81be086e7cSMintz, Yuval */ 82be086e7cSMintz, Yuval qed_roce_free_real_icid(p_hwfn, icid); 83be086e7cSMintz, Yuval } else { 84be086e7cSMintz, Yuval struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events; 85be086e7cSMintz, Yuval 86be086e7cSMintz, Yuval events->affiliated_event(p_hwfn->p_rdma_info->events.context, 87be086e7cSMintz, Yuval fw_event_code, 88be086e7cSMintz, Yuval &rdma_data->async_handle); 89be086e7cSMintz, Yuval } 9051ff1725SRam Amrani } 9151ff1725SRam Amrani 9251ff1725SRam Amrani static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, 93e015d58bSRam Amrani struct qed_bmap *bmap, u32 max_count, char *name) 9451ff1725SRam Amrani { 9551ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); 9651ff1725SRam Amrani 9751ff1725SRam Amrani bmap->max_count = max_count; 9851ff1725SRam Amrani 9951ff1725SRam Amrani bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long), 10051ff1725SRam Amrani GFP_KERNEL); 10151ff1725SRam Amrani if (!bmap->bitmap) { 10251ff1725SRam Amrani DP_NOTICE(p_hwfn, 10351ff1725SRam Amrani "qed bmap alloc failed: cannot allocate memory (bitmap)\n"); 10451ff1725SRam Amrani return -ENOMEM; 10551ff1725SRam Amrani } 10651ff1725SRam Amrani 107e015d58bSRam Amrani snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name); 108e015d58bSRam Amrani 109e015d58bSRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); 11051ff1725SRam Amrani return 0; 11151ff1725SRam Amrani } 11251ff1725SRam Amrani 11351ff1725SRam Amrani static int 
qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, 11451ff1725SRam Amrani struct qed_bmap *bmap, u32 *id_num) 11551ff1725SRam Amrani { 11651ff1725SRam Amrani *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); 117e015d58bSRam Amrani if (*id_num >= bmap->max_count) 11851ff1725SRam Amrani return -EINVAL; 11951ff1725SRam Amrani 12051ff1725SRam Amrani __set_bit(*id_num, bmap->bitmap); 12151ff1725SRam Amrani 122e015d58bSRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n", 123e015d58bSRam Amrani bmap->name, *id_num); 124e015d58bSRam Amrani 12551ff1725SRam Amrani return 0; 12651ff1725SRam Amrani } 12751ff1725SRam Amrani 128be086e7cSMintz, Yuval static void qed_bmap_set_id(struct qed_hwfn *p_hwfn, 129be086e7cSMintz, Yuval struct qed_bmap *bmap, u32 id_num) 130be086e7cSMintz, Yuval { 131be086e7cSMintz, Yuval if (id_num >= bmap->max_count) 132be086e7cSMintz, Yuval return; 133be086e7cSMintz, Yuval 134be086e7cSMintz, Yuval __set_bit(id_num, bmap->bitmap); 135be086e7cSMintz, Yuval } 136be086e7cSMintz, Yuval 13751ff1725SRam Amrani static void qed_bmap_release_id(struct qed_hwfn *p_hwfn, 13851ff1725SRam Amrani struct qed_bmap *bmap, u32 id_num) 13951ff1725SRam Amrani { 14051ff1725SRam Amrani bool b_acquired; 14151ff1725SRam Amrani 14251ff1725SRam Amrani if (id_num >= bmap->max_count) 14351ff1725SRam Amrani return; 14451ff1725SRam Amrani 14551ff1725SRam Amrani b_acquired = test_and_clear_bit(id_num, bmap->bitmap); 14651ff1725SRam Amrani if (!b_acquired) { 147e015d58bSRam Amrani DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n", 148e015d58bSRam Amrani bmap->name, id_num); 14951ff1725SRam Amrani return; 15051ff1725SRam Amrani } 151e015d58bSRam Amrani 152e015d58bSRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n", 153e015d58bSRam Amrani bmap->name, id_num); 15451ff1725SRam Amrani } 15551ff1725SRam Amrani 156be086e7cSMintz, Yuval static int qed_bmap_test_id(struct qed_hwfn *p_hwfn, 157be086e7cSMintz, Yuval struct qed_bmap 
*bmap, u32 id_num) 158be086e7cSMintz, Yuval { 159be086e7cSMintz, Yuval if (id_num >= bmap->max_count) 160be086e7cSMintz, Yuval return -1; 161be086e7cSMintz, Yuval 162be086e7cSMintz, Yuval return test_bit(id_num, bmap->bitmap); 163be086e7cSMintz, Yuval } 164be086e7cSMintz, Yuval 1659331dad1SMintz, Yuval static bool qed_bmap_is_empty(struct qed_bmap *bmap) 1669331dad1SMintz, Yuval { 1679331dad1SMintz, Yuval return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count); 1689331dad1SMintz, Yuval } 1699331dad1SMintz, Yuval 1700189efb8SYuval Mintz static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) 17151ff1725SRam Amrani { 17251ff1725SRam Amrani /* First sb id for RoCE is after all the l2 sb */ 17351ff1725SRam Amrani return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; 17451ff1725SRam Amrani } 17551ff1725SRam Amrani 17651ff1725SRam Amrani static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, 17751ff1725SRam Amrani struct qed_ptt *p_ptt, 17851ff1725SRam Amrani struct qed_rdma_start_in_params *params) 17951ff1725SRam Amrani { 18051ff1725SRam Amrani struct qed_rdma_info *p_rdma_info; 18151ff1725SRam Amrani u32 num_cons, num_tasks; 18251ff1725SRam Amrani int rc = -ENOMEM; 18351ff1725SRam Amrani 18451ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); 18551ff1725SRam Amrani 18651ff1725SRam Amrani /* Allocate a struct with current pf rdma info */ 18751ff1725SRam Amrani p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); 18851ff1725SRam Amrani if (!p_rdma_info) { 18951ff1725SRam Amrani DP_NOTICE(p_hwfn, 19051ff1725SRam Amrani "qed rdma alloc failed: cannot allocate memory (rdma info). 
rc = %d\n", 19151ff1725SRam Amrani rc); 19251ff1725SRam Amrani return rc; 19351ff1725SRam Amrani } 19451ff1725SRam Amrani 19551ff1725SRam Amrani p_hwfn->p_rdma_info = p_rdma_info; 19651ff1725SRam Amrani p_rdma_info->proto = PROTOCOLID_ROCE; 19751ff1725SRam Amrani 1988c93beafSYuval Mintz num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 1998c93beafSYuval Mintz NULL); 20051ff1725SRam Amrani 20151ff1725SRam Amrani p_rdma_info->num_qps = num_cons / 2; 20251ff1725SRam Amrani 20351ff1725SRam Amrani num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); 20451ff1725SRam Amrani 20551ff1725SRam Amrani /* Each MR uses a single task */ 20651ff1725SRam Amrani p_rdma_info->num_mrs = num_tasks; 20751ff1725SRam Amrani 20851ff1725SRam Amrani /* Queue zone lines are shared between RoCE and L2 in such a way that 20951ff1725SRam Amrani * they can be used by each without obstructing the other. 21051ff1725SRam Amrani */ 211be086e7cSMintz, Yuval p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); 212be086e7cSMintz, Yuval p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE); 21351ff1725SRam Amrani 21451ff1725SRam Amrani /* Allocate a struct with device params and fill it */ 21551ff1725SRam Amrani p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); 21651ff1725SRam Amrani if (!p_rdma_info->dev) { 21751ff1725SRam Amrani DP_NOTICE(p_hwfn, 21851ff1725SRam Amrani "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n", 21951ff1725SRam Amrani rc); 22051ff1725SRam Amrani goto free_rdma_info; 22151ff1725SRam Amrani } 22251ff1725SRam Amrani 22351ff1725SRam Amrani /* Allocate a struct with port params and fill it */ 22451ff1725SRam Amrani p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); 22551ff1725SRam Amrani if (!p_rdma_info->port) { 22651ff1725SRam Amrani DP_NOTICE(p_hwfn, 22751ff1725SRam Amrani "qed rdma alloc failed: cannot allocate memory (rdma info port). 
rc = %d\n", 22851ff1725SRam Amrani rc); 22951ff1725SRam Amrani goto free_rdma_dev; 23051ff1725SRam Amrani } 23151ff1725SRam Amrani 23251ff1725SRam Amrani /* Allocate bit map for pd's */ 233e015d58bSRam Amrani rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS, 234e015d58bSRam Amrani "PD"); 23551ff1725SRam Amrani if (rc) { 23651ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 23751ff1725SRam Amrani "Failed to allocate pd_map, rc = %d\n", 23851ff1725SRam Amrani rc); 23951ff1725SRam Amrani goto free_rdma_port; 24051ff1725SRam Amrani } 24151ff1725SRam Amrani 24251ff1725SRam Amrani /* Allocate DPI bitmap */ 24351ff1725SRam Amrani rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, 244e015d58bSRam Amrani p_hwfn->dpi_count, "DPI"); 24551ff1725SRam Amrani if (rc) { 24651ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 24751ff1725SRam Amrani "Failed to allocate DPI bitmap, rc = %d\n", rc); 24851ff1725SRam Amrani goto free_pd_map; 24951ff1725SRam Amrani } 25051ff1725SRam Amrani 25151ff1725SRam Amrani /* Allocate bitmap for cq's. The maximum number of CQs is bounded to 25251ff1725SRam Amrani * twice the number of QPs. 25351ff1725SRam Amrani */ 25451ff1725SRam Amrani rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, 255e015d58bSRam Amrani p_rdma_info->num_qps * 2, "CQ"); 25651ff1725SRam Amrani if (rc) { 25751ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 25851ff1725SRam Amrani "Failed to allocate cq bitmap, rc = %d\n", rc); 25951ff1725SRam Amrani goto free_dpi_map; 26051ff1725SRam Amrani } 26151ff1725SRam Amrani 26251ff1725SRam Amrani /* Allocate bitmap for toggle bit for cq icids 26351ff1725SRam Amrani * We toggle the bit every time we create or resize cq for a given icid. 26451ff1725SRam Amrani * The maximum number of CQs is bounded to twice the number of QPs. 
26551ff1725SRam Amrani */ 26651ff1725SRam Amrani rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, 267e015d58bSRam Amrani p_rdma_info->num_qps * 2, "Toggle"); 26851ff1725SRam Amrani if (rc) { 26951ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 27051ff1725SRam Amrani "Failed to allocate toogle bits, rc = %d\n", rc); 27151ff1725SRam Amrani goto free_cq_map; 27251ff1725SRam Amrani } 27351ff1725SRam Amrani 27451ff1725SRam Amrani /* Allocate bitmap for itids */ 27551ff1725SRam Amrani rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, 276e015d58bSRam Amrani p_rdma_info->num_mrs, "MR"); 27751ff1725SRam Amrani if (rc) { 27851ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 27951ff1725SRam Amrani "Failed to allocate itids bitmaps, rc = %d\n", rc); 28051ff1725SRam Amrani goto free_toggle_map; 28151ff1725SRam Amrani } 28251ff1725SRam Amrani 28351ff1725SRam Amrani /* Allocate bitmap for cids used for qps. */ 284e015d58bSRam Amrani rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons, 285e015d58bSRam Amrani "CID"); 28651ff1725SRam Amrani if (rc) { 28751ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 28851ff1725SRam Amrani "Failed to allocate cid bitmap, rc = %d\n", rc); 28951ff1725SRam Amrani goto free_tid_map; 29051ff1725SRam Amrani } 29151ff1725SRam Amrani 292be086e7cSMintz, Yuval /* Allocate bitmap for cids used for responders/requesters. 
*/ 293e015d58bSRam Amrani rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons, 294e015d58bSRam Amrani "REAL_CID"); 295be086e7cSMintz, Yuval if (rc) { 296be086e7cSMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 297be086e7cSMintz, Yuval "Failed to allocate real cid bitmap, rc = %d\n", rc); 298be086e7cSMintz, Yuval goto free_cid_map; 299be086e7cSMintz, Yuval } 30051ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); 30151ff1725SRam Amrani return 0; 30251ff1725SRam Amrani 303be086e7cSMintz, Yuval free_cid_map: 304be086e7cSMintz, Yuval kfree(p_rdma_info->cid_map.bitmap); 30551ff1725SRam Amrani free_tid_map: 30651ff1725SRam Amrani kfree(p_rdma_info->tid_map.bitmap); 30751ff1725SRam Amrani free_toggle_map: 30851ff1725SRam Amrani kfree(p_rdma_info->toggle_bits.bitmap); 30951ff1725SRam Amrani free_cq_map: 31051ff1725SRam Amrani kfree(p_rdma_info->cq_map.bitmap); 31151ff1725SRam Amrani free_dpi_map: 31251ff1725SRam Amrani kfree(p_rdma_info->dpi_map.bitmap); 31351ff1725SRam Amrani free_pd_map: 31451ff1725SRam Amrani kfree(p_rdma_info->pd_map.bitmap); 31551ff1725SRam Amrani free_rdma_port: 31651ff1725SRam Amrani kfree(p_rdma_info->port); 31751ff1725SRam Amrani free_rdma_dev: 31851ff1725SRam Amrani kfree(p_rdma_info->dev); 31951ff1725SRam Amrani free_rdma_info: 32051ff1725SRam Amrani kfree(p_rdma_info); 32151ff1725SRam Amrani 32251ff1725SRam Amrani return rc; 32351ff1725SRam Amrani } 32451ff1725SRam Amrani 325e015d58bSRam Amrani static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, 326e015d58bSRam Amrani struct qed_bmap *bmap, bool check) 327e015d58bSRam Amrani { 328e015d58bSRam Amrani int weight = bitmap_weight(bmap->bitmap, bmap->max_count); 329e015d58bSRam Amrani int last_line = bmap->max_count / (64 * 8); 330e015d58bSRam Amrani int last_item = last_line * 8 + 331e015d58bSRam Amrani DIV_ROUND_UP(bmap->max_count % (64 * 8), 64); 332e015d58bSRam Amrani u64 *pmap = (u64 *)bmap->bitmap; 333e015d58bSRam Amrani int line, item, offset; 
334e015d58bSRam Amrani u8 str_last_line[200] = { 0 }; 335e015d58bSRam Amrani 336e015d58bSRam Amrani if (!weight || !check) 337e015d58bSRam Amrani goto end; 338e015d58bSRam Amrani 339e015d58bSRam Amrani DP_NOTICE(p_hwfn, 340e015d58bSRam Amrani "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n", 341e015d58bSRam Amrani bmap->name, bmap->max_count, weight); 342e015d58bSRam Amrani 343e015d58bSRam Amrani /* print aligned non-zero lines, if any */ 344e015d58bSRam Amrani for (item = 0, line = 0; line < last_line; line++, item += 8) 345e015d58bSRam Amrani if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8)) 346e015d58bSRam Amrani DP_NOTICE(p_hwfn, 347e015d58bSRam Amrani "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", 348e015d58bSRam Amrani line, 349e015d58bSRam Amrani pmap[item], 350e015d58bSRam Amrani pmap[item + 1], 351e015d58bSRam Amrani pmap[item + 2], 352e015d58bSRam Amrani pmap[item + 3], 353e015d58bSRam Amrani pmap[item + 4], 354e015d58bSRam Amrani pmap[item + 5], 355e015d58bSRam Amrani pmap[item + 6], pmap[item + 7]); 356e015d58bSRam Amrani 357e015d58bSRam Amrani /* print last unaligned non-zero line, if any */ 358e015d58bSRam Amrani if ((bmap->max_count % (64 * 8)) && 359e015d58bSRam Amrani (bitmap_weight((unsigned long *)&pmap[item], 360e015d58bSRam Amrani bmap->max_count - item * 64))) { 361e015d58bSRam Amrani offset = sprintf(str_last_line, "line 0x%04x: ", line); 362e015d58bSRam Amrani for (; item < last_item; item++) 363e015d58bSRam Amrani offset += sprintf(str_last_line + offset, 364e015d58bSRam Amrani "0x%016llx ", pmap[item]); 365e015d58bSRam Amrani DP_NOTICE(p_hwfn, "%s\n", str_last_line); 366e015d58bSRam Amrani } 367e015d58bSRam Amrani 368e015d58bSRam Amrani end: 369e015d58bSRam Amrani kfree(bmap->bitmap); 370e015d58bSRam Amrani bmap->bitmap = NULL; 371e015d58bSRam Amrani } 372e015d58bSRam Amrani 3730189efb8SYuval Mintz static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) 37451ff1725SRam 
Amrani { 375be086e7cSMintz, Yuval struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map; 37651ff1725SRam Amrani struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 377be086e7cSMintz, Yuval int wait_count = 0; 378be086e7cSMintz, Yuval 379be086e7cSMintz, Yuval /* when destroying a_RoCE QP the control is returned to the user after 380be086e7cSMintz, Yuval * the synchronous part. The asynchronous part may take a little longer. 381be086e7cSMintz, Yuval * We delay for a short while if an async destroy QP is still expected. 382be086e7cSMintz, Yuval * Beyond the added delay we clear the bitmap anyway. 383be086e7cSMintz, Yuval */ 384be086e7cSMintz, Yuval while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) { 385be086e7cSMintz, Yuval msleep(100); 386be086e7cSMintz, Yuval if (wait_count++ > 20) { 387be086e7cSMintz, Yuval DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n"); 388be086e7cSMintz, Yuval break; 389be086e7cSMintz, Yuval } 390be086e7cSMintz, Yuval } 39151ff1725SRam Amrani 392e015d58bSRam Amrani qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); 393e015d58bSRam Amrani qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); 394e015d58bSRam Amrani qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); 395e015d58bSRam Amrani qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); 396e015d58bSRam Amrani qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); 397e015d58bSRam Amrani qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); 39851ff1725SRam Amrani 39951ff1725SRam Amrani kfree(p_rdma_info->port); 40051ff1725SRam Amrani kfree(p_rdma_info->dev); 40151ff1725SRam Amrani 40251ff1725SRam Amrani kfree(p_rdma_info); 40351ff1725SRam Amrani } 40451ff1725SRam Amrani 40551ff1725SRam Amrani static void qed_rdma_free(struct qed_hwfn *p_hwfn) 40651ff1725SRam Amrani { 40751ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); 40851ff1725SRam Amrani 40951ff1725SRam Amrani qed_rdma_resc_free(p_hwfn); 
41051ff1725SRam Amrani } 41151ff1725SRam Amrani 41251ff1725SRam Amrani static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) 41351ff1725SRam Amrani { 41451ff1725SRam Amrani guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2; 41551ff1725SRam Amrani guid[1] = p_hwfn->hw_info.hw_mac_addr[1]; 41651ff1725SRam Amrani guid[2] = p_hwfn->hw_info.hw_mac_addr[2]; 41751ff1725SRam Amrani guid[3] = 0xff; 41851ff1725SRam Amrani guid[4] = 0xfe; 41951ff1725SRam Amrani guid[5] = p_hwfn->hw_info.hw_mac_addr[3]; 42051ff1725SRam Amrani guid[6] = p_hwfn->hw_info.hw_mac_addr[4]; 42151ff1725SRam Amrani guid[7] = p_hwfn->hw_info.hw_mac_addr[5]; 42251ff1725SRam Amrani } 42351ff1725SRam Amrani 42451ff1725SRam Amrani static void qed_rdma_init_events(struct qed_hwfn *p_hwfn, 42551ff1725SRam Amrani struct qed_rdma_start_in_params *params) 42651ff1725SRam Amrani { 42751ff1725SRam Amrani struct qed_rdma_events *events; 42851ff1725SRam Amrani 42951ff1725SRam Amrani events = &p_hwfn->p_rdma_info->events; 43051ff1725SRam Amrani 43151ff1725SRam Amrani events->unaffiliated_event = params->events->unaffiliated_event; 43251ff1725SRam Amrani events->affiliated_event = params->events->affiliated_event; 43351ff1725SRam Amrani events->context = params->events->context; 43451ff1725SRam Amrani } 43551ff1725SRam Amrani 43651ff1725SRam Amrani static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, 43751ff1725SRam Amrani struct qed_rdma_start_in_params *params) 43851ff1725SRam Amrani { 43951ff1725SRam Amrani struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 44051ff1725SRam Amrani struct qed_dev *cdev = p_hwfn->cdev; 44151ff1725SRam Amrani u32 pci_status_control; 44251ff1725SRam Amrani u32 num_qps; 44351ff1725SRam Amrani 44451ff1725SRam Amrani /* Vendor specific information */ 44551ff1725SRam Amrani dev->vendor_id = cdev->vendor_id; 44651ff1725SRam Amrani dev->vendor_part_id = cdev->device_id; 44751ff1725SRam Amrani dev->hw_ver = 0; 44851ff1725SRam Amrani dev->fw_ver = (FW_MAJOR_VERSION << 24) | 
(FW_MINOR_VERSION << 16) | 44951ff1725SRam Amrani (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); 45051ff1725SRam Amrani 45151ff1725SRam Amrani qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid); 45251ff1725SRam Amrani dev->node_guid = dev->sys_image_guid; 45351ff1725SRam Amrani 45451ff1725SRam Amrani dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE, 45551ff1725SRam Amrani RDMA_MAX_SGE_PER_RQ_WQE); 45651ff1725SRam Amrani 45751ff1725SRam Amrani if (cdev->rdma_max_sge) 45851ff1725SRam Amrani dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); 45951ff1725SRam Amrani 46051ff1725SRam Amrani dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; 46151ff1725SRam Amrani 46251ff1725SRam Amrani dev->max_inline = (cdev->rdma_max_inline) ? 46351ff1725SRam Amrani min_t(u32, cdev->rdma_max_inline, dev->max_inline) : 46451ff1725SRam Amrani dev->max_inline; 46551ff1725SRam Amrani 46651ff1725SRam Amrani dev->max_wqe = QED_RDMA_MAX_WQE; 46751ff1725SRam Amrani dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ); 46851ff1725SRam Amrani 46951ff1725SRam Amrani /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because 47051ff1725SRam Amrani * it is up-aligned to 16 and then to ILT page size within qed cxt. 47151ff1725SRam Amrani * This is OK in terms of ILT but we don't want to configure the FW 47251ff1725SRam Amrani * above its abilities 47351ff1725SRam Amrani */ 47451ff1725SRam Amrani num_qps = ROCE_MAX_QPS; 47551ff1725SRam Amrani num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps); 47651ff1725SRam Amrani dev->max_qp = num_qps; 47751ff1725SRam Amrani 47851ff1725SRam Amrani /* CQs uses the same icids that QPs use hence they are limited by the 47951ff1725SRam Amrani * number of icids. There are two icids per QP. 
48051ff1725SRam Amrani */ 48151ff1725SRam Amrani dev->max_cq = num_qps * 2; 48251ff1725SRam Amrani 48351ff1725SRam Amrani /* The number of mrs is smaller by 1 since the first is reserved */ 48451ff1725SRam Amrani dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1; 48551ff1725SRam Amrani dev->max_mr_size = QED_RDMA_MAX_MR_SIZE; 48651ff1725SRam Amrani 48751ff1725SRam Amrani /* The maximum CQE capacity per CQ supported. 48851ff1725SRam Amrani * max number of cqes will be in two layer pbl, 48951ff1725SRam Amrani * 8 is the pointer size in bytes 49051ff1725SRam Amrani * 32 is the size of cq element in bytes 49151ff1725SRam Amrani */ 49251ff1725SRam Amrani if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS) 49351ff1725SRam Amrani dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT; 49451ff1725SRam Amrani else 49551ff1725SRam Amrani dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT; 49651ff1725SRam Amrani 49751ff1725SRam Amrani dev->max_mw = 0; 49851ff1725SRam Amrani dev->max_fmr = QED_RDMA_MAX_FMR; 49951ff1725SRam Amrani dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8); 50051ff1725SRam Amrani dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; 50151ff1725SRam Amrani dev->max_pkey = QED_RDMA_MAX_P_KEY; 50251ff1725SRam Amrani 50351ff1725SRam Amrani dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / 50451ff1725SRam Amrani (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); 50551ff1725SRam Amrani dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / 50651ff1725SRam Amrani RDMA_REQ_RD_ATOMIC_ELM_SIZE; 50751ff1725SRam Amrani dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc * 50851ff1725SRam Amrani p_hwfn->p_rdma_info->num_qps; 50951ff1725SRam Amrani dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS; 51051ff1725SRam Amrani dev->dev_ack_delay = QED_RDMA_ACK_DELAY; 51151ff1725SRam Amrani dev->max_pd = RDMA_MAX_PDS; 51251ff1725SRam Amrani dev->max_ah = p_hwfn->p_rdma_info->num_qps; 51351ff1725SRam Amrani dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE); 51451ff1725SRam 
Amrani 51551ff1725SRam Amrani /* Set capablities */ 51651ff1725SRam Amrani dev->dev_caps = 0; 51751ff1725SRam Amrani SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1); 51851ff1725SRam Amrani SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1); 51951ff1725SRam Amrani SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1); 52051ff1725SRam Amrani SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1); 52151ff1725SRam Amrani SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1); 52251ff1725SRam Amrani SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1); 52351ff1725SRam Amrani SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1); 52451ff1725SRam Amrani SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); 52551ff1725SRam Amrani 52651ff1725SRam Amrani /* Check atomic operations support in PCI configuration space. */ 52751ff1725SRam Amrani pci_read_config_dword(cdev->pdev, 52851ff1725SRam Amrani cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2, 52951ff1725SRam Amrani &pci_status_control); 53051ff1725SRam Amrani 53151ff1725SRam Amrani if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) 53251ff1725SRam Amrani SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); 53351ff1725SRam Amrani } 53451ff1725SRam Amrani 53551ff1725SRam Amrani static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) 53651ff1725SRam Amrani { 53751ff1725SRam Amrani struct qed_rdma_port *port = p_hwfn->p_rdma_info->port; 53851ff1725SRam Amrani struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 53951ff1725SRam Amrani 54051ff1725SRam Amrani port->port_state = p_hwfn->mcp_info->link_output.link_up ? 
54151ff1725SRam Amrani QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; 54251ff1725SRam Amrani 54351ff1725SRam Amrani port->max_msg_size = min_t(u64, 54451ff1725SRam Amrani (dev->max_mr_mw_fmr_size * 54551ff1725SRam Amrani p_hwfn->cdev->rdma_max_sge), 54651ff1725SRam Amrani BIT(31)); 54751ff1725SRam Amrani 54851ff1725SRam Amrani port->pkey_bad_counter = 0; 54951ff1725SRam Amrani } 55051ff1725SRam Amrani 55151ff1725SRam Amrani static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 55251ff1725SRam Amrani { 55351ff1725SRam Amrani u32 ll2_ethertype_en; 55451ff1725SRam Amrani 55551ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n"); 55651ff1725SRam Amrani p_hwfn->b_rdma_enabled_in_prs = false; 55751ff1725SRam Amrani 55851ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); 55951ff1725SRam Amrani 56051ff1725SRam Amrani p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE; 56151ff1725SRam Amrani 56251ff1725SRam Amrani /* We delay writing to this reg until first cid is allocated. 
See 56351ff1725SRam Amrani * qed_cxt_dynamic_ilt_alloc function for more details 56451ff1725SRam Amrani */ 56551ff1725SRam Amrani ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); 56651ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, 56751ff1725SRam Amrani (ll2_ethertype_en | 0x01)); 56851ff1725SRam Amrani 56951ff1725SRam Amrani if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) { 57051ff1725SRam Amrani DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n"); 57151ff1725SRam Amrani return -EINVAL; 57251ff1725SRam Amrani } 57351ff1725SRam Amrani 57451ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); 57551ff1725SRam Amrani return 0; 57651ff1725SRam Amrani } 57751ff1725SRam Amrani 57851ff1725SRam Amrani static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, 57951ff1725SRam Amrani struct qed_rdma_start_in_params *params, 58051ff1725SRam Amrani struct qed_ptt *p_ptt) 58151ff1725SRam Amrani { 58251ff1725SRam Amrani struct rdma_init_func_ramrod_data *p_ramrod; 58351ff1725SRam Amrani struct qed_rdma_cnq_params *p_cnq_pbl_list; 58451ff1725SRam Amrani struct rdma_init_func_hdr *p_params_header; 58551ff1725SRam Amrani struct rdma_cnq_params *p_cnq_params; 58651ff1725SRam Amrani struct qed_sp_init_data init_data; 58751ff1725SRam Amrani struct qed_spq_entry *p_ent; 58851ff1725SRam Amrani u32 cnq_id, sb_id; 58950a20714SMintz, Yuval u16 igu_sb_id; 59051ff1725SRam Amrani int rc; 59151ff1725SRam Amrani 59251ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); 59351ff1725SRam Amrani 59451ff1725SRam Amrani /* Save the number of cnqs for the function close ramrod */ 59551ff1725SRam Amrani p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq; 59651ff1725SRam Amrani 59751ff1725SRam Amrani /* Get SPQ entry */ 59851ff1725SRam Amrani memset(&init_data, 0, sizeof(init_data)); 59951ff1725SRam Amrani init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 60051ff1725SRam Amrani 
init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 60151ff1725SRam Amrani 60251ff1725SRam Amrani rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT, 60351ff1725SRam Amrani p_hwfn->p_rdma_info->proto, &init_data); 60451ff1725SRam Amrani if (rc) 60551ff1725SRam Amrani return rc; 60651ff1725SRam Amrani 60751ff1725SRam Amrani p_ramrod = &p_ent->ramrod.roce_init_func.rdma; 60851ff1725SRam Amrani 60951ff1725SRam Amrani p_params_header = &p_ramrod->params_header; 61051ff1725SRam Amrani p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, 61151ff1725SRam Amrani QED_RDMA_CNQ_RAM); 61251ff1725SRam Amrani p_params_header->num_cnqs = params->desired_cnq; 61351ff1725SRam Amrani 61451ff1725SRam Amrani if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS) 61551ff1725SRam Amrani p_params_header->cq_ring_mode = 1; 61651ff1725SRam Amrani else 61751ff1725SRam Amrani p_params_header->cq_ring_mode = 0; 61851ff1725SRam Amrani 61951ff1725SRam Amrani for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { 62051ff1725SRam Amrani sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); 62150a20714SMintz, Yuval igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); 62250a20714SMintz, Yuval p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id); 62351ff1725SRam Amrani p_cnq_params = &p_ramrod->cnq_params[cnq_id]; 62451ff1725SRam Amrani p_cnq_pbl_list = ¶ms->cnq_pbl_list[cnq_id]; 62551ff1725SRam Amrani 62651ff1725SRam Amrani p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; 62751ff1725SRam Amrani p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages; 62851ff1725SRam Amrani 62951ff1725SRam Amrani DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr, 63051ff1725SRam Amrani p_cnq_pbl_list->pbl_ptr); 63151ff1725SRam Amrani 63251ff1725SRam Amrani /* we assume here that cnq_id and qz_offset are the same */ 63351ff1725SRam Amrani p_cnq_params->queue_zone_num = 63451ff1725SRam Amrani cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base + 63551ff1725SRam Amrani cnq_id); 63651ff1725SRam Amrani } 
63751ff1725SRam Amrani 63851ff1725SRam Amrani return qed_spq_post(p_hwfn, p_ent, NULL); 63951ff1725SRam Amrani } 64051ff1725SRam Amrani 6410189efb8SYuval Mintz static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) 6420189efb8SYuval Mintz { 6430189efb8SYuval Mintz struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 6440189efb8SYuval Mintz int rc; 6450189efb8SYuval Mintz 6460189efb8SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); 6470189efb8SYuval Mintz 6480189efb8SYuval Mintz spin_lock_bh(&p_hwfn->p_rdma_info->lock); 6490189efb8SYuval Mintz rc = qed_rdma_bmap_alloc_id(p_hwfn, 6500189efb8SYuval Mintz &p_hwfn->p_rdma_info->tid_map, itid); 6510189efb8SYuval Mintz spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 6520189efb8SYuval Mintz if (rc) 6530189efb8SYuval Mintz goto out; 6540189efb8SYuval Mintz 6550189efb8SYuval Mintz rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); 6560189efb8SYuval Mintz out: 6570189efb8SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); 6580189efb8SYuval Mintz return rc; 6590189efb8SYuval Mintz } 6600189efb8SYuval Mintz 66151ff1725SRam Amrani static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) 66251ff1725SRam Amrani { 66351ff1725SRam Amrani struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 66451ff1725SRam Amrani 66551ff1725SRam Amrani /* The first DPI is reserved for the Kernel */ 66651ff1725SRam Amrani __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap); 66751ff1725SRam Amrani 66851ff1725SRam Amrani /* Tid 0 will be used as the key for "reserved MR". 66951ff1725SRam Amrani * The driver should allocate memory for it so it can be loaded but no 67051ff1725SRam Amrani * ramrod should be passed on it. 
67151ff1725SRam Amrani */ 67251ff1725SRam Amrani qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey); 67351ff1725SRam Amrani if (dev->reserved_lkey != RDMA_RESERVED_LKEY) { 67451ff1725SRam Amrani DP_NOTICE(p_hwfn, 67551ff1725SRam Amrani "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n"); 67651ff1725SRam Amrani return -EINVAL; 67751ff1725SRam Amrani } 67851ff1725SRam Amrani 67951ff1725SRam Amrani return 0; 68051ff1725SRam Amrani } 68151ff1725SRam Amrani 68251ff1725SRam Amrani static int qed_rdma_setup(struct qed_hwfn *p_hwfn, 68351ff1725SRam Amrani struct qed_ptt *p_ptt, 68451ff1725SRam Amrani struct qed_rdma_start_in_params *params) 68551ff1725SRam Amrani { 68651ff1725SRam Amrani int rc; 68751ff1725SRam Amrani 68851ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); 68951ff1725SRam Amrani 69051ff1725SRam Amrani spin_lock_init(&p_hwfn->p_rdma_info->lock); 69151ff1725SRam Amrani 69251ff1725SRam Amrani qed_rdma_init_devinfo(p_hwfn, params); 69351ff1725SRam Amrani qed_rdma_init_port(p_hwfn); 69451ff1725SRam Amrani qed_rdma_init_events(p_hwfn, params); 69551ff1725SRam Amrani 69651ff1725SRam Amrani rc = qed_rdma_reserve_lkey(p_hwfn); 69751ff1725SRam Amrani if (rc) 69851ff1725SRam Amrani return rc; 69951ff1725SRam Amrani 70051ff1725SRam Amrani rc = qed_rdma_init_hw(p_hwfn, p_ptt); 70151ff1725SRam Amrani if (rc) 70251ff1725SRam Amrani return rc; 70351ff1725SRam Amrani 70451ff1725SRam Amrani return qed_rdma_start_fw(p_hwfn, params, p_ptt); 70551ff1725SRam Amrani } 70651ff1725SRam Amrani 7070189efb8SYuval Mintz static int qed_rdma_stop(void *rdma_cxt) 70851ff1725SRam Amrani { 70951ff1725SRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 71051ff1725SRam Amrani struct rdma_close_func_ramrod_data *p_ramrod; 71151ff1725SRam Amrani struct qed_sp_init_data init_data; 71251ff1725SRam Amrani struct qed_spq_entry *p_ent; 71351ff1725SRam Amrani struct qed_ptt *p_ptt; 71451ff1725SRam Amrani u32 ll2_ethertype_en; 71551ff1725SRam Amrani int rc = -EBUSY; 
71651ff1725SRam Amrani 71751ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n"); 71851ff1725SRam Amrani 71951ff1725SRam Amrani p_ptt = qed_ptt_acquire(p_hwfn); 72051ff1725SRam Amrani if (!p_ptt) { 72151ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n"); 72251ff1725SRam Amrani return rc; 72351ff1725SRam Amrani } 72451ff1725SRam Amrani 72551ff1725SRam Amrani /* Disable RoCE search */ 72651ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); 72751ff1725SRam Amrani p_hwfn->b_rdma_enabled_in_prs = false; 72851ff1725SRam Amrani 72951ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); 73051ff1725SRam Amrani 73151ff1725SRam Amrani ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); 73251ff1725SRam Amrani 73351ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, 73451ff1725SRam Amrani (ll2_ethertype_en & 0xFFFE)); 73551ff1725SRam Amrani 73651ff1725SRam Amrani qed_ptt_release(p_hwfn, p_ptt); 73751ff1725SRam Amrani 73851ff1725SRam Amrani /* Get SPQ entry */ 73951ff1725SRam Amrani memset(&init_data, 0, sizeof(init_data)); 74051ff1725SRam Amrani init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 74151ff1725SRam Amrani init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 74251ff1725SRam Amrani 74351ff1725SRam Amrani /* Stop RoCE */ 74451ff1725SRam Amrani rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE, 74551ff1725SRam Amrani p_hwfn->p_rdma_info->proto, &init_data); 74651ff1725SRam Amrani if (rc) 74751ff1725SRam Amrani goto out; 74851ff1725SRam Amrani 74951ff1725SRam Amrani p_ramrod = &p_ent->ramrod.rdma_close_func; 75051ff1725SRam Amrani 75151ff1725SRam Amrani p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs; 75251ff1725SRam Amrani p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); 75351ff1725SRam Amrani 75451ff1725SRam Amrani rc = qed_spq_post(p_hwfn, p_ent, NULL); 75551ff1725SRam Amrani 75651ff1725SRam Amrani out: 75751ff1725SRam 
Amrani qed_rdma_free(p_hwfn); 75851ff1725SRam Amrani 75951ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc); 76051ff1725SRam Amrani return rc; 76151ff1725SRam Amrani } 76251ff1725SRam Amrani 7630189efb8SYuval Mintz static int qed_rdma_add_user(void *rdma_cxt, 76451ff1725SRam Amrani struct qed_rdma_add_user_out_params *out_params) 76551ff1725SRam Amrani { 76651ff1725SRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 76751ff1725SRam Amrani u32 dpi_start_offset; 76851ff1725SRam Amrani u32 returned_id = 0; 76951ff1725SRam Amrani int rc; 77051ff1725SRam Amrani 77151ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n"); 77251ff1725SRam Amrani 77351ff1725SRam Amrani /* Allocate DPI */ 77451ff1725SRam Amrani spin_lock_bh(&p_hwfn->p_rdma_info->lock); 77551ff1725SRam Amrani rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 77651ff1725SRam Amrani &returned_id); 77751ff1725SRam Amrani spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 77851ff1725SRam Amrani 77951ff1725SRam Amrani out_params->dpi = (u16)returned_id; 78051ff1725SRam Amrani 78151ff1725SRam Amrani /* Calculate the corresponding DPI address */ 78251ff1725SRam Amrani dpi_start_offset = p_hwfn->dpi_start_offset; 78351ff1725SRam Amrani 78451ff1725SRam Amrani out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells + 78551ff1725SRam Amrani dpi_start_offset + 78651ff1725SRam Amrani ((out_params->dpi) * p_hwfn->dpi_size)); 78751ff1725SRam Amrani 78851ff1725SRam Amrani out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr + 78951ff1725SRam Amrani dpi_start_offset + 79051ff1725SRam Amrani ((out_params->dpi) * p_hwfn->dpi_size); 79151ff1725SRam Amrani 79251ff1725SRam Amrani out_params->dpi_size = p_hwfn->dpi_size; 79320b1bd96SRam Amrani out_params->wid_count = p_hwfn->wid_count; 79451ff1725SRam Amrani 79551ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); 79651ff1725SRam Amrani return rc; 79751ff1725SRam 
Amrani } 79851ff1725SRam Amrani 7990189efb8SYuval Mintz static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) 800c295f86eSRam Amrani { 801c295f86eSRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 802c295f86eSRam Amrani struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; 803c295f86eSRam Amrani 804c295f86eSRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n"); 805c295f86eSRam Amrani 806c295f86eSRam Amrani /* Link may have changed */ 807c295f86eSRam Amrani p_port->port_state = p_hwfn->mcp_info->link_output.link_up ? 808c295f86eSRam Amrani QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; 809c295f86eSRam Amrani 810c295f86eSRam Amrani p_port->link_speed = p_hwfn->mcp_info->link_output.speed; 811c295f86eSRam Amrani 812793ea8a9SRam Amrani p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE; 813793ea8a9SRam Amrani 814c295f86eSRam Amrani return p_port; 815c295f86eSRam Amrani } 816c295f86eSRam Amrani 8170189efb8SYuval Mintz static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) 81851ff1725SRam Amrani { 81951ff1725SRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 82051ff1725SRam Amrani 82151ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n"); 82251ff1725SRam Amrani 82351ff1725SRam Amrani /* Return struct with device parameters */ 82451ff1725SRam Amrani return p_hwfn->p_rdma_info->dev; 82551ff1725SRam Amrani } 82651ff1725SRam Amrani 8270189efb8SYuval Mintz static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) 828ee8eaea3SRam Amrani { 829ee8eaea3SRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 830ee8eaea3SRam Amrani 831ee8eaea3SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); 832ee8eaea3SRam Amrani 833ee8eaea3SRam Amrani spin_lock_bh(&p_hwfn->p_rdma_info->lock); 834ee8eaea3SRam Amrani qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); 835ee8eaea3SRam Amrani spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 836ee8eaea3SRam Amrani } 837ee8eaea3SRam Amrani 
/* Ring the CNQ consumer doorbell: write the new producer value into the
 * USTORM queue-zone consumer slot for this CNQ's queue zone.
 */
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;

	/* NOTE(review): bound check uses '>' — if max_queue_zones is a count
	 * (zones 0..max-1), this should arguably be '>='; confirm against the
	 * field's semantics before changing.
	 */
	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
		DP_NOTICE(p_hwfn,
			  "queue zone offset %d is too large (max is %d)\n",
			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
		return;
	}

	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

/* Fill the RDMA-specific device info exported to the protocol driver
 * (rdma type, user DPM capability) plus the common dev info.
 */
static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);

	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_RDMA_TYPE_ROCE;
	/* User-space doorbell push (DPM) is usable only when the doorbell
	 * BAR is large enough (no-eDPM flag clear).
	 */
	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

/* Number of status blocks used by L2 before the RDMA CNQ SBs start. */
static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
	else
		/* NOTE(review): multiplying by num_hwfns here is a no-op
		 * (num_hwfns == 1 on this branch); kept as-is.
		 */
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

/* CNQs usable = min(CNQ feature count, MSI-X vectors reserved for RDMA). */
static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}

/* Claim @cnt fastpath interrupt vectors for RDMA. Only MSI-X is supported;
 * returns the number of vectors actually granted, or a negative errno.
 */
static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

/* Report the MSI-X vectors reserved for RDMA; valid only after
 * qed_rdma_set_int() has configured the fastpath.
 */
static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

/* Allocate an unused protection-domain id from the pd bitmap. */
static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

/* Return a protection-domain id to the pd bitmap for reuse. */
static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

/* Flip the per-icid toggle bit used by FW to distinguish consecutive
 * create/resize-CQ ramrods on the same icid, and return its new value.
 */
static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* the function toggle the bit that is related to a given icid
	 * and returns the new toggle bit's value
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}

/* Create a completion queue: allocate an icid (+ ILT page if needed) and
 * send the RDMA_RAMROD_CREATE_CQ ramrod. On any failure the icid is
 * released; the toggle bit is restored if the ramrod post fails.
 */
static int qed_rdma_create_cq(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = params->int_timeout;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	spin_lock_bh(&p_info->lock);
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	spin_unlock_bh(&p_info->lock);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}

/* Destroy a CQ: send the RDMA_RAMROD_DESTROY_CQ ramrod (its output —
 * number of outstanding CQ notifications — comes back through a DMA-able
 * scratch buffer), then release the icid back to the cq bitmap.
 */
static int
qed_rdma_destroy_cq(void *rdma_cxt,
		    struct qed_rdma_destroy_cq_in_params *in_params,
		    struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    (struct rdma_destroy_cq_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn,
							 p_hwfn->
							 p_rdma_info->proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

/* Pack a 6-byte MAC into three little-endian 16-bit words, as FW expects.
 * NOTE(review): p_fw_mac holds LE data; an __le16 * type would let sparse
 * check this — confirm against the ramrod struct before annotating.
 */
static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

/* Copy the QP's source/destination GIDs into ramrod fields. For RoCEv2
 * over IPv4 the address occupies the highest dword with the rest zeroed.
 */
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
			       __le32 *dst_gid)
{
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		memset(src_gid, 0, sizeof(union qed_gid));
		memset(dst_gid, 0, sizeof(union qed_gid));
		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
	} else {
		/* GIDs and IPv6 addresses coincide in location and size */
		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
		}
	}
}

/* Map driver roce_mode to the FW roce_flavor enum.
 * NOTE(review): the ROCE_V2_IPV6 and MAX_ROCE_MODE branches assign
 * roce_mode constants where a roce_flavor (RROCE_IPV6 / MAX_ROCE_FLAVOR)
 * is expected — presumably numerically identical, but verify against the
 * HSI enum definitions before relying on it.
 */
static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	enum roce_flavor flavor;

	switch (roce_mode) {
	case ROCE_V1:
		flavor = PLAIN_ROCE;
		break;
	case ROCE_V2_IPV4:
		flavor = RROCE_IPV4;
		break;
	case ROCE_V2_IPV6:
		flavor = ROCE_V2_IPV6;
		break;
	default:
		flavor = MAX_ROCE_MODE;
		break;
	}
	return flavor;
}

/* Release the adjacent responder/requester CID pair allocated by
 * qed_roce_alloc_cid (@cid is the responder, @cid + 1 the requester).
 */
void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

/* Allocate two adjacent icids for a QP (responder = even, requester =
 * responder + 1), ensure their ILT lines exist, and return the responder
 * icid (already offset by the protocol CID start) through @cid.
 */
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 responder_icid;
	u32 requester_icid;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &responder_icid);
	if (rc) {
		spin_unlock_bh(&p_rdma_info->lock);
		return rc;
	}

	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	/* NOTE(review): if this second allocation failed, the err path below
	 * releases requester_icid, which is never written on failure —
	 * potential use of an uninitialized value; also, once the proto CID
	 * start has been added, the ids released at err no longer match the
	 * raw bitmap ids. Verify qed_rdma_bmap_alloc_id/release_id semantics
	 * before restructuring.
	 */
	if (rc)
		goto err;

	/* the two icid's should be adjacent */
	if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n");
		rc = -EINVAL;
		goto err;
	}

	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);
	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);

	/* If these icids require a new ILT line allocate DMA-able context for
	 * an ILT page
	 */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
	if (rc)
		goto err;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
	if (rc)
		goto err;

	*cid = (u16)responder_icid;
	return rc;

err:
	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "Allocate CID - failed, rc = %d\n", rc);
	return rc;
}

/* Mark a CID as in-use in the real_cid map (CIDs FW actually knows about). */
static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

/* (head of qed_roce_sp_create_responder — body continues past this hunk) */
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 regular_latency_queue;
	enum protocol_type proto;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate
DMA-able memory for IRQ */ 1299f1093940SRam Amrani qp->irq_num_pages = 1; 1300f1093940SRam Amrani qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1301f1093940SRam Amrani RDMA_RING_PAGE_SIZE, 1302f1093940SRam Amrani &qp->irq_phys_addr, GFP_KERNEL); 1303f1093940SRam Amrani if (!qp->irq) { 1304f1093940SRam Amrani rc = -ENOMEM; 1305f1093940SRam Amrani DP_NOTICE(p_hwfn, 1306f1093940SRam Amrani "qed create responder failed: cannot allocate memory (irq). rc = %d\n", 1307f1093940SRam Amrani rc); 1308f1093940SRam Amrani return rc; 1309f1093940SRam Amrani } 1310f1093940SRam Amrani 1311f1093940SRam Amrani /* Get SPQ entry */ 1312f1093940SRam Amrani memset(&init_data, 0, sizeof(init_data)); 1313f1093940SRam Amrani init_data.cid = qp->icid; 1314f1093940SRam Amrani init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1315f1093940SRam Amrani init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1316f1093940SRam Amrani 1317f1093940SRam Amrani rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP, 1318f1093940SRam Amrani PROTOCOLID_ROCE, &init_data); 1319f1093940SRam Amrani if (rc) 1320f1093940SRam Amrani goto err; 1321f1093940SRam Amrani 1322f1093940SRam Amrani p_ramrod = &p_ent->ramrod.roce_create_qp_resp; 1323f1093940SRam Amrani 1324f1093940SRam Amrani p_ramrod->flags = 0; 1325f1093940SRam Amrani 1326f1093940SRam Amrani roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); 1327f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1328f1093940SRam Amrani ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); 1329f1093940SRam Amrani 1330f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1331f1093940SRam Amrani ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN, 1332f1093940SRam Amrani qp->incoming_rdma_read_en); 1333f1093940SRam Amrani 1334f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1335f1093940SRam Amrani ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN, 1336f1093940SRam Amrani qp->incoming_rdma_write_en); 1337f1093940SRam Amrani 1338f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 
1339f1093940SRam Amrani ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN, 1340f1093940SRam Amrani qp->incoming_atomic_en); 1341f1093940SRam Amrani 1342f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1343f1093940SRam Amrani ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, 1344f1093940SRam Amrani qp->e2e_flow_control_en); 1345f1093940SRam Amrani 1346f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1347f1093940SRam Amrani ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq); 1348f1093940SRam Amrani 1349f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1350f1093940SRam Amrani ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN, 1351f1093940SRam Amrani qp->fmr_and_reserved_lkey); 1352f1093940SRam Amrani 1353f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1354f1093940SRam Amrani ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, 1355f1093940SRam Amrani qp->min_rnr_nak_timer); 1356f1093940SRam Amrani 1357f1093940SRam Amrani p_ramrod->max_ird = qp->max_rd_atomic_resp; 1358f1093940SRam Amrani p_ramrod->traffic_class = qp->traffic_class_tos; 1359f1093940SRam Amrani p_ramrod->hop_limit = qp->hop_limit_ttl; 1360f1093940SRam Amrani p_ramrod->irq_num_pages = qp->irq_num_pages; 1361f1093940SRam Amrani p_ramrod->p_key = cpu_to_le16(qp->pkey); 1362f1093940SRam Amrani p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 1363f1093940SRam Amrani p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); 1364f1093940SRam Amrani p_ramrod->mtu = cpu_to_le16(qp->mtu); 1365f1093940SRam Amrani p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn); 1366f1093940SRam Amrani p_ramrod->pd = cpu_to_le16(qp->pd); 1367f1093940SRam Amrani p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages); 1368f1093940SRam Amrani DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr); 1369f1093940SRam Amrani DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr); 1370f1093940SRam Amrani qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 1371f1093940SRam Amrani p_ramrod->qp_handle_for_async.hi = 
cpu_to_le32(qp->qp_handle_async.hi); 1372f1093940SRam Amrani p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); 1373f1093940SRam Amrani p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 1374f1093940SRam Amrani p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 1375f1093940SRam Amrani p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | 1376f1093940SRam Amrani qp->rq_cq_id); 1377f1093940SRam Amrani 1378b5a9ee7cSAriel Elior regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 1379f1093940SRam Amrani 1380be086e7cSMintz, Yuval p_ramrod->regular_latency_phy_queue = 1381be086e7cSMintz, Yuval cpu_to_le16(regular_latency_queue); 1382be086e7cSMintz, Yuval p_ramrod->low_latency_phy_queue = 1383be086e7cSMintz, Yuval cpu_to_le16(regular_latency_queue); 1384be086e7cSMintz, Yuval 1385f1093940SRam Amrani p_ramrod->dpi = cpu_to_le16(qp->dpi); 1386f1093940SRam Amrani 1387f1093940SRam Amrani qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); 1388f1093940SRam Amrani qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); 1389f1093940SRam Amrani 1390f1093940SRam Amrani p_ramrod->udp_src_port = qp->udp_src_port; 1391f1093940SRam Amrani p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); 1392f1093940SRam Amrani p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); 1393f1093940SRam Amrani p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); 1394f1093940SRam Amrani 1395f1093940SRam Amrani p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + 1396f1093940SRam Amrani qp->stats_queue; 1397f1093940SRam Amrani 1398f1093940SRam Amrani rc = qed_spq_post(p_hwfn, p_ent, NULL); 1399f1093940SRam Amrani 1400be086e7cSMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1401be086e7cSMintz, Yuval "rc = %d regular physical queue = 0x%x\n", rc, 1402be086e7cSMintz, Yuval regular_latency_queue); 1403f1093940SRam Amrani 1404f1093940SRam Amrani if (rc) 1405f1093940SRam Amrani goto err; 
1406f1093940SRam Amrani 1407f1093940SRam Amrani qp->resp_offloaded = true; 1408be086e7cSMintz, Yuval qp->cq_prod = 0; 1409be086e7cSMintz, Yuval 1410be086e7cSMintz, Yuval proto = p_hwfn->p_rdma_info->proto; 1411be086e7cSMintz, Yuval qed_roce_set_real_cid(p_hwfn, qp->icid - 1412be086e7cSMintz, Yuval qed_cxt_get_proto_cid_start(p_hwfn, proto)); 1413f1093940SRam Amrani 1414f1093940SRam Amrani return rc; 1415f1093940SRam Amrani 1416f1093940SRam Amrani err: 1417f1093940SRam Amrani DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc); 1418f1093940SRam Amrani dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1419f1093940SRam Amrani qp->irq_num_pages * RDMA_RING_PAGE_SIZE, 1420f1093940SRam Amrani qp->irq, qp->irq_phys_addr); 1421f1093940SRam Amrani 1422f1093940SRam Amrani return rc; 1423f1093940SRam Amrani } 1424f1093940SRam Amrani 1425f1093940SRam Amrani static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, 1426f1093940SRam Amrani struct qed_rdma_qp *qp) 1427f1093940SRam Amrani { 1428f1093940SRam Amrani struct roce_create_qp_req_ramrod_data *p_ramrod; 1429f1093940SRam Amrani struct qed_sp_init_data init_data; 1430f1093940SRam Amrani enum roce_flavor roce_flavor; 1431f1093940SRam Amrani struct qed_spq_entry *p_ent; 1432be086e7cSMintz, Yuval u16 regular_latency_queue; 1433be086e7cSMintz, Yuval enum protocol_type proto; 1434f1093940SRam Amrani int rc; 1435f1093940SRam Amrani 1436f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1437f1093940SRam Amrani 1438f1093940SRam Amrani /* Allocate DMA-able memory for ORQ */ 1439f1093940SRam Amrani qp->orq_num_pages = 1; 1440f1093940SRam Amrani qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1441f1093940SRam Amrani RDMA_RING_PAGE_SIZE, 1442f1093940SRam Amrani &qp->orq_phys_addr, GFP_KERNEL); 1443f1093940SRam Amrani if (!qp->orq) { 1444f1093940SRam Amrani rc = -ENOMEM; 1445f1093940SRam Amrani DP_NOTICE(p_hwfn, 1446f1093940SRam Amrani "qed create requester failed: cannot allocate memory 
(orq). rc = %d\n", 1447f1093940SRam Amrani rc); 1448f1093940SRam Amrani return rc; 1449f1093940SRam Amrani } 1450f1093940SRam Amrani 1451f1093940SRam Amrani /* Get SPQ entry */ 1452f1093940SRam Amrani memset(&init_data, 0, sizeof(init_data)); 1453f1093940SRam Amrani init_data.cid = qp->icid + 1; 1454f1093940SRam Amrani init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1455f1093940SRam Amrani init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1456f1093940SRam Amrani 1457f1093940SRam Amrani rc = qed_sp_init_request(p_hwfn, &p_ent, 1458f1093940SRam Amrani ROCE_RAMROD_CREATE_QP, 1459f1093940SRam Amrani PROTOCOLID_ROCE, &init_data); 1460f1093940SRam Amrani if (rc) 1461f1093940SRam Amrani goto err; 1462f1093940SRam Amrani 1463f1093940SRam Amrani p_ramrod = &p_ent->ramrod.roce_create_qp_req; 1464f1093940SRam Amrani 1465f1093940SRam Amrani p_ramrod->flags = 0; 1466f1093940SRam Amrani 1467f1093940SRam Amrani roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); 1468f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1469f1093940SRam Amrani ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); 1470f1093940SRam Amrani 1471f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1472f1093940SRam Amrani ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN, 1473f1093940SRam Amrani qp->fmr_and_reserved_lkey); 1474f1093940SRam Amrani 1475f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1476f1093940SRam Amrani ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); 1477f1093940SRam Amrani 1478f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1479f1093940SRam Amrani ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); 1480f1093940SRam Amrani 1481f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1482f1093940SRam Amrani ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, 1483f1093940SRam Amrani qp->rnr_retry_cnt); 1484f1093940SRam Amrani 1485f1093940SRam Amrani p_ramrod->max_ord = qp->max_rd_atomic_req; 1486f1093940SRam Amrani p_ramrod->traffic_class = qp->traffic_class_tos; 1487f1093940SRam Amrani 
p_ramrod->hop_limit = qp->hop_limit_ttl; 1488f1093940SRam Amrani p_ramrod->orq_num_pages = qp->orq_num_pages; 1489f1093940SRam Amrani p_ramrod->p_key = cpu_to_le16(qp->pkey); 1490f1093940SRam Amrani p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 1491f1093940SRam Amrani p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); 1492f1093940SRam Amrani p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); 1493f1093940SRam Amrani p_ramrod->mtu = cpu_to_le16(qp->mtu); 1494f1093940SRam Amrani p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn); 1495f1093940SRam Amrani p_ramrod->pd = cpu_to_le16(qp->pd); 1496f1093940SRam Amrani p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages); 1497f1093940SRam Amrani DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr); 1498f1093940SRam Amrani DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr); 1499f1093940SRam Amrani qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 1500f1093940SRam Amrani p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi); 1501f1093940SRam Amrani p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); 1502f1093940SRam Amrani p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); 1503f1093940SRam Amrani p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); 1504be086e7cSMintz, Yuval p_ramrod->cq_cid = 1505be086e7cSMintz, Yuval cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); 1506f1093940SRam Amrani 1507b5a9ee7cSAriel Elior regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 1508f1093940SRam Amrani 1509be086e7cSMintz, Yuval p_ramrod->regular_latency_phy_queue = 1510be086e7cSMintz, Yuval cpu_to_le16(regular_latency_queue); 1511be086e7cSMintz, Yuval p_ramrod->low_latency_phy_queue = 1512be086e7cSMintz, Yuval cpu_to_le16(regular_latency_queue); 1513be086e7cSMintz, Yuval 1514f1093940SRam Amrani p_ramrod->dpi = cpu_to_le16(qp->dpi); 1515f1093940SRam Amrani 1516f1093940SRam Amrani 
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); 1517f1093940SRam Amrani qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); 1518f1093940SRam Amrani 1519f1093940SRam Amrani p_ramrod->udp_src_port = qp->udp_src_port; 1520f1093940SRam Amrani p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); 1521f1093940SRam Amrani p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + 1522f1093940SRam Amrani qp->stats_queue; 1523f1093940SRam Amrani 1524f1093940SRam Amrani rc = qed_spq_post(p_hwfn, p_ent, NULL); 1525f1093940SRam Amrani 1526f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 1527f1093940SRam Amrani 1528f1093940SRam Amrani if (rc) 1529f1093940SRam Amrani goto err; 1530f1093940SRam Amrani 1531f1093940SRam Amrani qp->req_offloaded = true; 1532be086e7cSMintz, Yuval proto = p_hwfn->p_rdma_info->proto; 1533be086e7cSMintz, Yuval qed_roce_set_real_cid(p_hwfn, 1534be086e7cSMintz, Yuval qp->icid + 1 - 1535be086e7cSMintz, Yuval qed_cxt_get_proto_cid_start(p_hwfn, proto)); 1536f1093940SRam Amrani 1537f1093940SRam Amrani return rc; 1538f1093940SRam Amrani 1539f1093940SRam Amrani err: 1540f1093940SRam Amrani DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc); 1541f1093940SRam Amrani dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1542f1093940SRam Amrani qp->orq_num_pages * RDMA_RING_PAGE_SIZE, 1543f1093940SRam Amrani qp->orq, qp->orq_phys_addr); 1544f1093940SRam Amrani return rc; 1545f1093940SRam Amrani } 1546f1093940SRam Amrani 1547f1093940SRam Amrani static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, 1548f1093940SRam Amrani struct qed_rdma_qp *qp, 1549f1093940SRam Amrani bool move_to_err, u32 modify_flags) 1550f1093940SRam Amrani { 1551f1093940SRam Amrani struct roce_modify_qp_resp_ramrod_data *p_ramrod; 1552f1093940SRam Amrani struct qed_sp_init_data init_data; 1553f1093940SRam Amrani struct qed_spq_entry *p_ent; 1554f1093940SRam Amrani int rc; 1555f1093940SRam Amrani 1556f1093940SRam Amrani 
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1557f1093940SRam Amrani 1558f1093940SRam Amrani if (move_to_err && !qp->resp_offloaded) 1559f1093940SRam Amrani return 0; 1560f1093940SRam Amrani 1561f1093940SRam Amrani /* Get SPQ entry */ 1562f1093940SRam Amrani memset(&init_data, 0, sizeof(init_data)); 1563f1093940SRam Amrani init_data.cid = qp->icid; 1564f1093940SRam Amrani init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1565f1093940SRam Amrani init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1566f1093940SRam Amrani 1567f1093940SRam Amrani rc = qed_sp_init_request(p_hwfn, &p_ent, 1568f1093940SRam Amrani ROCE_EVENT_MODIFY_QP, 1569f1093940SRam Amrani PROTOCOLID_ROCE, &init_data); 1570f1093940SRam Amrani if (rc) { 1571f1093940SRam Amrani DP_NOTICE(p_hwfn, "rc = %d\n", rc); 1572f1093940SRam Amrani return rc; 1573f1093940SRam Amrani } 1574f1093940SRam Amrani 1575f1093940SRam Amrani p_ramrod = &p_ent->ramrod.roce_modify_qp_resp; 1576f1093940SRam Amrani 1577f1093940SRam Amrani p_ramrod->flags = 0; 1578f1093940SRam Amrani 1579f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1580f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); 1581f1093940SRam Amrani 1582f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1583f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN, 1584f1093940SRam Amrani qp->incoming_rdma_read_en); 1585f1093940SRam Amrani 1586f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1587f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN, 1588f1093940SRam Amrani qp->incoming_rdma_write_en); 1589f1093940SRam Amrani 1590f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1591f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN, 1592f1093940SRam Amrani qp->incoming_atomic_en); 1593f1093940SRam Amrani 1594f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1595f1093940SRam Amrani ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, 1596f1093940SRam Amrani qp->e2e_flow_control_en); 1597f1093940SRam Amrani 
1598f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1599f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG, 1600f1093940SRam Amrani GET_FIELD(modify_flags, 1601f1093940SRam Amrani QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)); 1602f1093940SRam Amrani 1603f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1604f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG, 1605f1093940SRam Amrani GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); 1606f1093940SRam Amrani 1607f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1608f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG, 1609f1093940SRam Amrani GET_FIELD(modify_flags, 1610f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); 1611f1093940SRam Amrani 1612f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1613f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG, 1614f1093940SRam Amrani GET_FIELD(modify_flags, 1615f1093940SRam Amrani QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)); 1616f1093940SRam Amrani 1617f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1618f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG, 1619f1093940SRam Amrani GET_FIELD(modify_flags, 1620f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)); 1621f1093940SRam Amrani 1622f1093940SRam Amrani p_ramrod->fields = 0; 1623f1093940SRam Amrani SET_FIELD(p_ramrod->fields, 1624f1093940SRam Amrani ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, 1625f1093940SRam Amrani qp->min_rnr_nak_timer); 1626f1093940SRam Amrani 1627f1093940SRam Amrani p_ramrod->max_ird = qp->max_rd_atomic_resp; 1628f1093940SRam Amrani p_ramrod->traffic_class = qp->traffic_class_tos; 1629f1093940SRam Amrani p_ramrod->hop_limit = qp->hop_limit_ttl; 1630f1093940SRam Amrani p_ramrod->p_key = cpu_to_le16(qp->pkey); 1631f1093940SRam Amrani p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 1632f1093940SRam Amrani p_ramrod->mtu = cpu_to_le16(qp->mtu); 1633f1093940SRam Amrani qed_rdma_copy_gids(qp, 
p_ramrod->src_gid, p_ramrod->dst_gid); 1634f1093940SRam Amrani rc = qed_spq_post(p_hwfn, p_ent, NULL); 1635f1093940SRam Amrani 1636f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc); 1637f1093940SRam Amrani return rc; 1638f1093940SRam Amrani } 1639f1093940SRam Amrani 1640f1093940SRam Amrani static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, 1641f1093940SRam Amrani struct qed_rdma_qp *qp, 1642f1093940SRam Amrani bool move_to_sqd, 1643f1093940SRam Amrani bool move_to_err, u32 modify_flags) 1644f1093940SRam Amrani { 1645f1093940SRam Amrani struct roce_modify_qp_req_ramrod_data *p_ramrod; 1646f1093940SRam Amrani struct qed_sp_init_data init_data; 1647f1093940SRam Amrani struct qed_spq_entry *p_ent; 1648f1093940SRam Amrani int rc; 1649f1093940SRam Amrani 1650f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1651f1093940SRam Amrani 1652f1093940SRam Amrani if (move_to_err && !(qp->req_offloaded)) 1653f1093940SRam Amrani return 0; 1654f1093940SRam Amrani 1655f1093940SRam Amrani /* Get SPQ entry */ 1656f1093940SRam Amrani memset(&init_data, 0, sizeof(init_data)); 1657f1093940SRam Amrani init_data.cid = qp->icid + 1; 1658f1093940SRam Amrani init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1659f1093940SRam Amrani init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1660f1093940SRam Amrani 1661f1093940SRam Amrani rc = qed_sp_init_request(p_hwfn, &p_ent, 1662f1093940SRam Amrani ROCE_EVENT_MODIFY_QP, 1663f1093940SRam Amrani PROTOCOLID_ROCE, &init_data); 1664f1093940SRam Amrani if (rc) { 1665f1093940SRam Amrani DP_NOTICE(p_hwfn, "rc = %d\n", rc); 1666f1093940SRam Amrani return rc; 1667f1093940SRam Amrani } 1668f1093940SRam Amrani 1669f1093940SRam Amrani p_ramrod = &p_ent->ramrod.roce_modify_qp_req; 1670f1093940SRam Amrani 1671f1093940SRam Amrani p_ramrod->flags = 0; 1672f1093940SRam Amrani 1673f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1674f1093940SRam Amrani 
ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); 1675f1093940SRam Amrani 1676f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1677f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd); 1678f1093940SRam Amrani 1679f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1680f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY, 1681f1093940SRam Amrani qp->sqd_async); 1682f1093940SRam Amrani 1683f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1684f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG, 1685f1093940SRam Amrani GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); 1686f1093940SRam Amrani 1687f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1688f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG, 1689f1093940SRam Amrani GET_FIELD(modify_flags, 1690f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); 1691f1093940SRam Amrani 1692f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1693f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG, 1694f1093940SRam Amrani GET_FIELD(modify_flags, 1695f1093940SRam Amrani QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)); 1696f1093940SRam Amrani 1697f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1698f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG, 1699f1093940SRam Amrani GET_FIELD(modify_flags, 1700f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)); 1701f1093940SRam Amrani 1702f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1703f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG, 1704f1093940SRam Amrani GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)); 1705f1093940SRam Amrani 1706f1093940SRam Amrani SET_FIELD(p_ramrod->flags, 1707f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG, 1708f1093940SRam Amrani GET_FIELD(modify_flags, 1709f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)); 1710f1093940SRam Amrani 1711f1093940SRam Amrani p_ramrod->fields = 0; 
1712f1093940SRam Amrani SET_FIELD(p_ramrod->fields, 1713f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); 1714f1093940SRam Amrani 1715f1093940SRam Amrani SET_FIELD(p_ramrod->fields, 1716f1093940SRam Amrani ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, 1717f1093940SRam Amrani qp->rnr_retry_cnt); 1718f1093940SRam Amrani 1719f1093940SRam Amrani p_ramrod->max_ord = qp->max_rd_atomic_req; 1720f1093940SRam Amrani p_ramrod->traffic_class = qp->traffic_class_tos; 1721f1093940SRam Amrani p_ramrod->hop_limit = qp->hop_limit_ttl; 1722f1093940SRam Amrani p_ramrod->p_key = cpu_to_le16(qp->pkey); 1723f1093940SRam Amrani p_ramrod->flow_label = cpu_to_le32(qp->flow_label); 1724f1093940SRam Amrani p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); 1725f1093940SRam Amrani p_ramrod->mtu = cpu_to_le16(qp->mtu); 1726f1093940SRam Amrani qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); 1727f1093940SRam Amrani rc = qed_spq_post(p_hwfn, p_ent, NULL); 1728f1093940SRam Amrani 1729f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc); 1730f1093940SRam Amrani return rc; 1731f1093940SRam Amrani } 1732f1093940SRam Amrani 1733f1093940SRam Amrani static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, 1734f1093940SRam Amrani struct qed_rdma_qp *qp, 1735be086e7cSMintz, Yuval u32 *num_invalidated_mw, 1736be086e7cSMintz, Yuval u32 *cq_prod) 1737f1093940SRam Amrani { 1738f1093940SRam Amrani struct roce_destroy_qp_resp_output_params *p_ramrod_res; 1739f1093940SRam Amrani struct roce_destroy_qp_resp_ramrod_data *p_ramrod; 1740f1093940SRam Amrani struct qed_sp_init_data init_data; 1741f1093940SRam Amrani struct qed_spq_entry *p_ent; 1742f1093940SRam Amrani dma_addr_t ramrod_res_phys; 1743f1093940SRam Amrani int rc; 1744f1093940SRam Amrani 1745f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1746f1093940SRam Amrani 1747be086e7cSMintz, Yuval *num_invalidated_mw = 0; 
1748be086e7cSMintz, Yuval *cq_prod = qp->cq_prod; 1749be086e7cSMintz, Yuval 1750be086e7cSMintz, Yuval if (!qp->resp_offloaded) { 1751be086e7cSMintz, Yuval /* If a responder was never offload, we need to free the cids 1752be086e7cSMintz, Yuval * allocated in create_qp as a FW async event will never arrive 1753be086e7cSMintz, Yuval */ 1754be086e7cSMintz, Yuval u32 cid; 1755be086e7cSMintz, Yuval 1756be086e7cSMintz, Yuval cid = qp->icid - 1757be086e7cSMintz, Yuval qed_cxt_get_proto_cid_start(p_hwfn, 1758be086e7cSMintz, Yuval p_hwfn->p_rdma_info->proto); 1759be086e7cSMintz, Yuval qed_roce_free_cid_pair(p_hwfn, (u16)cid); 1760be086e7cSMintz, Yuval 1761f1093940SRam Amrani return 0; 1762be086e7cSMintz, Yuval } 1763f1093940SRam Amrani 1764f1093940SRam Amrani /* Get SPQ entry */ 1765f1093940SRam Amrani memset(&init_data, 0, sizeof(init_data)); 1766f1093940SRam Amrani init_data.cid = qp->icid; 1767f1093940SRam Amrani init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1768f1093940SRam Amrani init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1769f1093940SRam Amrani 1770f1093940SRam Amrani rc = qed_sp_init_request(p_hwfn, &p_ent, 1771f1093940SRam Amrani ROCE_RAMROD_DESTROY_QP, 1772f1093940SRam Amrani PROTOCOLID_ROCE, &init_data); 1773f1093940SRam Amrani if (rc) 1774f1093940SRam Amrani return rc; 1775f1093940SRam Amrani 1776f1093940SRam Amrani p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp; 1777f1093940SRam Amrani 1778f1093940SRam Amrani p_ramrod_res = (struct roce_destroy_qp_resp_output_params *) 1779f1093940SRam Amrani dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res), 1780f1093940SRam Amrani &ramrod_res_phys, GFP_KERNEL); 1781f1093940SRam Amrani 1782f1093940SRam Amrani if (!p_ramrod_res) { 1783f1093940SRam Amrani rc = -ENOMEM; 1784f1093940SRam Amrani DP_NOTICE(p_hwfn, 1785f1093940SRam Amrani "qed destroy responder failed: cannot allocate memory (ramrod). 
rc = %d\n", 1786f1093940SRam Amrani rc); 1787f1093940SRam Amrani return rc; 1788f1093940SRam Amrani } 1789f1093940SRam Amrani 1790f1093940SRam Amrani DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); 1791f1093940SRam Amrani 1792f1093940SRam Amrani rc = qed_spq_post(p_hwfn, p_ent, NULL); 1793f1093940SRam Amrani if (rc) 1794f1093940SRam Amrani goto err; 1795f1093940SRam Amrani 1796f1093940SRam Amrani *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw); 1797be086e7cSMintz, Yuval *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); 1798be086e7cSMintz, Yuval qp->cq_prod = *cq_prod; 1799f1093940SRam Amrani 1800f1093940SRam Amrani /* Free IRQ - only if ramrod succeeded, in case FW is still using it */ 1801f1093940SRam Amrani dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1802f1093940SRam Amrani qp->irq_num_pages * RDMA_RING_PAGE_SIZE, 1803f1093940SRam Amrani qp->irq, qp->irq_phys_addr); 1804f1093940SRam Amrani 1805f1093940SRam Amrani qp->resp_offloaded = false; 1806f1093940SRam Amrani 1807f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc); 1808f1093940SRam Amrani 1809f1093940SRam Amrani err: 1810f1093940SRam Amrani dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1811f1093940SRam Amrani sizeof(struct roce_destroy_qp_resp_output_params), 1812f1093940SRam Amrani p_ramrod_res, ramrod_res_phys); 1813f1093940SRam Amrani 1814f1093940SRam Amrani return rc; 1815f1093940SRam Amrani } 1816f1093940SRam Amrani 1817f1093940SRam Amrani static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, 1818f1093940SRam Amrani struct qed_rdma_qp *qp, 1819f1093940SRam Amrani u32 *num_bound_mw) 1820f1093940SRam Amrani { 1821f1093940SRam Amrani struct roce_destroy_qp_req_output_params *p_ramrod_res; 1822f1093940SRam Amrani struct roce_destroy_qp_req_ramrod_data *p_ramrod; 1823f1093940SRam Amrani struct qed_sp_init_data init_data; 1824f1093940SRam Amrani struct qed_spq_entry *p_ent; 1825f1093940SRam Amrani dma_addr_t ramrod_res_phys; 
1826f1093940SRam Amrani int rc = -ENOMEM; 1827f1093940SRam Amrani 1828f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 1829f1093940SRam Amrani 1830f1093940SRam Amrani if (!qp->req_offloaded) 1831f1093940SRam Amrani return 0; 1832f1093940SRam Amrani 1833f1093940SRam Amrani p_ramrod_res = (struct roce_destroy_qp_req_output_params *) 1834f1093940SRam Amrani dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1835f1093940SRam Amrani sizeof(*p_ramrod_res), 1836f1093940SRam Amrani &ramrod_res_phys, GFP_KERNEL); 1837f1093940SRam Amrani if (!p_ramrod_res) { 1838f1093940SRam Amrani DP_NOTICE(p_hwfn, 1839f1093940SRam Amrani "qed destroy requester failed: cannot allocate memory (ramrod)\n"); 1840f1093940SRam Amrani return rc; 1841f1093940SRam Amrani } 1842f1093940SRam Amrani 1843f1093940SRam Amrani /* Get SPQ entry */ 1844f1093940SRam Amrani memset(&init_data, 0, sizeof(init_data)); 1845f1093940SRam Amrani init_data.cid = qp->icid + 1; 1846f1093940SRam Amrani init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1847f1093940SRam Amrani init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1848f1093940SRam Amrani 1849f1093940SRam Amrani rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP, 1850f1093940SRam Amrani PROTOCOLID_ROCE, &init_data); 1851f1093940SRam Amrani if (rc) 1852f1093940SRam Amrani goto err; 1853f1093940SRam Amrani 1854f1093940SRam Amrani p_ramrod = &p_ent->ramrod.roce_destroy_qp_req; 1855f1093940SRam Amrani DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); 1856f1093940SRam Amrani 1857f1093940SRam Amrani rc = qed_spq_post(p_hwfn, p_ent, NULL); 1858f1093940SRam Amrani if (rc) 1859f1093940SRam Amrani goto err; 1860f1093940SRam Amrani 1861f1093940SRam Amrani *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw); 1862f1093940SRam Amrani 1863f1093940SRam Amrani /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ 1864f1093940SRam Amrani dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1865f1093940SRam Amrani 
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	/* Shared exit: the output-params buffer is freed on both the success
	 * fall-through and the error paths.
	 */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

/* Query the FW for the current state/PSNs of @qp and fill @out_params.
 * Sends up to two QUERY_QP ramrods: one on the responder icid and, when the
 * requester side is offloaded, a second one on icid + 1.  When neither side
 * has been offloaded the cached software state is returned instead.
 */
static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send ramrod to the fw since this qp wasn't offloaded
		 * to the fw yet
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	/* Requester offloaded without responder is an invalid combination */
	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	/* EBLOCK mode: post returns after FW completion, so the output
	 * buffer is valid to read once qed_spq_post() succeeds.
	 */
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
			   dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      sizeof(*p_req_ramrod_res),
					      &req_ramrod_res_phys,
					      GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry; requester context is at icid + 1 */
	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
		GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
			  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	/* An error on either side moves the cached state to ERR; draining is
	 * only reported when no error was seen.
	 */
	if (rq_err_state || sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}

static int
qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 num_invalidated_mw = 0;
	u32 num_bound_mw = 0;
	u32 cq_prod;
	int rc;

	/* Destroys the specified QP */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	/* In RESET state the QP was never offloaded, so no ramrods needed */
	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
		/* NOTE(review): cq_prod is fetched here but not stored into
		 * qp->cq_prod, unlike the RESET path in qed_roce_modify_qp()
		 * — confirm whether that is intentional on the final-destroy
		 * path.
		 */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &num_invalidated_mw,
						      &cq_prod);
		if (rc)
			return rc;

		/* Send destroy requester ramrod */
		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);
		if (rc)
			return rc;

		/* Every bound memory window must have been invalidated */
		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidate memory windows is different from bounded ones\n");
			return -EINVAL;
		}
	}

	return 0;
}

/* rdma_cxt is the qed_hwfn pointer handed out via qed_rdma_get_rdma_ctx().
 * Software-cached attributes are copied directly; state/PSNs come from the
 * FW via qed_roce_query_qp().
 */
static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}

static int
qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) 2097f1093940SRam Amrani { 2098f1093940SRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2099f1093940SRam Amrani int rc = 0; 2100f1093940SRam Amrani 2101f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); 2102f1093940SRam Amrani 2103f1093940SRam Amrani rc = qed_roce_destroy_qp(p_hwfn, qp); 2104f1093940SRam Amrani 2105f1093940SRam Amrani /* free qp params struct */ 2106f1093940SRam Amrani kfree(qp); 2107f1093940SRam Amrani 2108f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n"); 2109f1093940SRam Amrani return rc; 2110f1093940SRam Amrani } 2111f1093940SRam Amrani 21128c93beafSYuval Mintz static struct qed_rdma_qp * 2113f1093940SRam Amrani qed_rdma_create_qp(void *rdma_cxt, 2114f1093940SRam Amrani struct qed_rdma_create_qp_in_params *in_params, 2115f1093940SRam Amrani struct qed_rdma_create_qp_out_params *out_params) 2116f1093940SRam Amrani { 2117f1093940SRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2118f1093940SRam Amrani struct qed_rdma_qp *qp; 2119f1093940SRam Amrani u8 max_stats_queues; 2120f1093940SRam Amrani int rc; 2121f1093940SRam Amrani 2122f1093940SRam Amrani if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { 2123f1093940SRam Amrani DP_ERR(p_hwfn->cdev, 2124f1093940SRam Amrani "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", 2125f1093940SRam Amrani rdma_cxt, in_params, out_params); 2126f1093940SRam Amrani return NULL; 2127f1093940SRam Amrani } 2128f1093940SRam Amrani 2129f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 2130f1093940SRam Amrani "qed rdma create qp called with qp_handle = %08x%08x\n", 2131f1093940SRam Amrani in_params->qp_handle_hi, in_params->qp_handle_lo); 2132f1093940SRam Amrani 2133f1093940SRam Amrani /* Some sanity checks... 
*/ 2134f1093940SRam Amrani max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues; 2135f1093940SRam Amrani if (in_params->stats_queue >= max_stats_queues) { 2136f1093940SRam Amrani DP_ERR(p_hwfn->cdev, 2137f1093940SRam Amrani "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n", 2138f1093940SRam Amrani in_params->stats_queue, max_stats_queues); 2139f1093940SRam Amrani return NULL; 2140f1093940SRam Amrani } 2141f1093940SRam Amrani 2142f1093940SRam Amrani qp = kzalloc(sizeof(*qp), GFP_KERNEL); 2143f1093940SRam Amrani if (!qp) { 2144f1093940SRam Amrani DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n"); 2145f1093940SRam Amrani return NULL; 2146f1093940SRam Amrani } 2147f1093940SRam Amrani 2148f1093940SRam Amrani rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); 2149f1093940SRam Amrani qp->qpid = ((0xFF << 16) | qp->icid); 2150f1093940SRam Amrani 2151f1093940SRam Amrani DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid); 2152f1093940SRam Amrani 2153f1093940SRam Amrani if (rc) { 2154f1093940SRam Amrani kfree(qp); 2155f1093940SRam Amrani return NULL; 2156f1093940SRam Amrani } 2157f1093940SRam Amrani 2158f1093940SRam Amrani qp->cur_state = QED_ROCE_QP_STATE_RESET; 2159f1093940SRam Amrani qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); 2160f1093940SRam Amrani qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo); 2161f1093940SRam Amrani qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi); 2162f1093940SRam Amrani qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo); 2163f1093940SRam Amrani qp->use_srq = in_params->use_srq; 2164f1093940SRam Amrani qp->signal_all = in_params->signal_all; 2165f1093940SRam Amrani qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey; 2166f1093940SRam Amrani qp->pd = in_params->pd; 2167f1093940SRam Amrani qp->dpi = in_params->dpi; 2168f1093940SRam Amrani qp->sq_cq_id = in_params->sq_cq_id; 2169f1093940SRam Amrani qp->sq_num_pages = in_params->sq_num_pages; 2170f1093940SRam 
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	/* End-to-end flow control is incompatible with SRQ usage */
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}

/* Drive the FW side of a QP state transition.  @prev_state is the state
 * before the modify; qp->cur_state already holds the new state.  Each legal
 * transition maps to a specific sequence of create/modify/destroy ramrods
 * on the responder (icid) and requester (icid + 1) contexts.
 */
static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
			      struct qed_rdma_qp *qp,
			      enum qed_roce_qp_state prev_state,
			      struct qed_rdma_modify_qp_in_params *params)
{
	u32 num_invalidated_mw = 0, num_bound_mw = 0;
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR-> RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		u32 cq_prod;

		/* Send destroy responder ramrod */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
						      qp,
						      &num_invalidated_mw,
						      &cq_prod);

		if (rc)
			return rc;

		qp->cq_prod = cq_prod;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);

		/* Every bound memory window must have been invalidated */
		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidate memory windows is different from bounded ones\n");
			return -EINVAL;
		}
} else { 2289f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); 2290f1093940SRam Amrani } 2291f1093940SRam Amrani 2292f1093940SRam Amrani return rc; 2293f1093940SRam Amrani } 2294f1093940SRam Amrani 22950189efb8SYuval Mintz static int qed_rdma_modify_qp(void *rdma_cxt, 2296f1093940SRam Amrani struct qed_rdma_qp *qp, 2297f1093940SRam Amrani struct qed_rdma_modify_qp_in_params *params) 2298f1093940SRam Amrani { 2299f1093940SRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2300f1093940SRam Amrani enum qed_roce_qp_state prev_state; 2301f1093940SRam Amrani int rc = 0; 2302f1093940SRam Amrani 2303f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n", 2304f1093940SRam Amrani qp->icid, params->new_state); 2305f1093940SRam Amrani 2306f1093940SRam Amrani if (rc) { 2307f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); 2308f1093940SRam Amrani return rc; 2309f1093940SRam Amrani } 2310f1093940SRam Amrani 2311f1093940SRam Amrani if (GET_FIELD(params->modify_flags, 2312f1093940SRam Amrani QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) { 2313f1093940SRam Amrani qp->incoming_rdma_read_en = params->incoming_rdma_read_en; 2314f1093940SRam Amrani qp->incoming_rdma_write_en = params->incoming_rdma_write_en; 2315f1093940SRam Amrani qp->incoming_atomic_en = params->incoming_atomic_en; 2316f1093940SRam Amrani } 2317f1093940SRam Amrani 2318f1093940SRam Amrani /* Update QP structure with the updated values */ 2319f1093940SRam Amrani if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE)) 2320f1093940SRam Amrani qp->roce_mode = params->roce_mode; 2321f1093940SRam Amrani if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)) 2322f1093940SRam Amrani qp->pkey = params->pkey; 2323f1093940SRam Amrani if (GET_FIELD(params->modify_flags, 2324f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) 2325f1093940SRam Amrani qp->e2e_flow_control_en = params->e2e_flow_control_en; 
2326f1093940SRam Amrani if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP)) 2327f1093940SRam Amrani qp->dest_qp = params->dest_qp; 2328f1093940SRam Amrani if (GET_FIELD(params->modify_flags, 2329f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) { 2330f1093940SRam Amrani /* Indicates that the following parameters have changed: 2331f1093940SRam Amrani * Traffic class, flow label, hop limit, source GID, 2332f1093940SRam Amrani * destination GID, loopback indicator 2333f1093940SRam Amrani */ 2334f1093940SRam Amrani qp->traffic_class_tos = params->traffic_class_tos; 2335f1093940SRam Amrani qp->flow_label = params->flow_label; 2336f1093940SRam Amrani qp->hop_limit_ttl = params->hop_limit_ttl; 2337f1093940SRam Amrani 2338f1093940SRam Amrani qp->sgid = params->sgid; 2339f1093940SRam Amrani qp->dgid = params->dgid; 2340f1093940SRam Amrani qp->udp_src_port = 0; 2341f1093940SRam Amrani qp->vlan_id = params->vlan_id; 2342f1093940SRam Amrani qp->mtu = params->mtu; 2343f1093940SRam Amrani qp->lb_indication = params->lb_indication; 2344f1093940SRam Amrani memcpy((u8 *)&qp->remote_mac_addr[0], 2345f1093940SRam Amrani (u8 *)¶ms->remote_mac_addr[0], ETH_ALEN); 2346f1093940SRam Amrani if (params->use_local_mac) { 2347f1093940SRam Amrani memcpy((u8 *)&qp->local_mac_addr[0], 2348f1093940SRam Amrani (u8 *)¶ms->local_mac_addr[0], ETH_ALEN); 2349f1093940SRam Amrani } else { 2350f1093940SRam Amrani memcpy((u8 *)&qp->local_mac_addr[0], 2351f1093940SRam Amrani (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); 2352f1093940SRam Amrani } 2353f1093940SRam Amrani } 2354f1093940SRam Amrani if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN)) 2355f1093940SRam Amrani qp->rq_psn = params->rq_psn; 2356f1093940SRam Amrani if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN)) 2357f1093940SRam Amrani qp->sq_psn = params->sq_psn; 2358f1093940SRam Amrani if (GET_FIELD(params->modify_flags, 2359f1093940SRam Amrani 
QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)) 2360f1093940SRam Amrani qp->max_rd_atomic_req = params->max_rd_atomic_req; 2361f1093940SRam Amrani if (GET_FIELD(params->modify_flags, 2362f1093940SRam Amrani QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)) 2363f1093940SRam Amrani qp->max_rd_atomic_resp = params->max_rd_atomic_resp; 2364f1093940SRam Amrani if (GET_FIELD(params->modify_flags, 2365f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)) 2366f1093940SRam Amrani qp->ack_timeout = params->ack_timeout; 2367f1093940SRam Amrani if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)) 2368f1093940SRam Amrani qp->retry_cnt = params->retry_cnt; 2369f1093940SRam Amrani if (GET_FIELD(params->modify_flags, 2370f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)) 2371f1093940SRam Amrani qp->rnr_retry_cnt = params->rnr_retry_cnt; 2372f1093940SRam Amrani if (GET_FIELD(params->modify_flags, 2373f1093940SRam Amrani QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)) 2374f1093940SRam Amrani qp->min_rnr_nak_timer = params->min_rnr_nak_timer; 2375f1093940SRam Amrani 2376f1093940SRam Amrani qp->sqd_async = params->sqd_async; 2377f1093940SRam Amrani 2378f1093940SRam Amrani prev_state = qp->cur_state; 2379f1093940SRam Amrani if (GET_FIELD(params->modify_flags, 2380f1093940SRam Amrani QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) { 2381f1093940SRam Amrani qp->cur_state = params->new_state; 2382f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n", 2383f1093940SRam Amrani qp->cur_state); 2384f1093940SRam Amrani } 2385f1093940SRam Amrani 2386f1093940SRam Amrani rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); 2387f1093940SRam Amrani 2388f1093940SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc); 2389f1093940SRam Amrani return rc; 2390f1093940SRam Amrani } 2391f1093940SRam Amrani 23920189efb8SYuval Mintz static int 23930189efb8SYuval Mintz qed_rdma_register_tid(void *rdma_cxt, 2394ee8eaea3SRam Amrani struct 
qed_rdma_register_tid_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum rdma_tid_type tid_type;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	/* Track the highest TID handed to FW so far */
	if (p_hwfn->p_rdma_info->last_tid < params->itid)
		p_hwfn->p_rdma_info->last_tid = params->itid;

	p_ramrod = &p_ent->ramrod.rdma_register_tid;

	/* Pack the MR/MW attributes into the HSI flag words */
	p_ramrod->flags = 0;
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
		SET_FIELD(p_ramrod->flags,
			  RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2,
		  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);

	/* Translate the driver TID type to the FW (HSI) encoding */
	switch (params->tid_type) {
	case QED_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case QED_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case QED_RDMA_TID_MW_TYPE1:
		tid_type = RDMA_TID_MW_TYPE1;
		break;
	case QED_RDMA_TID_MW_TYPE2A:
		tid_type = RDMA_TID_MW_TYPE2A;
		break;
	default:
		rc = -EINVAL;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}
	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);

	p_ramrod->itid = cpu_to_le32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = cpu_to_le16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	if (params->zbva) {
		/* Lower 32 bits of the registered MR address.
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = cpu_to_le32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
	}

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}

/* De-register TID @itid with the FW.  If the FW reports the TID is still in
 * use (NIG drain required), drain the NIG and resend the ramrod once.
 */
static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u8 fw_return_code;
	int rc;
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = cpu_to_le32(itid);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* Bit indicating that the TID is in use and a nig drain is
		 * required before sending the ramrod again
		 */
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			rc = -EBUSY;
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
			return rc;
		}

		rc = qed_mcp_drain(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Drain failed\n");
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);

		/* Resend the ramrod */
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}

		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Ramrod failed\n");
			return rc;
		}

		if (fw_return_code != RDMA_RETURN_OK) {
			/* NOTE(review): rc is 0 here, so a FW error on the
			 * resent ramrod is reported to the caller as success
			 * — confirm whether -EINVAL was intended, as in the
			 * register path.
			 */
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
			return rc;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
	return rc;
}

/* Release the per-icid bitmap entries for a real CID that FW has finished
 * with; frees the underlying CID pair only once both halves are unused.
 */
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid, xcid;

	/* an even icid
belongs to a 2622be086e7cSMintz, Yuval * requester. The 'cid' received as an input can be either. We calculate 2623be086e7cSMintz, Yuval * the "partner" icid and call it xcid. Only if both are free then the 2624be086e7cSMintz, Yuval * "cid" map can be cleared. 2625be086e7cSMintz, Yuval */ 2626be086e7cSMintz, Yuval start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto); 2627be086e7cSMintz, Yuval cid = icid - start_cid; 2628be086e7cSMintz, Yuval xcid = cid ^ 1; 2629be086e7cSMintz, Yuval 2630be086e7cSMintz, Yuval spin_lock_bh(&p_rdma_info->lock); 2631be086e7cSMintz, Yuval 2632be086e7cSMintz, Yuval qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid); 2633be086e7cSMintz, Yuval if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) { 2634be086e7cSMintz, Yuval qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid); 2635be086e7cSMintz, Yuval qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid); 2636be086e7cSMintz, Yuval } 2637be086e7cSMintz, Yuval 2638be086e7cSMintz, Yuval spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 2639be086e7cSMintz, Yuval } 2640be086e7cSMintz, Yuval 264151ff1725SRam Amrani static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) 264251ff1725SRam Amrani { 264351ff1725SRam Amrani return QED_LEADING_HWFN(cdev); 264451ff1725SRam Amrani } 264551ff1725SRam Amrani 26469331dad1SMintz, Yuval static bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) 26479331dad1SMintz, Yuval { 26489331dad1SMintz, Yuval bool result; 26499331dad1SMintz, Yuval 26509331dad1SMintz, Yuval /* if rdma info has not been allocated, naturally there are no qps */ 26519331dad1SMintz, Yuval if (!p_hwfn->p_rdma_info) 26529331dad1SMintz, Yuval return false; 26539331dad1SMintz, Yuval 26549331dad1SMintz, Yuval spin_lock_bh(&p_hwfn->p_rdma_info->lock); 26559331dad1SMintz, Yuval if (!p_hwfn->p_rdma_info->cid_map.bitmap) 26569331dad1SMintz, Yuval result = false; 26579331dad1SMintz, Yuval else 26589331dad1SMintz, Yuval result = 
!qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map); 26599331dad1SMintz, Yuval spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 26609331dad1SMintz, Yuval return result; 26619331dad1SMintz, Yuval } 26629331dad1SMintz, Yuval 266351ff1725SRam Amrani static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 266451ff1725SRam Amrani { 266551ff1725SRam Amrani u32 val; 266651ff1725SRam Amrani 266751ff1725SRam Amrani val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1; 266851ff1725SRam Amrani 266951ff1725SRam Amrani qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); 267051ff1725SRam Amrani DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), 267151ff1725SRam Amrani "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", 267251ff1725SRam Amrani val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); 267351ff1725SRam Amrani } 267451ff1725SRam Amrani 26759331dad1SMintz, Yuval void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 26769331dad1SMintz, Yuval { 26779331dad1SMintz, Yuval u8 val; 26789331dad1SMintz, Yuval 26799331dad1SMintz, Yuval /* if any QPs are already active, we want to disable DPM, since their 26809331dad1SMintz, Yuval * context information contains information from before the latest DCBx 26819331dad1SMintz, Yuval * update. Otherwise enable it. 26829331dad1SMintz, Yuval */ 26839331dad1SMintz, Yuval val = qed_rdma_allocated_qps(p_hwfn) ? 
true : false; 26849331dad1SMintz, Yuval p_hwfn->dcbx_no_edpm = (u8)val; 26859331dad1SMintz, Yuval 26869331dad1SMintz, Yuval qed_rdma_dpm_conf(p_hwfn, p_ptt); 26879331dad1SMintz, Yuval } 26889331dad1SMintz, Yuval 268951ff1725SRam Amrani void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 269051ff1725SRam Amrani { 269151ff1725SRam Amrani p_hwfn->db_bar_no_edpm = true; 269251ff1725SRam Amrani 269351ff1725SRam Amrani qed_rdma_dpm_conf(p_hwfn, p_ptt); 269451ff1725SRam Amrani } 269551ff1725SRam Amrani 26960189efb8SYuval Mintz static int qed_rdma_start(void *rdma_cxt, 26970189efb8SYuval Mintz struct qed_rdma_start_in_params *params) 269851ff1725SRam Amrani { 269951ff1725SRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 270051ff1725SRam Amrani struct qed_ptt *p_ptt; 270151ff1725SRam Amrani int rc = -EBUSY; 270251ff1725SRam Amrani 270351ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 270451ff1725SRam Amrani "desired_cnq = %08x\n", params->desired_cnq); 270551ff1725SRam Amrani 270651ff1725SRam Amrani p_ptt = qed_ptt_acquire(p_hwfn); 270751ff1725SRam Amrani if (!p_ptt) 270851ff1725SRam Amrani goto err; 270951ff1725SRam Amrani 271051ff1725SRam Amrani rc = qed_rdma_alloc(p_hwfn, p_ptt, params); 271151ff1725SRam Amrani if (rc) 271251ff1725SRam Amrani goto err1; 271351ff1725SRam Amrani 271451ff1725SRam Amrani rc = qed_rdma_setup(p_hwfn, p_ptt, params); 271551ff1725SRam Amrani if (rc) 271651ff1725SRam Amrani goto err2; 271751ff1725SRam Amrani 271851ff1725SRam Amrani qed_ptt_release(p_hwfn, p_ptt); 271951ff1725SRam Amrani 272051ff1725SRam Amrani return rc; 272151ff1725SRam Amrani 272251ff1725SRam Amrani err2: 272351ff1725SRam Amrani qed_rdma_free(p_hwfn); 272451ff1725SRam Amrani err1: 272551ff1725SRam Amrani qed_ptt_release(p_hwfn, p_ptt); 272651ff1725SRam Amrani err: 272751ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); 272851ff1725SRam Amrani return rc; 272951ff1725SRam Amrani } 273051ff1725SRam Amrani 
273151ff1725SRam Amrani static int qed_rdma_init(struct qed_dev *cdev, 273251ff1725SRam Amrani struct qed_rdma_start_in_params *params) 273351ff1725SRam Amrani { 273451ff1725SRam Amrani return qed_rdma_start(QED_LEADING_HWFN(cdev), params); 273551ff1725SRam Amrani } 273651ff1725SRam Amrani 27370189efb8SYuval Mintz static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) 273851ff1725SRam Amrani { 273951ff1725SRam Amrani struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 274051ff1725SRam Amrani 274151ff1725SRam Amrani DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); 274251ff1725SRam Amrani 274351ff1725SRam Amrani spin_lock_bh(&p_hwfn->p_rdma_info->lock); 274451ff1725SRam Amrani qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); 274551ff1725SRam Amrani spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 274651ff1725SRam Amrani } 274751ff1725SRam Amrani 2748abd49676SRam Amrani static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, 2749abd49676SRam Amrani u8 *old_mac_address, 2750abd49676SRam Amrani u8 *new_mac_address) 2751abd49676SRam Amrani { 27520518c12fSMichal Kalderon struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2753abd49676SRam Amrani struct qed_ptt *p_ptt; 2754abd49676SRam Amrani int rc = 0; 2755abd49676SRam Amrani 27560518c12fSMichal Kalderon p_ptt = qed_ptt_acquire(p_hwfn); 2757abd49676SRam Amrani if (!p_ptt) { 2758abd49676SRam Amrani DP_ERR(cdev, 2759abd49676SRam Amrani "qed roce ll2 mac filter set: failed to acquire PTT\n"); 2760abd49676SRam Amrani return -EINVAL; 2761abd49676SRam Amrani } 2762abd49676SRam Amrani 2763abd49676SRam Amrani if (old_mac_address) 27640518c12fSMichal Kalderon qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address); 2765abd49676SRam Amrani if (new_mac_address) 27660518c12fSMichal Kalderon rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address); 2767abd49676SRam Amrani 27680518c12fSMichal Kalderon qed_ptt_release(p_hwfn, p_ptt); 2769abd49676SRam Amrani 2770abd49676SRam Amrani if (rc) 2771abd49676SRam 
Amrani DP_ERR(cdev, 27720518c12fSMichal Kalderon "qed roce ll2 mac filter set: failed to add MAC filter\n"); 2773abd49676SRam Amrani 2774abd49676SRam Amrani return rc; 2775abd49676SRam Amrani } 2776abd49676SRam Amrani 277751ff1725SRam Amrani static const struct qed_rdma_ops qed_rdma_ops_pass = { 277851ff1725SRam Amrani .common = &qed_common_ops_pass, 277951ff1725SRam Amrani .fill_dev_info = &qed_fill_rdma_dev_info, 278051ff1725SRam Amrani .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, 278151ff1725SRam Amrani .rdma_init = &qed_rdma_init, 278251ff1725SRam Amrani .rdma_add_user = &qed_rdma_add_user, 278351ff1725SRam Amrani .rdma_remove_user = &qed_rdma_remove_user, 278451ff1725SRam Amrani .rdma_stop = &qed_rdma_stop, 2785c295f86eSRam Amrani .rdma_query_port = &qed_rdma_query_port, 278651ff1725SRam Amrani .rdma_query_device = &qed_rdma_query_device, 278751ff1725SRam Amrani .rdma_get_start_sb = &qed_rdma_get_sb_start, 278851ff1725SRam Amrani .rdma_get_rdma_int = &qed_rdma_get_int, 278951ff1725SRam Amrani .rdma_set_rdma_int = &qed_rdma_set_int, 279051ff1725SRam Amrani .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, 279151ff1725SRam Amrani .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, 2792c295f86eSRam Amrani .rdma_alloc_pd = &qed_rdma_alloc_pd, 2793c295f86eSRam Amrani .rdma_dealloc_pd = &qed_rdma_free_pd, 2794c295f86eSRam Amrani .rdma_create_cq = &qed_rdma_create_cq, 2795c295f86eSRam Amrani .rdma_destroy_cq = &qed_rdma_destroy_cq, 2796f1093940SRam Amrani .rdma_create_qp = &qed_rdma_create_qp, 2797f1093940SRam Amrani .rdma_modify_qp = &qed_rdma_modify_qp, 2798f1093940SRam Amrani .rdma_query_qp = &qed_rdma_query_qp, 2799f1093940SRam Amrani .rdma_destroy_qp = &qed_rdma_destroy_qp, 2800ee8eaea3SRam Amrani .rdma_alloc_tid = &qed_rdma_alloc_tid, 2801ee8eaea3SRam Amrani .rdma_free_tid = &qed_rdma_free_tid, 2802ee8eaea3SRam Amrani .rdma_register_tid = &qed_rdma_register_tid, 2803ee8eaea3SRam Amrani .rdma_deregister_tid = &qed_rdma_deregister_tid, 28040518c12fSMichal 
Kalderon .ll2_acquire_connection = &qed_ll2_acquire_connection, 28050518c12fSMichal Kalderon .ll2_establish_connection = &qed_ll2_establish_connection, 28060518c12fSMichal Kalderon .ll2_terminate_connection = &qed_ll2_terminate_connection, 28070518c12fSMichal Kalderon .ll2_release_connection = &qed_ll2_release_connection, 28080518c12fSMichal Kalderon .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer, 28090518c12fSMichal Kalderon .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet, 28100518c12fSMichal Kalderon .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, 28110518c12fSMichal Kalderon .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, 28120518c12fSMichal Kalderon .ll2_get_stats = &qed_ll2_get_stats, 281351ff1725SRam Amrani }; 281451ff1725SRam Amrani 2815d4e99131SArnd Bergmann const struct qed_rdma_ops *qed_get_rdma_ops(void) 281651ff1725SRam Amrani { 281751ff1725SRam Amrani return &qed_rdma_ops_pass; 281851ff1725SRam Amrani } 281951ff1725SRam Amrani EXPORT_SYMBOL(qed_get_rdma_ops); 2820