// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2016 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "uda.h"
#include "uda_d.h"

/**
 * irdma_sc_access_ah() - Create, modify or delete AH
 * @cqp: struct for cqp hw
 * @info: ah information
 * @op: Operation
 * @scratch: u64 saved to be used during cqp completion
 */
enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp,
                                          struct irdma_ah_info *info,
                                          u32 op, u64 scratch)
{
        __le64 *wqe;
        u64 qw1, qw2;

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return IRDMA_ERR_RING_FULL;

        set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
        qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
              FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);

        qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);

        if (!info->ipv4_valid) {
                set_64bit_val(wqe, 40,
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
                set_64bit_val(wqe, 32,
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));

                set_64bit_val(wqe, 56,
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
                set_64bit_val(wqe, 48,
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
        } else {
                set_64bit_val(wqe, 32,
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));

                set_64bit_val(wqe, 48,
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
        }

        set_64bit_val(wqe, 8, qw1);
        set_64bit_val(wqe, 16, qw2);

        dma_wmb(); /* need write block before writing WQE header */

        set_64bit_val(
                wqe, 24,
                FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
                FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
                FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
                FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
                FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
                FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));

        print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
                             wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        irdma_sc_cqp_post_sq(cqp);

        return 0;
}
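
/*
 * Layout of the MANAGE_AH WQE built by irdma_sc_access_ah() above, as
 * derived from the quadword writes in that function:
 *
 *	offset  0: destination MAC address, shifted left by 16
 *	offset  8: PD index (low bits), traffic class/TOS, VLAN tag
 *	offset 16: ARP index, flow label, hop limit, PD index (high bits)
 *	offset 24: WQE header - valid bit, opcode, loopback, ipv4_valid,
 *	           AH index, insert-VLAN flag
 *	offset 32/40: destination IP - all four 32-bit words for IPv6,
 *	           only dest_ip_addr[0] (at offset 32) for IPv4
 *	offset 48/56: source IP, with the same IPv6/IPv4 split
 */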

/**
 * irdma_create_mg_ctx() - create a mcg context
 * @info: multicast group context info
 */
static enum irdma_status_code
irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
        struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
        u8 idx = 0; /* index in the array */
        u8 ctx_idx = 0; /* index in the MG context */

        memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));

        for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
                entry_info = &info->mg_ctx_info[idx];
                if (entry_info->valid_entry) {
                        set_64bit_val((__le64 *)info->dma_mem_mc.va,
                                      ctx_idx * sizeof(u64),
                                      FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
                                      FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
                                      FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
                        ctx_idx++;
                }
        }

        return 0;
}

/**
 * irdma_access_mcast_grp() - Access mcast group based on op
 * @cqp: Control QP
 * @info: multicast group context info
 * @op: operation to perform
 * @scratch: u64 saved to be used during cqp completion
 */
enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
                                              struct irdma_mcast_grp_info *info,
                                              u32 op, u64 scratch)
{
        __le64 *wqe;
        enum irdma_status_code ret_code = 0;

        if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
                ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
                return IRDMA_ERR_PARAM;
        }

        wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe) {
                ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
                return IRDMA_ERR_RING_FULL;
        }

        ret_code = irdma_create_mg_ctx(info);
        if (ret_code)
                return ret_code;

        set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
                      FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
        set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
        set_64bit_val(wqe, 8,
                      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));

        if (!info->ipv4_valid) {
                set_64bit_val(wqe, 56,
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
                set_64bit_val(wqe, 48,
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
        } else {
                set_64bit_val(wqe, 48,
                              FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
        }

        dma_wmb(); /* need write memory block before writing the WQE header. */

        set_64bit_val(wqe, 24,
                      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
                      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
                      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
                      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
                      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));

        print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
                             wqe, IRDMA_CQP_WQE_SIZE * 8, false);
        print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
                             8, info->dma_mem_mc.va,
                             IRDMA_MAX_MGS_PER_CTX * 8, false);
        irdma_sc_cqp_post_sq(cqp);

        return 0;
}
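
/*
 * Layout of the MANAGE_MCG WQE built by irdma_access_mcast_grp() above, as
 * derived from the quadword writes in that function:
 *
 *	offset  0: destination MAC address
 *	offset  8: HMC function id
 *	offset 16: VLAN id and qs_handle
 *	offset 24: WQE header - valid bit, opcode, MG index, vlan_valid,
 *	           ipv4_valid
 *	offset 32: physical address of the MG context (dma_mem_mc.pa)
 *	offset 48/56: destination IP - all four 32-bit words for IPv6,
 *	           only dest_ip_addr[0] (at offset 48) for IPv4
 */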

/**
 * irdma_compare_mgs - Compares two multicast group structures
 * @entry1: Multicast group info
 * @entry2: Multicast group info in context
 */
static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
                              struct irdma_mcast_grp_ctx_entry_info *entry2)
{
        if (entry1->dest_port == entry2->dest_port &&
            entry1->qp_id == entry2->qp_id)
                return true;

        return false;
}

/**
 * irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
 * @ctx: Multicast group context
 * @mg: Multicast group info
 */
enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
                                              struct irdma_mcast_grp_ctx_entry_info *mg)
{
        u32 idx;
        bool free_entry_found = false;
        u32 free_entry_idx = 0;

        /* find either an identical or a free entry for a multicast group */
        for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
                if (ctx->mg_ctx_info[idx].valid_entry) {
                        if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
                                ctx->mg_ctx_info[idx].use_cnt++;
                                return 0;
                        }
                        continue;
                }
                if (!free_entry_found) {
                        free_entry_found = true;
                        free_entry_idx = idx;
                }
        }

        if (free_entry_found) {
                ctx->mg_ctx_info[free_entry_idx] = *mg;
                ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
                ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
                ctx->no_of_mgs++;
                return 0;
        }

        return IRDMA_ERR_NO_MEMORY;
}

/**
 * irdma_sc_del_mcast_grp - Delete mcast group
 * @ctx: Multicast group context
 * @mg: Multicast group info
 *
 * Finds and removes a specific multicast group from context, all
 * parameters must match to remove a multicast group.
 */
enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
                                              struct irdma_mcast_grp_ctx_entry_info *mg)
{
        u32 idx;

        /* find an entry in multicast group context */
        for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
                if (!ctx->mg_ctx_info[idx].valid_entry)
                        continue;

                if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
                        ctx->mg_ctx_info[idx].use_cnt--;

                        if (!ctx->mg_ctx_info[idx].use_cnt) {
                                ctx->mg_ctx_info[idx].valid_entry = false;
                                ctx->no_of_mgs--;
                                /* Remove gap if element was not the last */
                                if (idx != ctx->no_of_mgs &&
                                    ctx->no_of_mgs > 0) {
                                        memcpy(&ctx->mg_ctx_info[idx],
                                               &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
                                               sizeof(ctx->mg_ctx_info[idx]));
                                        ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
                                }
                        }

                        return 0;
                }
        }

        return IRDMA_ERR_PARAM;
}
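
/*
 * Illustrative sketch, not part of the driver: one way the three multicast
 * helpers above could be combined by a caller. The irdma_example_* name and
 * the opaque "op"/"scratch" arguments are placeholders; real callers and CQP
 * opcodes live elsewhere in the irdma driver.
 *
 *	static enum irdma_status_code
 *	irdma_example_join_mcast(struct irdma_sc_cqp *cqp,
 *				 struct irdma_mcast_grp_info *ctx,
 *				 struct irdma_mcast_grp_ctx_entry_info *mg,
 *				 u32 op, u64 scratch)
 *	{
 *		enum irdma_status_code ret;
 *
 *		// Reserve (or refcount) the QP/port pair in the local table.
 *		ret = irdma_sc_add_mcast_grp(ctx, mg);
 *		if (ret)
 *			return ret;
 *
 *		// Push the updated MG context to HW with a MANAGE_MCG WQE.
 *		ret = irdma_access_mcast_grp(cqp, ctx, op, scratch);
 *		if (ret)
 *			irdma_sc_del_mcast_grp(ctx, mg);
 *
 *		return ret;
 *	}
 */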