/* drivers/infiniband/hw/irdma/uda.c (revision ea7596c1) */
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2016 - 2021 Intel Corporation */
#include <linux/etherdevice.h>

#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "uda.h"
#include "uda_d.h"

/**
 * irdma_sc_access_ah() - Create, modify or delete AH
 * @cqp: struct for cqp hw
 * @info: ah information
 * @op: Operation to perform (create, modify or delete the AH)
 * @scratch: u64 saved to be used during cqp completion
 */
int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
		       u32 op, u64 scratch)
{
	__le64 *wqe;
	u64 qw1, qw2;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

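	/*
	 * WQE layout, per the offsets used below: QW0 carries the MAC
	 * address, QW1 the PD index (low bits), traffic class and VLAN tag,
	 * QW2 the ARP index, flow label, hop limit and the PD index high
	 * bits; QW3 is the valid/opcode header and is written last.
	 */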
	set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
	qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
	      FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);

	qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);

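	/*
	 * An IPv6 address spans two quadwords (destination in QW4/QW5,
	 * source in QW6/QW7); an IPv4 address uses only the ADDR3 field of
	 * QW4 (destination) and QW6 (source).
	 */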
	if (!info->ipv4_valid) {
		set_64bit_val(wqe, 40,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));

		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
	} else {
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));

		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
	}

	set_64bit_val(wqe, 8, qw1);
	set_64bit_val(wqe, 16, qw2);

	dma_wmb(); /* need write block before writing WQE header */

	set_64bit_val(
		wqe, 24,
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));

	print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}
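
/*
 * Illustrative caller sketch (an assumption, not code from this file): the
 * driver reaches this routine through opcode-specific wrappers, so creating
 * an AH for a populated irdma_ah_info might look like:
 *
 *	err = irdma_sc_access_ah(cqp, &ah_info,
 *				 IRDMA_CQP_OP_CREATE_ADDR_HANDLE, scratch);
 *	if (err)
 *		return err;
 *
 * -ENOMEM means the CQP SQ is full; callers typically poll completions and
 * retry. Success is reported asynchronously on the CQP CQ, where @scratch
 * identifies the request.
 */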

/**
 * irdma_create_mg_ctx() - create a multicast group (MG) context
 * @info: multicast group context info
 */
static void irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
	struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
	u8 idx = 0; /* index in the array */
	u8 ctx_idx = 0; /* index in the MG context */

	memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));

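	/*
	 * Pack only the valid entries: ctx_idx advances only when an entry
	 * is written, so the context handed to hardware is contiguous even
	 * if the software array has gaps.
	 */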
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		entry_info = &info->mg_ctx_info[idx];
		if (entry_info->valid_entry) {
			set_64bit_val((__le64 *)info->dma_mem_mc.va,
				      ctx_idx * sizeof(u64),
				      FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
			ctx_idx++;
		}
	}
}

/**
 * irdma_access_mcast_grp() - Access mcast group based on op
 * @cqp: Control QP
 * @info: multicast group context info
 * @op: operation to perform
 * @scratch: u64 saved to be used during cqp completion
 */
int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
			   struct irdma_mcast_grp_info *info, u32 op,
			   u64 scratch)
{
	__le64 *wqe;

	if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
		return -EINVAL;
	}

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
		return -ENOMEM;
	}

	irdma_create_mg_ctx(info);

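	/*
	 * Per the offsets below: QW0 carries the destination MAC, QW1 the
	 * HMC function id, QW2 the VLAN id and QS handle, QW4 the DMA
	 * address of the packed MG context, and QW6/QW7 the destination IP.
	 */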
	set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
	set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));

	if (!info->ipv4_valid) {
		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
	} else {
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
	}

	dma_wmb(); /* need write memory block before writing the WQE header. */

	set_64bit_val(wqe, 24,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));

	print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
			     8, info->dma_mem_mc.va,
			     IRDMA_MAX_MGS_PER_CTX * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}
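
/*
 * Illustrative caller sketch (an assumption, not code from this file): as
 * with AHs, groups are managed through opcode-specific wrappers, e.g.
 * pushing a context updated by irdma_sc_add_mcast_grp():
 *
 *	err = irdma_access_mcast_grp(cqp, &mg_info,
 *				     IRDMA_CQP_OP_MODIFY_MCAST_GRP, scratch);
 *
 * The IRDMA_CQP_OP_* opcode name above follows the driver's convention but
 * is not defined in this file.
 */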

/**
 * irdma_compare_mgs - Compares two multicast group structures
 * @entry1: Multicast group info
 * @entry2: Multicast group info in context
 */
static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
			      struct irdma_mcast_grp_ctx_entry_info *entry2)
{
	if (entry1->dest_port == entry2->dest_port &&
	    entry1->qp_id == entry2->qp_id)
		return true;

	return false;
}

/**
 * irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
 * @ctx: Multicast group context
 * @mg: Multicast group info
 */
int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
			   struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;
	bool free_entry_found = false;
	u32 free_entry_idx = 0;

	/* find either an identical or a free entry for a multicast group */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (ctx->mg_ctx_info[idx].valid_entry) {
			if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
				ctx->mg_ctx_info[idx].use_cnt++;
				return 0;
			}
			continue;
		}
		if (!free_entry_found) {
			free_entry_found = true;
			free_entry_idx = idx;
		}
	}

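	/* No duplicate found: claim the first free slot seen, if any. */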
	if (free_entry_found) {
		ctx->mg_ctx_info[free_entry_idx] = *mg;
		ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
		ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
		ctx->no_of_mgs++;
		return 0;
	}

	return -ENOMEM;
}
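
/*
 * Hypothetical usage sketch: a multicast attach first updates the software
 * context, then posts the new context to hardware (the variable names and
 * the RoCEv2 UDP destination port below are illustrative assumptions):
 *
 *	mg_qp.qp_id = qp_id;
 *	mg_qp.dest_port = 4791;
 *	err = irdma_sc_add_mcast_grp(&mg_info, &mg_qp);
 *	if (!err)
 *		err = irdma_access_mcast_grp(cqp, &mg_info,
 *					     IRDMA_CQP_OP_MODIFY_MCAST_GRP,
 *					     scratch);
 *
 * Re-adding an identical (dest_port, qp_id) pair only bumps use_cnt, so a
 * matching number of irdma_sc_del_mcast_grp() calls is needed to free the
 * slot.
 */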

/**
 * irdma_sc_del_mcast_grp - Delete mcast group
 * @ctx: Multicast group context
 * @mg: Multicast group info
 *
 * Finds and removes a specific multicast group from context; all
 * parameters must match to remove a multicast group.
 */
int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
			   struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;

	/* find an entry in multicast group context */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (!ctx->mg_ctx_info[idx].valid_entry)
			continue;

		if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
			ctx->mg_ctx_info[idx].use_cnt--;

			if (!ctx->mg_ctx_info[idx].use_cnt) {
				ctx->mg_ctx_info[idx].valid_entry = false;
				ctx->no_of_mgs--;
				/* Remove gap if element was not the last */
				if (idx != ctx->no_of_mgs &&
				    ctx->no_of_mgs > 0) {
					memcpy(&ctx->mg_ctx_info[idx],
					       &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
					       sizeof(ctx->mg_ctx_info[idx]));
					ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
				}
			}

			return 0;
		}
	}

	return -EINVAL;
}
266