// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2016 - 2021 Intel Corporation */
#include <linux/etherdevice.h>

#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "uda.h"
#include "uda_d.h"

/**
 * irdma_sc_access_ah() - Create, modify or delete an AH
 * @cqp: struct for CQP HW
 * @info: AH information
 * @op: operation to perform
 * @scratch: u64 saved to be used during cqp completion
 */
enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp,
					  struct irdma_ah_info *info,
					  u32 op, u64 scratch)
{
	__le64 *wqe;
	u64 qw1, qw2;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return IRDMA_ERR_RING_FULL;

	set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
	qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
	      FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);

	qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);

	if (!info->ipv4_valid) {
		set_64bit_val(wqe, 40,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));

		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
	} else {
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));

		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
	}

	set_64bit_val(wqe, 8, qw1);
	set_64bit_val(wqe, 16, qw2);

	dma_wmb(); /* need write memory block before writing the WQE header. */

	set_64bit_val(
		wqe, 24,
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));

	print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}
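
/*
 * Usage sketch (editorial example, not part of the driver): uda.h wraps
 * this helper as irdma_sc_create_ah()/irdma_sc_destroy_ah(), passing the
 * CQP opcodes from defs.h. A hypothetical caller would look roughly like
 * this; the exact opcode name and the fully populated info are assumptions.
 */
static inline enum irdma_status_code
irdma_example_create_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
			u64 scratch)
{
	/* info must already carry MAC, ARP index, addresses and PD index. */
	return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
				  scratch);
}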

/**
 * irdma_create_mg_ctx() - create a multicast group (MCG) context
 * @info: multicast group context info
 */
static enum irdma_status_code
irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
	struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
	u8 idx = 0; /* index in the array */
	u8 ctx_idx = 0; /* index in the MG context */

	memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));

	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		entry_info = &info->mg_ctx_info[idx];
		if (entry_info->valid_entry) {
			set_64bit_val((__le64 *)info->dma_mem_mc.va,
				      ctx_idx * sizeof(u64),
				      FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
				      FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
			ctx_idx++;
		}
	}

	return 0;
}
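
/*
 * Layout sketch (editorial example): every valid member is packed into
 * one little-endian u64 of the MG context using the IRDMA_UDA_MGCTX_*
 * masks from uda_d.h. The QP id and port below are hypothetical values.
 */
static inline u64 irdma_example_mgctx_entry(void)
{
	/* Member QP 5 reached via UDP destination port 4791 (RoCEv2). */
	return FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, 4791) |
	       FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, 1) |
	       FIELD_PREP(IRDMA_UDA_MGCTX_QPID, 5);
}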

/**
 * irdma_access_mcast_grp() - Access mcast group based on op
 * @cqp: Control QP
 * @info: multicast group context info
 * @op: operation to perform
 * @scratch: u64 saved to be used during cqp completion
 */
enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
					      struct irdma_mcast_grp_info *info,
					      u32 op, u64 scratch)
{
	__le64 *wqe;
	enum irdma_status_code ret_code = 0;

	if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
		return IRDMA_ERR_PARAM;
	}

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
		return IRDMA_ERR_RING_FULL;
	}

	ret_code = irdma_create_mg_ctx(info);
	if (ret_code)
		return ret_code;

	set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
	set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));

	if (!info->ipv4_valid) {
		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
	} else {
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
	}

	dma_wmb(); /* need write memory block before writing the WQE header. */

	set_64bit_val(wqe, 24,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));

	print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
			     8, info->dma_mem_mc.va,
			     IRDMA_MAX_MGS_PER_CTX * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}
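
/*
 * Usage sketch (editorial example): uda.h exposes thin wrappers such as
 * irdma_sc_create_mcast_grp()/irdma_sc_destroy_mcast_grp() around this
 * helper. The opcode macro below comes from defs.h; treat the exact
 * name as an assumption if building against a different tree.
 */
static inline enum irdma_status_code
irdma_example_create_mcg(struct irdma_sc_cqp *cqp,
			 struct irdma_mcast_grp_info *info, u64 scratch)
{
	/* info->dma_mem_mc must be a valid DMA buffer for the MG context. */
	return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,
				      scratch);
}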

/**
 * irdma_compare_mgs() - Compare two multicast group structures
 * @entry1: multicast group info
 * @entry2: multicast group info in context
 */
static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
			      struct irdma_mcast_grp_ctx_entry_info *entry2)
{
	if (entry1->dest_port == entry2->dest_port &&
	    entry1->qp_id == entry2->qp_id)
		return true;

	return false;
}

/**
 * irdma_sc_add_mcast_grp() - Allocate a mcast group entry in the context
 * @ctx: multicast group context
 * @mg: multicast group info
 */
enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
					      struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;
	bool free_entry_found = false;
	u32 free_entry_idx = 0;

	/* find either an identical or a free entry for a multicast group */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (ctx->mg_ctx_info[idx].valid_entry) {
			if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
				ctx->mg_ctx_info[idx].use_cnt++;
				return 0;
			}
			continue;
		}
		if (!free_entry_found) {
			free_entry_found = true;
			free_entry_idx = idx;
		}
	}

	if (free_entry_found) {
		ctx->mg_ctx_info[free_entry_idx] = *mg;
		ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
		ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
		ctx->no_of_mgs++;
		return 0;
	}

	return IRDMA_ERR_NO_MEMORY;
}
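
/*
 * Usage sketch (editorial example): adding the same QP/port pair twice
 * only bumps the use count of the existing entry; a second slot is not
 * consumed. The field values here are hypothetical.
 */
static inline void irdma_example_add_mg_twice(struct irdma_mcast_grp_info *ctx)
{
	struct irdma_mcast_grp_ctx_entry_info mg = {
		.qp_id = 5,
		.dest_port = 4791,	/* RoCEv2 UDP port, for illustration */
	};

	irdma_sc_add_mcast_grp(ctx, &mg);	/* new entry, use_cnt = 1 */
	irdma_sc_add_mcast_grp(ctx, &mg);	/* matched entry, use_cnt = 2 */
}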

/**
 * irdma_sc_del_mcast_grp() - Delete mcast group
 * @ctx: multicast group context
 * @mg: multicast group info
 *
 * Finds and removes a specific multicast group from the context; all
 * parameters must match for the multicast group to be removed.
 */
enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
					      struct irdma_mcast_grp_ctx_entry_info *mg)
{
	u32 idx;

	/* find an entry in multicast group context */
	for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
		if (!ctx->mg_ctx_info[idx].valid_entry)
			continue;

		if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
			ctx->mg_ctx_info[idx].use_cnt--;

			if (!ctx->mg_ctx_info[idx].use_cnt) {
				ctx->mg_ctx_info[idx].valid_entry = false;
				ctx->no_of_mgs--;
				/* Remove gap if element was not the last */
				if (idx != ctx->no_of_mgs &&
				    ctx->no_of_mgs > 0) {
					memcpy(&ctx->mg_ctx_info[idx],
					       &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
					       sizeof(ctx->mg_ctx_info[idx]));
					ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
				}
			}

			return 0;
		}
	}

	return IRDMA_ERR_PARAM;
}
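
/*
 * Deletion sketch (editorial example): each call drops one reference;
 * the slot is invalidated and the array compacted only when use_cnt
 * reaches zero. Assumes mg was added twice, as in the sketch above.
 */
static inline enum irdma_status_code
irdma_example_del_mg(struct irdma_mcast_grp_info *ctx,
		     struct irdma_mcast_grp_ctx_entry_info *mg)
{
	enum irdma_status_code ret;

	ret = irdma_sc_del_mcast_grp(ctx, mg);	/* use_cnt 2 -> 1, entry kept */
	if (ret)
		return ret;

	return irdma_sc_del_mcast_grp(ctx, mg);	/* use_cnt 1 -> 0, slot freed */
}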