/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

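/* Acquire a slow-path queue entry and initialize its ramrod header (CID,
 * command and protocol IDs) and completion mode from @p_data.
 */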
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd,
			u8 protocol,
			struct qed_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	if (!pp_ent)
		return -ENOMEM;

	rc = qed_spq_get_entry(p_hwfn, pp_ent);

	if (rc != 0)
		return rc;

	p_ent = *pp_ent;

	p_ent->elem.hdr.cid		= cpu_to_le32(opaque_cid);
	p_ent->elem.hdr.cmd_id		= cmd;
	p_ent->elem.hdr.protocol_id	= protocol;

	p_ent->priority		= QED_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode	= p_data->comp_mode;
	p_ent->comp_done.done	= 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case QED_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			return -EINVAL;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case QED_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return 0;
}

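/* Map the driver tunnel classification value onto the FW tunnel_clss enum;
 * unrecognized values fall back to outer MAC/VLAN classification.
 */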
static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
{
	switch (type) {
	case QED_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case QED_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case QED_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case QED_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}

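/* Compute the effective tunnel mode for a PF update: bits named in the update
 * mask are taken from the requested mode, all other bits are carried over from
 * the mode currently cached in the device. The result is written back into
 * p_src->tunn_mode.
 */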
static void
qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_update_params *p_src,
			      struct pf_update_tunnel_config *p_tunn_cfg)
{
	unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
	unsigned long update_mask = p_src->tunn_mode_update_mask;
	unsigned long tunn_mode = p_src->tunn_mode;
	unsigned long new_tunn_mode = 0;

	if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
	}

	if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
	}

	if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
	}

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
				cpu_to_le16(p_src->geneve_udp_port);
	}

	if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
	}

	if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
		if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
	} else {
		if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
	}

	p_src->tunn_mode = new_tunn_mode;
}

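/* Fill the PF_UPDATE tunnel configuration: classification types, UDP
 * destination ports and per-tunnel Tx enable flags, based on the fixed-up
 * tunnel mode.
 */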
static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_update_params *p_src,
			      struct pf_update_tunnel_config *p_tunn_cfg)
{
	unsigned long tunn_mode = p_src->tunn_mode;
	enum tunnel_clss type;

	qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
	p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
	p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
	p_tunn_cfg->tunnel_clss_vxlan = type;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
	p_tunn_cfg->tunnel_clss_l2gre = type;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
	p_tunn_cfg->tunnel_clss_ipgre = type;

	if (p_src->update_vxlan_udp_port) {
		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
	}

	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2gre = 1;

	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgre = 1;

	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_vxlan = 1;

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
				cpu_to_le16(p_src->geneve_udp_port);
	}

	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2geneve = 1;

	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgeneve = 1;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
	p_tunn_cfg->tunnel_clss_l2geneve = type;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
	p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

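/* Program the GRE, VXLAN and GENEVE hardware enable bits to match tunn_mode. */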
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 unsigned long tunn_mode)
{
	u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
	u8 l2geneve_enable = 0, ipgeneve_enable = 0;

	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
		l2gre_enable = 1;

	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
		ipgre_enable = 1;

	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
		vxlan_enable = 1;

	qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
	qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);

	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
		l2geneve_enable = 1;

	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
		ipgeneve_enable = 1;

	qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
			      ipgeneve_enable);
}

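/* Fill the PF_START tunnel configuration from the supplied start parameters;
 * a NULL p_src leaves the ramrod defaults untouched.
 */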
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
			     struct qed_tunn_start_params *p_src,
			     struct pf_start_tunnel_config *p_tunn_cfg)
{
	unsigned long tunn_mode;
	enum tunnel_clss type;

	if (!p_src)
		return;

	tunn_mode = p_src->tunn_mode;
	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
	p_tunn_cfg->tunnel_clss_vxlan = type;
	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
	p_tunn_cfg->tunnel_clss_l2gre = type;
	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
	p_tunn_cfg->tunnel_clss_ipgre = type;

	if (p_src->update_vxlan_udp_port) {
		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
	}

	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2gre = 1;

	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgre = 1;

	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_vxlan = 1;

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
				cpu_to_le16(p_src->geneve_udp_port);
	}

	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2geneve = 1;

	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgeneve = 1;

	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
	p_tunn_cfg->tunnel_clss_l2geneve = type;
	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
	p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

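/* Post the PF_START ramrod: advertise the event ring and consolidation queue
 * PBLs, the MF mode, the tunnel configuration and the SR-IOV VF range, then
 * program the hardware tunnel mode once the ramrod has been posted.
 */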
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_tunn_start_params *p_tunn,
		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id	= cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index	= sb_index;
	p_ramrod->path_id		= QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods	= 0;
	p_ramrod->log_type_mask		= cpu_to_le16(0xf);
	p_ramrod->mf_mode = mode;
	switch (mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case QED_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}
	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl.p_phys_table);
	p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;

	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl.p_phys_table);

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
				     &p_ramrod->tunnel_config);
	p_hwfn->hw_info.personality = PERSONALITY_ETH;

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8) p_iov->total_vfs;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index,
		   p_ramrod->outer_tag);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn) {
		qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
				     p_tunn->tunn_mode);
		p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
	}

	return rc;
}

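/* Post a PF_UPDATE ramrod carrying the current DCBx results. */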
int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
				      &p_ent->ramrod.pf_update);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

/* Post a PF_UPDATE ramrod carrying a new tunnel configuration, then apply
 * the requested UDP destination ports and tunnel mode to the hardware.
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_tunn_update_params *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
				      &p_ent->ramrod.pf_update.tunnel_config);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	if (p_tunn->update_vxlan_udp_port)
		qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					p_tunn->vxlan_udp_port);
	if (p_tunn->update_geneve_udp_port)
		qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					 p_tunn->geneve_udp_port);

	qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
	p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;

	return rc;
}

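/* Post the PF_STOP ramrod and block until the firmware completes it. */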
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

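/* Post an empty ramrod; used as a heartbeat to check that the firmware
 * slow-path channel is still being serviced.
 */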
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}