// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright 2021 Marvell. All rights reserved. */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/qed/qed_nvmetcp_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_nvmetcp.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_sp.h"
#include "qed_reg_addr.h"

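/* Dispatch a firmware async event to the callback that the upper layer
 * registered at function start.
 */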
static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				   u16 echo, union event_ring_data *data,
				   u8 fw_return_code)
{
	if (p_hwfn->p_nvmetcp_info->event_cb) {
		struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info;

		return p_nvmetcp->event_cb(p_nvmetcp->event_context,
					   fw_event_code, data);
	} else {
		DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n");

		return -EINVAL;
	}
}

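/* Post the INIT_FUNC ramrod: sets up the PF-level NVMeTCP function in
 * firmware (SQ/R2TQ/UHQ ring sizes, CQ/CMDQ resources, TCP timers and the
 * LL2 OOO queue), registering the async-event callback beforehand.
 */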
static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn,
				     enum spq_mode comp_mode,
				     struct qed_spq_comp_cb *p_comp_addr,
				     void *event_context,
				     nvmetcp_event_cb_t async_event_cb)
{
	struct nvmetcp_init_ramrod_params *p_ramrod = NULL;
	struct qed_nvmetcp_pf_params *p_params = NULL;
	struct scsi_init_func_queues *p_queue = NULL;
	struct nvmetcp_spe_func_init *p_init = NULL;
	struct qed_sp_init_data init_data = {};
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;
	u16 val;
	u8 i;

	/* Get SPQ entry */
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_init;
	p_init = &p_ramrod->nvmetcp_init_spe;
	p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
	p_queue = &p_init->q_params;
	p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
	p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
	p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
	p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
					p_params->ll2_ooo_queue_id;
	SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
	p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
	p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks);
	p_init->debug_flags = p_params->debug_mode;
	DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
		       p_params->glbl_q_params_addr);
	p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE);
	p_queue->num_queues = p_params->num_queues;
	val = RESC_START(p_hwfn, QED_CMDQS_CQS);
	p_queue->queue_relative_offset = cpu_to_le16((u16)val);
	p_queue->cq_sb_pi = p_params->gl_rq_pi;

	for (i = 0; i < p_params->num_queues; i++) {
		val = qed_get_igu_sb_id(p_hwfn, i);
		p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
	}

	SET_FIELD(p_queue->q_validity,
		  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0);
	p_queue->cmdq_num_entries = 0;
	p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
	p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER);
	p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER);
	p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT);
	p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT;
	SET_FIELD(p_ramrod->nvmetcp_init_spe.params,
		  NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT);
	p_hwfn->p_nvmetcp_info->event_context = event_context;
	p_hwfn->p_nvmetcp_info->event_cb = async_event_cb;
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
				  qed_nvmetcp_async_event);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

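/* Post the DESTROY_FUNC ramrod and unregister the async-event callback. */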
static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn,
				    enum spq_mode comp_mode,
				    struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);

	return rc;
}

static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev,
				     struct qed_dev_nvmetcp_info *info)
{
	struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);
	info->port_id = MFW_PORT(hwfn);
	info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);

	return rc;
}

static void qed_register_nvmetcp_ops(struct qed_dev *cdev,
				     struct qed_nvmetcp_cb_ops *ops,
				     void *cookie)
{
	cdev->protocol_ops.nvmetcp = ops;
	cdev->ops_cookie = cookie;
}

static int qed_nvmetcp_stop(struct qed_dev *cdev)
{
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "nvmetcp already stopped\n");

		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop nvmetcp - not all connections were returned\n");

		return -EINVAL;
	}

	/* Stop the nvmetcp */
	rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
				      NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;

	return rc;
}

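/* Start the NVMeTCP PF: post INIT_FUNC and, if the caller passed a 'tasks'
 * struct, report the task (TID) memory layout so the upper layer can fill
 * task contexts directly.
 */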
static int qed_nvmetcp_start(struct qed_dev *cdev,
			     struct qed_nvmetcp_tid *tasks,
			     void *event_context,
			     nvmetcp_event_cb_t async_event_cb)
{
	struct qed_tid_mem *tid_info;
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "nvmetcp already started\n");

		return 0;
	}

	rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev),
				       QED_SPQ_MODE_EBLOCK, NULL,
				       event_context, async_event_cb);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start nvmetcp\n");

		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (!tasks)
		return 0;

	tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
	if (!tid_info) {
		qed_nvmetcp_stop(cdev);

		return -ENOMEM;
	}

	rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
	if (rc) {
		DP_NOTICE(cdev, "Failed to gather task information\n");
		qed_nvmetcp_stop(cdev);
		kfree(tid_info);

		return rc;
	}

	/* Fill task information */
	tasks->size = tid_info->tid_size;
	tasks->num_tids_per_block = tid_info->num_tids_per_block;
	memcpy(tasks->blocks, tid_info->blocks,
	       MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *));
	kfree(tid_info);

	return 0;
}

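/* Look up an offloaded connection by its icid in the cdev connection hash. */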
static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev,
							 u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || hash_con->con->icid != handle)
		return NULL;

	return hash_con;
}

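/* Post the OFFLOAD_CONN ramrod: hands the SQ/R2TQ/XHQ/UHQ PBLs, the
 * CCCID-iTID table and the TCP 4-tuple plus TCP options to firmware.
 */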
static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn,
				       struct qed_nvmetcp_conn *p_conn,
				       enum spq_mode comp_mode,
				       struct qed_spq_comp_cb *p_comp_addr)
{
	struct nvmetcp_spe_conn_offload *p_ramrod = NULL;
	struct tcp_offload_params_opt2 *p_tcp = NULL;
	struct qed_sp_init_data init_data = { 0 };
	struct qed_spq_entry *p_ent = NULL;
	dma_addr_t r2tq_pbl_addr;
	dma_addr_t xhq_pbl_addr;
	dma_addr_t uhq_pbl_addr;
	u16 physical_q;
	int rc = 0;
	u8 i;

	/* Get SPQ entry */
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload;

	/* Transmission PQ is the first of the PF */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q);
	p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q);

	/* nvmetcp Pure-ACK PQ */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_conn->physical_q1 = cpu_to_le16(physical_q);
	p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q);
	p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr);
	r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr);
	xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr);
	uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr);
	p_ramrod->nvmetcp.flags = p_conn->offl_flags;
	p_ramrod->nvmetcp.default_cq = p_conn->default_cq;
	p_ramrod->nvmetcp.initial_ack = 0;
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr,
		       p_conn->nvmetcp_cccid_itid_table_addr);
	p_ramrod->nvmetcp.nvmetcp.cccid_max_range =
		cpu_to_le16(p_conn->nvmetcp_cccid_max_range);
	p_tcp = &p_ramrod->tcp;
	qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi,
			    &p_tcp->remote_mac_addr_mid,
			    &p_tcp->remote_mac_addr_lo, p_conn->remote_mac);
	qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi,
			    &p_tcp->local_mac_addr_mid,
			    &p_tcp->local_mac_addr_lo, p_conn->local_mac);
	p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
	p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
	p_tcp->ip_version = p_conn->ip_version;
	if (p_tcp->ip_version == TCP_IPV6) {
		for (i = 0; i < 4; i++) {
			p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]);
			p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]);
		}
	} else {
		p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]);
		p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]);
	}

	p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
	p_tcp->ttl = p_conn->ttl;
	p_tcp->tos_or_tc = p_conn->tos_or_tc;
	p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
	p_tcp->local_port = cpu_to_le16(p_conn->local_port);
	p_tcp->mss = cpu_to_le16(p_conn->mss);
	p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
	p_tcp->connect_mode = p_conn->connect_mode;
	p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
	p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
	p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
	p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
	p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

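/* Post the UPDATE_CONN ramrod with PDU/sequence size limits and digest
 * flags.
 */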
static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn,
				      struct qed_nvmetcp_conn *p_conn,
				      enum spq_mode comp_mode,
				      struct qed_spq_comp_cb *p_comp_addr)
{
	struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u32 dval;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_UPDATE_CONN,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_conn_update;
	p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
	p_ramrod->flags = p_conn->update_flag;
	p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
	dval = p_conn->max_recv_pdu_length;
	p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
	dval = p_conn->max_send_pdu_length;
	p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
	p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

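/* Post the TERMINATION_CONN ramrod; 'abortive' selects graceful vs
 * abortive (RST) teardown.
 */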
static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn,
					 struct qed_nvmetcp_conn *p_conn,
					 enum spq_mode comp_mode,
					 struct qed_spq_comp_cb *p_comp_addr)
{
	struct nvmetcp_spe_conn_termination *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate;
	p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
	p_ramrod->abortive = p_conn->abortive_dsconnect;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn,
					struct qed_nvmetcp_conn *p_conn,
					enum spq_mode comp_mode,
					struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_CLEAR_SQ,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

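/* Compute the doorbell address of a given connection CID. */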
static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
			     qed_db_addr(cid, DQ_DEMS_LEGACY);
}

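/* Reuse a connection from the free list when possible, otherwise allocate
 * a new one together with its R2TQ, UHQ and XHQ chains.
 */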
static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn,
					   struct qed_nvmetcp_conn **p_out_conn)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
	};
	struct qed_nvmetcp_pf_params *p_params = NULL;
	struct qed_nvmetcp_conn *p_conn = NULL;
	int rc = 0;

	/* Try finding a free connection that can be used */
	spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
	if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list))
		p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
					  struct qed_nvmetcp_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
		*p_out_conn = p_conn;

		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

	/* Need to allocate a new connection */
	p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	params.num_elems = p_params->num_r2tq_pages_in_ring *
			   QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe);
	params.elem_size = sizeof(struct nvmetcp_wqe);
	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
	if (rc)
		goto nomem_r2tq;

	params.num_elems = p_params->num_uhq_pages_in_ring *
			   QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
	params.elem_size = sizeof(struct iscsi_uhqe);
	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
	if (rc)
		goto nomem_uhq;

	params.elem_size = sizeof(struct iscsi_xhqe);
	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
	if (rc)
		goto nomem;

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;

	return 0;

nomem:
	qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
nomem_uhq:
	qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
nomem_r2tq:
	kfree(p_conn);

	return -ENOMEM;
}

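/* Acquire a CID from the context manager and bind it to a new or recycled
 * connection object.
 */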
static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn,
					  struct qed_nvmetcp_conn **p_out_conn)
{
	struct qed_nvmetcp_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
	spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

	if (rc)
		return rc;

	rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn);
	if (rc) {
		spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
		qed_cxt_release_cid(p_hwfn, icid);
		spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

		return rc;
	}

	p_conn->icid = icid;
	p_conn->conn_id = (u16)icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn,
					   struct qed_nvmetcp_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
}

static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn,
					struct qed_nvmetcp_conn *p_conn)
{
	qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
	qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
	qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
	kfree(p_conn);
}

int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_nvmetcp_info *p_nvmetcp_info;

	p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL);
	if (!p_nvmetcp_info)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_nvmetcp_info->free_list);
	p_hwfn->p_nvmetcp_info = p_nvmetcp_info;

	return 0;
}

void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn)
{
	spin_lock_init(&p_hwfn->p_nvmetcp_info->lock);
}

void qed_nvmetcp_free(struct qed_hwfn *p_hwfn)
{
	struct qed_nvmetcp_conn *p_conn = NULL;

	if (!p_hwfn->p_nvmetcp_info)
		return;

	while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) {
		p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
					  struct qed_nvmetcp_conn, list_entry);
		if (p_conn) {
			list_del(&p_conn->list_entry);
			qed_nvmetcp_free_connection(p_hwfn, p_conn);
		}
	}

	kfree(p_hwfn->p_nvmetcp_info);
	p_hwfn->p_nvmetcp_info = NULL;
}

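/* Upper-layer entry point: acquire a connection, add it to the connection
 * hash and return its handle, fw_cid and doorbell address.
 */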
static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev,
				    u32 *handle,
				    u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_nvmetcp_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
	if (!hash_con)
		return -ENOMEM;

	/* Acquire the connection */
	rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev),
					    &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);

		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);
	if (p_doorbell)
		*p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev),
						      *handle);

	return 0;
}

static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

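/* Upper-layer entry point: copy the offload parameters (MACs, IPs, TCP
 * options) into the connection object and post the offload ramrod.
 */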
static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle,
				    struct qed_nvmetcp_params_offload *conn_info)
{
	struct qed_hash_nvmetcp_con *hash_con;
	struct qed_nvmetcp_conn *con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	/* FW initializations */
	con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE;
	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range;
	con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr;
	con->default_cq = conn_info->default_cq;
	SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0);
	SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1);
	SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1);

	/* Networking and TCP stack initializations */
	ether_addr_copy(con->local_mac, conn_info->src.mac);
	ether_addr_copy(con->remote_mac, conn_info->dst.mac);
	memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
	memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
	con->local_port = conn_info->src.port;
	con->remote_port = conn_info->dst.port;
	con->vlan_id = conn_info->vlan_id;

	if (conn_info->timestamp_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1);

	if (conn_info->delayed_ack_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1);

	if (conn_info->tcp_keep_alive_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1);

	if (conn_info->ecn_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1);

	con->ip_version = conn_info->ip_version;
	con->flow_label = QED_TCP_FLOW_LABEL;
	con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
	con->ka_timeout = conn_info->ka_timeout;
	con->ka_interval = conn_info->ka_interval;
	con->max_rt_time = conn_info->max_rt_time;
	con->ttl = conn_info->ttl;
	con->tos_or_tc = conn_info->tos_or_tc;
	con->mss = conn_info->mss;
	con->cwnd = conn_info->cwnd;
	con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
	con->connect_mode = 0;

	return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con,
					   QED_SPQ_MODE_EBLOCK, NULL);
}

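/* Upper-layer entry point: apply post-negotiation parameters (digests,
 * PDU and IO size limits) via the update ramrod.
 */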
static int qed_nvmetcp_update_conn(struct qed_dev *cdev,
				   u32 handle,
				   struct qed_nvmetcp_params_update *conn_info)
{
	struct qed_hash_nvmetcp_con *hash_con;
	struct qed_nvmetcp_conn *con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	SET_FIELD(con->update_flag,
		  ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0);
	SET_FIELD(con->update_flag,
		  ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1);
	if (conn_info->hdr_digest_en)
		SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1);

	if (conn_info->data_digest_en)
		SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1);

	/* Placeholder - initialize pfv, cpda, hpda */

	con->max_seq_size = conn_info->max_io_size;
	con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
	con->max_send_pdu_length = conn_info->max_send_pdu_length;
	con->first_seq_length = conn_info->max_io_size;

	return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con,
					  QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
					    QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev,
				    u32 handle, u8 abrt_conn)
{
	struct qed_hash_nvmetcp_con *hash_con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	hash_con->con->abortive_dsconnect = abrt_conn;

	return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
					     QED_SPQ_MODE_EBLOCK, NULL);
}

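/* Ops table handed to the NVMeTCP offload upper layer via
 * qed_get_nvmetcp_ops().
 */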
static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_nvmetcp_dev_info,
	.register_ops = &qed_register_nvmetcp_ops,
	.start = &qed_nvmetcp_start,
	.stop = &qed_nvmetcp_stop,
	.acquire_conn = &qed_nvmetcp_acquire_conn,
	.release_conn = &qed_nvmetcp_release_conn,
	.offload_conn = &qed_nvmetcp_offload_conn,
	.update_conn = &qed_nvmetcp_update_conn,
	.destroy_conn = &qed_nvmetcp_destroy_conn,
	.clear_sq = &qed_nvmetcp_clear_conn_sq,
};

const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void)
{
	return &qed_nvmetcp_ops_pass;
}
EXPORT_SYMBOL(qed_get_nvmetcp_ops);

void qed_put_nvmetcp_ops(void)
{
}
EXPORT_SYMBOL(qed_put_nvmetcp_ops);