1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Huawei HiNIC PCI Express Linux driver
4  * Copyright(c) 2017 Huawei Technologies Co., Ltd
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/pci.h>
10 #include <linux/device.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/semaphore.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/io.h>
16 #include <linux/err.h>
17 
18 #include "hinic_hw_dev.h"
19 #include "hinic_hw_if.h"
20 #include "hinic_hw_eqs.h"
21 #include "hinic_hw_wqe.h"
22 #include "hinic_hw_wq.h"
23 #include "hinic_hw_cmdq.h"
24 #include "hinic_hw_qp_ctxt.h"
25 #include "hinic_hw_qp.h"
26 #include "hinic_hw_io.h"
27 
28 #define CI_Q_ADDR_SIZE                  sizeof(u32)
29 
30 #define CI_ADDR(base_addr, q_id)        ((base_addr) + \
31 					 (q_id) * CI_Q_ADDR_SIZE)
32 
33 #define CI_TABLE_SIZE(num_qps)          ((num_qps) * CI_Q_ADDR_SIZE)
34 
35 #define DB_IDX(db, db_base)             \
36 	(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
37 
38 #define HINIC_PAGE_SIZE_HW(pg_size)	((u8)ilog2((u32)((pg_size) >> 12)))
39 
40 enum io_cmd {
41 	IO_CMD_MODIFY_QUEUE_CTXT = 0,
42 	IO_CMD_CLEAN_QUEUE_CTXT,
43 };
44 
45 static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
46 {
47 	int i;
48 
49 	for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
50 		free_db_area->db_idx[i] = i;
51 
52 	free_db_area->alloc_pos = 0;
53 	free_db_area->return_pos = HINIC_DB_MAX_AREAS;
54 
55 	free_db_area->num_free = HINIC_DB_MAX_AREAS;
56 
57 	sema_init(&free_db_area->idx_lock, 1);
58 }
59 
60 static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
61 {
62 	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
63 	int pos, idx;
64 
65 	down(&free_db_area->idx_lock);
66 
67 	free_db_area->num_free--;
68 
69 	if (free_db_area->num_free < 0) {
70 		free_db_area->num_free++;
71 		up(&free_db_area->idx_lock);
72 		return ERR_PTR(-ENOMEM);
73 	}
74 
75 	pos = free_db_area->alloc_pos++;
76 	pos &= HINIC_DB_MAX_AREAS - 1;
77 
78 	idx = free_db_area->db_idx[pos];
79 
80 	free_db_area->db_idx[pos] = -1;
81 
82 	up(&free_db_area->idx_lock);
83 
84 	return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
85 }
86 
87 static void return_db_area(struct hinic_func_to_io *func_to_io,
88 			   void __iomem *db_base)
89 {
90 	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
91 	int pos, idx = DB_IDX(db_base, func_to_io->db_base);
92 
93 	down(&free_db_area->idx_lock);
94 
95 	pos = free_db_area->return_pos++;
96 	pos &= HINIC_DB_MAX_AREAS - 1;
97 
98 	free_db_area->db_idx[pos] = idx;
99 
100 	free_db_area->num_free++;
101 
102 	up(&free_db_area->idx_lock);
103 }
104 
105 static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
106 			  u16 num_sqs)
107 {
108 	struct hinic_hwif *hwif = func_to_io->hwif;
109 	struct hinic_sq_ctxt_block *sq_ctxt_block;
110 	struct pci_dev *pdev = hwif->pdev;
111 	struct hinic_cmdq_buf cmdq_buf;
112 	struct hinic_sq_ctxt *sq_ctxt;
113 	struct hinic_qp *qp;
114 	u64 out_param;
115 	int err, i;
116 
117 	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
118 	if (err) {
119 		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
120 		return err;
121 	}
122 
123 	sq_ctxt_block = cmdq_buf.buf;
124 	sq_ctxt = sq_ctxt_block->sq_ctxt;
125 
126 	hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
127 				num_sqs, func_to_io->max_qps);
128 	for (i = 0; i < num_sqs; i++) {
129 		qp = &func_to_io->qps[i];
130 
131 		hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
132 				      base_qpn + qp->q_id);
133 	}
134 
135 	cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);
136 
137 	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
138 				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
139 				     &out_param);
140 	if ((err) || (out_param != 0)) {
141 		dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
142 		err = -EFAULT;
143 	}
144 
145 	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
146 	return err;
147 }
148 
149 static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
150 			  u16 num_rqs)
151 {
152 	struct hinic_hwif *hwif = func_to_io->hwif;
153 	struct hinic_rq_ctxt_block *rq_ctxt_block;
154 	struct pci_dev *pdev = hwif->pdev;
155 	struct hinic_cmdq_buf cmdq_buf;
156 	struct hinic_rq_ctxt *rq_ctxt;
157 	struct hinic_qp *qp;
158 	u64 out_param;
159 	int err, i;
160 
161 	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
162 	if (err) {
163 		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
164 		return err;
165 	}
166 
167 	rq_ctxt_block = cmdq_buf.buf;
168 	rq_ctxt = rq_ctxt_block->rq_ctxt;
169 
170 	hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
171 				num_rqs, func_to_io->max_qps);
172 	for (i = 0; i < num_rqs; i++) {
173 		qp = &func_to_io->qps[i];
174 
175 		hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
176 				      base_qpn + qp->q_id);
177 	}
178 
179 	cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);
180 
181 	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
182 				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
183 				     &out_param);
184 	if ((err) || (out_param != 0)) {
185 		dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
186 		err = -EFAULT;
187 	}
188 
189 	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
190 	return err;
191 }
192 
193 /**
194  * write_qp_ctxts - write the qp ctxt to HW
195  * @func_to_io: func to io channel that holds the IO components
196  * @base_qpn: first qp number
197  * @num_qps: number of qps to write
198  *
199  * Return 0 - Success, negative - Failure
200  **/
201 static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
202 			  u16 num_qps)
203 {
204 	return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
205 		write_rq_ctxts(func_to_io, base_qpn, num_qps));
206 }
207 
208 static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io,
209 					  enum hinic_qp_ctxt_type ctxt_type)
210 {
211 	struct hinic_hwif *hwif = func_to_io->hwif;
212 	struct hinic_clean_queue_ctxt *ctxt_block;
213 	struct pci_dev *pdev = hwif->pdev;
214 	struct hinic_cmdq_buf cmdq_buf;
215 	u64 out_param = 0;
216 	int err;
217 
218 	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
219 	if (err) {
220 		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
221 		return err;
222 	}
223 
224 	ctxt_block = cmdq_buf.buf;
225 	ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps;
226 	ctxt_block->cmdq_hdr.queue_type = ctxt_type;
227 	ctxt_block->cmdq_hdr.addr_offset = 0;
228 
229 	/* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
230 	ctxt_block->ctxt_size = 0x3;
231 
232 	hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
233 
234 	cmdq_buf.size = sizeof(*ctxt_block);
235 
236 	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
237 				     IO_CMD_CLEAN_QUEUE_CTXT,
238 				     &cmdq_buf, &out_param);
239 
240 	if (err || out_param) {
241 		dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n",
242 			err, out_param);
243 
244 		err = -EFAULT;
245 	}
246 
247 	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
248 
249 	return err;
250 }
251 
252 static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io)
253 {
254 	/* clean LRO/TSO context space */
255 	return (hinic_clean_queue_offload_ctxt(func_to_io,
256 					       HINIC_QP_CTXT_TYPE_SQ) ||
257 		hinic_clean_queue_offload_ctxt(func_to_io,
258 					       HINIC_QP_CTXT_TYPE_RQ));
259 }
260 
261 /**
262  * init_qp - Initialize a Queue Pair
263  * @func_to_io: func to io channel that holds the IO components
264  * @qp: pointer to the qp to initialize
265  * @q_id: the id of the qp
266  * @sq_msix_entry: msix entry for sq
267  * @rq_msix_entry: msix entry for rq
268  *
269  * Return 0 - Success, negative - Failure
270  **/
271 static int init_qp(struct hinic_func_to_io *func_to_io,
272 		   struct hinic_qp *qp, int q_id,
273 		   struct msix_entry *sq_msix_entry,
274 		   struct msix_entry *rq_msix_entry)
275 {
276 	struct hinic_hwif *hwif = func_to_io->hwif;
277 	struct pci_dev *pdev = hwif->pdev;
278 	void __iomem *db_base;
279 	int err;
280 
281 	qp->q_id = q_id;
282 
283 	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
284 				HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
285 				func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE);
286 	if (err) {
287 		dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
288 		return err;
289 	}
290 
291 	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
292 				HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
293 				func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
294 	if (err) {
295 		dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
296 		goto err_rq_alloc;
297 	}
298 
299 	db_base = get_db_area(func_to_io);
300 	if (IS_ERR(db_base)) {
301 		dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
302 		err = PTR_ERR(db_base);
303 		goto err_get_db;
304 	}
305 
306 	func_to_io->sq_db[q_id] = db_base;
307 
308 	err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
309 			    sq_msix_entry,
310 			    CI_ADDR(func_to_io->ci_addr_base, q_id),
311 			    CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
312 	if (err) {
313 		dev_err(&pdev->dev, "Failed to init SQ\n");
314 		goto err_sq_init;
315 	}
316 
317 	err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
318 			    rq_msix_entry);
319 	if (err) {
320 		dev_err(&pdev->dev, "Failed to init RQ\n");
321 		goto err_rq_init;
322 	}
323 
324 	return 0;
325 
326 err_rq_init:
327 	hinic_clean_sq(&qp->sq);
328 
329 err_sq_init:
330 	return_db_area(func_to_io, db_base);
331 
332 err_get_db:
333 	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
334 
335 err_rq_alloc:
336 	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
337 	return err;
338 }
339 
340 /**
341  * destroy_qp - Clean the resources of a Queue Pair
342  * @func_to_io: func to io channel that holds the IO components
343  * @qp: pointer to the qp to clean
344  **/
345 static void destroy_qp(struct hinic_func_to_io *func_to_io,
346 		       struct hinic_qp *qp)
347 {
348 	int q_id = qp->q_id;
349 
350 	hinic_clean_rq(&qp->rq);
351 	hinic_clean_sq(&qp->sq);
352 
353 	return_db_area(func_to_io, func_to_io->sq_db[q_id]);
354 
355 	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
356 	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
357 }
358 
359 /**
360  * hinic_io_create_qps - Create Queue Pairs
361  * @func_to_io: func to io channel that holds the IO components
362  * @base_qpn: base qp number
363  * @num_qps: number queue pairs to create
364  * @sq_msix_entry: msix entries for sq
365  * @rq_msix_entry: msix entries for rq
366  *
367  * Return 0 - Success, negative - Failure
368  **/
369 int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
370 			u16 base_qpn, int num_qps,
371 			struct msix_entry *sq_msix_entries,
372 			struct msix_entry *rq_msix_entries)
373 {
374 	struct hinic_hwif *hwif = func_to_io->hwif;
375 	struct pci_dev *pdev = hwif->pdev;
376 	size_t qps_size, wq_size, db_size;
377 	void *ci_addr_base;
378 	int i, j, err;
379 
380 	qps_size = num_qps * sizeof(*func_to_io->qps);
381 	func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
382 	if (!func_to_io->qps)
383 		return -ENOMEM;
384 
385 	wq_size = num_qps * sizeof(*func_to_io->sq_wq);
386 	func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
387 	if (!func_to_io->sq_wq) {
388 		err = -ENOMEM;
389 		goto err_sq_wq;
390 	}
391 
392 	wq_size = num_qps * sizeof(*func_to_io->rq_wq);
393 	func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
394 	if (!func_to_io->rq_wq) {
395 		err = -ENOMEM;
396 		goto err_rq_wq;
397 	}
398 
399 	db_size = num_qps * sizeof(*func_to_io->sq_db);
400 	func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
401 	if (!func_to_io->sq_db) {
402 		err = -ENOMEM;
403 		goto err_sq_db;
404 	}
405 
406 	ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
407 					  &func_to_io->ci_dma_base,
408 					  GFP_KERNEL);
409 	if (!ci_addr_base) {
410 		dev_err(&pdev->dev, "Failed to allocate CI area\n");
411 		err = -ENOMEM;
412 		goto err_ci_base;
413 	}
414 
415 	func_to_io->ci_addr_base = ci_addr_base;
416 
417 	for (i = 0; i < num_qps; i++) {
418 		err = init_qp(func_to_io, &func_to_io->qps[i], i,
419 			      &sq_msix_entries[i], &rq_msix_entries[i]);
420 		if (err) {
421 			dev_err(&pdev->dev, "Failed to create QP %d\n", i);
422 			goto err_init_qp;
423 		}
424 	}
425 
426 	err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
427 	if (err) {
428 		dev_err(&pdev->dev, "Failed to init QP ctxts\n");
429 		goto err_write_qp_ctxts;
430 	}
431 
432 	err = hinic_clean_qp_offload_ctxt(func_to_io);
433 	if (err) {
434 		dev_err(&pdev->dev, "Failed to clean QP contexts space\n");
435 		goto err_write_qp_ctxts;
436 	}
437 
438 	return 0;
439 
440 err_write_qp_ctxts:
441 err_init_qp:
442 	for (j = 0; j < i; j++)
443 		destroy_qp(func_to_io, &func_to_io->qps[j]);
444 
445 	dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
446 			  func_to_io->ci_addr_base, func_to_io->ci_dma_base);
447 
448 err_ci_base:
449 	devm_kfree(&pdev->dev, func_to_io->sq_db);
450 
451 err_sq_db:
452 	devm_kfree(&pdev->dev, func_to_io->rq_wq);
453 
454 err_rq_wq:
455 	devm_kfree(&pdev->dev, func_to_io->sq_wq);
456 
457 err_sq_wq:
458 	devm_kfree(&pdev->dev, func_to_io->qps);
459 	return err;
460 }
461 
462 /**
463  * hinic_io_destroy_qps - Destroy the IO Queue Pairs
464  * @func_to_io: func to io channel that holds the IO components
465  * @num_qps: number queue pairs to destroy
466  **/
467 void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
468 {
469 	struct hinic_hwif *hwif = func_to_io->hwif;
470 	struct pci_dev *pdev = hwif->pdev;
471 	size_t ci_table_size;
472 	int i;
473 
474 	ci_table_size = CI_TABLE_SIZE(num_qps);
475 
476 	for (i = 0; i < num_qps; i++)
477 		destroy_qp(func_to_io, &func_to_io->qps[i]);
478 
479 	dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
480 			  func_to_io->ci_dma_base);
481 
482 	devm_kfree(&pdev->dev, func_to_io->sq_db);
483 
484 	devm_kfree(&pdev->dev, func_to_io->rq_wq);
485 	devm_kfree(&pdev->dev, func_to_io->sq_wq);
486 
487 	devm_kfree(&pdev->dev, func_to_io->qps);
488 }
489 
/**
 * hinic_set_wq_page_size - set the work queue page size for a function
 * @hwdev: the NIC HW device
 * @func_idx: function to configure
 * @page_size: page size in bytes; encoded for HW as log2(page_size >> 12)
 *	       by HINIC_PAGE_SIZE_HW, so it is assumed to be a power of
 *	       two >= 4K — callers pass HINIC_DEFAULT_WQ_PAGE_SIZE or
 *	       HINIC_HW_WQ_PAGE_SIZE
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
			   u32 page_size)
{
	struct hinic_wq_page_size page_size_info = {0};
	u16 out_size = sizeof(page_size_info);
	struct hinic_pfhwdev *pfhwdev;
	int err;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	page_size_info.func_idx = func_idx;
	page_size_info.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
	page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);

	/* synchronous request; the reply is written back into
	 * page_size_info (same buffer used for both directions)
	 */
	err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
				HINIC_COMM_CMD_PAGESIZE_SET, &page_size_info,
				sizeof(page_size_info), &page_size_info,
				&out_size, HINIC_MGMT_MSG_SYNC);
	if (err || !out_size || page_size_info.status) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n",
			err, page_size_info.status, out_size);
		return -EFAULT;
	}

	return 0;
}
516 
517 /**
518  * hinic_io_init - Initialize the IO components
519  * @func_to_io: func to io channel that holds the IO components
520  * @hwif: HW interface for accessing IO
521  * @max_qps: maximum QPs in HW
522  * @num_ceqs: number completion event queues
523  * @ceq_msix_entries: msix entries for ceqs
524  *
525  * Return 0 - Success, negative - Failure
526  **/
527 int hinic_io_init(struct hinic_func_to_io *func_to_io,
528 		  struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
529 		  struct msix_entry *ceq_msix_entries)
530 {
531 	struct pci_dev *pdev = hwif->pdev;
532 	enum hinic_cmdq_type cmdq, type;
533 	void __iomem *db_area;
534 	int err;
535 
536 	func_to_io->hwif = hwif;
537 	func_to_io->qps = NULL;
538 	func_to_io->max_qps = max_qps;
539 	func_to_io->ceqs.hwdev = func_to_io->hwdev;
540 
541 	err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
542 			      HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
543 			      ceq_msix_entries);
544 	if (err) {
545 		dev_err(&pdev->dev, "Failed to init CEQs\n");
546 		return err;
547 	}
548 
549 	err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
550 	if (err) {
551 		dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
552 		goto err_wqs_alloc;
553 	}
554 
555 	func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
556 	if (!func_to_io->db_base) {
557 		dev_err(&pdev->dev, "Failed to remap IO DB area\n");
558 		err = -ENOMEM;
559 		goto err_db_ioremap;
560 	}
561 
562 	init_db_area_idx(&func_to_io->free_db_area);
563 
564 	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
565 		db_area = get_db_area(func_to_io);
566 		if (IS_ERR(db_area)) {
567 			dev_err(&pdev->dev, "Failed to get cmdq db area\n");
568 			err = PTR_ERR(db_area);
569 			goto err_db_area;
570 		}
571 
572 		func_to_io->cmdq_db_area[cmdq] = db_area;
573 	}
574 
575 	err = hinic_set_wq_page_size(func_to_io->hwdev,
576 				     HINIC_HWIF_FUNC_IDX(hwif),
577 				     HINIC_DEFAULT_WQ_PAGE_SIZE);
578 	if (err) {
579 		dev_err(&func_to_io->hwif->pdev->dev, "Failed to set wq page size\n");
580 		goto init_wq_pg_size_err;
581 	}
582 
583 	err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
584 			       func_to_io->cmdq_db_area);
585 	if (err) {
586 		dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
587 		goto err_init_cmdqs;
588 	}
589 
590 	return 0;
591 
592 err_init_cmdqs:
593 	if (!HINIC_IS_VF(func_to_io->hwif))
594 		hinic_set_wq_page_size(func_to_io->hwdev,
595 				       HINIC_HWIF_FUNC_IDX(hwif),
596 				       HINIC_HW_WQ_PAGE_SIZE);
597 init_wq_pg_size_err:
598 err_db_area:
599 	for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
600 		return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
601 
602 	iounmap(func_to_io->db_base);
603 
604 err_db_ioremap:
605 	hinic_wqs_free(&func_to_io->wqs);
606 
607 err_wqs_alloc:
608 	hinic_ceqs_free(&func_to_io->ceqs);
609 	return err;
610 }
611 
612 /**
613  * hinic_io_free - Free the IO components
614  * @func_to_io: func to io channel that holds the IO components
615  **/
616 void hinic_io_free(struct hinic_func_to_io *func_to_io)
617 {
618 	enum hinic_cmdq_type cmdq;
619 
620 	hinic_free_cmdqs(&func_to_io->cmdqs);
621 
622 	if (!HINIC_IS_VF(func_to_io->hwif))
623 		hinic_set_wq_page_size(func_to_io->hwdev,
624 				       HINIC_HWIF_FUNC_IDX(func_to_io->hwif),
625 				       HINIC_HW_WQ_PAGE_SIZE);
626 
627 	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
628 		return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
629 
630 	iounmap(func_to_io->db_base);
631 	hinic_wqs_free(&func_to_io->wqs);
632 	hinic_ceqs_free(&func_to_io->ceqs);
633 }
634