// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright (c) 2020 Marvell International Ltd. */

#include <linux/dma-mapping.h>
#include <linux/qed/qed_chain.h>
#include <linux/vmalloc.h>

#include "qed_dev_api.h"

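/* Free a chain allocated in next-ptr mode. The last element of each page is
 * a struct qed_chain_next holding the virtual and DMA addresses of the
 * following page, so those are read before the current page is released.
 * The walk stops early on a NULL page pointer (partially built chain).
 */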
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct qed_chain_next *next;
	dma_addr_t phys, phys_next;
	void *virt, *virt_next;
	u32 size, i;

	size = chain->elem_size * chain->usable_per_page;
	virt = chain->p_virt_addr;
	phys = chain->p_phys_addr;

	for (i = 0; i < chain->page_cnt; i++) {
		if (!virt)
			break;

		next = virt + size;
		virt_next = next->next_virt;
		phys_next = HILO_DMA_REGPAIR(next->next_phys);

		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys);

		virt = virt_next;
		phys = phys_next;
	}
}

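/* Free the single DMA-coherent page backing a SINGLE-mode chain, if any. */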
static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	if (!chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
			  chain->p_virt_addr, chain->p_phys_addr);
}

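/* Free a PBL-mode chain: release every data page recorded in the address
 * table, then the PBL table itself (unless it was supplied externally), and
 * finally the vzalloc()'ed address table.
 */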
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *entry;
	u32 i;

	if (!chain->pbl.pp_addr_tbl)
		return;

	for (i = 0; i < chain->page_cnt; i++) {
		entry = chain->pbl.pp_addr_tbl + i;
		if (!entry->virt_addr)
			break;

		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
				  entry->dma_map);
	}

	if (!chain->b_external_pbl)
		dma_free_coherent(dev, chain->pbl_sp.table_size,
				  chain->pbl_sp.table_virt,
				  chain->pbl_sp.table_phys);

	vfree(chain->pbl.pp_addr_tbl);
	chain->pbl.pp_addr_tbl = NULL;
}

/**
 * qed_chain_free() - Free chain DMA memory.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
{
	switch (chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, chain);
		break;
	default:
		return;
	}

	qed_chain_init_mem(chain, NULL, 0);
}

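/* Check that the chain size implied by @page_cnt and @elem_size fits the
 * producer/consumer counter width selected by @cnt_type.
 */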
static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     enum qed_chain_cnt_type cnt_type,
			     size_t elem_size, u32 page_cnt)
{
	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding the requested number of elements up to whole pages
	 * and taking the unusable elements (next-ptr elements) into account.
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of u32 type.
	 */
	switch (cnt_type) {
	case QED_CHAIN_CNT_TYPE_U16:
		if (chain_size > U16_MAX + 1)
			break;

		return 0;
	case QED_CHAIN_CNT_TYPE_U32:
		if (chain_size > U32_MAX)
			break;

		return 0;
	default:
		return -EINVAL;
	}

	DP_NOTICE(cdev,
		  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
		  chain_size);

	return -EINVAL;
}

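/* Allocate one DMA-coherent page per chain page and link the pages through
 * their next-ptr elements. The last page is linked back to the first so the
 * chain forms a ring.
 */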
static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	void *virt, *virt_prev = NULL;
	dma_addr_t phys;
	u32 i;

	for (i = 0; i < chain->page_cnt; i++) {
		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		} else {
			qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
						     phys);
		}

		virt_prev = virt;
	}

	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
				     chain->p_phys_addr);

	return 0;
}

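/* Allocate the single DMA-coherent page backing a SINGLE-mode chain. */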
static int qed_chain_alloc_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
				  &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	qed_chain_init_mem(chain, virt, phys);
	qed_chain_reset(chain);

	return 0;
}

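/* Allocate a PBL-mode chain: the data pages, a virtual-address table for
 * them, and (unless an external PBL was supplied via @ext_pbl) the
 * DMA-coherent PBL table holding each page's physical address. On failure,
 * partial allocations are left for qed_chain_free() in the caller to undo.
 */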
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain,
			       struct qed_chain_ext_pbl *ext_pbl)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *addr_tbl;
	dma_addr_t phys, pbl_phys;
	__le64 *pbl_virt;
	u32 page_cnt, i;
	size_t size;
	void *virt;

	page_cnt = chain->page_cnt;

	size = array_size(page_cnt, sizeof(*addr_tbl));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	addr_tbl = vzalloc(size);
	if (!addr_tbl)
		return -ENOMEM;

	chain->pbl.pp_addr_tbl = addr_tbl;

	if (ext_pbl) {
		size = 0;
		pbl_virt = ext_pbl->p_pbl_virt;
		pbl_phys = ext_pbl->p_pbl_phys;

		chain->b_external_pbl = true;
	} else {
		size = array_size(page_cnt, sizeof(*pbl_virt));
		if (unlikely(size == SIZE_MAX))
			return -EOVERFLOW;

		pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys,
					      GFP_KERNEL);
	}

	if (!pbl_virt)
		return -ENOMEM;

	chain->pbl_sp.table_virt = pbl_virt;
	chain->pbl_sp.table_phys = pbl_phys;
	chain->pbl_sp.table_size = size;

	for (i = 0; i < page_cnt; i++) {
		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		}

		/* Fill the PBL table with the physical address of the page */
		pbl_virt[i] = cpu_to_le64(phys);

		/* Keep the virtual address of the page */
		addr_tbl[i].virt_addr = virt;
		addr_tbl[i].dma_map = phys;
	}

	return 0;
}

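/**
 * qed_chain_alloc() - Allocate and initialize a chain.
 *
 * @cdev: Main device structure.
 * @intended_use: Intended producer/consumer usage of the chain.
 * @mode: Page chaining mode (next-ptr, single page, or PBL).
 * @cnt_type: Width of the chain's element counters (u16 or u32).
 * @num_elems: Requested number of usable elements.
 * @elem_size: Size of a single element in bytes.
 * @chain: Chain to allocate and initialize.
 * @ext_pbl: Optional pre-allocated external PBL table (PBL mode only).
 *
 * An illustrative call (a sketch only; the use mode, counter type and the
 * element type "struct my_elem" are assumptions, not taken from this file):
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME,
 *			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
 *			     num_elems, sizeof(struct my_elem), &chain, NULL);
 *
 * Return: 0 on success, negative errno otherwise.
 */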
int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    enum qed_chain_cnt_type cnt_type,
		    u32 num_elems,
		    size_t elem_size,
		    struct qed_chain *chain,
		    struct qed_chain_ext_pbl *ext_pbl)
{
	u32 page_cnt;
	int rc;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
	}

	qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode,
			      cnt_type);

	switch (mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, chain, ext_pbl);
		break;
	default:
		return -EINVAL;
	}

	if (!rc)
		return 0;

	qed_chain_free(cdev, chain);

	return rc;
}