// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright (c) 2020 Marvell International Ltd. */

#include <linux/dma-mapping.h>
#include <linux/qed/qed_chain.h>
#include <linux/vmalloc.h>

#include "qed_dev_api.h"

static void qed_chain_init(struct qed_chain *chain,
			   const struct qed_chain_init_params *params,
			   u32 page_cnt)
{
	memset(chain, 0, sizeof(*chain));

	chain->elem_size = params->elem_size;
	chain->intended_use = params->intended_use;
	chain->mode = params->mode;
	chain->cnt_type = params->cnt_type;

	chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size);
	chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
						       params->mode);
	chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
						       params->mode);

	chain->elem_per_page_mask = chain->elem_per_page - 1;
	chain->next_page_mask = chain->usable_per_page &
				chain->elem_per_page_mask;

	chain->page_cnt = page_cnt;
	chain->capacity = chain->usable_per_page * page_cnt;
	chain->size = chain->elem_per_page * page_cnt;

	if (params->ext_pbl_virt) {
		chain->pbl_sp.table_virt = params->ext_pbl_virt;
		chain->pbl_sp.table_phys = params->ext_pbl_phys;

		chain->b_external_pbl = true;
	}
}
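
/* Worked example of the indexing math above; a sketch only, assuming the
 * 4 KiB QED_CHAIN_PAGE_SIZE and hypothetical 32-byte elements:
 *
 *	elem_per_page      = 4096 / 32 = 128
 *	elem_per_page_mask = 128 - 1   = 127 (0x7f)
 *
 * In QED_CHAIN_MODE_NEXT_PTR, one trailing element per page is reserved
 * for the next-page pointer, so usable_per_page = 127 and
 * next_page_mask = 127 & 127 = 127. In single/PBL modes nothing is
 * reserved, so usable_per_page = 128 and next_page_mask = 128 & 127 = 0.
 */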

static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
					 void *virt_curr, void *virt_next,
					 dma_addr_t phys_next)
{
	struct qed_chain_next *next;
	u32 size;

	size = chain->elem_size * chain->usable_per_page;
	next = virt_curr + size;

	DMA_REGPAIR_LE(next->next_phys, phys_next);
	next->next_virt = virt_next;
}
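
/* Layout note (illustrative, same assumptions as the example above): with
 * 32-byte elements and 4 KiB pages, the struct qed_chain_next computed
 * here lands at byte offset 32 * 127 = 4064, i.e. inside the reserved
 * last element slot of the page.
 */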

static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
			       dma_addr_t phys_addr)
{
	chain->p_virt_addr = virt_addr;
	chain->p_phys_addr = phys_addr;
}

static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct qed_chain_next *next;
	dma_addr_t phys, phys_next;
	void *virt, *virt_next;
	u32 size, i;

	size = chain->elem_size * chain->usable_per_page;
	virt = chain->p_virt_addr;
	phys = chain->p_phys_addr;

	for (i = 0; i < chain->page_cnt; i++) {
		if (!virt)
			break;

		next = virt + size;
		virt_next = next->next_virt;
		phys_next = HILO_DMA_REGPAIR(next->next_phys);

		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys);

		virt = virt_next;
		phys = phys_next;
	}
}

static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	if (!chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
			  chain->p_virt_addr, chain->p_phys_addr);
}

static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *entry;
	u32 i;

	if (!chain->pbl.pp_addr_tbl)
		return;

	for (i = 0; i < chain->page_cnt; i++) {
		entry = chain->pbl.pp_addr_tbl + i;
		if (!entry->virt_addr)
			break;

		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
				  entry->dma_map);
	}

	if (!chain->b_external_pbl)
		dma_free_coherent(dev, chain->pbl_sp.table_size,
				  chain->pbl_sp.table_virt,
				  chain->pbl_sp.table_phys);

	vfree(chain->pbl.pp_addr_tbl);
	chain->pbl.pp_addr_tbl = NULL;
}

/**
 * qed_chain_free() - Free chain DMA memory.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
{
	switch (chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, chain);
		break;
	default:
		return;
	}

	qed_chain_init_mem(chain, NULL, 0);
}

static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     const struct qed_chain_init_params *params,
			     u32 page_cnt)
{
	u64 chain_size;

	chain_size = ELEMS_PER_PAGE(params->elem_size);
	chain_size *= page_cnt;

	if (!chain_size)
		return -EINVAL;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested number of elements to pages, and
	 * after taking into account the unusable elements (next-ptr
	 * elements). The size of a "u16" chain can be (U16_MAX + 1) since the
	 * chain size/capacity fields are of u32 type.
	 */
	switch (params->cnt_type) {
	case QED_CHAIN_CNT_TYPE_U16:
		if (chain_size > U16_MAX + 1)
			break;

		return 0;
	case QED_CHAIN_CNT_TYPE_U32:
		if (chain_size > U32_MAX)
			break;

		return 0;
	default:
		return -EINVAL;
	}

	DP_NOTICE(cdev,
		  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
		  chain_size);

	return -EINVAL;
}
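
/* Boundary example for the check above (illustrative, assuming the 4 KiB
 * QED_CHAIN_PAGE_SIZE): with 32-byte elements, ELEMS_PER_PAGE() is 128,
 * so requesting U16_MAX usable elements in next-ptr mode (127 usable per
 * page) rounds up to 517 pages and a total size of 517 * 128 = 66176
 * elements. That exceeds U16_MAX + 1 = 65536, so a u16 chain of that
 * length is rejected here.
 */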

static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	void *virt, *virt_prev = NULL;
	dma_addr_t phys;
	u32 i;

	for (i = 0; i < chain->page_cnt; i++) {
		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		} else {
			qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
						     phys);
		}

		virt_prev = virt;
	}

	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
				     chain->p_phys_addr);

	return 0;
}

static int qed_chain_alloc_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
				  &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	qed_chain_init_mem(chain, virt, phys);
	qed_chain_reset(chain);

	return 0;
}

static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *addr_tbl;
	dma_addr_t phys, pbl_phys;
	__le64 *pbl_virt;
	u32 page_cnt, i;
	size_t size;
	void *virt;

	page_cnt = chain->page_cnt;

	size = array_size(page_cnt, sizeof(*addr_tbl));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	addr_tbl = vzalloc(size);
	if (!addr_tbl)
		return -ENOMEM;

	chain->pbl.pp_addr_tbl = addr_tbl;

	/* An external PBL was provided by the caller; use its table for the
	 * page addresses written below.
	 */
	if (chain->b_external_pbl) {
		pbl_virt = chain->pbl_sp.table_virt;
		goto alloc_pages;
	}

	size = array_size(page_cnt, sizeof(*pbl_virt));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
	if (!pbl_virt)
		return -ENOMEM;

	chain->pbl_sp.table_virt = pbl_virt;
	chain->pbl_sp.table_phys = pbl_phys;
	chain->pbl_sp.table_size = size;

alloc_pages:
	for (i = 0; i < page_cnt; i++) {
		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		}

		/* Fill the PBL table with the physical address of the page */
		pbl_virt[i] = cpu_to_le64(phys);

		/* Keep the virtual address of the page */
		addr_tbl[i].virt_addr = virt;
		addr_tbl[i].dma_map = phys;
	}

	return 0;
}
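
/* Usage sketch for an external PBL (illustrative only; "ext_virt" and
 * "ext_phys" are hypothetical caller-owned buffers). Passing them via the
 * init params makes qed_chain_init() set b_external_pbl, so this function
 * allocates only the data pages and qed_chain_free() leaves the table to
 * its owner:
 *
 *	struct qed_chain_init_params params = {
 *		.mode		= QED_CHAIN_MODE_PBL,
 *		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
 *		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 *		.num_elems	= 4096,
 *		.elem_size	= 8,
 *		.ext_pbl_virt	= ext_virt,
 *		.ext_pbl_phys	= ext_phys,
 *	};
 */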

/**
 * qed_chain_alloc() - Allocate and initialize a chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to be processed.
 * @params: Chain initialization parameters.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
		    struct qed_chain_init_params *params)
{
	u32 page_cnt;
	int rc;

	if (params->mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
					      params->elem_size,
					      params->mode);

	rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  params->intended_use, params->mode, params->cnt_type,
			  params->num_elems, params->elem_size);
		return rc;
	}

	qed_chain_init(chain, params, page_cnt);

	switch (params->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, chain);
		break;
	default:
		return -EINVAL;
	}

	if (!rc)
		return 0;

	qed_chain_free(cdev, chain);

	return rc;
}
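
/* Usage sketch (illustrative only; the ring geometry is made up):
 * allocating a PBL-mode chain of 256 64-byte elements with 16-bit
 * indices, then releasing it on teardown:
 *
 *	struct qed_chain_init_params params = {
 *		.mode		= QED_CHAIN_MODE_PBL,
 *		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 *		.num_elems	= 256,
 *		.elem_size	= 64,
 *	};
 *	int rc = qed_chain_alloc(cdev, &chain, &params);
 *
 *	if (!rc)
 *		... produce/consume via the qed_chain_* accessors ...
 *
 *	qed_chain_free(cdev, &chain);
 */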