// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright (c) 2020 Marvell International Ltd. */

#include <linux/dma-mapping.h>
#include <linux/qed/qed_chain.h>
#include <linux/vmalloc.h>

#include "qed_dev_api.h"

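/* Fill in the chain parameters that do not depend on the page allocations:
 * element/page geometry, the masks derived from it, and an optional
 * externally provided PBL table.
 */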
static void qed_chain_init_params(struct qed_chain *chain,
				  u32 page_cnt, u8 elem_size,
				  enum qed_chain_use_mode intended_use,
				  enum qed_chain_mode mode,
				  enum qed_chain_cnt_type cnt_type,
				  const struct qed_chain_ext_pbl *ext_pbl)
{
	memset(chain, 0, sizeof(*chain));

	chain->elem_size = elem_size;
	chain->intended_use = intended_use;
	chain->mode = mode;
	chain->cnt_type = cnt_type;

	chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);

	chain->elem_per_page_mask = chain->elem_per_page - 1;
	chain->next_page_mask = chain->usable_per_page &
				chain->elem_per_page_mask;

	chain->page_cnt = page_cnt;
	chain->capacity = chain->usable_per_page * page_cnt;
	chain->size = chain->elem_per_page * page_cnt;

	if (ext_pbl && ext_pbl->p_pbl_virt) {
		chain->pbl_sp.table_virt = ext_pbl->p_pbl_virt;
		chain->pbl_sp.table_phys = ext_pbl->p_pbl_phys;

		chain->b_external_pbl = true;
	}
}

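/* Write the next-ptr element at the end of the current page so it points to
 * the following page, both by virtual and by DMA address.
 */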
static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
					 void *virt_curr, void *virt_next,
					 dma_addr_t phys_next)
{
	struct qed_chain_next *next;
	u32 size;

	size = chain->elem_size * chain->usable_per_page;
	next = virt_curr + size;

	DMA_REGPAIR_LE(next->next_phys, phys_next);
	next->next_virt = virt_next;
}

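/* Record the virtual and DMA addresses of the first chain page. */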
static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
			       dma_addr_t phys_addr)
{
	chain->p_virt_addr = virt_addr;
	chain->p_phys_addr = phys_addr;
}

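/* Free a next-ptr chain by walking the pages through their embedded next-ptr
 * elements and releasing each DMA-coherent page.
 */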
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct qed_chain_next *next;
	dma_addr_t phys, phys_next;
	void *virt, *virt_next;
	u32 size, i;

	size = chain->elem_size * chain->usable_per_page;
	virt = chain->p_virt_addr;
	phys = chain->p_phys_addr;

	for (i = 0; i < chain->page_cnt; i++) {
		if (!virt)
			break;

		next = virt + size;
		virt_next = next->next_virt;
		phys_next = HILO_DMA_REGPAIR(next->next_phys);

		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys);

		virt = virt_next;
		phys = phys_next;
	}
}

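/* Free the single DMA-coherent page backing a single-page chain. */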
static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	if (!chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
			  chain->p_virt_addr, chain->p_phys_addr);
}

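/* Free the chain pages tracked by the address table, then the PBL table
 * itself (unless it was provided externally) and finally the address table.
 */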
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *entry;
	u32 i;

	if (!chain->pbl.pp_addr_tbl)
		return;

	for (i = 0; i < chain->page_cnt; i++) {
		entry = chain->pbl.pp_addr_tbl + i;
		if (!entry->virt_addr)
			break;

		dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr,
				  entry->dma_map);
	}

	if (!chain->b_external_pbl)
		dma_free_coherent(dev, chain->pbl_sp.table_size,
				  chain->pbl_sp.table_virt,
				  chain->pbl_sp.table_phys);

	vfree(chain->pbl.pp_addr_tbl);
	chain->pbl.pp_addr_tbl = NULL;
}

/**
 * qed_chain_free() - Free chain DMA memory.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
{
	switch (chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, chain);
		break;
	default:
		return;
	}

	qed_chain_init_mem(chain, NULL, 0);
}

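/* Verify that the resulting chain size fits the requested counter width. */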
static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     enum qed_chain_cnt_type cnt_type,
			     size_t elem_size, u32 page_cnt)
{
	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested number of elements to pages, and
	 * after taking into account the unusable (next-ptr) elements.
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of u32 type.
	 */
	switch (cnt_type) {
	case QED_CHAIN_CNT_TYPE_U16:
		if (chain_size > U16_MAX + 1)
			break;

		return 0;
	case QED_CHAIN_CNT_TYPE_U32:
		if (chain_size > U32_MAX)
			break;

		return 0;
	default:
		return -EINVAL;
	}

	DP_NOTICE(cdev,
		  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
		  chain_size);

	return -EINVAL;
}

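/* Allocate one DMA-coherent page per chain page and link the pages through
 * their next-ptr elements, closing the ring back to the first page.
 */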
static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	void *virt, *virt_prev = NULL;
	dma_addr_t phys;
	u32 i;

	for (i = 0; i < chain->page_cnt; i++) {
		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		} else {
			qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
						     phys);
		}

		virt_prev = virt;
	}

	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
				     chain->p_phys_addr);

	return 0;
}

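/* Allocate the single DMA-coherent page backing a single-page chain. */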
static int qed_chain_alloc_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE,
				  &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	qed_chain_init_mem(chain, virt, phys);
	qed_chain_reset(chain);

	return 0;
}

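/* Allocate a PBL-mode chain: a virtual address table for the pages, the
 * DMA-coherent PBL table holding their physical addresses (unless an
 * external PBL was provided) and the chain pages themselves.
 */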
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *addr_tbl;
	dma_addr_t phys, pbl_phys;
	__le64 *pbl_virt;
	u32 page_cnt, i;
	size_t size;
	void *virt;

	page_cnt = chain->page_cnt;

	size = array_size(page_cnt, sizeof(*addr_tbl));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	addr_tbl = vzalloc(size);
	if (!addr_tbl)
		return -ENOMEM;

	chain->pbl.pp_addr_tbl = addr_tbl;

	if (chain->b_external_pbl) {
		/* Reuse the externally provided PBL table for filling in the
		 * page addresses below instead of leaving pbl_virt
		 * uninitialized.
		 */
		pbl_virt = chain->pbl_sp.table_virt;
		goto alloc_pages;
	}

	size = array_size(page_cnt, sizeof(*pbl_virt));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
	if (!pbl_virt)
		return -ENOMEM;

	chain->pbl_sp.table_virt = pbl_virt;
	chain->pbl_sp.table_phys = pbl_phys;
	chain->pbl_sp.table_size = size;

alloc_pages:
	for (i = 0; i < page_cnt; i++) {
		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		}

		/* Fill the PBL table with the physical address of the page */
		pbl_virt[i] = cpu_to_le64(phys);

		/* Keep the virtual address of the page */
		addr_tbl[i].virt_addr = virt;
		addr_tbl[i].dma_map = phys;
	}

	return 0;
}

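/* A hypothetical caller (illustration only; the ring geometry below is made
 * up and not taken from this driver) allocating a PBL-backed ring of 256
 * eight-byte descriptors with 16-bit producer/consumer counters could do:
 *
 *	struct qed_chain ring;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
 *			     256, 8, &ring, NULL);
 *	if (rc)
 *		return rc;
 *
 *	... produce/consume elements ...
 *
 *	qed_chain_free(cdev, &ring);
 */

/**
 * qed_chain_alloc() - Allocate and initialize a chain.
 *
 * @cdev: Main device structure.
 * @intended_use: Produce/consume mode of the chain.
 * @mode: Page linking mode (next-ptr, single page or PBL).
 * @cnt_type: Width of the producer/consumer counters (u16 or u32).
 * @num_elems: Requested number of usable elements.
 * @elem_size: Size of a single element in bytes.
 * @chain: Chain to allocate and initialize.
 * @ext_pbl: Optional externally allocated PBL table (may be NULL).
 *
 * Partially allocated resources are freed via qed_chain_free() on failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */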
int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    enum qed_chain_cnt_type cnt_type,
		    u32 num_elems,
		    size_t elem_size,
		    struct qed_chain *chain,
		    struct qed_chain_ext_pbl *ext_pbl)
{
	u32 page_cnt;
	int rc;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
	}

	qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode,
			      cnt_type, ext_pbl);

	switch (mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, chain);
		break;
	default:
		return -EINVAL;
	}

	if (!rc)
		return 0;

	qed_chain_free(cdev, chain);

	return rc;
}