xref: /openbmc/linux/drivers/net/ipa/ipa_mem.c (revision 3128aae8)
1ba764c4dSAlex Elder // SPDX-License-Identifier: GPL-2.0
2ba764c4dSAlex Elder 
3ba764c4dSAlex Elder /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4ba764c4dSAlex Elder  * Copyright (C) 2019-2020 Linaro Ltd.
5ba764c4dSAlex Elder  */
6ba764c4dSAlex Elder 
#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"
21ba764c4dSAlex Elder 
22ba764c4dSAlex Elder /* "Canary" value placed between memory regions to detect overflow */
23ba764c4dSAlex Elder #define IPA_MEM_CANARY_VAL		cpu_to_le32(0xdeadbeef)
24ba764c4dSAlex Elder 
25ba764c4dSAlex Elder /* Add an immediate command to a transaction that zeroes a memory region */
26ba764c4dSAlex Elder static void
27ba764c4dSAlex Elder ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
28ba764c4dSAlex Elder {
29ba764c4dSAlex Elder 	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
30ba764c4dSAlex Elder 	dma_addr_t addr = ipa->zero_addr;
31ba764c4dSAlex Elder 
32ba764c4dSAlex Elder 	if (!mem->size)
33ba764c4dSAlex Elder 		return;
34ba764c4dSAlex Elder 
35ba764c4dSAlex Elder 	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
36ba764c4dSAlex Elder }
37ba764c4dSAlex Elder 
/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa:	IPA pointer
 *
 * Set up the shared memory regions in IPA local memory.  This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.
 *
 * This function performs the initial setup of this memory.  If the modem
 * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
 *
 * The AP informs the modem where its portions of memory are located
 * in a QMI exchange that occurs at modem startup.
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	struct gsi_trans *trans;
	u32 offset;
	u16 size;

	/* Get a transaction to define the header memory region and to zero
	 * the processing context and modem memory regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory.  The modem and AP header
	 * regions are contiguous, and initialized together.
	 */
	offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
	size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
	size += ipa->mem[IPA_MEM_AP_HEADER].size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	iowrite32(ipa->mem_offset + offset,
		  ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);

	return 0;
}
92ba764c4dSAlex Elder 
/* Inverse of ipa_mem_setup().  IPA-local memory requires no explicit
 * teardown, so this is currently a no-op.
 */
void ipa_mem_teardown(struct ipa *ipa)
{
	/* Nothing to do */
}
97ba764c4dSAlex Elder 
98ba764c4dSAlex Elder #ifdef IPA_VALIDATE
99ba764c4dSAlex Elder 
100ba764c4dSAlex Elder static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
101ba764c4dSAlex Elder {
102ba764c4dSAlex Elder 	const struct ipa_mem *mem = &ipa->mem[mem_id];
103ba764c4dSAlex Elder 	struct device *dev = &ipa->pdev->dev;
104ba764c4dSAlex Elder 	u16 size_multiple;
105ba764c4dSAlex Elder 
106ba764c4dSAlex Elder 	/* Other than modem memory, sizes must be a multiple of 8 */
107ba764c4dSAlex Elder 	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
108ba764c4dSAlex Elder 	if (mem->size % size_multiple)
109ba764c4dSAlex Elder 		dev_err(dev, "region %u size not a multiple of %u bytes\n",
110ba764c4dSAlex Elder 			mem_id, size_multiple);
111ba764c4dSAlex Elder 	else if (mem->offset % 8)
112ba764c4dSAlex Elder 		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
113ba764c4dSAlex Elder 	else if (mem->offset < mem->canary_count * sizeof(__le32))
114ba764c4dSAlex Elder 		dev_err(dev, "region %u offset too small for %hu canaries\n",
115ba764c4dSAlex Elder 			mem_id, mem->canary_count);
116ba764c4dSAlex Elder 	else if (mem->offset + mem->size > ipa->mem_size)
117ba764c4dSAlex Elder 		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
118ba764c4dSAlex Elder 			mem_id, ipa->mem_size);
119ba764c4dSAlex Elder 	else
120ba764c4dSAlex Elder 		return true;
121ba764c4dSAlex Elder 
122ba764c4dSAlex Elder 	return false;
123ba764c4dSAlex Elder }
124ba764c4dSAlex Elder 
125ba764c4dSAlex Elder #else /* !IPA_VALIDATE */
126ba764c4dSAlex Elder 
/* With validation compiled out, every region is assumed valid */
static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	return true;
}
131ba764c4dSAlex Elder 
#endif /* !IPA_VALIDATE */
133ba764c4dSAlex Elder 
/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa:	IPA pointer
 *
 * Reads the hardware's advertised shared memory location and size,
 * allocates a DMA buffer used as the source when zeroing regions,
 * validates every defined memory region, and writes "canary" values
 * before regions that request them.
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	enum ipa_mem_id mem_id;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;

	/* Check the advertised location and size of the shared memory area */
	val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);

	/* The fields in the register are in 8 byte units */
	ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
	mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);

	/* Make sure the end is within the region's mapped space; if the
	 * reported size disagrees with the mapped size, warn and use the
	 * smaller of the two.
	 */
	if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n",
			mem_size);
	} else if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	}

	/* Prealloc DMA memory for zeroing regions */
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* Verify each defined memory region is valid, and if indicated
	 * for the region, write "canary" values in the space prior to
	 * the region's base address.
	 */
	for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
		const struct ipa_mem *mem = &ipa->mem[mem_id];
		u16 canary_count;
		__le32 *canary;

		/* Validate all regions (even undefined ones) */
		if (!ipa_mem_valid(ipa, mem_id))
			goto err_dma_free;

		/* Skip over undefined regions */
		if (!mem->offset && !mem->size)
			continue;

		canary_count = mem->canary_count;
		if (!canary_count)
			continue;

		/* Write canary values in the space before the region,
		 * walking backward from the region's base address.
		 */
		canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}

	/* Make sure filter and route table memory regions are valid */
	if (!ipa_table_valid(ipa))
		goto err_dma_free;

	/* Validate memory-related properties relevant to immediate commands */
	if (!ipa_cmd_data_valid(ipa))
		goto err_dma_free;

	/* Verify the microcontroller ring alignment (0 is OK too) */
	if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}
223ba764c4dSAlex Elder 
224ba764c4dSAlex Elder /* Inverse of ipa_mem_config() */
225ba764c4dSAlex Elder void ipa_mem_deconfig(struct ipa *ipa)
226ba764c4dSAlex Elder {
227ba764c4dSAlex Elder 	struct device *dev = &ipa->pdev->dev;
228ba764c4dSAlex Elder 
229ba764c4dSAlex Elder 	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
230ba764c4dSAlex Elder 	ipa->zero_size = 0;
231ba764c4dSAlex Elder 	ipa->zero_virt = NULL;
232ba764c4dSAlex Elder 	ipa->zero_addr = 0;
233ba764c4dSAlex Elder }
234ba764c4dSAlex Elder 
/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa:	IPA pointer
 *
 * Zero regions of IPA-local memory used by the modem.  These are configured
 * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
 * restarts via SSR we need to re-initialize them.  A QMI message tells the
 * modem where to find regions of IPA local memory it needs to know about
 * (these included).
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);

	ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);

	gsi_trans_commit_wait(trans);

	return 0;
}
268ba764c4dSAlex Elder 
269ba764c4dSAlex Elder /* Perform memory region-related initialization */
2703128aae8SAlex Elder int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
271ba764c4dSAlex Elder {
272ba764c4dSAlex Elder 	struct device *dev = &ipa->pdev->dev;
273ba764c4dSAlex Elder 	struct resource *res;
274ba764c4dSAlex Elder 	int ret;
275ba764c4dSAlex Elder 
2763128aae8SAlex Elder 	if (mem_data->local_count > IPA_MEM_COUNT) {
277ba764c4dSAlex Elder 		dev_err(dev, "to many memory regions (%u > %u)\n",
2783128aae8SAlex Elder 			mem_data->local_count, IPA_MEM_COUNT);
279ba764c4dSAlex Elder 		return -EINVAL;
280ba764c4dSAlex Elder 	}
281ba764c4dSAlex Elder 
282ba764c4dSAlex Elder 	ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
283ba764c4dSAlex Elder 	if (ret) {
284ba764c4dSAlex Elder 		dev_err(dev, "error %d setting DMA mask\n", ret);
285ba764c4dSAlex Elder 		return ret;
286ba764c4dSAlex Elder 	}
287ba764c4dSAlex Elder 
288ba764c4dSAlex Elder 	res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
289ba764c4dSAlex Elder 					   "ipa-shared");
290ba764c4dSAlex Elder 	if (!res) {
291ba764c4dSAlex Elder 		dev_err(dev,
292ba764c4dSAlex Elder 			"DT error getting \"ipa-shared\" memory property\n");
293ba764c4dSAlex Elder 		return -ENODEV;
294ba764c4dSAlex Elder 	}
295ba764c4dSAlex Elder 
296ba764c4dSAlex Elder 	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
297ba764c4dSAlex Elder 	if (!ipa->mem_virt) {
298ba764c4dSAlex Elder 		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
299ba764c4dSAlex Elder 		return -ENOMEM;
300ba764c4dSAlex Elder 	}
301ba764c4dSAlex Elder 
302ba764c4dSAlex Elder 	ipa->mem_addr = res->start;
303ba764c4dSAlex Elder 	ipa->mem_size = resource_size(res);
304ba764c4dSAlex Elder 
305ba764c4dSAlex Elder 	/* The ipa->mem[] array is indexed by enum ipa_mem_id values */
3063128aae8SAlex Elder 	ipa->mem = mem_data->local;
307ba764c4dSAlex Elder 
308ba764c4dSAlex Elder 	return 0;
309ba764c4dSAlex Elder }
310ba764c4dSAlex Elder 
/* Inverse of ipa_mem_init(); unmaps the "ipa-shared" memory area */
void ipa_mem_exit(struct ipa *ipa)
{
	memunmap(ipa->mem_virt);
}
316