// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, NVIDIA CORPORATION.
 */

#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>

#include "bpmp-private.h"

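/*
 * Driver-private state for the Tegra186 BPMP transport: per-direction (TX/RX)
 * shared memory descriptors (either SRAM obtained from a gen_pool or a DRAM
 * carveout mapping) and the mailbox client/channel used to ring the doorbell.
 */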
struct tegra186_bpmp {
	struct tegra_bpmp *parent;

	struct {
		struct gen_pool *pool;
		union {
			void __iomem *sram;
			void *dram;
		};
		dma_addr_t phys;
	} tx, rx;

	struct {
		struct mbox_client client;
		struct mbox_chan *channel;
	} mbox;
};

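/*
 * Recover the tegra_bpmp instance from the embedded mailbox client using
 * container_of() on the driver-private structure.
 */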
static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	struct tegra186_bpmp *priv;

	priv = container_of(client, struct tegra186_bpmp, mbox.client);

	return priv->parent;
}

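/*
 * tegra186_bpmp_is_message_ready() and tegra186_bpmp_is_channel_free() peek
 * at the next IVC frame in the inbound and outbound direction, respectively.
 * On success the frame is cached in channel->ib/channel->ob for the generic
 * BPMP code to use; on failure the cached mapping is cleared.
 */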
static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
{
	int err;

	err = tegra_ivc_read_get_next_frame(channel->ivc, &channel->ib);
	if (err) {
		iosys_map_clear(&channel->ib);
		return false;
	}

	return true;
}

static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
{
	int err;

	err = tegra_ivc_write_get_next_frame(channel->ivc, &channel->ob);
	if (err) {
		iosys_map_clear(&channel->ob);
		return false;
	}

	return true;
}

static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
{
	return tegra_ivc_read_advance(channel->ivc);
}

static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
{
	return tegra_ivc_write_advance(channel->ivc);
}

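/*
 * Ring the doorbell by sending an empty message on the HSP mailbox channel.
 * The transfer is marked as completed immediately via mbox_client_txdone()
 * rather than waiting for a TX-done indication.
 */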
static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	int err;

	err = mbox_send_message(priv->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(priv->mbox.channel, 0);

	return 0;
}

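/*
 * IVC notification callback: invoked by the IVC layer whenever the remote
 * side needs to be signalled. Guard against being called before the mailbox
 * channel has been requested.
 */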
static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
{
	struct tegra_bpmp *bpmp = data;
	struct tegra186_bpmp *priv = bpmp->priv;

	if (WARN_ON(priv->mbox.channel == NULL))
		return;

	tegra186_bpmp_ring_doorbell(bpmp);
}

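/*
 * Set up one IVC channel: each channel occupies one IVC queue worth of space
 * (tegra_ivc_total_queue_size() of a single MSG_MIN_SZ-aligned frame) at a
 * per-channel offset within the shared TX and RX regions. SRAM is wrapped in
 * an iomem iosys_map, the DRAM carveout in a plain one.
 */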
static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				      struct tegra_bpmp *bpmp,
				      unsigned int index)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	size_t message_size, queue_size;
	struct iosys_map rx, tx;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	if (priv->rx.pool) {
		iosys_map_set_vaddr_iomem(&rx, priv->rx.sram + offset);
		iosys_map_set_vaddr_iomem(&tx, priv->tx.sram + offset);
	} else {
		iosys_map_set_vaddr(&rx, priv->rx.dram + offset);
		iosys_map_set_vaddr(&tx, priv->tx.dram + offset);
	}

	err = tegra_ivc_init(channel->ivc, NULL, &rx, priv->rx.phys + offset, &tx,
			     priv->tx.phys + offset, 1, message_size, tegra186_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}

static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
	/* reset the channel state */
	tegra_ivc_reset(channel->ivc);

	/* sync the channel state with BPMP */
	while (tegra_ivc_notified(channel->ivc))
		;
}

static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}

static void mbox_handle_rx(struct mbox_client *client, void *data)
{
	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);

	tegra_bpmp_handle_rx(bpmp);
}

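/*
 * Undo tegra186_bpmp_setup_channels(): clean up every channel that was
 * successfully initialized (threaded channels with a NULL bpmp pointer were
 * never set up) and, on the SRAM path, return the 4 KiB TX/RX allocations to
 * their gen_pools.
 */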
static void tegra186_bpmp_teardown_channels(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	unsigned int i;

	for (i = 0; i < bpmp->threaded.count; i++) {
		if (!bpmp->threaded_channels[i].bpmp)
			continue;

		tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
	}

	tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
	tegra186_bpmp_channel_cleanup(bpmp->tx_channel);

	if (priv->tx.pool) {
		gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);
		gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.sram, 4096);
	}
}

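/*
 * Use a DRAM carveout described by the "memory-region" property as shared
 * memory: the region must be at least 8 KiB, with the first 4 KiB used for
 * the TX direction and the following 4 KiB for RX. Returns -ENODEV if no
 * carveout is specified so the caller can fall back to SRAM.
 */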
static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	struct device_node *np;
	struct resource res;
	size_t size;
	int err;

	np = of_parse_phandle(bpmp->dev->of_node, "memory-region", 0);
	if (!np)
		return -ENODEV;

	err = of_address_to_resource(np, 0, &res);
	if (err < 0) {
		dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
		return err;
	}

	size = resource_size(&res);

	if (size < SZ_8K) {
		dev_warn(bpmp->dev, "DRAM region must be at least 8 KiB\n");
		return -EINVAL;
	}

	priv->tx.phys = res.start;
	priv->rx.phys = res.start + SZ_4K;

	priv->tx.dram = devm_memremap(bpmp->dev, priv->tx.phys, size,
				      MEMREMAP_WC);
	if (IS_ERR(priv->tx.dram)) {
		err = PTR_ERR(priv->tx.dram);
		dev_warn(bpmp->dev, "failed to map DRAM region: %d\n", err);
		return err;
	}

	priv->rx.dram = priv->tx.dram + SZ_4K;

	return 0;
}

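/*
 * Legacy SRAM path: allocate 4 KiB for each direction from the TX and RX
 * "shmem" gen_pools referenced by the device tree. Returns -EPROBE_DEFER if a
 * pool is not (yet) available, since the SRAM provider may not have probed.
 */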
static int tegra186_bpmp_sram_init(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;
	int err;

	priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
	if (!priv->tx.pool) {
		dev_err(bpmp->dev, "TX shmem pool not found\n");
		return -EPROBE_DEFER;
	}

	priv->tx.sram = (void __iomem *)gen_pool_dma_alloc(priv->tx.pool, 4096,
							   &priv->tx.phys);
	if (!priv->tx.sram) {
		dev_err(bpmp->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
	if (!priv->rx.pool) {
		dev_err(bpmp->dev, "RX shmem pool not found\n");
		err = -EPROBE_DEFER;
		goto free_tx;
	}

	priv->rx.sram = (void __iomem *)gen_pool_dma_alloc(priv->rx.pool, 4096,
							   &priv->rx.phys);
	if (!priv->rx.sram) {
		dev_err(bpmp->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	return 0;

free_tx:
	gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);

	return err;
}

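/*
 * Set up the shared memory (preferring the DRAM carveout and falling back to
 * SRAM only when no "memory-region" is specified) and then initialize the TX,
 * RX and threaded channels, tearing everything down again on failure.
 */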
static int tegra186_bpmp_setup_channels(struct tegra_bpmp *bpmp)
{
	unsigned int i;
	int err;

	err = tegra186_bpmp_dram_init(bpmp);
	if (err == -ENODEV) {
		err = tegra186_bpmp_sram_init(bpmp);
		if (err < 0)
			return err;
	}

	err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
					 bpmp->soc->channels.cpu_tx.offset);
	if (err < 0)
		return err;

	err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
					 bpmp->soc->channels.cpu_rx.offset);
	if (err < 0) {
		tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
		return err;
	}

	for (i = 0; i < bpmp->threaded.count; i++) {
		unsigned int index = bpmp->soc->channels.thread.offset + i;

		err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
						 bpmp, index);
		if (err < 0)
			break;
	}

	if (err < 0)
		tegra186_bpmp_teardown_channels(bpmp);

	return err;
}

static void tegra186_bpmp_reset_channels(struct tegra_bpmp *bpmp)
{
	unsigned int i;

	/* reset message channels */
	tegra186_bpmp_channel_reset(bpmp->tx_channel);
	tegra186_bpmp_channel_reset(bpmp->rx_channel);

	for (i = 0; i < bpmp->threaded.count; i++)
		tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
}

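/*
 * Main transport init: allocate the private data, set up the shared memory
 * and IVC channels, register as a mailbox client for the HSP doorbell and
 * finally reset the channels so both sides agree on the initial state.
 */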
static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv;
	int err;

	priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->parent = bpmp;
	bpmp->priv = priv;

	err = tegra186_bpmp_setup_channels(bpmp);
	if (err < 0)
		return err;

	/* mbox registration */
	priv->mbox.client.dev = bpmp->dev;
	priv->mbox.client.rx_callback = mbox_handle_rx;
	priv->mbox.client.tx_block = false;
	priv->mbox.client.knows_txdone = false;

	priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
	if (IS_ERR(priv->mbox.channel)) {
		err = PTR_ERR(priv->mbox.channel);
		dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
		tegra186_bpmp_teardown_channels(bpmp);
		return err;
	}

	tegra186_bpmp_reset_channels(bpmp);

	return 0;
}

static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
{
	struct tegra186_bpmp *priv = bpmp->priv;

	mbox_free_channel(priv->mbox.channel);

	tegra186_bpmp_teardown_channels(bpmp);
}

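/*
 * On resume only the channel state needs to be re-synchronized; the mailbox
 * channel and the shared memory mappings are kept across suspend.
 */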
static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
{
	tegra186_bpmp_reset_channels(bpmp);

	return 0;
}

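/*
 * Transport operations plugged into the generic BPMP core. The same IVC
 * helpers serve both the request and the response direction; which channel
 * they operate on is decided by the caller.
 */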
const struct tegra_bpmp_ops tegra186_bpmp_ops = {
	.init = tegra186_bpmp_init,
	.deinit = tegra186_bpmp_deinit,
	.is_response_ready = tegra186_bpmp_is_message_ready,
	.is_request_ready = tegra186_bpmp_is_message_ready,
	.ack_response = tegra186_bpmp_ack_message,
	.ack_request = tegra186_bpmp_ack_message,
	.is_response_channel_free = tegra186_bpmp_is_channel_free,
	.is_request_channel_free = tegra186_bpmp_is_channel_free,
	.post_response = tegra186_bpmp_post_message,
	.post_request = tegra186_bpmp_post_message,
	.ring_doorbell = tegra186_bpmp_ring_doorbell,
	.resume = tegra186_bpmp_resume,
};