xref: /openbmc/linux/drivers/firmware/tegra/bpmp.c (revision 5ff32883)
1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  */
13 
14 #include <linux/clk/tegra.h>
15 #include <linux/genalloc.h>
16 #include <linux/mailbox_client.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/of_device.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm.h>
22 #include <linux/semaphore.h>
23 #include <linux/sched/clock.h>
24 
25 #include <soc/tegra/bpmp.h>
26 #include <soc/tegra/bpmp-abi.h>
27 #include <soc/tegra/ivc.h>
28 
29 #define MSG_ACK		BIT(0)
30 #define MSG_RING	BIT(1)
31 #define TAG_SZ		32
32 
/*
 * Map a mailbox client back to the tegra_bpmp instance that embeds it
 * (the client lives at bpmp->mbox.client).
 */
static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}
38 
39 struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
40 {
41 	struct platform_device *pdev;
42 	struct tegra_bpmp *bpmp;
43 	struct device_node *np;
44 
45 	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
46 	if (!np)
47 		return ERR_PTR(-ENOENT);
48 
49 	pdev = of_find_device_by_node(np);
50 	if (!pdev) {
51 		bpmp = ERR_PTR(-ENODEV);
52 		goto put;
53 	}
54 
55 	bpmp = platform_get_drvdata(pdev);
56 	if (!bpmp) {
57 		bpmp = ERR_PTR(-EPROBE_DEFER);
58 		put_device(&pdev->dev);
59 		goto put;
60 	}
61 
62 put:
63 	of_node_put(np);
64 	return bpmp;
65 }
66 EXPORT_SYMBOL_GPL(tegra_bpmp_get);
67 
/*
 * tegra_bpmp_put() - release the device reference taken by tegra_bpmp_get()
 *
 * NULL is tolerated so callers can use this unconditionally in cleanup paths.
 */
void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
	if (bpmp)
		put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);
74 
75 static int
76 tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
77 {
78 	struct tegra_bpmp *bpmp = channel->bpmp;
79 	unsigned int count;
80 	int index;
81 
82 	count = bpmp->soc->channels.thread.count;
83 
84 	index = channel - channel->bpmp->threaded_channels;
85 	if (index < 0 || index >= count)
86 		return -EINVAL;
87 
88 	return index;
89 }
90 
91 static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
92 {
93 	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
94 	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
95 	       (msg->tx.size == 0 || msg->tx.data) &&
96 	       (msg->rx.size == 0 || msg->rx.data);
97 }
98 
99 static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
100 {
101 	void *frame;
102 
103 	frame = tegra_ivc_read_get_next_frame(channel->ivc);
104 	if (IS_ERR(frame)) {
105 		channel->ib = NULL;
106 		return false;
107 	}
108 
109 	channel->ib = frame;
110 
111 	return true;
112 }
113 
114 static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
115 {
116 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
117 	ktime_t end;
118 
119 	end = ktime_add_us(ktime_get(), timeout);
120 
121 	do {
122 		if (tegra_bpmp_master_acked(channel))
123 			return 0;
124 	} while (ktime_before(ktime_get(), end));
125 
126 	return -ETIMEDOUT;
127 }
128 
129 static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
130 {
131 	void *frame;
132 
133 	frame = tegra_ivc_write_get_next_frame(channel->ivc);
134 	if (IS_ERR(frame)) {
135 		channel->ob = NULL;
136 		return false;
137 	}
138 
139 	channel->ob = frame;
140 
141 	return true;
142 }
143 
144 static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
145 {
146 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
147 	ktime_t start, now;
148 
149 	start = ns_to_ktime(local_clock());
150 
151 	do {
152 		if (tegra_bpmp_master_free(channel))
153 			return 0;
154 
155 		now = ns_to_ktime(local_clock());
156 	} while (ktime_us_delta(now, start) < timeout);
157 
158 	return -ETIMEDOUT;
159 }
160 
/*
 * Copy the response payload out of the inbound frame, release the frame
 * back to the IVC queue and report the BPMP's return code through @ret.
 *
 * Returns 0 on success or a negative error code if the IVC read pointer
 * could not be advanced.
 */
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		memcpy(data, channel->ib->data, size);

	err = tegra_ivc_read_advance(channel->ivc);
	if (err < 0)
		return err;

	/*
	 * NOTE(review): ->code is read after the frame has been advanced;
	 * this presumably relies on the BPMP not reusing the frame that
	 * quickly -- worth confirming against the IVC frame lifetime rules.
	 */
	*ret = channel->ib->code;

	return 0;
}
177 
/*
 * Read the response to a threaded transaction and release the channel.
 *
 * The channel's "allocated" bit (set in tegra_bpmp_write_threaded()) is
 * cleared under bpmp->lock, and the counting semaphore that throttles
 * threaded transactions is posted on every path -- including the error
 * path where the channel turns out not to belong to the threaded array.
 */
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}
202 
203 static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
204 					  unsigned int mrq, unsigned long flags,
205 					  const void *data, size_t size)
206 {
207 	channel->ob->code = mrq;
208 	channel->ob->flags = flags;
209 
210 	if (data && size > 0)
211 		memcpy(channel->ob->data, data, size);
212 
213 	return tegra_ivc_write_advance(channel->ivc);
214 }
215 
/*
 * Claim a free threaded channel and write a request to it.
 *
 * Flow control is two-level: the counting semaphore bounds the number of
 * in-flight threaded transactions, while the per-channel "allocated" bitmap
 * (protected by bpmp->lock) records which channels are claimed.  The "busy"
 * bit tells the RX handler which channels to poll for completion.
 *
 * On success the semaphore is left held; it is released later by
 * tegra_bpmp_channel_read() once the response has been consumed.  On
 * failure all claimed state is rolled back before returning an ERR_PTR().
 */
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	/* wait (bounded) for one of the threaded slots to become available */
	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	/* the BPMP must also have a free outbound frame on that channel */
	if (!tegra_bpmp_master_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	/* always request an ack and a doorbell for threaded transactions */
	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}
266 
267 static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
268 					unsigned int mrq, unsigned long flags,
269 					const void *data, size_t size)
270 {
271 	int err;
272 
273 	err = tegra_bpmp_wait_master_free(channel);
274 	if (err < 0)
275 		return err;
276 
277 	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
278 }
279 
/*
 * tegra_bpmp_transfer_atomic() - send a request and busy-wait for the reply
 *
 * Must be called with interrupts disabled (enforced by the WARN_ON).  Uses
 * the dedicated cpu_tx channel: the outbound write is serialized with
 * atomic_tx_lock, the doorbell is rung via the mailbox, and the
 * acknowledgment is polled for rather than slept on.
 *
 * Returns 0 on success with the response copied into msg->rx, or a
 * negative error code.
 */
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = bpmp->tx_channel;

	spin_lock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0) {
		spin_unlock(&bpmp->atomic_tx_lock);
		return err;
	}

	spin_unlock(&bpmp->atomic_tx_lock);

	/* ring the doorbell so the BPMP processes the request */
	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	err = tegra_bpmp_wait_ack(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
319 
/*
 * tegra_bpmp_transfer() - send a request on a threaded channel and sleep
 * until the response arrives
 *
 * Must be called from process context with interrupts enabled (enforced by
 * the WARN_ON).  The completion is signalled by tegra_bpmp_handle_rx() when
 * the BPMP acknowledges the message.
 *
 * NOTE(review): if mbox_send_message() fails or the completion times out,
 * the channel claimed by tegra_bpmp_write_threaded() (allocated bit plus
 * semaphore count) is not released here -- presumably acceptable because
 * such failures indicate an unresponsive BPMP, but worth confirming.
 */
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	/* ring the doorbell so the BPMP processes the request */
	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
354 
/*
 * Look up the registered handler entry for @mrq, or NULL if none exists.
 * Callers hold bpmp->lock to protect the list walk.
 */
static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
						  unsigned int mrq)
{
	struct tegra_bpmp_mrq *entry;

	list_for_each_entry(entry, &bpmp->mrqs, list)
		if (entry->mrq == mrq)
			return entry;

	return NULL;
}
366 
/*
 * tegra_bpmp_mrq_return() - complete an incoming request from the BPMP
 * @channel: channel the request arrived on
 * @code: return code to report back to the BPMP
 * @data: optional response payload (at most MSG_DATA_MIN_SZ bytes)
 * @size: size of @data in bytes
 *
 * Releases the inbound frame and, if the sender requested an acknowledgment
 * (MSG_ACK in the inbound flags), writes a response frame containing @code
 * and @data.  If the sender also requested a doorbell (MSG_RING), the
 * mailbox is rung so the BPMP notices the response.
 */
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	/* snapshot the flags before the inbound frame is released */
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	struct tegra_bpmp_mb_data *frame;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_ivc_read_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	/* fire-and-forget request: no response expected */
	if ((flags & MSG_ACK) == 0)
		return;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (WARN_ON(IS_ERR(frame)))
		return;

	frame->code = code;

	if (data && size > 0)
		memcpy(frame->data, data, size);

	err = tegra_ivc_write_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = mbox_send_message(bpmp->mbox.channel, NULL);
		if (WARN_ON(err < 0))
			return;

		mbox_client_txdone(bpmp->mbox.channel, 0);
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
407 
/*
 * Dispatch an incoming MRQ from the BPMP to its registered handler.
 *
 * The handler runs with bpmp->lock held.  Unknown MRQs are answered with
 * -EINVAL and a zero payload (outside the lock) so the BPMP is not left
 * waiting for an acknowledgment.
 */
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}
428 
429 int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
430 			   tegra_bpmp_mrq_handler_t handler, void *data)
431 {
432 	struct tegra_bpmp_mrq *entry;
433 	unsigned long flags;
434 
435 	if (!handler)
436 		return -EINVAL;
437 
438 	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
439 	if (!entry)
440 		return -ENOMEM;
441 
442 	spin_lock_irqsave(&bpmp->lock, flags);
443 
444 	entry->mrq = mrq;
445 	entry->handler = handler;
446 	entry->data = data;
447 	list_add(&entry->list, &bpmp->mrqs);
448 
449 	spin_unlock_irqrestore(&bpmp->lock, flags);
450 
451 	return 0;
452 }
453 EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
454 
455 void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
456 {
457 	struct tegra_bpmp_mrq *entry;
458 	unsigned long flags;
459 
460 	spin_lock_irqsave(&bpmp->lock, flags);
461 
462 	entry = tegra_bpmp_find_mrq(bpmp, mrq);
463 	if (!entry)
464 		goto unlock;
465 
466 	list_del(&entry->list);
467 	devm_kfree(bpmp->dev, entry);
468 
469 unlock:
470 	spin_unlock_irqrestore(&bpmp->lock, flags);
471 }
472 EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
473 
474 bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
475 {
476 	struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
477 	struct mrq_query_abi_response resp;
478 	struct tegra_bpmp_message msg = {
479 		.mrq = MRQ_QUERY_ABI,
480 		.tx = {
481 			.data = &req,
482 			.size = sizeof(req),
483 		},
484 		.rx = {
485 			.data = &resp,
486 			.size = sizeof(resp),
487 		},
488 	};
489 	int ret;
490 
491 	ret = tegra_bpmp_transfer(bpmp, &msg);
492 	if (ret || msg.rx.ret)
493 		return false;
494 
495 	return resp.status == 0;
496 }
497 EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);
498 
499 static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
500 				       struct tegra_bpmp_channel *channel,
501 				       void *data)
502 {
503 	struct mrq_ping_request *request;
504 	struct mrq_ping_response response;
505 
506 	request = (struct mrq_ping_request *)channel->ib->data;
507 
508 	memset(&response, 0, sizeof(response));
509 	response.reply = request->challenge << 1;
510 
511 	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
512 }
513 
514 static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
515 {
516 	struct mrq_ping_response response;
517 	struct mrq_ping_request request;
518 	struct tegra_bpmp_message msg;
519 	unsigned long flags;
520 	ktime_t start, end;
521 	int err;
522 
523 	memset(&request, 0, sizeof(request));
524 	request.challenge = 1;
525 
526 	memset(&response, 0, sizeof(response));
527 
528 	memset(&msg, 0, sizeof(msg));
529 	msg.mrq = MRQ_PING;
530 	msg.tx.data = &request;
531 	msg.tx.size = sizeof(request);
532 	msg.rx.data = &response;
533 	msg.rx.size = sizeof(response);
534 
535 	local_irq_save(flags);
536 	start = ktime_get();
537 	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
538 	end = ktime_get();
539 	local_irq_restore(flags);
540 
541 	if (!err)
542 		dev_dbg(bpmp->dev,
543 			"ping ok: challenge: %u, response: %u, time: %lld\n",
544 			request.challenge, response.reply,
545 			ktime_to_us(ktime_sub(end, start)));
546 
547 	return err;
548 }
549 
/* deprecated version of tag query */
/*
 * Query the firmware tag via MRQ_QUERY_TAG: the request carries the
 * physical address of a TAG_SZ-byte DMA buffer that the firmware fills in.
 * @size must be exactly TAG_SZ.
 *
 * NOTE(review): uses the atomic transfer path with interrupts disabled --
 * presumably so it can run early during probe; confirm if this is ever
 * called from another context.
 */
static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
					   size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	if (size != TAG_SZ)
		return -EINVAL;

	/* firmware writes the tag through this DMA-visible buffer */
	virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		memcpy(tag, virt, TAG_SZ);

	dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);

	return err;
}
588 
589 static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
590 				       size_t size)
591 {
592 	if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
593 		struct mrq_query_fw_tag_response resp;
594 		struct tegra_bpmp_message msg = {
595 			.mrq = MRQ_QUERY_FW_TAG,
596 			.rx = {
597 				.data = &resp,
598 				.size = sizeof(resp),
599 			},
600 		};
601 		int err;
602 
603 		if (size != sizeof(resp.tag))
604 			return -EINVAL;
605 
606 		err = tegra_bpmp_transfer(bpmp, &msg);
607 
608 		if (err)
609 			return err;
610 		if (msg.rx.ret < 0)
611 			return -EINVAL;
612 
613 		memcpy(tag, resp.tag, sizeof(resp.tag));
614 		return 0;
615 	}
616 
617 	return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
618 }
619 
620 static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
621 {
622 	unsigned long flags = channel->ob->flags;
623 
624 	if ((flags & MSG_RING) == 0)
625 		return;
626 
627 	complete(&channel->completion);
628 }
629 
630 static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
631 {
632 	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
633 	struct tegra_bpmp_channel *channel;
634 	unsigned int i, count;
635 	unsigned long *busy;
636 
637 	channel = bpmp->rx_channel;
638 	count = bpmp->soc->channels.thread.count;
639 	busy = bpmp->threaded.busy;
640 
641 	if (tegra_bpmp_master_acked(channel))
642 		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
643 
644 	spin_lock(&bpmp->lock);
645 
646 	for_each_set_bit(i, busy, count) {
647 		struct tegra_bpmp_channel *channel;
648 
649 		channel = &bpmp->threaded_channels[i];
650 
651 		if (tegra_bpmp_master_acked(channel)) {
652 			tegra_bpmp_channel_signal(channel);
653 			clear_bit(i, busy);
654 		}
655 	}
656 
657 	spin_unlock(&bpmp->lock);
658 }
659 
660 static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
661 {
662 	struct tegra_bpmp *bpmp = data;
663 	int err;
664 
665 	if (WARN_ON(bpmp->mbox.channel == NULL))
666 		return;
667 
668 	err = mbox_send_message(bpmp->mbox.channel, NULL);
669 	if (err < 0)
670 		return;
671 
672 	mbox_client_txdone(bpmp->mbox.channel, 0);
673 }
674 
/*
 * Initialize one IVC channel at slot @index of the shared TX/RX memory
 * areas.  Each slot occupies one queue (tegra_ivc_total_queue_size() bytes)
 * so the byte offset is queue_size * index.  The IVC instance itself is
 * device-managed.
 *
 * Returns 0 on success or a negative error code from tegra_ivc_init().
 */
static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				   struct tegra_bpmp *bpmp,
				   unsigned int index)
{
	size_t message_size, queue_size;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	/* NOTE(review): the literal 1 appears to be the frame count -- confirm */
	err = tegra_ivc_init(channel->ivc, NULL,
			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
			     1, message_size, tegra_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}
708 
/*
 * Reset an IVC channel and busy-wait until the state change has been
 * negotiated with the BPMP.
 */
static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
	/* reset the channel state */
	tegra_ivc_reset(channel->ivc);

	/* sync the channel state with BPMP */
	while (tegra_ivc_notified(channel->ivc))
		;
}
718 
/* Tear down IVC state set up by tegra_bpmp_channel_init(). */
static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}
723 
/*
 * Probe: wire up the BPMP IPC transport and register sub-drivers.
 *
 * Order matters: shared-memory pools -> channel bookkeeping -> IVC channel
 * init -> mailbox registration -> channel reset/sync -> MRQ_PING handler ->
 * ping + firmware tag sanity checks -> child devices and clock/reset/
 * powergate/debugfs sub-initialization.  The error ladder at the bottom
 * unwinds in exactly the reverse order.
 */
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	unsigned int i;
	char tag[TAG_SZ];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	/* shared-memory areas for the IVC queues: "shmem" 0 = TX, 1 = RX */
	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
	if (!bpmp->tx.pool) {
		dev_err(&pdev->dev, "TX shmem pool not found\n");
		return -ENOMEM;
	}

	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
	if (!bpmp->tx.virt) {
		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
	if (!bpmp->rx.pool) {
		dev_err(&pdev->dev, "RX shmem pool not found\n");
		err = -ENOMEM;
		goto free_tx;
	}

	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
	if (!bpmp->rx.virt) {
		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	/* semaphore count == number of threaded channels */
	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy) {
		err = -ENOMEM;
		goto free_rx;
	}

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
					GFP_KERNEL);
	if (!bpmp->rx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels) {
		err = -ENOMEM;
		goto free_rx;
	}

	err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
				      bpmp->soc->channels.cpu_tx.offset);
	if (err < 0)
		goto free_rx;

	err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
				      bpmp->soc->channels.cpu_rx.offset);
	if (err < 0)
		goto cleanup_tx_channel;

	for (i = 0; i < bpmp->threaded.count; i++) {
		err = tegra_bpmp_channel_init(
			&bpmp->threaded_channels[i], bpmp,
			bpmp->soc->channels.thread.offset + i);
		if (err < 0)
			goto cleanup_threaded_channels;
	}

	/* mbox registration */
	bpmp->mbox.client.dev = &pdev->dev;
	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
	bpmp->mbox.client.tx_block = false;
	bpmp->mbox.client.knows_txdone = false;

	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
	if (IS_ERR(bpmp->mbox.channel)) {
		err = PTR_ERR(bpmp->mbox.channel);
		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
		goto cleanup_threaded_channels;
	}

	/* reset message channels */
	tegra_bpmp_channel_reset(bpmp->tx_channel);
	tegra_bpmp_channel_reset(bpmp->rx_channel);
	for (i = 0; i < bpmp->threaded.count; i++)
		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto free_mbox;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);

	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_clocks(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_resets(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_powergates(bpmp);
	if (err < 0)
		goto free_mrq;

	/* debugfs is best-effort: log the failure but still succeed */
	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
free_mbox:
	mbox_free_channel(bpmp->mbox.channel);
cleanup_threaded_channels:
	/* ->bpmp is only set on successful init, so it marks initialized slots */
	for (i = 0; i < bpmp->threaded.count; i++) {
		if (bpmp->threaded_channels[i].bpmp)
			tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
	}

	tegra_bpmp_channel_cleanup(bpmp->rx_channel);
cleanup_tx_channel:
	tegra_bpmp_channel_cleanup(bpmp->tx_channel);
free_rx:
	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
free_tx:
	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
	return err;
}
907 
/*
 * System resume: re-synchronize every IVC channel with the BPMP by
 * resetting it, since the negotiated channel state does not survive
 * suspend.
 */
static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
	unsigned int i;

	/* reset message channels */
	tegra_bpmp_channel_reset(bpmp->tx_channel);
	tegra_bpmp_channel_reset(bpmp->rx_channel);

	for (i = 0; i < bpmp->threaded.count; i++)
		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);

	return 0;
}

/* no suspend hook is needed: channels are only re-synchronized on resume */
static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume);
924 
/*
 * Tegra186 channel layout: three threaded channels starting at slot 0, the
 * atomic CPU TX channel at slot 3 and the CPU RX channel at slot 13.
 * Timeouts are expressed in microseconds (consumed via usecs_to_jiffies()
 * and ktime_add_us()); the RX channel never waits.
 */
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,
		},
	},
	.num_resets = 193,
};
943 
/* device-tree match table; per-SoC parameters are carried in .data */
static const struct of_device_id tegra_bpmp_match[] = {
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
	{ }
};
948 
/* no .remove: the BPMP transport stays up for the lifetime of the system */
static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
	},
	.probe = tegra_bpmp_probe,
};
957 
/*
 * Registered at core_initcall level so the BPMP transport is available
 * before consumers (clocks, resets, power gates) initialize.
 */
static int __init tegra_bpmp_init(void)
{
	return platform_driver_register(&tegra_bpmp_driver);
}
core_initcall(tegra_bpmp_init);
963