xref: /openbmc/linux/drivers/firmware/tegra/bpmp.c (revision addee42a)
1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  */
13 
14 #include <linux/clk/tegra.h>
15 #include <linux/genalloc.h>
16 #include <linux/mailbox_client.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/of_device.h>
20 #include <linux/platform_device.h>
21 #include <linux/semaphore.h>
22 #include <linux/sched/clock.h>
23 
24 #include <soc/tegra/bpmp.h>
25 #include <soc/tegra/bpmp-abi.h>
26 #include <soc/tegra/ivc.h>
27 
/*
 * Flags carried in a message frame's "flags" field (see
 * tegra_bpmp_mrq_return()): MSG_ACK means the sender expects a response
 * frame to be written back; MSG_RING means the doorbell must be rung once
 * the response has been queued.
 */
#define MSG_ACK		BIT(0)
#define MSG_RING	BIT(1)
30 
/* Map an embedded mbox_client back to its containing tegra_bpmp. */
static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}
36 
37 struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
38 {
39 	struct platform_device *pdev;
40 	struct tegra_bpmp *bpmp;
41 	struct device_node *np;
42 
43 	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
44 	if (!np)
45 		return ERR_PTR(-ENOENT);
46 
47 	pdev = of_find_device_by_node(np);
48 	if (!pdev) {
49 		bpmp = ERR_PTR(-ENODEV);
50 		goto put;
51 	}
52 
53 	bpmp = platform_get_drvdata(pdev);
54 	if (!bpmp) {
55 		bpmp = ERR_PTR(-EPROBE_DEFER);
56 		put_device(&pdev->dev);
57 		goto put;
58 	}
59 
60 put:
61 	of_node_put(np);
62 	return bpmp;
63 }
64 EXPORT_SYMBOL_GPL(tegra_bpmp_get);
65 
66 void tegra_bpmp_put(struct tegra_bpmp *bpmp)
67 {
68 	if (bpmp)
69 		put_device(bpmp->dev);
70 }
71 EXPORT_SYMBOL_GPL(tegra_bpmp_put);
72 
73 static int
74 tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
75 {
76 	struct tegra_bpmp *bpmp = channel->bpmp;
77 	unsigned int count;
78 	int index;
79 
80 	count = bpmp->soc->channels.thread.count;
81 
82 	index = channel - channel->bpmp->threaded_channels;
83 	if (index < 0 || index >= count)
84 		return -EINVAL;
85 
86 	return index;
87 }
88 
89 static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
90 {
91 	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
92 	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
93 	       (msg->tx.size == 0 || msg->tx.data) &&
94 	       (msg->rx.size == 0 || msg->rx.data);
95 }
96 
97 static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
98 {
99 	void *frame;
100 
101 	frame = tegra_ivc_read_get_next_frame(channel->ivc);
102 	if (IS_ERR(frame)) {
103 		channel->ib = NULL;
104 		return false;
105 	}
106 
107 	channel->ib = frame;
108 
109 	return true;
110 }
111 
112 static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
113 {
114 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
115 	ktime_t end;
116 
117 	end = ktime_add_us(ktime_get(), timeout);
118 
119 	do {
120 		if (tegra_bpmp_master_acked(channel))
121 			return 0;
122 	} while (ktime_before(ktime_get(), end));
123 
124 	return -ETIMEDOUT;
125 }
126 
127 static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
128 {
129 	void *frame;
130 
131 	frame = tegra_ivc_write_get_next_frame(channel->ivc);
132 	if (IS_ERR(frame)) {
133 		channel->ob = NULL;
134 		return false;
135 	}
136 
137 	channel->ob = frame;
138 
139 	return true;
140 }
141 
142 static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
143 {
144 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
145 	ktime_t start, now;
146 
147 	start = ns_to_ktime(local_clock());
148 
149 	do {
150 		if (tegra_bpmp_master_free(channel))
151 			return 0;
152 
153 		now = ns_to_ktime(local_clock());
154 	} while (ktime_us_delta(now, start) < timeout);
155 
156 	return -ETIMEDOUT;
157 }
158 
159 static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
160 					 void *data, size_t size, int *ret)
161 {
162 	int err;
163 
164 	if (data && size > 0)
165 		memcpy(data, channel->ib->data, size);
166 
167 	err = tegra_ivc_read_advance(channel->ivc);
168 	if (err < 0)
169 		return err;
170 
171 	*ret = channel->ib->code;
172 
173 	return 0;
174 }
175 
/*
 * Read the response from a threaded channel and release the channel slot
 * that tegra_bpmp_write_threaded() allocated.
 *
 * @channel: threaded channel holding the response frame
 * @data/@size: destination buffer for the response payload (may be NULL/0)
 * @ret: filled with the MRQ return code from the response frame
 *
 * The threaded-channel semaphore is released on every path (including the
 * invalid-index error path), since the matching down_timeout() happened in
 * tegra_bpmp_write_threaded().
 */
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	/* consume the frame and mark the channel slot free again */
	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}
200 
201 static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
202 					  unsigned int mrq, unsigned long flags,
203 					  const void *data, size_t size)
204 {
205 	channel->ob->code = mrq;
206 	channel->ob->flags = flags;
207 
208 	if (data && size > 0)
209 		memcpy(channel->ob->data, data, size);
210 
211 	return tegra_ivc_write_advance(channel->ivc);
212 }
213 
/*
 * Claim a free threaded channel and write an MRQ request to it.
 *
 * Takes one slot of the threaded-channel semaphore (released again by
 * tegra_bpmp_channel_read() on the response path, or on the error paths
 * below), picks the first channel whose "allocated" bit is clear, and
 * writes the request with MSG_ACK | MSG_RING so the firmware both
 * answers and rings the doorbell when done.
 *
 * Returns the claimed channel, or an ERR_PTR() on failure.
 */
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	/* the slot was free, but the IVC write queue may still be full */
	if (!tegra_bpmp_master_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	/* "busy" marks channels whose response tegra_bpmp_handle_rx() awaits */
	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}
264 
265 static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
266 					unsigned int mrq, unsigned long flags,
267 					const void *data, size_t size)
268 {
269 	int err;
270 
271 	err = tegra_bpmp_wait_master_free(channel);
272 	if (err < 0)
273 		return err;
274 
275 	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
276 }
277 
278 int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
279 			       struct tegra_bpmp_message *msg)
280 {
281 	struct tegra_bpmp_channel *channel;
282 	int err;
283 
284 	if (WARN_ON(!irqs_disabled()))
285 		return -EPERM;
286 
287 	if (!tegra_bpmp_message_valid(msg))
288 		return -EINVAL;
289 
290 	channel = bpmp->tx_channel;
291 
292 	spin_lock(&bpmp->atomic_tx_lock);
293 
294 	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
295 				       msg->tx.data, msg->tx.size);
296 	if (err < 0) {
297 		spin_unlock(&bpmp->atomic_tx_lock);
298 		return err;
299 	}
300 
301 	spin_unlock(&bpmp->atomic_tx_lock);
302 
303 	err = mbox_send_message(bpmp->mbox.channel, NULL);
304 	if (err < 0)
305 		return err;
306 
307 	mbox_client_txdone(bpmp->mbox.channel, 0);
308 
309 	err = tegra_bpmp_wait_ack(channel);
310 	if (err < 0)
311 		return err;
312 
313 	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
314 					 &msg->rx.ret);
315 }
316 EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
317 
/*
 * Synchronously exchange an MRQ message with the BPMP from sleeping
 * context.  The request goes out on a threaded channel claimed by
 * tegra_bpmp_write_threaded(); the doorbell is rung and the caller sleeps
 * until tegra_bpmp_handle_rx() completes the channel or the per-SoC
 * thread timeout expires.
 *
 * Returns 0 on success with msg->rx filled in, or a negative errno.
 *
 * NOTE(review): on the mbox_send_message() error path and on completion
 * timeout the claimed channel is never released — its allocated/busy bits
 * stay set and the semaphore slot taken in tegra_bpmp_write_threaded() is
 * not returned.  Looks like a slot leak on these (rare) paths; confirm
 * against later upstream revisions before changing.
 */
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	/* this path sleeps; use tegra_bpmp_transfer_atomic() from atomic context */
	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
352 
353 static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
354 						  unsigned int mrq)
355 {
356 	struct tegra_bpmp_mrq *entry;
357 
358 	list_for_each_entry(entry, &bpmp->mrqs, list)
359 		if (entry->mrq == mrq)
360 			return entry;
361 
362 	return NULL;
363 }
364 
/*
 * Complete an inbound MRQ request: consume the request frame and, if the
 * sender set MSG_ACK, write a response frame carrying @code and @data.
 * If the sender also set MSG_RING, ring the doorbell so the remote end
 * notices the response.
 *
 * @channel: channel on which the request arrived (channel->ib)
 * @code: MRQ return code to report back
 * @data/@size: optional response payload (at most MSG_DATA_MIN_SZ bytes)
 */
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	/* latch the flags before the request frame is released below */
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	struct tegra_bpmp_mb_data *frame;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	/* release the request frame back to the IVC queue */
	err = tegra_ivc_read_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	/* sender did not ask for a response */
	if ((flags & MSG_ACK) == 0)
		return;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (WARN_ON(IS_ERR(frame)))
		return;

	frame->code = code;

	if (data && size > 0)
		memcpy(frame->data, data, size);

	err = tegra_ivc_write_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = mbox_send_message(bpmp->mbox.channel, NULL);
		if (WARN_ON(err < 0))
			return;

		/* signal TX completion to the mailbox framework */
		mbox_client_txdone(bpmp->mbox.channel, 0);
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
405 
/*
 * Dispatch an inbound MRQ request to its registered handler.
 *
 * The handler runs with bpmp->lock held.  If no handler is registered
 * for @mrq, the lock is dropped and the request is answered with -EINVAL
 * and a zeroed 32-bit payload so the remote end is not left waiting.
 */
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		/* unlock before replying: the reply path is self-contained */
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}
426 
427 int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
428 			   tegra_bpmp_mrq_handler_t handler, void *data)
429 {
430 	struct tegra_bpmp_mrq *entry;
431 	unsigned long flags;
432 
433 	if (!handler)
434 		return -EINVAL;
435 
436 	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
437 	if (!entry)
438 		return -ENOMEM;
439 
440 	spin_lock_irqsave(&bpmp->lock, flags);
441 
442 	entry->mrq = mrq;
443 	entry->handler = handler;
444 	entry->data = data;
445 	list_add(&entry->list, &bpmp->mrqs);
446 
447 	spin_unlock_irqrestore(&bpmp->lock, flags);
448 
449 	return 0;
450 }
451 EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
452 
453 void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
454 {
455 	struct tegra_bpmp_mrq *entry;
456 	unsigned long flags;
457 
458 	spin_lock_irqsave(&bpmp->lock, flags);
459 
460 	entry = tegra_bpmp_find_mrq(bpmp, mrq);
461 	if (!entry)
462 		goto unlock;
463 
464 	list_del(&entry->list);
465 	devm_kfree(bpmp->dev, entry);
466 
467 unlock:
468 	spin_unlock_irqrestore(&bpmp->lock, flags);
469 }
470 EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
471 
472 static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
473 				       struct tegra_bpmp_channel *channel,
474 				       void *data)
475 {
476 	struct mrq_ping_request *request;
477 	struct mrq_ping_response response;
478 
479 	request = (struct mrq_ping_request *)channel->ib->data;
480 
481 	memset(&response, 0, sizeof(response));
482 	response.reply = request->challenge << 1;
483 
484 	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
485 }
486 
/*
 * Sanity-check communication with the BPMP: send MRQ_PING with a known
 * challenge and log the response and round-trip time on success.
 *
 * tegra_bpmp_transfer_atomic() requires interrupts to be disabled, hence
 * the local_irq_save()/local_irq_restore() bracket around the call.
 */
static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
	struct mrq_ping_response response;
	struct mrq_ping_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	ktime_t start, end;
	int err;

	memset(&request, 0, sizeof(request));
	request.challenge = 1;

	memset(&response, 0, sizeof(response));

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	local_irq_save(flags);
	start = ktime_get();
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	end = ktime_get();
	local_irq_restore(flags);

	if (!err)
		dev_dbg(bpmp->dev,
			"ping ok: challenge: %u, response: %u, time: %lld\n",
			request.challenge, response.reply,
			ktime_to_us(ktime_sub(end, start)));

	return err;
}
522 
/*
 * Query the firmware identification tag (MRQ_QUERY_TAG).
 *
 * The firmware writes the tag into a DMA-coherent bounce buffer whose bus
 * address is passed in the request; no payload is expected in the reply
 * (msg.rx is left zeroed).
 *
 * NOTE(review): strlcpy() assumes the firmware NUL-terminates the tag
 * within the MSG_DATA_MIN_SZ-byte buffer — presumably guaranteed by the
 * ABI, but not enforced here; confirm against the MRQ_QUERY_TAG spec.
 *
 * @tag: destination buffer; @size: its capacity including the terminator.
 */
static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	/* tegra_bpmp_transfer_atomic() requires interrupts to be disabled */
	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		strlcpy(tag, virt, size);

	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);

	return err;
}
557 
558 static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
559 {
560 	unsigned long flags = channel->ob->flags;
561 
562 	if ((flags & MSG_RING) == 0)
563 		return;
564 
565 	complete(&channel->completion);
566 }
567 
568 static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
569 {
570 	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
571 	struct tegra_bpmp_channel *channel;
572 	unsigned int i, count;
573 	unsigned long *busy;
574 
575 	channel = bpmp->rx_channel;
576 	count = bpmp->soc->channels.thread.count;
577 	busy = bpmp->threaded.busy;
578 
579 	if (tegra_bpmp_master_acked(channel))
580 		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
581 
582 	spin_lock(&bpmp->lock);
583 
584 	for_each_set_bit(i, busy, count) {
585 		struct tegra_bpmp_channel *channel;
586 
587 		channel = &bpmp->threaded_channels[i];
588 
589 		if (tegra_bpmp_master_acked(channel)) {
590 			tegra_bpmp_channel_signal(channel);
591 			clear_bit(i, busy);
592 		}
593 	}
594 
595 	spin_unlock(&bpmp->lock);
596 }
597 
598 static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
599 {
600 	struct tegra_bpmp *bpmp = data;
601 	int err;
602 
603 	if (WARN_ON(bpmp->mbox.channel == NULL))
604 		return;
605 
606 	err = mbox_send_message(bpmp->mbox.channel, NULL);
607 	if (err < 0)
608 		return;
609 
610 	mbox_client_txdone(bpmp->mbox.channel, 0);
611 }
612 
/*
 * Set up the IVC transport for one channel.
 *
 * Each channel occupies one single-frame queue of
 * tegra_ivc_align(MSG_MIN_SZ) bytes in both the RX and TX shared-memory
 * areas, at offset @index * queue_size.  tegra_bpmp_ivc_notify() rings
 * the doorbell whenever IVC needs to notify the remote end.
 *
 * channel->bpmp is only assigned on success; the probe error path relies
 * on this to tell initialized channels from uninitialized ones.
 */
static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				   struct tegra_bpmp *bpmp,
				   unsigned int index)
{
	size_t message_size, queue_size;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	err = tegra_ivc_init(channel->ivc, NULL,
			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
			     1, message_size, tegra_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}
646 
/*
 * Reset a channel and busy-wait until the reset handshake with the BPMP
 * has completed (tegra_ivc_notified() returns 0 once the state is
 * synchronized).
 */
static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
	/* reset the channel state */
	tegra_ivc_reset(channel->ivc);

	/* sync the channel state with BPMP */
	while (tegra_ivc_notified(channel->ivc))
		;
}
656 
/* Tear down the IVC state created by tegra_bpmp_channel_init(). */
static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}
661 
/*
 * Probe: set up shared memory and IVC channels, acquire the doorbell
 * mailbox, handshake with the BPMP firmware (ping + firmware tag), then
 * register the interfaces built on top (clocks, resets, powergates,
 * debugfs).
 */
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	unsigned int i;
	char tag[32];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	/* TX and RX shared memory come from the two "shmem" gen_pool regions */
	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
	if (!bpmp->tx.pool) {
		dev_err(&pdev->dev, "TX shmem pool not found\n");
		return -ENOMEM;
	}

	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
	if (!bpmp->tx.virt) {
		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
	if (!bpmp->rx.pool) {
		dev_err(&pdev->dev, "RX shmem pool not found\n");
		err = -ENOMEM;
		goto free_tx;
	}

	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
	if (!bpmp->rx.virt) {
		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	/* one semaphore slot per threaded channel */
	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy) {
		err = -ENOMEM;
		goto free_rx;
	}

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
	                                GFP_KERNEL);
	if (!bpmp->rx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels) {
		err = -ENOMEM;
		goto free_rx;
	}

	err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
				      bpmp->soc->channels.cpu_tx.offset);
	if (err < 0)
		goto free_rx;

	err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
				      bpmp->soc->channels.cpu_rx.offset);
	if (err < 0)
		goto cleanup_tx_channel;

	for (i = 0; i < bpmp->threaded.count; i++) {
		err = tegra_bpmp_channel_init(
			&bpmp->threaded_channels[i], bpmp,
			bpmp->soc->channels.thread.offset + i);
		if (err < 0)
			goto cleanup_threaded_channels;
	}

	/* mbox registration */
	bpmp->mbox.client.dev = &pdev->dev;
	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
	bpmp->mbox.client.tx_block = false;
	bpmp->mbox.client.knows_txdone = false;

	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
	if (IS_ERR(bpmp->mbox.channel)) {
		err = PTR_ERR(bpmp->mbox.channel);
		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
		goto cleanup_threaded_channels;
	}

	/* reset message channels */
	tegra_bpmp_channel_reset(bpmp->tx_channel);
	tegra_bpmp_channel_reset(bpmp->rx_channel);
	for (i = 0; i < bpmp->threaded.count; i++)
		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);

	/* serve inbound MRQ_PING requests from the firmware */
	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto free_mbox;

	/* verify two-way communication before building anything on top */
	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %s\n", tag);

	/* must be set before children probe: tegra_bpmp_get() reads drvdata */
	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_clocks(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_resets(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_powergates(bpmp);
	if (err < 0)
		goto free_mrq;

	/* debugfs is best-effort: failure is logged but not fatal */
	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
free_mbox:
	mbox_free_channel(bpmp->mbox.channel);
cleanup_threaded_channels:
	/* channel->bpmp is only set once init succeeded for that channel */
	for (i = 0; i < bpmp->threaded.count; i++) {
		if (bpmp->threaded_channels[i].bpmp)
			tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
	}

	tegra_bpmp_channel_cleanup(bpmp->rx_channel);
cleanup_tx_channel:
	tegra_bpmp_channel_cleanup(bpmp->tx_channel);
free_rx:
	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
free_tx:
	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
	return err;
}
845 
/* Tegra186 channel layout and timeouts (timeouts in microseconds). */
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,	/* 60 s */
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,	/* 600 s */
		},
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,	/* not waited on in this driver */
		},
	},
	.num_resets = 193,
};
864 
/* Device-tree match table; .data supplies the per-SoC channel layout. */
static const struct of_device_id tegra_bpmp_match[] = {
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
	{ }
};
869 
/*
 * Platform driver.  NOTE(review): no .remove callback is provided —
 * presumably the device is never expected to unbind; confirm before
 * enabling manual unbind.
 */
static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
	},
	.probe = tegra_bpmp_probe,
};
877 
/* Registered at core_initcall so consumers can probe against BPMP early. */
static int __init tegra_bpmp_init(void)
{
	return platform_driver_register(&tegra_bpmp_driver);
}
core_initcall(tegra_bpmp_init);
883