xref: /openbmc/linux/drivers/firmware/tegra/bpmp.c (revision 0c874100)
1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  */
13 
14 #include <linux/clk/tegra.h>
15 #include <linux/genalloc.h>
16 #include <linux/mailbox_client.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/of_device.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm.h>
22 #include <linux/semaphore.h>
23 #include <linux/sched/clock.h>
24 
25 #include <soc/tegra/bpmp.h>
26 #include <soc/tegra/bpmp-abi.h>
27 #include <soc/tegra/ivc.h>
28 
29 #define MSG_ACK		BIT(0)
30 #define MSG_RING	BIT(1)
31 
/*
 * Recover the tegra_bpmp instance that embeds the given mailbox client.
 * Used by mailbox callbacks, which only receive the struct mbox_client.
 */
static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}
37 
/*
 * tegra_bpmp_get() - look up the BPMP instance referenced by a device
 * @dev: consumer device whose DT node carries a "nvidia,bpmp" phandle
 *
 * Resolves the phandle to the BPMP platform device and returns its driver
 * data. On success, a reference to the BPMP device has been taken (by
 * of_find_device_by_node()) and must be dropped with tegra_bpmp_put().
 *
 * Return: the BPMP instance, or an ERR_PTR():
 *   -ENOENT        the "nvidia,bpmp" phandle is missing
 *   -ENODEV        no platform device is bound to the referenced node
 *   -EPROBE_DEFER  the BPMP driver has not finished probing yet
 */
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_bpmp *bpmp;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		bpmp = ERR_PTR(-ENODEV);
		goto put;
	}

	bpmp = platform_get_drvdata(pdev);
	if (!bpmp) {
		/* drvdata is set at the end of probe; defer until then */
		bpmp = ERR_PTR(-EPROBE_DEFER);
		put_device(&pdev->dev);
		goto put;
	}

put:
	/* the node reference is not needed once we hold the device */
	of_node_put(np);
	return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);
65 EXPORT_SYMBOL_GPL(tegra_bpmp_get);
66 
67 void tegra_bpmp_put(struct tegra_bpmp *bpmp)
68 {
69 	if (bpmp)
70 		put_device(bpmp->dev);
71 }
72 EXPORT_SYMBOL_GPL(tegra_bpmp_put);
73 
74 static int
75 tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
76 {
77 	struct tegra_bpmp *bpmp = channel->bpmp;
78 	unsigned int count;
79 	int index;
80 
81 	count = bpmp->soc->channels.thread.count;
82 
83 	index = channel - channel->bpmp->threaded_channels;
84 	if (index < 0 || index >= count)
85 		return -EINVAL;
86 
87 	return index;
88 }
89 
90 static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
91 {
92 	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
93 	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
94 	       (msg->tx.size == 0 || msg->tx.data) &&
95 	       (msg->rx.size == 0 || msg->rx.data);
96 }
97 
98 static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
99 {
100 	void *frame;
101 
102 	frame = tegra_ivc_read_get_next_frame(channel->ivc);
103 	if (IS_ERR(frame)) {
104 		channel->ib = NULL;
105 		return false;
106 	}
107 
108 	channel->ib = frame;
109 
110 	return true;
111 }
112 
113 static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
114 {
115 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
116 	ktime_t end;
117 
118 	end = ktime_add_us(ktime_get(), timeout);
119 
120 	do {
121 		if (tegra_bpmp_master_acked(channel))
122 			return 0;
123 	} while (ktime_before(ktime_get(), end));
124 
125 	return -ETIMEDOUT;
126 }
127 
128 static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
129 {
130 	void *frame;
131 
132 	frame = tegra_ivc_write_get_next_frame(channel->ivc);
133 	if (IS_ERR(frame)) {
134 		channel->ob = NULL;
135 		return false;
136 	}
137 
138 	channel->ob = frame;
139 
140 	return true;
141 }
142 
143 static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
144 {
145 	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
146 	ktime_t start, now;
147 
148 	start = ns_to_ktime(local_clock());
149 
150 	do {
151 		if (tegra_bpmp_master_free(channel))
152 			return 0;
153 
154 		now = ns_to_ktime(local_clock());
155 	} while (ktime_us_delta(now, start) < timeout);
156 
157 	return -ETIMEDOUT;
158 }
159 
160 static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
161 					 void *data, size_t size, int *ret)
162 {
163 	int err;
164 
165 	if (data && size > 0)
166 		memcpy(data, channel->ib->data, size);
167 
168 	err = tegra_ivc_read_advance(channel->ivc);
169 	if (err < 0)
170 		return err;
171 
172 	*ret = channel->ib->code;
173 
174 	return 0;
175 }
176 
/*
 * Complete a threaded transaction: read the response from @channel, then
 * release both the channel slot and the semaphore slot that were taken in
 * tegra_bpmp_write_threaded().
 *
 * @data/@size: destination buffer for the response payload
 * @ret: on success, receives the response code from the BPMP
 */
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	/*
	 * Read the frame and free the slot under the same lock so the slot
	 * cannot be handed out again before its frame has been consumed.
	 */
	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	/* pairs with down_timeout() in tegra_bpmp_write_threaded() */
	up(&bpmp->threaded.lock);

	return err;
}
201 
202 static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
203 					  unsigned int mrq, unsigned long flags,
204 					  const void *data, size_t size)
205 {
206 	channel->ob->code = mrq;
207 	channel->ob->flags = flags;
208 
209 	if (data && size > 0)
210 		memcpy(channel->ob->data, data, size);
211 
212 	return tegra_ivc_write_advance(channel->ivc);
213 }
214 
/*
 * Claim a threaded channel and post a request on it.
 *
 * The counting semaphore (capacity == number of threaded channels) is
 * taken first so that once down_timeout() succeeds a free slot is
 * guaranteed to exist; the actual slot is then picked from the
 * "allocated" bitmap under bpmp->lock.
 *
 * On success the channel is returned with its "allocated" and "busy"
 * bits set and the semaphore slot still held; both are released on the
 * read side (tegra_bpmp_channel_read()) once the response has arrived.
 *
 * Return: the claimed channel, or an ERR_PTR() on semaphore timeout,
 * contention, or write failure.
 */
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	/* the BPMP still owns the outbound frame; nothing can be written */
	if (!tegra_bpmp_master_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	/* ask the BPMP to both acknowledge and ring the doorbell back */
	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	/* mark the channel as awaiting a response for the RX handler */
	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}
265 
266 static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
267 					unsigned int mrq, unsigned long flags,
268 					const void *data, size_t size)
269 {
270 	int err;
271 
272 	err = tegra_bpmp_wait_master_free(channel);
273 	if (err < 0)
274 		return err;
275 
276 	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
277 }
278 
/*
 * tegra_bpmp_transfer_atomic() - synchronous MRQ transfer, atomic context
 * @bpmp: BPMP instance
 * @msg: request/response descriptor; msg->rx.ret receives the response
 *       code from the BPMP on success
 *
 * Uses the dedicated CPU TX channel and busy-waits for the acknowledgment
 * instead of sleeping, so it can be called where sleeping is not allowed.
 * Interrupts must already be disabled by the caller (enforced by the
 * WARN_ON below).
 *
 * Return: 0 on success or a negative error code.
 */
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = bpmp->tx_channel;

	/* serialize writers of the single shared atomic TX channel */
	spin_lock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0) {
		spin_unlock(&bpmp->atomic_tx_lock);
		return err;
	}

	spin_unlock(&bpmp->atomic_tx_lock);

	/* ring the doorbell so the BPMP inspects the channel */
	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	/* poll for the acknowledgment; no completion in atomic context */
	err = tegra_bpmp_wait_ack(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
317 EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
318 
/*
 * tegra_bpmp_transfer() - synchronous MRQ transfer, sleeping context
 * @bpmp: BPMP instance
 * @msg: request/response descriptor; msg->rx.ret receives the response
 *       code from the BPMP on success
 *
 * Claims a threaded channel, rings the doorbell and sleeps on the
 * channel's completion until the RX handler signals the response.
 * Must not be called with interrupts disabled.
 *
 * NOTE(review): if mbox_send_message() fails or the completion times
 * out, the channel slot and the semaphore taken inside
 * tegra_bpmp_write_threaded() are not released here — looks like a
 * channel leak on these error paths; confirm against later upstream
 * revisions.
 *
 * Return: 0 on success or a negative error code.
 */
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	/* notify the BPMP that a request is pending */
	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	/* completed by tegra_bpmp_channel_signal() from the RX handler */
	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
352 EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
353 
354 static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
355 						  unsigned int mrq)
356 {
357 	struct tegra_bpmp_mrq *entry;
358 
359 	list_for_each_entry(entry, &bpmp->mrqs, list)
360 		if (entry->mrq == mrq)
361 			return entry;
362 
363 	return NULL;
364 }
365 
/*
 * tegra_bpmp_mrq_return() - reply to a BPMP-initiated request
 * @channel: channel the request arrived on
 * @code: response code to send back
 * @data/@size: optional payload (at most MSG_DATA_MIN_SZ bytes)
 *
 * Releases the inbound frame first, then — only if the requester set
 * MSG_ACK — fills and submits an outbound frame, finally ringing the
 * doorbell when MSG_RING was also requested.
 */
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	/* sample the request flags before the frame is released below */
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	struct tegra_bpmp_mb_data *frame;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	/* hand the inbound frame back to the BPMP */
	err = tegra_ivc_read_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	/* the requester did not ask for an acknowledgment */
	if ((flags & MSG_ACK) == 0)
		return;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (WARN_ON(IS_ERR(frame)))
		return;

	frame->code = code;

	if (data && size > 0)
		memcpy(frame->data, data, size);

	err = tegra_ivc_write_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		/* notify the BPMP that the response frame is available */
		err = mbox_send_message(bpmp->mbox.channel, NULL);
		if (WARN_ON(err < 0))
			return;

		mbox_client_txdone(bpmp->mbox.channel, 0);
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
406 
/*
 * Dispatch a BPMP-initiated request to its registered handler.
 *
 * The handler is invoked with bpmp->lock held (a spinlock), so handlers
 * must not sleep. An unknown MRQ is answered with -EINVAL and a zero
 * payload so the BPMP is not left waiting for an acknowledgment.
 */
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		/* drop the lock before replying; the reply rings the mbox */
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}
427 
428 int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
429 			   tegra_bpmp_mrq_handler_t handler, void *data)
430 {
431 	struct tegra_bpmp_mrq *entry;
432 	unsigned long flags;
433 
434 	if (!handler)
435 		return -EINVAL;
436 
437 	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
438 	if (!entry)
439 		return -ENOMEM;
440 
441 	spin_lock_irqsave(&bpmp->lock, flags);
442 
443 	entry->mrq = mrq;
444 	entry->handler = handler;
445 	entry->data = data;
446 	list_add(&entry->list, &bpmp->mrqs);
447 
448 	spin_unlock_irqrestore(&bpmp->lock, flags);
449 
450 	return 0;
451 }
452 EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
453 
454 void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
455 {
456 	struct tegra_bpmp_mrq *entry;
457 	unsigned long flags;
458 
459 	spin_lock_irqsave(&bpmp->lock, flags);
460 
461 	entry = tegra_bpmp_find_mrq(bpmp, mrq);
462 	if (!entry)
463 		goto unlock;
464 
465 	list_del(&entry->list);
466 	devm_kfree(bpmp->dev, entry);
467 
468 unlock:
469 	spin_unlock_irqrestore(&bpmp->lock, flags);
470 }
471 EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
472 
473 static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
474 				       struct tegra_bpmp_channel *channel,
475 				       void *data)
476 {
477 	struct mrq_ping_request *request;
478 	struct mrq_ping_response response;
479 
480 	request = (struct mrq_ping_request *)channel->ib->data;
481 
482 	memset(&response, 0, sizeof(response));
483 	response.reply = request->challenge << 1;
484 
485 	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
486 }
487 
488 static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
489 {
490 	struct mrq_ping_response response;
491 	struct mrq_ping_request request;
492 	struct tegra_bpmp_message msg;
493 	unsigned long flags;
494 	ktime_t start, end;
495 	int err;
496 
497 	memset(&request, 0, sizeof(request));
498 	request.challenge = 1;
499 
500 	memset(&response, 0, sizeof(response));
501 
502 	memset(&msg, 0, sizeof(msg));
503 	msg.mrq = MRQ_PING;
504 	msg.tx.data = &request;
505 	msg.tx.size = sizeof(request);
506 	msg.rx.data = &response;
507 	msg.rx.size = sizeof(response);
508 
509 	local_irq_save(flags);
510 	start = ktime_get();
511 	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
512 	end = ktime_get();
513 	local_irq_restore(flags);
514 
515 	if (!err)
516 		dev_dbg(bpmp->dev,
517 			"ping ok: challenge: %u, response: %u, time: %lld\n",
518 			request.challenge, response.reply,
519 			ktime_to_us(ktime_sub(end, start)));
520 
521 	return err;
522 }
523 
/*
 * Query the firmware build tag from the BPMP.
 *
 * The tag is returned out-of-band: the request carries the bus address of
 * a DMA-coherent bounce buffer that the BPMP fills in, which is why the
 * message itself has no RX buffer.
 *
 * NOTE(review): strlcpy() requires the BPMP to NUL-terminate the tag
 * within the MSG_DATA_MIN_SZ-byte buffer — confirm against the ABI.
 *
 * @tag/@size: destination buffer for the tag string
 *
 * Return: 0 on success or a negative error code.
 */
static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	/* the atomic transfer path requires interrupts to be off */
	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		strlcpy(tag, virt, size);

	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);

	return err;
}
558 
559 static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
560 {
561 	unsigned long flags = channel->ob->flags;
562 
563 	if ((flags & MSG_RING) == 0)
564 		return;
565 
566 	complete(&channel->completion);
567 }
568 
569 static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
570 {
571 	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
572 	struct tegra_bpmp_channel *channel;
573 	unsigned int i, count;
574 	unsigned long *busy;
575 
576 	channel = bpmp->rx_channel;
577 	count = bpmp->soc->channels.thread.count;
578 	busy = bpmp->threaded.busy;
579 
580 	if (tegra_bpmp_master_acked(channel))
581 		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
582 
583 	spin_lock(&bpmp->lock);
584 
585 	for_each_set_bit(i, busy, count) {
586 		struct tegra_bpmp_channel *channel;
587 
588 		channel = &bpmp->threaded_channels[i];
589 
590 		if (tegra_bpmp_master_acked(channel)) {
591 			tegra_bpmp_channel_signal(channel);
592 			clear_bit(i, busy);
593 		}
594 	}
595 
596 	spin_unlock(&bpmp->lock);
597 }
598 
599 static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
600 {
601 	struct tegra_bpmp *bpmp = data;
602 	int err;
603 
604 	if (WARN_ON(bpmp->mbox.channel == NULL))
605 		return;
606 
607 	err = mbox_send_message(bpmp->mbox.channel, NULL);
608 	if (err < 0)
609 		return;
610 
611 	mbox_client_txdone(bpmp->mbox.channel, 0);
612 }
613 
/*
 * Set up the IVC queue pair for one message channel.
 *
 * @index selects the channel's slice of the shared TX/RX memory: each
 * channel occupies one queue_size window at offset queue_size * @index
 * in both the RX and TX regions. Each queue holds a single frame
 * (num_frames == 1 in the tegra_ivc_init() call).
 *
 * Return: 0 on success or a negative error code.
 */
static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				   struct tegra_bpmp *bpmp,
				   unsigned int index)
{
	size_t message_size, queue_size;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	err = tegra_ivc_init(channel->ivc, NULL,
			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
			     1, message_size, tegra_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}
647 
648 static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
649 {
650 	/* reset the channel state */
651 	tegra_ivc_reset(channel->ivc);
652 
653 	/* sync the channel state with BPMP */
654 	while (tegra_ivc_notified(channel->ivc))
655 		;
656 }
657 
/* Tear down the channel's IVC state; counterpart of tegra_ivc_init(). */
static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}
662 
/*
 * Probe: map the shared-memory pools, create all message channels,
 * attach to the doorbell mailbox, self-test the transport with a ping
 * and bring up the BPMP sub-services (clocks, resets, powergates,
 * debugfs).
 */
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	unsigned int i;
	char tag[32];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	/* CPU-to-BPMP shared memory: "shmem" entry 0 */
	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
	if (!bpmp->tx.pool) {
		dev_err(&pdev->dev, "TX shmem pool not found\n");
		return -ENOMEM;
	}

	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
	if (!bpmp->tx.virt) {
		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	/* BPMP-to-CPU shared memory: "shmem" entry 1 */
	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
	if (!bpmp->rx.pool) {
		dev_err(&pdev->dev, "RX shmem pool not found\n");
		err = -ENOMEM;
		goto free_tx;
	}

	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
	if (!bpmp->rx.virt) {
		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	/* semaphore capacity == number of threaded channels available */
	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	/* bitmaps tracking allocated and response-pending channel slots */
	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy) {
		err = -ENOMEM;
		goto free_rx;
	}

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
	                                GFP_KERNEL);
	if (!bpmp->rx_channel) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels) {
		err = -ENOMEM;
		goto free_rx;
	}

	/* channel offsets within the shared memory come from SoC data */
	err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
				      bpmp->soc->channels.cpu_tx.offset);
	if (err < 0)
		goto free_rx;

	err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
				      bpmp->soc->channels.cpu_rx.offset);
	if (err < 0)
		goto cleanup_tx_channel;

	for (i = 0; i < bpmp->threaded.count; i++) {
		err = tegra_bpmp_channel_init(
			&bpmp->threaded_channels[i], bpmp,
			bpmp->soc->channels.thread.offset + i);
		if (err < 0)
			goto cleanup_threaded_channels;
	}

	/* mbox registration */
	bpmp->mbox.client.dev = &pdev->dev;
	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
	bpmp->mbox.client.tx_block = false;
	bpmp->mbox.client.knows_txdone = false;

	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
	if (IS_ERR(bpmp->mbox.channel)) {
		err = PTR_ERR(bpmp->mbox.channel);
		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
		goto cleanup_threaded_channels;
	}

	/* reset message channels */
	tegra_bpmp_channel_reset(bpmp->tx_channel);
	tegra_bpmp_channel_reset(bpmp->rx_channel);
	for (i = 0; i < bpmp->threaded.count; i++)
		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);

	/* answer the BPMP's own pings */
	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto free_mbox;

	/* round-trip self-test of the transport */
	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	/* size - 1 keeps one byte spare; strlcpy() terminates either way */
	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %s\n", tag);

	/* makes tegra_bpmp_get() succeed for consumers from here on */
	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_clocks(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_resets(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_powergates(bpmp);
	if (err < 0)
		goto free_mrq;

	/* debugfs is optional: failure is logged but not fatal */
	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
free_mbox:
	mbox_free_channel(bpmp->mbox.channel);
cleanup_threaded_channels:
	/* channel->bpmp is only set once the channel initialized OK */
	for (i = 0; i < bpmp->threaded.count; i++) {
		if (bpmp->threaded_channels[i].bpmp)
			tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
	}

	tegra_bpmp_channel_cleanup(bpmp->rx_channel);
cleanup_tx_channel:
	tegra_bpmp_channel_cleanup(bpmp->tx_channel);
free_rx:
	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
free_tx:
	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
	return err;
}
846 
/*
 * PM resume hook: re-synchronize every message channel with the BPMP
 * by running the IVC reset handshake again.
 */
static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
	unsigned int i;

	/* reset message channels */
	tegra_bpmp_channel_reset(bpmp->tx_channel);
	tegra_bpmp_channel_reset(bpmp->rx_channel);

	for (i = 0; i < bpmp->threaded.count; i++)
		tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);

	return 0;
}
861 
/* only a resume hook: channels must be re-synced after system resume */
static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume);
863 
/* Tegra186 channel layout; timeouts are in microseconds (see the
 * ktime_add_us()/usecs_to_jiffies() consumers above). Offsets index the
 * per-channel windows within the shared memory. */
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		/* single CPU-to-BPMP channel used for atomic transfers */
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,
		},
		/* pool of channels used for sleeping (threaded) transfers */
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		/* BPMP-to-CPU channel for requests initiated by the BPMP */
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,
		},
	},
	.num_resets = 193,
};
882 
/* DT match table; per-SoC data selects the channel layout above. */
static const struct of_device_id tegra_bpmp_match[] = {
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
	{ } /* sentinel */
};
887 
/* No .remove: the driver is registered at core_initcall time and the
 * BPMP is expected to stay bound for the lifetime of the system. */
static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
	},
	.probe = tegra_bpmp_probe,
};
896 
/*
 * Registered at core_initcall level — presumably so the clocks, resets
 * and power domains the BPMP provides are available before dependent
 * drivers probe.
 */
static int __init tegra_bpmp_init(void)
{
	return platform_driver_register(&tegra_bpmp_driver);
}
core_initcall(tegra_bpmp_init);
902