/*
 * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
#include <linux/sched/clock.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>

#define MSG_ACK		BIT(0)
#define MSG_RING	BIT(1)

static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}

struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_bpmp *bpmp;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		bpmp = ERR_PTR(-ENODEV);
		goto put;
	}

	bpmp = platform_get_drvdata(pdev);
	if (!bpmp) {
		bpmp = ERR_PTR(-EPROBE_DEFER);
		put_device(&pdev->dev);
		goto put;
	}

put:
	of_node_put(np);
	return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);

void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
	if (bpmp)
		put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);

static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel)
{
	return channel - channel->bpmp->channels;
}

static int
tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned int offset, count;
	int index;

	offset = bpmp->soc->channels.thread.offset;
	count = bpmp->soc->channels.thread.count;

	index = tegra_bpmp_channel_get_index(channel);
	if (index < 0)
		return index;

	if (index < offset || index >= offset + count)
		return -EINVAL;

	return index - offset;
}

static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index)
{
	unsigned int offset = bpmp->soc->channels.thread.offset;
	unsigned int count = bpmp->soc->channels.thread.count;

	if (index >= count)
		return NULL;

	return &bpmp->channels[offset + index];
}

static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp)
{
	unsigned int offset = bpmp->soc->channels.cpu_tx.offset;

	return &bpmp->channels[offset + smp_processor_id()];
}

static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp)
{
	unsigned int offset = bpmp->soc->channels.cpu_rx.offset;

	return &bpmp->channels[offset];
}

static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
{
	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->tx.size == 0 || msg->tx.data) &&
	       (msg->rx.size == 0 || msg->rx.data);
}

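/*
 * The helpers below poll the IVC queues directly: tegra_bpmp_master_acked()
 * checks whether the BPMP has produced a new inbound frame (i.e. acknowledged
 * the previous request), while tegra_bpmp_master_free() checks whether an
 * outbound frame slot is available for the CPU to fill. The wait_* variants
 * busy-wait on these with the cpu_tx timeout.
 */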
static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_read_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ib = NULL;
		return false;
	}

	channel->ib = frame;

	return true;
}

static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t end;

	end = ktime_add_us(ktime_get(), timeout);

	do {
		if (tegra_bpmp_master_acked(channel))
			return 0;
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}

static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
{
	void *frame;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (IS_ERR(frame)) {
		channel->ob = NULL;
		return false;
	}

	channel->ob = frame;

	return true;
}

static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t start, now;

	start = ns_to_ktime(local_clock());

	do {
		if (tegra_bpmp_master_free(channel))
			return 0;

		now = ns_to_ktime(local_clock());
	} while (ktime_us_delta(now, start) < timeout);

	return -ETIMEDOUT;
}

static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		memcpy(data, channel->ib->data, size);

	err = tegra_ivc_read_advance(channel->ivc);
	if (err < 0)
		return err;

	*ret = channel->ib->code;

	return 0;
}

static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}

static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					  unsigned int mrq, unsigned long flags,
					  const void *data, size_t size)
{
	channel->ob->code = mrq;
	channel->ob->flags = flags;

	if (data && size > 0)
		memcpy(channel->ob->data, data, size);

	return tegra_ivc_write_advance(channel->ivc);
}

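/*
 * Write a request to one of the threaded channels: take the counting
 * semaphore that bounds the number of outstanding threaded requests, pick a
 * free channel from the "allocated" bitmap under the spinlock, and queue the
 * message with MSG_ACK | MSG_RING so that the BPMP rings the doorbell back
 * once the response frame is ready.
 */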
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = tegra_bpmp_channel_get_thread(bpmp, index);
	if (!channel) {
		err = -EINVAL;
		goto unlock;
	}

	if (!tegra_bpmp_master_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}

static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					unsigned int mrq, unsigned long flags,
					const void *data, size_t size)
{
	int err;

	err = tegra_bpmp_wait_master_free(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
}

int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_channel_get_tx(bpmp);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0)
		return err;

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	err = tegra_bpmp_wait_ack(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);

int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return err;

	mbox_client_txdone(bpmp->mbox.channel, 0);

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);

static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
						  unsigned int mrq)
{
	struct tegra_bpmp_mrq *entry;

	list_for_each_entry(entry, &bpmp->mrqs, list)
		if (entry->mrq == mrq)
			return entry;

	return NULL;
}

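/*
 * tegra_bpmp_mrq_return() is called by MRQ handlers to complete an incoming
 * request: it releases the inbound frame and, if the sender requested an
 * acknowledgement (MSG_ACK), writes the return code and payload into the
 * next outbound frame, ringing the doorbell when MSG_RING is set.
 */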
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	struct tegra_bpmp_mb_data *frame;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_ivc_read_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if ((flags & MSG_ACK) == 0)
		return;

	frame = tegra_ivc_write_get_next_frame(channel->ivc);
	if (WARN_ON(IS_ERR(frame)))
		return;

	frame->code = code;

	if (data && size > 0)
		memcpy(frame->data, data, size);

	err = tegra_ivc_write_advance(channel->ivc);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = mbox_send_message(bpmp->mbox.channel, NULL);
		if (WARN_ON(err < 0))
			return;

		mbox_client_txdone(bpmp->mbox.channel, 0);
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);

static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}

int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
			   tegra_bpmp_mrq_handler_t handler, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	if (!handler)
		return -EINVAL;

	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry->mrq = mrq;
	entry->handler = handler;
	entry->data = data;
	list_add(&entry->list, &bpmp->mrqs);

	spin_unlock_irqrestore(&bpmp->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);

void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry)
		goto unlock;

	list_del(&entry->list);
	devm_kfree(bpmp->dev, entry);

unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);

static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
				       struct tegra_bpmp_channel *channel,
				       void *data)
{
	struct mrq_ping_request *request;
	struct mrq_ping_response response;

	request = (struct mrq_ping_request *)channel->ib->data;

	memset(&response, 0, sizeof(response));
	response.reply = request->challenge << 1;

	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
}

static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
	struct mrq_ping_response response;
	struct mrq_ping_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	ktime_t start, end;
	int err;

	memset(&request, 0, sizeof(request));
	request.challenge = 1;

	memset(&response, 0, sizeof(response));

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	local_irq_save(flags);
	start = ktime_get();
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	end = ktime_get();
	local_irq_restore(flags);

	if (!err)
		dev_dbg(bpmp->dev,
			"ping ok: challenge: %u, response: %u, time: %lld\n",
			request.challenge, response.reply,
			ktime_to_us(ktime_sub(end, start)));

	return err;
}

static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		strlcpy(tag, virt, size);

	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);

	return err;
}

static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
{
	unsigned long flags = channel->ob->flags;

	if ((flags & MSG_RING) == 0)
		return;

	complete(&channel->completion);
}

static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
{
	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
	struct tegra_bpmp_channel *channel;
	unsigned int i, count;
	unsigned long *busy;

	channel = tegra_bpmp_channel_get_rx(bpmp);
	count = bpmp->soc->channels.thread.count;
	busy = bpmp->threaded.busy;

	if (tegra_bpmp_master_acked(channel))
		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);

	spin_lock(&bpmp->lock);

	for_each_set_bit(i, busy, count) {
		struct tegra_bpmp_channel *channel;

		channel = tegra_bpmp_channel_get_thread(bpmp, i);
		if (!channel)
			continue;

		if (tegra_bpmp_master_acked(channel)) {
			tegra_bpmp_channel_signal(channel);
			clear_bit(i, busy);
		}
	}

	spin_unlock(&bpmp->lock);
}

static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
{
	struct tegra_bpmp *bpmp = data;
	int err;

	if (WARN_ON(bpmp->mbox.channel == NULL))
		return;

	err = mbox_send_message(bpmp->mbox.channel, NULL);
	if (err < 0)
		return;

	mbox_client_txdone(bpmp->mbox.channel, 0);
}

static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
				   struct tegra_bpmp *bpmp,
				   unsigned int index)
{
	size_t message_size, queue_size;
	unsigned int offset;
	int err;

	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
				    GFP_KERNEL);
	if (!channel->ivc)
		return -ENOMEM;

	message_size = tegra_ivc_align(MSG_MIN_SZ);
	queue_size = tegra_ivc_total_queue_size(message_size);
	offset = queue_size * index;

	err = tegra_ivc_init(channel->ivc, NULL,
			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
			     1, message_size, tegra_bpmp_ivc_notify,
			     bpmp);
	if (err < 0) {
		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
			index, err);
		return err;
	}

	init_completion(&channel->completion);
	channel->bpmp = bpmp;

	return 0;
}

static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
	/* reset the channel state */
	tegra_ivc_reset(channel->ivc);

	/* sync the channel state with BPMP */
	while (tegra_ivc_notified(channel->ivc))
		;
}

static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
	tegra_ivc_cleanup(channel->ivc);
}

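/*
 * Probe sequence: map the TX/RX shared-memory pools, initialize the IVC
 * channels on top of them, register the mailbox (doorbell) client, reset all
 * channels, and verify the firmware with a ping and a build-tag query before
 * populating the child devices and sub-drivers (clocks, resets, powergates,
 * debugfs).
 */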
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp_channel *channel;
	struct tegra_bpmp *bpmp;
	unsigned int i;
	char tag[32];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
	if (!bpmp->tx.pool) {
		dev_err(&pdev->dev, "TX shmem pool not found\n");
		return -ENOMEM;
	}

	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
	if (!bpmp->tx.virt) {
		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
		return -ENOMEM;
	}

	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
	if (!bpmp->rx.pool) {
		dev_err(&pdev->dev, "RX shmem pool not found\n");
		err = -ENOMEM;
		goto free_tx;
	}

	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
	if (!bpmp->rx.virt) {
		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
		err = -ENOMEM;
		goto free_tx;
	}

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy) {
		err = -ENOMEM;
		goto free_rx;
	}

	bpmp->num_channels = bpmp->soc->channels.cpu_tx.count +
			     bpmp->soc->channels.thread.count +
			     bpmp->soc->channels.cpu_rx.count;

	bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels,
				      sizeof(*channel), GFP_KERNEL);
	if (!bpmp->channels) {
		err = -ENOMEM;
		goto free_rx;
	}

	/* message channel initialization */
	for (i = 0; i < bpmp->num_channels; i++) {
		struct tegra_bpmp_channel *channel = &bpmp->channels[i];

		err = tegra_bpmp_channel_init(channel, bpmp, i);
		if (err < 0)
			goto cleanup_channels;
	}

	/* mbox registration */
	bpmp->mbox.client.dev = &pdev->dev;
	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
	bpmp->mbox.client.tx_block = false;
	bpmp->mbox.client.knows_txdone = false;

	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
	if (IS_ERR(bpmp->mbox.channel)) {
		err = PTR_ERR(bpmp->mbox.channel);
		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
		goto cleanup_channels;
	}

	/* reset message channels */
	for (i = 0; i < bpmp->num_channels; i++) {
		struct tegra_bpmp_channel *channel = &bpmp->channels[i];

		tegra_bpmp_channel_reset(channel);
	}

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto free_mbox;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %s\n", tag);

	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_clocks(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_resets(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_powergates(bpmp);
	if (err < 0)
		goto free_mrq;

	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
free_mbox:
	mbox_free_channel(bpmp->mbox.channel);
cleanup_channels:
	while (i--)
		tegra_bpmp_channel_cleanup(&bpmp->channels[i]);
free_rx:
	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
free_tx:
	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
	return err;
}

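/*
 * Tegra186 channel layout: channels 0-5 are the per-CPU TX channels used for
 * atomic transfers, channels 6-12 are the threaded channels used by
 * tegra_bpmp_transfer(), and channel 13 is the single CPU RX channel for
 * requests initiated by the BPMP.
 */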
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 0,
			.count = 6,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 6,
			.count = 7,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.count = 1,
			.timeout = 0,
		},
	},
	.num_resets = 193,
};

static const struct of_device_id tegra_bpmp_match[] = {
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
	{ }
};

static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
	},
	.probe = tegra_bpmp_probe,
};

static int __init tegra_bpmp_init(void)
{
	return platform_driver_register(&tegra_bpmp_driver);
}
core_initcall(tegra_bpmp_init);