1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 
24 #include <core/msgqueue.h>
25 #include <subdev/acr.h>
26 
27 #include <nvfw/pmu.h>
28 
29 static int
30 gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr)
31 {
32 	struct nv_pmu_acr_bootstrap_falcon_msg *msg =
33 		container_of(hdr, typeof(*msg), msg.hdr);
34 	return msg->falcon_id;
35 }
36 
37 int
38 gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
39 			       enum nvkm_acr_lsf_id id)
40 {
41 	struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
42 	struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
43 		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
44 		.cmd.hdr.size = sizeof(cmd),
45 		.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
46 		.flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
47 		.falcon_id = id,
48 	};
49 	int ret;
50 
51 	ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
52 				    gm20b_pmu_acr_bootstrap_falcon_cb,
53 				    &pmu->subdev, msecs_to_jiffies(1000));
54 	if (ret >= 0 && ret != cmd.falcon_id)
55 		ret = -EIO;
56 	return ret;
57 }
58 
/* LS-falcon operations the ACR subdev uses when the PMU acts as the
 * bootstrapping falcon on GM20B.
 */
static const struct nvkm_acr_lsf_func
gm20b_pmu_acr = {
	.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
};
63 
64 static int
65 gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr)
66 {
67 	struct nv_pmu_acr_init_wpr_region_msg *msg =
68 		container_of(hdr, typeof(*msg), msg.hdr);
69 	struct nvkm_pmu *pmu = priv;
70 	struct nvkm_subdev *subdev = &pmu->subdev;
71 
72 	if (msg->error_code) {
73 		nvkm_error(subdev, "ACR WPR init failure: %d\n",
74 			   msg->error_code);
75 		return -EINVAL;
76 	}
77 
78 	nvkm_debug(subdev, "ACR WPR init complete\n");
79 	complete_all(&pmu->wpr_ready);
80 	return 0;
81 }
82 
83 static int
84 gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
85 {
86 	struct nv_pmu_acr_init_wpr_region_cmd cmd = {
87 		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
88 		.cmd.hdr.size = sizeof(cmd),
89 		.cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
90 		.region_id = 1,
91 		.wpr_offset = 0,
92 	};
93 
94 	return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
95 				     gm20b_pmu_acr_init_wpr_callback, pmu, 0);
96 }
97 
98 int
99 gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
100 {
101 	struct nv_pmu_init_msg msg;
102 	int ret;
103 
104 	ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
105 	if (ret)
106 		return ret;
107 
108 	if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
109 	    msg.msg_type != NV_PMU_INIT_MSG_INIT)
110 		return -EINVAL;
111 
112 	nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
113 					msg.queue_info[0].offset,
114 					msg.queue_info[0].size);
115 	nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
116 					msg.queue_info[1].offset,
117 					msg.queue_info[1].size);
118 	nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
119 					 msg.queue_info[4].offset,
120 					 msg.queue_info[4].size);
121 	return gm20b_pmu_acr_init_wpr(pmu);
122 }
123 
124 void
125 gm20b_pmu_recv(struct nvkm_pmu *pmu)
126 {
127 	if (!pmu->initmsg_received) {
128 		int ret = pmu->func->initmsg(pmu);
129 		if (ret) {
130 			nvkm_error(&pmu->subdev,
131 				   "error parsing init message: %d\n", ret);
132 			return;
133 		}
134 
135 		pmu->initmsg_received = true;
136 	}
137 
138 	nvkm_falcon_msgq_recv(pmu->msgq);
139 }
140 
/* GM20B PMU implementation: reuses the GT215 falcon/interrupt handling and
 * GF100 enable check, with GM20B-specific message receive/INIT parsing.
 */
static const struct nvkm_pmu_func
gm20b_pmu = {
	.flcn = &gt215_pmu_flcn,
	.enabled = gf100_pmu_enabled,
	.intr = gt215_pmu_intr,
	.recv = gm20b_pmu_recv,
	.initmsg = gm20b_pmu_initmsg,
};
149 
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
/* Firmware images for the GM20B PMU — presumably the desc/image/sig triple
 * consumed via gm20b_pmu_load()'s "pmu/" prefix; verify against
 * nvkm_acr_lsfw_load_sig_image_desc().
 */
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
#endif
155 
156 int
157 gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
158 {
159 	return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
160 						 NVKM_ACR_LSF_PMU, "pmu/",
161 						 ver, fwif->acr);
162 }
163 
/* Firmware interface table: a single signed-firmware variant (version 0),
 * terminated by an empty sentinel entry.
 */
static const struct nvkm_pmu_fwif
gm20b_pmu_fwif[] = {
	{ 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
	{}
};
169 
/* Constructor: create the GM20B PMU subdev from its firmware
 * interface table.
 */
int
gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
}
175