1 /*
2  * Copyright 2021 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 
24 #include <nvfw/acr.h>
25 
26 static int
ga102_acr_wpr_patch(struct nvkm_acr * acr,s64 adjust)27 ga102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
28 {
29 	struct wpr_header_v2 hdr;
30 	struct lsb_header_v2 *lsb;
31 	struct nvkm_acr_lsfw *lsfw;
32 	u32 offset = 0;
33 
34 	lsb = kvmalloc(sizeof(*lsb), GFP_KERNEL);
35 	if (!lsb)
36 		return -ENOMEM;
37 
38 	do {
39 		nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
40 		wpr_header_v2_dump(&acr->subdev, &hdr);
41 
42 		list_for_each_entry(lsfw, &acr->lsfw, head) {
43 			if (lsfw->id != hdr.wpr.falcon_id)
44 				continue;
45 
46 			nvkm_robj(acr->wpr, hdr.wpr.lsb_offset, lsb, sizeof(*lsb));
47 			lsb_header_v2_dump(&acr->subdev, lsb);
48 
49 			lsfw->func->bld_patch(acr, lsb->bl_data_off, adjust);
50 			break;
51 		}
52 
53 		offset += sizeof(hdr);
54 	} while (hdr.wpr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID);
55 
56 	kvfree(lsb);
57 	return 0;
58 }
59 
/* Build and write the LSB (Light Secure Bootstrap) header for one LSFW
 * into the WPR image at lsfw->offset.lsb.
 *
 * Returns 0 on success, -EINVAL if the firmware's signature blob does not
 * exactly fill the header's signature field, -ENOMEM on allocation failure,
 * or the error from nvkm_falcon_get()/nvkm_falcon_fw_patch() when patching
 * the HS-FMC signature for a secure bootloader.
 */
static int
ga102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
{
	struct lsb_header_v2 *hdr;
	int ret = 0;

	/* The signature blob is memcpy'd below, so its size must match the
	 * header field exactly.
	 */
	if (WARN_ON(lsfw->sig->size != sizeof(hdr->signature)))
		return -EINVAL;

	/* Heap-allocated and zeroed; unset fields remain 0. */
	hdr = kvzalloc(sizeof(*hdr), GFP_KERNEL);
	if (!hdr)
		return -ENOMEM;

	hdr->hdr.identifier = WPR_GENERIC_HEADER_ID_LSF_LSB_HEADER;
	hdr->hdr.version = 2;
	hdr->hdr.size = sizeof(*hdr);

	/* Describe where the ucode image and bootloader data live in WPR
	 * (offsets were assigned by ga102_acr_wpr_layout()).
	 */
	memcpy(&hdr->signature, lsfw->sig->data, lsfw->sig->size);
	hdr->ucode_off = lsfw->offset.img;
	hdr->ucode_size = lsfw->ucode_size;
	hdr->data_size = lsfw->data_size;
	hdr->bl_code_size = lsfw->bootloader_size;
	hdr->bl_imem_off = lsfw->bootloader_imem_offset;
	hdr->bl_data_off = lsfw->offset.bld;
	hdr->bl_data_size = lsfw->bl_data_size;
	hdr->app_code_off = lsfw->app_start_offset + lsfw->app_resident_code_offset;
	hdr->app_code_size = ALIGN(lsfw->app_resident_code_size, 0x100);
	hdr->app_data_off = lsfw->app_start_offset + lsfw->app_resident_data_offset;
	hdr->app_data_size = ALIGN(lsfw->app_resident_data_size, 0x100);
	hdr->app_imem_offset = lsfw->app_imem_offset;
	hdr->app_dmem_offset = lsfw->app_dmem_offset;
	hdr->flags = lsfw->func->flags;
	hdr->monitor_code_offset = 0;
	hdr->monitor_data_offset = 0;
	hdr->manifest_offset = 0;

	if (lsfw->secure_bootloader) {
		/* Temporary falcon-fw wrapper so the generic signature
		 * patcher can write the PKC signature directly into the
		 * header's hs_fmc_params.
		 */
		struct nvkm_falcon_fw fw = {
			.fw.img = hdr->hs_fmc_params.pkc_signature,
			.fw.name = "LSFW",
			.func = &(const struct nvkm_falcon_fw_func) {
				.signature = ga100_flcn_fw_signature,
			},
			.sig_size = lsfw->sig_size,
			.sig_nr = lsfw->sig_nr,
			.sigs = lsfw->sigs,
			.fuse_ver = lsfw->fuse_ver,
			.engine_id = lsfw->engine_id,
			.ucode_id = lsfw->ucode_id,
			.falcon = lsfw->falcon,

		};

		/* Acquire the falcon while patching; released below. */
		ret = nvkm_falcon_get(fw.falcon, &acr->subdev);
		if (ret == 0) {
			hdr->hs_fmc_params.hs_fmc = 1;
			hdr->hs_fmc_params.pkc_algo = 0;
			hdr->hs_fmc_params.pkc_algo_version = 1;
			hdr->hs_fmc_params.engid_mask = lsfw->engine_id;
			hdr->hs_fmc_params.ucode_id = lsfw->ucode_id;
			hdr->hs_fmc_params.fuse_ver = lsfw->fuse_ver;
			ret = nvkm_falcon_fw_patch(&fw);
			nvkm_falcon_put(fw.falcon, &acr->subdev);
		}
	}

	/* NOTE(review): the header is written to WPR even if patching above
	 * failed (ret != 0) - caller is expected to abort on the error.
	 */
	nvkm_wobj(acr->wpr, lsfw->offset.lsb, hdr, sizeof(*hdr));
	kvfree(hdr);
	return ret;
}
130 
131 static int
ga102_acr_wpr_build(struct nvkm_acr * acr,struct nvkm_acr_lsf * rtos)132 ga102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
133 {
134 	struct nvkm_acr_lsfw *lsfw;
135 	struct wpr_header_v2 hdr;
136 	u32 offset = 0;
137 	int ret;
138 
139 	/*XXX: shared sub-WPR headers, fill terminator for now. */
140 	nvkm_wo32(acr->wpr, 0x300, (2 << 16) | WPR_GENERIC_HEADER_ID_LSF_SHARED_SUB_WPR);
141 	nvkm_wo32(acr->wpr, 0x304, 0x14);
142 	nvkm_wo32(acr->wpr, 0x308, 0xffffffff);
143 	nvkm_wo32(acr->wpr, 0x30c, 0);
144 	nvkm_wo32(acr->wpr, 0x310, 0);
145 
146 	/* Fill per-LSF structures. */
147 	list_for_each_entry(lsfw, &acr->lsfw, head) {
148 		struct lsf_signature_v2 *sig = (void *)lsfw->sig->data;
149 
150 		hdr.hdr.identifier = WPR_GENERIC_HEADER_ID_LSF_WPR_HEADER;
151 		hdr.hdr.version = 2;
152 		hdr.hdr.size = sizeof(hdr);
153 		hdr.wpr.falcon_id = lsfw->id;
154 		hdr.wpr.lsb_offset = lsfw->offset.lsb;
155 		hdr.wpr.bootstrap_owner = NVKM_ACR_LSF_GSPLITE;
156 		hdr.wpr.lazy_bootstrap = 1;
157 		hdr.wpr.bin_version = sig->ls_ucode_version;
158 		hdr.wpr.status = WPR_HEADER_V1_STATUS_COPY;
159 
160 		/* Write WPR header. */
161 		nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
162 		offset += sizeof(hdr);
163 
164 		/* Write LSB header. */
165 		ret = ga102_acr_wpr_build_lsb(acr, lsfw);
166 		if (ret)
167 			return ret;
168 
169 		/* Write ucode image. */
170 		nvkm_wobj(acr->wpr, lsfw->offset.img,
171 				    lsfw->img.data,
172 				    lsfw->img.size);
173 
174 		/* Write bootloader data. */
175 		lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
176 	}
177 
178 	/* Finalise WPR. */
179 	hdr.hdr.identifier = WPR_GENERIC_HEADER_ID_LSF_WPR_HEADER;
180 	hdr.hdr.version = 2;
181 	hdr.hdr.size = sizeof(hdr);
182 	hdr.wpr.falcon_id = WPR_HEADER_V1_FALCON_ID_INVALID;
183 	nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
184 	return 0;
185 }
186 
187 static u32
ga102_acr_wpr_layout(struct nvkm_acr * acr)188 ga102_acr_wpr_layout(struct nvkm_acr *acr)
189 {
190 	struct nvkm_acr_lsfw *lsfw;
191 	u32 wpr = 0;
192 
193 	wpr += 21 /* MAX_LSF */ * sizeof(struct wpr_header_v2);
194 	wpr  = ALIGN(wpr, 256);
195 
196 	wpr += 0x100; /* Shared sub-WPR headers. */
197 
198 	list_for_each_entry(lsfw, &acr->lsfw, head) {
199 		wpr  = ALIGN(wpr, 256);
200 		lsfw->offset.lsb = wpr;
201 		wpr += sizeof(struct lsb_header_v2);
202 
203 		wpr  = ALIGN(wpr, 4096);
204 		lsfw->offset.img = wpr;
205 		wpr += lsfw->img.size;
206 
207 		wpr  = ALIGN(wpr, 256);
208 		lsfw->offset.bld = wpr;
209 		lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
210 		wpr += lsfw->bl_data_size;
211 	}
212 
213 	return wpr;
214 }
215 
216 static int
ga102_acr_wpr_parse(struct nvkm_acr * acr)217 ga102_acr_wpr_parse(struct nvkm_acr *acr)
218 {
219 	const struct wpr_header_v2 *hdr = (void *)acr->wpr_fw->data;
220 
221 	while (hdr->wpr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
222 		wpr_header_v2_dump(&acr->subdev, hdr);
223 		if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->wpr.falcon_id))
224 			return -ENOMEM;
225 	}
226 
227 	return 0;
228 }
229 
MODULE_FIRMWARE("nvidia/ga102/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga103/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga104/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga106/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga107/acr/ucode_unload.bin");

/* "unload" HS firmware: constructed via ga100_acr_hsfw_ctor with the
 * ga102_flcn_fw functions, dispatched as NVKM_ACR_HSF_SEC2.
 */
static const struct nvkm_acr_hsf_fwif
ga102_acr_unload_fwif[] = {
	{  0, ga100_acr_hsfw_ctor, &ga102_flcn_fw, NVKM_ACR_HSF_SEC2 },
	{}
};
241 
MODULE_FIRMWARE("nvidia/ga102/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga103/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga104/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga106/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga107/acr/ucode_asb.bin");

/* "ASB" HS firmware: constructed via ga100_acr_hsfw_ctor with the
 * ga102_flcn_fw functions, dispatched as NVKM_ACR_HSF_GSP.
 */
static const struct nvkm_acr_hsf_fwif
ga102_acr_asb_fwif[] = {
	{  0, ga100_acr_hsfw_ctor, &ga102_flcn_fw, NVKM_ACR_HSF_GSP },
	{}
};
253 
/* Falcon-fw functions for the AHESASC HS firmware; unlike the generic
 * ga102_flcn_fw used above, this adds the gp102 ACR load-time setup hook.
 */
static const struct nvkm_falcon_fw_func
ga102_acr_ahesasc_0 = {
	.signature = ga100_flcn_fw_signature,
	.reset = gm200_flcn_fw_reset,
	.setup = gp102_acr_load_setup,
	.load = ga102_flcn_fw_load,
	.boot = ga102_flcn_fw_boot,
};

MODULE_FIRMWARE("nvidia/ga102/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga103/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga104/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga106/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga107/acr/ucode_ahesasc.bin");

/* "AHESASC" HS firmware, dispatched as NVKM_ACR_HSF_SEC2. */
static const struct nvkm_acr_hsf_fwif
ga102_acr_ahesasc_fwif[] = {
	{  0, ga100_acr_hsfw_ctor, &ga102_acr_ahesasc_0, NVKM_ACR_HSF_SEC2 },
	{}
};
274 
/* GA102 ACR implementation: GA102-specific WPR handling defined in this
 * file, with allocation/check/init reused from gp102/ga100/tu102.
 */
static const struct nvkm_acr_func
ga102_acr = {
	.ahesasc = ga102_acr_ahesasc_fwif,
	.asb = ga102_acr_asb_fwif,
	.unload = ga102_acr_unload_fwif,
	.wpr_parse = ga102_acr_wpr_parse,
	.wpr_layout = ga102_acr_wpr_layout,
	.wpr_alloc = gp102_acr_wpr_alloc,
	.wpr_patch = ga102_acr_wpr_patch,
	.wpr_build = ga102_acr_wpr_build,
	.wpr_check = ga100_acr_wpr_check,
	.init = tu102_acr_init,
};
288 
289 static int
ga102_acr_load(struct nvkm_acr * acr,int version,const struct nvkm_acr_fwif * fwif)290 ga102_acr_load(struct nvkm_acr *acr, int version,
291 	       const struct nvkm_acr_fwif *fwif)
292 {
293 	struct nvkm_subdev *subdev = &acr->subdev;
294 	const struct nvkm_acr_hsf_fwif *hsfwif;
295 
296 	hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC",
297 				    acr, NULL, "acr/ucode_ahesasc", "AHESASC");
298 	if (IS_ERR(hsfwif))
299 		return PTR_ERR(hsfwif);
300 
301 	hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB",
302 				    acr, NULL, "acr/ucode_asb", "ASB");
303 	if (IS_ERR(hsfwif))
304 		return PTR_ERR(hsfwif);
305 
306 	hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
307 				    acr, NULL, "acr/ucode_unload", "unload");
308 	if (IS_ERR(hsfwif))
309 		return PTR_ERR(hsfwif);
310 
311 	return 0;
312 }
313 
/* Firmware selection: version 0 loads the GA102 HS firmware set; the -1
 * entry (gm200_acr_nofw) is the no-firmware fallback path.
 */
static const struct nvkm_acr_fwif
ga102_acr_fwif[] = {
	{  0, ga102_acr_load, &ga102_acr },
	{ -1, gm200_acr_nofw, &gm200_acr },
	{}
};
320 
/* Create the ACR subdev for GA10x GPUs using the firmware table above. */
int
ga102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_acr **pacr)
{
	return nvkm_acr_new_(ga102_acr_fwif, device, type, inst, pacr);
}
327