/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/falcon.h>
#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <subdev/pmu.h>
#include <subdev/timer.h>

#include <nvfw/acr.h>
#include <nvfw/flcn.h>

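/* Empty function table, selected (together with gm200_acr_nofw) when
 * no ACR firmware could be found: the device remains usable, just
 * without the secure boot process.
 */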
const struct nvkm_acr_func
gm200_acr = {
};

int
gm200_acr_nofw(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
{
	nvkm_warn(&acr->subdev, "firmware unavailable\n");
	return 0;
}

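/* Kick off secure boot by running the NVIDIA-signed "load" HS ucode,
 * which verifies and bootstraps the LS falcon images that were placed
 * in WPR by gm200_acr_wpr_build().
 */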
int
gm200_acr_init(struct nvkm_acr *acr)
{
	return nvkm_acr_hsf_boot(acr, "load");
}

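/* Read the WPR region bounds back from the memory controller.
 * 0x100cd4 appears to act as an index/data register: writing 2 or 3
 * selects the region start or limit, and the value read back carries
 * address bits 39:16 in bits 31:8.  The extra 0x20000 presumably
 * compensates for the limit being reported at 128KiB granularity.
 */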
void
gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit)
{
	struct nvkm_device *device = acr->subdev.device;

	nvkm_wr32(device, 0x100cd4, 2);
	*start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
	nvkm_wr32(device, 0x100cd4, 3);
	*limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
	*limit = *limit + 0x20000;
}

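/* The WPR image has moved by 'adjust' bytes: walk the WPR headers and
 * have each LS firmware re-patch its bootloader descriptor so the DMA
 * addresses within point at the new location.
 */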
void
gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct wpr_header hdr;
	struct lsb_header lsb;
	struct nvkm_acr_lsf *lsfw;
	u32 offset = 0;

	do {
		nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
		wpr_header_dump(subdev, &hdr);

		list_for_each_entry(lsfw, &acr->lsfw, head) {
			if (lsfw->id != hdr.falcon_id)
				continue;

			nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
			lsb_header_dump(subdev, &lsb);

			lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
			break;
		}
		offset += sizeof(hdr);
	} while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID);
}

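/* Fill the common tail of an LSB header from the parsed LS firmware:
 * WPR-relative offsets of the ucode image and bootloader data, plus
 * the sizes and resident-application layout needed to boot it.
 */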
void
gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw,
			     struct lsb_header_tail *hdr)
{
	hdr->ucode_off = lsfw->offset.img;
	hdr->ucode_size = lsfw->ucode_size;
	hdr->data_size = lsfw->data_size;
	hdr->bl_code_size = lsfw->bootloader_size;
	hdr->bl_imem_off = lsfw->bootloader_imem_offset;
	hdr->bl_data_off = lsfw->offset.bld;
	hdr->bl_data_size = lsfw->bl_data_size;
	hdr->app_code_off = lsfw->app_start_offset +
			   lsfw->app_resident_code_offset;
	hdr->app_code_size = lsfw->app_resident_code_size;
	hdr->app_data_off = lsfw->app_start_offset +
			   lsfw->app_resident_data_offset;
	hdr->app_data_size = lsfw->app_resident_data_size;
	hdr->flags = lsfw->func->flags;
}

static int
gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
{
	struct lsb_header hdr;

	if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
		return -EINVAL;

	memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
	gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);

	nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
	return 0;
}

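/* Lay down the WPR image: for each LS firmware a WPR header, an LSB
 * header, the ucode image itself and its bootloader data, all at the
 * offsets assigned by gm200_acr_wpr_layout().  The header array is
 * terminated by an invalid falcon ID.
 */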
int
gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
{
	struct nvkm_acr_lsfw *lsfw;
	u32 offset = 0;
	int ret;

	/* Fill per-LSF structures. */
	list_for_each_entry(lsfw, &acr->lsfw, head) {
		struct wpr_header hdr = {
			.falcon_id = lsfw->id,
			.lsb_offset = lsfw->offset.lsb,
			.bootstrap_owner = NVKM_ACR_LSF_PMU,
			.lazy_bootstrap = rtos && lsfw->id != rtos->id,
			.status = WPR_HEADER_V0_STATUS_COPY,
		};

		/* Write WPR header. */
		nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
		offset += sizeof(hdr);

		/* Write LSB header. */
		ret = gm200_acr_wpr_build_lsb(acr, lsfw);
		if (ret)
			return ret;

		/* Write ucode image. */
		nvkm_wobj(acr->wpr, lsfw->offset.img,
				    lsfw->img.data,
				    lsfw->img.size);

		/* Write bootloader data. */
		lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
	}

	/* Finalise WPR. */
	nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID);
	return 0;
}

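/* Back the WPR region with instance memory, with both size and
 * alignment padded to 256KiB (0x40000).
 */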
static int
gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
{
	int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
				  ALIGN(wpr_size, 0x40000), 0x40000, true,
				  &acr->wpr);
	if (ret)
		return ret;

	acr->wpr_start = nvkm_memory_addr(acr->wpr);
	acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr);
	return 0;
}

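/* Assign each LS firmware its offsets within WPR: a fixed-size array
 * of WPR headers (up to 11 LSFs), then per falcon an LSB header
 * (256-byte aligned), the ucode image (4KiB aligned) and bootloader
 * data (256-byte aligned).  Returns the total size required.
 */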
u32
gm200_acr_wpr_layout(struct nvkm_acr *acr)
{
	struct nvkm_acr_lsfw *lsfw;
	u32 wpr = 0;

	wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header);

	list_for_each_entry(lsfw, &acr->lsfw, head) {
		wpr  = ALIGN(wpr, 256);
		lsfw->offset.lsb = wpr;
		wpr += sizeof(struct lsb_header);

		wpr  = ALIGN(wpr, 4096);
		lsfw->offset.img = wpr;
		wpr += lsfw->img.size;

		wpr  = ALIGN(wpr, 256);
		lsfw->offset.bld = wpr;
		lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
		wpr += lsfw->bl_data_size;
	}

	return wpr;
}

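/* Enumerate the LS falcons present in a prebuilt WPR image, as
 * shipped in firmware files, by walking its WPR headers.
 */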
int
gm200_acr_wpr_parse(struct nvkm_acr *acr)
{
	const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
	struct nvkm_acr_lsfw *lsfw;

	while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
		wpr_header_dump(&acr->subdev, hdr);
		lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
		if (IS_ERR(lsfw))
			return PTR_ERR(lsfw);
	}

	return 0;
}

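/* Write the HS bootloader's DMEM descriptor, telling it where in the
 * ACR virtual address space to DMA the HS ucode's secure/non-secure
 * code and data segments from.
 */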
void
gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
{
	struct flcn_bl_dmem_desc_v1 hsdesc = {
		.ctx_dma = FALCON_DMAIDX_VIRT,
		.code_dma_base = hsf->vma->addr,
		.non_sec_code_off = hsf->non_sec_addr,
		.non_sec_code_size = hsf->non_sec_size,
		.sec_code_off = hsf->sec_addr,
		.sec_code_size = hsf->sec_size,
		.code_entry_point = 0,
		.data_dma_base = hsf->vma->addr + hsf->data_addr,
		.data_size = hsf->data_size,
	};

	flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc);

	nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
}

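/* Generic HS ucode boot: reset the falcon, bind it to the ACR
 * instance block, load the bootloader at the top of IMEM and its
 * descriptor into DMEM, then start it and wait up to 100ms for a
 * halt.  Success is judged by the value the ucode leaves in mbox0.
 */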
int
gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf,
		    u32 intr_clear, u32 mbox0_ok)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_falcon *falcon = hsf->falcon;
	u32 mbox0, mbox1;
	int ret;

	/* Reset falcon. */
	nvkm_falcon_reset(falcon);
	nvkm_falcon_bind_context(falcon, acr->inst);

	/* Load bootloader into IMEM. */
	nvkm_falcon_load_imem(falcon, hsf->imem,
			      falcon->code.limit - hsf->imem_size,
			      hsf->imem_size,
			      hsf->imem_tag,
			      0, false);

	/* Load bootloader data into DMEM. */
	hsf->func->bld(acr, hsf);

	/* Boot the falcon. */
	nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, false);

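	/* Seed mbox0 with a poison value so the check below can tell
	 * "ucode reported success" apart from "ucode never ran".
	 */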
	nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
	nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8);
	nvkm_falcon_start(falcon);
	ret = nvkm_falcon_wait_for_halt(falcon, 100);
	if (ret)
		return ret;

	/* Check for successful completion. */
	mbox0 = nvkm_falcon_rd32(falcon, 0x040);
	mbox1 = nvkm_falcon_rd32(falcon, 0x044);
	nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1);
	if (mbox0 && mbox0 != mbox0_ok)
		return -EIO;

	nvkm_falcon_clear_interrupt(falcon, intr_clear);
	nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, true);
	return ret;
}

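/* Common HS firmware load: patch in the signature matching the
 * falcon's operating mode, keep a copy of the bootloader for the
 * later IMEM upload, then place the signed image in instance memory
 * and map it into the ACR VMM so the bootloader can DMA it from
 * there.
 */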
int
gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw,
		    struct nvkm_falcon *falcon)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_acr_hsf *hsf;
	int ret;

	/* Patch the appropriate signature (production/debug) into the FW
	 * image, as determined by the mode the falcon is in.
	 */
	ret = nvkm_falcon_get(falcon, subdev);
	if (ret)
		return ret;

	if (hsfw->sig.patch_loc) {
		if (!falcon->debug) {
			nvkm_debug(subdev, "patching production signature\n");
			memcpy(hsfw->image + hsfw->sig.patch_loc,
			       hsfw->sig.prod.data,
			       hsfw->sig.prod.size);
		} else {
			nvkm_debug(subdev, "patching debug signature\n");
			memcpy(hsfw->image + hsfw->sig.patch_loc,
			       hsfw->sig.dbg.data,
			       hsfw->sig.dbg.size);
		}
	}

	nvkm_falcon_put(falcon, subdev);

	if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL)))
		return -ENOMEM;
	hsf->func = hsfw->func;
	hsf->name = hsfw->name;
	list_add_tail(&hsf->head, &acr->hsf);

	hsf->imem_size = hsfw->imem_size;
	hsf->imem_tag = hsfw->imem_tag;
	hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL);
	if (!hsf->imem)
		return -ENOMEM;

	hsf->non_sec_addr = hsfw->non_sec_addr;
	hsf->non_sec_size = hsfw->non_sec_size;
	hsf->sec_addr = hsfw->sec_addr;
	hsf->sec_size = hsfw->sec_size;
	hsf->data_addr = hsfw->data_addr;
	hsf->data_size = hsfw->data_size;

	/* Make the FW image accessible to the HS bootloader. */
	ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
			      hsfw->image_size, 0x1000, false, &hsf->ucode);
	if (ret)
		return ret;

	nvkm_kmap(hsf->ucode);
	nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size);
	nvkm_done(hsf->ucode);

	ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode),
			   &hsf->vma);
	if (ret)
		return ret;

	ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0);
	if (ret)
		return ret;

	hsf->falcon = falcon;
	return 0;
}

int
gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
{
	return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d);
}

int
gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
{
	return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
}

const struct nvkm_acr_hsf_func
gm200_acr_unload_0 = {
	.load = gm200_acr_unload_load,
	.boot = gm200_acr_unload_boot,
	.bld = gm200_acr_hsfw_bld,
};

MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");

static const struct nvkm_acr_hsf_fwif
gm200_acr_unload_fwif[] = {
	{ 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
	{}
};

int
gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
{
	return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0);
}

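/* Fill the ACR descriptor embedded in the "load" ucode's data segment
 * with our WPR region's properties before loading it.  The read/write
 * masks look like per-privilege-level permissions: readable at every
 * level, writable only at the two highest.
 */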
static int
gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
{
	struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr];

	desc->wpr_region_id = 1;
	desc->regions.no_regions = 2;
	desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
	desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
	desc->regions.region_props[0].region_id = 1;
	desc->regions.region_props[0].read_mask = 0xf;
	desc->regions.region_props[0].write_mask = 0xc;
	desc->regions.region_props[0].client_mask = 0x2;
	flcn_acr_desc_dump(&acr->subdev, desc);

	return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon);
}

static const struct nvkm_acr_hsf_func
gm200_acr_load_0 = {
	.load = gm200_acr_load_load,
	.boot = gm200_acr_load_boot,
	.bld = gm200_acr_hsfw_bld,
};

MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");

static const struct nvkm_acr_hsf_fwif
gm200_acr_load_fwif[] = {
	{ 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 },
	{}
};

static const struct nvkm_acr_func
gm200_acr_0 = {
	.load = gm200_acr_load_fwif,
	.unload = gm200_acr_unload_fwif,
	.wpr_parse = gm200_acr_wpr_parse,
	.wpr_layout = gm200_acr_wpr_layout,
	.wpr_alloc = gm200_acr_wpr_alloc,
	.wpr_build = gm200_acr_wpr_build,
	.wpr_patch = gm200_acr_wpr_patch,
	.wpr_check = gm200_acr_wpr_check,
	.init = gm200_acr_init,
	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
			     BIT_ULL(NVKM_ACR_LSF_GPCCS),
};

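/* Fetch both HS ucode images ("load" and "unload") from the
 * filesystem; failure to locate either one is fatal here.
 */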
static int
gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	const struct nvkm_acr_hsf_fwif *hsfwif;

	hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
				    acr, "acr/bl", "acr/ucode_load", "load");
	if (IS_ERR(hsfwif))
		return PTR_ERR(hsfwif);

	hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
				    acr, "acr/bl", "acr/ucode_unload",
				    "unload");
	if (IS_ERR(hsfwif))
		return PTR_ERR(hsfwif);

	return 0;
}

static const struct nvkm_acr_fwif
gm200_acr_fwif[] = {
	{  0, gm200_acr_load, &gm200_acr_0 },
	{ -1, gm200_acr_nofw, &gm200_acr },
	{}
};

int
gm200_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_acr **pacr)
{
	return nvkm_acr_new_(gm200_acr_fwif, device, type, inst, pacr);
}