/*
 * Copyright 2022 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/mc.h>
#include <subdev/timer.h>

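/* DMA completion test: bit 1 of the falcon DMA command/status register at
 * 0x118 reads back set once the transfer has completed.
 */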
static bool
ga102_flcn_dma_done(struct nvkm_falcon *falcon)
{
	return !!(nvkm_falcon_rd32(falcon, 0x118) & 0x00000002);
}

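/* Kick a single transfer: program the local (IMEM/DMEM) offset and the
 * external source offset, then write the command word to start the DMA.
 */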
static void
ga102_flcn_dma_xfer(struct nvkm_falcon *falcon, u32 mem_base, u32 dma_base, u32 cmd)
{
	nvkm_falcon_wr32(falcon, 0x114, mem_base);
	nvkm_falcon_wr32(falcon, 0x11c, dma_base);
	nvkm_falcon_wr32(falcon, 0x118, cmd);
}

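/* Build the command word (log2 transfer size, IMEM target, secure flag)
 * and program the transfer base address in 256-byte units; the write to
 * 0x128 presumably clears the upper address bits.
 */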
static int
ga102_flcn_dma_init(struct nvkm_falcon *falcon, u64 dma_addr, int xfer_len,
		    enum nvkm_falcon_mem mem_type, bool sec, u32 *cmd)
{
	*cmd = (ilog2(xfer_len) - 2) << 8;
	if (mem_type == IMEM)
		*cmd |= 0x00000010;
	if (sec)
		*cmd |= 0x00000004;

	nvkm_falcon_wr32(falcon, 0x110, dma_addr >> 8);
	nvkm_falcon_wr32(falcon, 0x128, 0x00000000);
	return 0;
}

const struct nvkm_falcon_func_dma
ga102_flcn_dma = {
	.init = ga102_flcn_dma_init,
	.xfer = ga102_flcn_dma_xfer,
	.done = ga102_flcn_dma_done,
};

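/* After reset, wait up to 20ms for the falcon's IMEM/DMEM scrub to
 * complete, indicated by bit 12 of register 0x0f4 clearing.
 */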
int
ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
{
	nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);

	if (nvkm_msec(falcon->owner->device, 20,
		if (!(nvkm_falcon_rd32(falcon, 0x0f4) & 0x00001000))
			break;
	) < 0)
		return -ETIMEDOUT;

	return 0;
}

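/* Pre-reset quiesce: poll up to 150us for bit 31 of 0x0f4 to assert.
 * The wait is best-effort (_warn = false silences the timeout warning)
 * and the function always reports success.
 */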
int
ga102_flcn_reset_prep(struct nvkm_falcon *falcon)
{
	nvkm_falcon_rd32(falcon, 0x0f4);

	nvkm_usec(falcon->owner->device, 150,
		if (nvkm_falcon_rd32(falcon, 0x0f4) & 0x80000000)
			break;
		_warn = false;
	);

	return 0;
}

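/* Core select: if bit 4 of addr2 + 0x668 is set (this looks like the
 * falcon/RISC-V core-select register), write zero to request falcon mode
 * and wait up to 10ms for bit 0 to acknowledge the switch.
 */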
int
ga102_flcn_select(struct nvkm_falcon *falcon)
{
	if ((nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000010) != 0x00000000) {
		nvkm_falcon_wr32(falcon, falcon->addr2 + 0x668, 0x00000000);
		if (nvkm_msec(falcon->owner->device, 10,
			if (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000001)
				break;
		) < 0)
			return -ETIMEDOUT;
	}

	return 0;
}

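/* Program the signature and the engine/ucode identifiers into the
 * secure-boot interface (the 0x180 write appears to latch them), then
 * fall through to the common gm200 boot sequence.
 */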
int
ga102_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *mbox0, u32 *mbox1, u32 mbox0_ok, u32 irqsclr)
{
	struct nvkm_falcon *falcon = fw->falcon;

	nvkm_falcon_wr32(falcon, falcon->addr2 + 0x210, fw->dmem_sign);
	nvkm_falcon_wr32(falcon, falcon->addr2 + 0x19c, fw->engine_id);
	nvkm_falcon_wr32(falcon, falcon->addr2 + 0x198, fw->ucode_id);
	nvkm_falcon_wr32(falcon, falcon->addr2 + 0x180, 0x00000001);

	return gm200_flcn_fw_boot(fw, mbox0, mbox1, mbox0_ok, irqsclr);
}

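/* DMA the firmware image into the falcon: the 0x624/0x600 writes appear
 * to set up the transfer context, after which the code section is copied
 * into IMEM (marked secure) and the data section into DMEM.
 */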
int
ga102_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
	struct nvkm_falcon *falcon = fw->falcon;
	int ret = 0;

	nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
	nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
	nvkm_falcon_mask(falcon, 0x600, 0x00010007, (0 << 16) | (1 << 2) | 1);

	ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->imem_base_img,
				 IMEM, fw->imem_base, fw->imem_size, true);
	if (ret)
		return ret;

	ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->dmem_base_img,
				 DMEM, fw->dmem_base, fw->dmem_size, false);
	if (ret)
		return ret;

	return 0;
}

const struct nvkm_falcon_fw_func
ga102_flcn_fw = {
	.signature = ga100_flcn_fw_signature,
	.reset = gm200_flcn_fw_reset,
	.load = ga102_flcn_fw_load,
	.boot = ga102_flcn_fw_boot,
};