/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
21 */ 22 #include "priv.h" 23 24 #include <core/memory.h> 25 #include <subdev/acr.h> 26 27 #include <nvfw/flcn.h> 28 #include <nvfw/pmu.h> 29 30 static int 31 gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr) 32 { 33 struct nv_pmu_acr_bootstrap_falcon_msg *msg = 34 container_of(hdr, typeof(*msg), msg.hdr); 35 return msg->falcon_id; 36 } 37 38 int 39 gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon, 40 enum nvkm_acr_lsf_id id) 41 { 42 struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); 43 struct nv_pmu_acr_bootstrap_falcon_cmd cmd = { 44 .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, 45 .cmd.hdr.size = sizeof(cmd), 46 .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON, 47 .flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES, 48 .falcon_id = id, 49 }; 50 int ret; 51 52 ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, 53 gm20b_pmu_acr_bootstrap_falcon_cb, 54 &pmu->subdev, msecs_to_jiffies(1000)); 55 if (ret >= 0) { 56 if (ret != cmd.falcon_id) 57 ret = -EIO; 58 else 59 ret = 0; 60 } 61 62 return ret; 63 } 64 65 int 66 gm20b_pmu_acr_boot(struct nvkm_falcon *falcon) 67 { 68 struct nv_pmu_args args = { .secure_mode = true }; 69 const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args); 70 nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0); 71 nvkm_falcon_start(falcon); 72 return 0; 73 } 74 75 void 76 gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) 77 { 78 struct loader_config hdr; 79 u64 addr; 80 81 nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); 82 addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8); 83 hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8); 84 hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8); 85 addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8); 86 hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8); 87 hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8); 88 addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8); 89 
hdr.overlay_dma_base = lower_32_bits((addr + adjust) << 8); 90 hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) << 8); 91 nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 92 93 loader_config_dump(&acr->subdev, &hdr); 94 } 95 96 void 97 gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld, 98 struct nvkm_acr_lsfw *lsfw) 99 { 100 const u64 base = lsfw->offset.img + lsfw->app_start_offset; 101 const u64 code = (base + lsfw->app_resident_code_offset) >> 8; 102 const u64 data = (base + lsfw->app_resident_data_offset) >> 8; 103 const struct loader_config hdr = { 104 .dma_idx = FALCON_DMAIDX_UCODE, 105 .code_dma_base = lower_32_bits(code), 106 .code_size_total = lsfw->app_size, 107 .code_size_to_load = lsfw->app_resident_code_size, 108 .code_entry_point = lsfw->app_imem_entry, 109 .data_dma_base = lower_32_bits(data), 110 .data_size = lsfw->app_resident_data_size, 111 .overlay_dma_base = lower_32_bits(code), 112 .argc = 1, 113 .argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args), 114 .code_dma_base1 = upper_32_bits(code), 115 .data_dma_base1 = upper_32_bits(data), 116 .overlay_dma_base1 = upper_32_bits(code), 117 }; 118 119 nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 120 } 121 122 static const struct nvkm_acr_lsf_func 123 gm20b_pmu_acr = { 124 .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX, 125 .bld_size = sizeof(struct loader_config), 126 .bld_write = gm20b_pmu_acr_bld_write, 127 .bld_patch = gm20b_pmu_acr_bld_patch, 128 .boot = gm20b_pmu_acr_boot, 129 .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon, 130 }; 131 132 static int 133 gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr) 134 { 135 struct nv_pmu_acr_init_wpr_region_msg *msg = 136 container_of(hdr, typeof(*msg), msg.hdr); 137 struct nvkm_pmu *pmu = priv; 138 struct nvkm_subdev *subdev = &pmu->subdev; 139 140 if (msg->error_code) { 141 nvkm_error(subdev, "ACR WPR init failure: %d\n", 142 msg->error_code); 143 return -EINVAL; 144 } 145 146 nvkm_debug(subdev, "ACR WPR init complete\n"); 147 
complete_all(&pmu->wpr_ready); 148 return 0; 149 } 150 151 static int 152 gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu) 153 { 154 struct nv_pmu_acr_init_wpr_region_cmd cmd = { 155 .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, 156 .cmd.hdr.size = sizeof(cmd), 157 .cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION, 158 .region_id = 1, 159 .wpr_offset = 0, 160 }; 161 162 return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, 163 gm20b_pmu_acr_init_wpr_callback, pmu, 0); 164 } 165 166 int 167 gm20b_pmu_initmsg(struct nvkm_pmu *pmu) 168 { 169 struct nv_pmu_init_msg msg; 170 int ret; 171 172 ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg)); 173 if (ret) 174 return ret; 175 176 if (msg.hdr.unit_id != NV_PMU_UNIT_INIT || 177 msg.msg_type != NV_PMU_INIT_MSG_INIT) 178 return -EINVAL; 179 180 nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index, 181 msg.queue_info[0].offset, 182 msg.queue_info[0].size); 183 nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index, 184 msg.queue_info[1].offset, 185 msg.queue_info[1].size); 186 nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index, 187 msg.queue_info[4].offset, 188 msg.queue_info[4].size); 189 return gm20b_pmu_acr_init_wpr(pmu); 190 } 191 192 void 193 gm20b_pmu_recv(struct nvkm_pmu *pmu) 194 { 195 if (!pmu->initmsg_received) { 196 int ret = pmu->func->initmsg(pmu); 197 if (ret) { 198 nvkm_error(&pmu->subdev, 199 "error parsing init message: %d\n", ret); 200 return; 201 } 202 203 pmu->initmsg_received = true; 204 } 205 206 nvkm_falcon_msgq_recv(pmu->msgq); 207 } 208 209 static const struct nvkm_pmu_func 210 gm20b_pmu = { 211 .flcn = >215_pmu_flcn, 212 .enabled = gf100_pmu_enabled, 213 .intr = gt215_pmu_intr, 214 .recv = gm20b_pmu_recv, 215 .initmsg = gm20b_pmu_initmsg, 216 }; 217 218 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 219 MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); 220 MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); 221 MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); 222 #endif 223 224 int 225 gm20b_pmu_load(struct 
nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif) 226 { 227 return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon, 228 NVKM_ACR_LSF_PMU, "pmu/", 229 ver, fwif->acr); 230 } 231 232 static const struct nvkm_pmu_fwif 233 gm20b_pmu_fwif[] = { 234 { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr }, 235 {} 236 }; 237 238 int 239 gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 240 { 241 return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu); 242 } 243