/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/mc.h>

/*
 * Upload @size bytes of code from @data into the falcon's instruction
 * memory at offset @start through port @port, tagged with @tag.  A
 * secure-tagged write is refused on falcons without a secret
 * (falcon->secret == 0).
 */
void
nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u16 tag, u8 port, bool secure)
{
	/* Secure tags only make sense on a falcon carrying a secret. */
	if (secure && !falcon->secret) {
		nvkm_warn(falcon->user,
			  "writing with secure tag on a non-secure falcon!\n");
		return;
	}

	falcon->func->load_imem(falcon, data, start, size, tag, port,
				secure);
}

/*
 * Upload @size bytes from @data into the falcon's data memory at offset
 * @start through port @port.  DMEM accesses are serialized under
 * dmem_mutex.
 */
void
nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u8 port)
{
	mutex_lock(&falcon->dmem_mutex);

	falcon->func->load_dmem(falcon, data, start, size, port);

	mutex_unlock(&falcon->dmem_mutex);
}

/*
 * Read @size bytes of the falcon's data memory, starting at offset
 * @start, through port @port into @data.  Serialized under dmem_mutex,
 * like nvkm_falcon_load_dmem().
 */
void
nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
		      void *data)
{
	mutex_lock(&falcon->dmem_mutex);

	falcon->func->read_dmem(falcon, start, size, port, data);

	mutex_unlock(&falcon->dmem_mutex);
}

/*
 * Bind the instance block @inst as the falcon's context.  Logs an error
 * and returns if the implementation provides no bind_context hook.
 */
void
nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst)
{
	if (!falcon->func->bind_context) {
		nvkm_error(falcon->user,
			   "Context binding not supported on this falcon!\n");
		return;
	}

	falcon->func->bind_context(falcon, inst);
}

/* Set the address the falcon will start executing from. */
void
nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
	falcon->func->set_start_addr(falcon, start_addr);
}

/* Kick off execution of the loaded falcon program. */
void
nvkm_falcon_start(struct nvkm_falcon *falcon)
{
	falcon->func->start(falcon);
}

/*
 * Power the falcon up: enable its engine/subdev through MC, then run
 * the implementation's enable hook.  On hook failure the MC enable is
 * rolled back.  Returns 0 on success or the hook's negative error code.
 */
int
nvkm_falcon_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	enum nvkm_devidx id = falcon->owner->index;
	int ret;

	nvkm_mc_enable(device, id);
	ret = falcon->func->enable(falcon);
	if (ret) {
		/* Undo the MC enable so state stays balanced on failure. */
		nvkm_mc_disable(device, id);
		return ret;
	}

	return 0;
}

/*
 * Power the falcon down: run the implementation's disable hook, then
 * disable the engine/subdev through MC.  A no-op if MC already reports
 * the unit disabled.
 */
void
nvkm_falcon_disable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	enum nvkm_devidx id = falcon->owner->index;

	/* already disabled, return or wait_idle will timeout */
	if (!nvkm_mc_enabled(device, id))
		return;

	falcon->func->disable(falcon);

	nvkm_mc_disable(device, id);
}

/* Reset the falcon by cycling it through disable then enable. */
int
nvkm_falcon_reset(struct nvkm_falcon *falcon)
{
	nvkm_falcon_disable(falcon);
	return nvkm_falcon_enable(falcon);
}

/* Wait up to @ms milliseconds for the falcon to halt. */
int
nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
	return falcon->func->wait_for_halt(falcon, ms);
}

/* Clear the falcon interrupts selected by @mask. */
int
nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
	return falcon->func->clear_interrupt(falcon, mask);
}

/*
 * Release the falcon previously acquired by @user via
 * nvkm_falcon_get().  Ownership is only dropped if @user is the current
 * holder; releasing on behalf of another user is silently ignored.
 * Tolerates a NULL @falcon.
 */
void
nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	if (unlikely(!falcon))
		return;

	mutex_lock(&falcon->mutex);
	if (falcon->user == user) {
		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
		falcon->user = NULL;
	}
	mutex_unlock(&falcon->mutex);
}

/*
 * Acquire exclusive use of the falcon for subdev @user.  Returns 0 on
 * success, or -EBUSY if another subdev already holds it.  Pair with
 * nvkm_falcon_put().
 */
int
nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	mutex_lock(&falcon->mutex);
	if (falcon->user) {
		nvkm_error(user, "%s falcon already acquired by %s!\n",
			   falcon->name, nvkm_subdev_name[falcon->user->index]);
		mutex_unlock(&falcon->mutex);
		return -EBUSY;
	}

	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
	falcon->user = user;
	mutex_unlock(&falcon->mutex);
	return 0;
}

/*
 * One-time construction of a falcon instance: record the implementation
 * @func, owning @subdev, @name and register base @addr, initialize the
 * locks, then probe the hardware for its capabilities.
 */
void
nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
		 struct nvkm_subdev *subdev, const char *name, u32 addr,
		 struct nvkm_falcon *falcon)
{
	u32 debug_reg;
	u32 reg;

	falcon->func = func;
	falcon->owner = subdev;
	falcon->name = name;
	falcon->addr = addr;
	mutex_init(&falcon->mutex);
	mutex_init(&falcon->dmem_mutex);

	/* Read version, secret level and IMEM/DMEM port counts from 0x12c. */
	reg = nvkm_falcon_rd32(falcon, 0x12c);
	falcon->version = reg & 0xf;
	falcon->secret = (reg >> 4) & 0x3;
	falcon->code.ports = (reg >> 8) & 0xf;
	falcon->data.ports = (reg >> 12) & 0xf;

	/* Read IMEM/DMEM size limits from 0x108. */
	reg = nvkm_falcon_rd32(falcon, 0x108);
	falcon->code.limit = (reg & 0x1ff) << 8;
	falcon->data.limit = (reg & 0x3fe00) >> 1;

	/* Per-engine location of the debug register; 0 means "none". */
	switch (subdev->index) {
	case NVKM_ENGINE_GR:
		debug_reg = 0x0;
		break;
	case NVKM_SUBDEV_PMU:
		debug_reg = 0xc08;
		break;
	case NVKM_ENGINE_NVDEC:
		debug_reg = 0xd00;
		break;
	case NVKM_ENGINE_SEC2:
		debug_reg = 0x408;
		/* SEC2 is the only falcon here with an EMEM region. */
		falcon->has_emem = true;
		break;
	default:
		nvkm_warn(subdev, "unsupported falcon %s!\n",
			  nvkm_subdev_name[subdev->index]);
		debug_reg = 0;
		break;
	}

	if (debug_reg) {
		u32 val = nvkm_falcon_rd32(falcon, debug_reg);
		falcon->debug = (val >> 20) & 0x1;
	}
}

/* Free *@pfalcon (if any) and clear the caller's pointer. */
void
nvkm_falcon_del(struct nvkm_falcon **pfalcon)
{
	if (*pfalcon) {
		kfree(*pfalcon);
		*pfalcon = NULL;
	}
}