/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "fuc/gt215.fuc3.h"

#include <subdev/timer.h>

int
gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	       u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	mutex_lock(&pmu->send.mutex);
	/* wait for a free slot in the fifo (the 4-bit GET/PUT pointers
	 * index an 8-entry ring; GET == PUT ^ 8, equal apart from the
	 * wrap bit, means the ring is full)
	 */
	addr = nvkm_rd32(device, 0x10a4a0);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x10a4b0);
		if (tmp != (addr ^ 8))
			break;
	) < 0) {
		mutex_unlock(&pmu->send.mutex);
		return -EBUSY;
	}

	/* we currently only support a single process at a time waiting
	 * on a synchronous reply, take the PMU mutex and tell the
	 * receive handler what we're waiting for
	 */
	if (reply) {
		pmu->recv.message = message;
		pmu->recv.process = process;
	}

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);

	/* write the packet */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wait for reply, if requested */
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
	}

	mutex_unlock(&pmu->send.mutex);
	return 0;
}
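/*
 * Illustrative only, not part of the original file: a synchronous
 * request/reply exchange through the hook above might look roughly like
 * the sketch below.  PROC_EXAMPLE and MSG_EXAMPLE are hypothetical IDs;
 * the real process/message values are defined by the fuc firmware built
 * into fuc/gt215.fuc3.h.
 *
 *	static int
 *	example_pmu_ping(struct nvkm_pmu *pmu)
 *	{
 *		u32 reply[2];
 *		int ret;
 *
 *		ret = pmu->func->send(pmu, reply, PROC_EXAMPLE,
 *				      MSG_EXAMPLE, 0, 0);
 *		if (ret)
 *			return ret;
 *		return reply[0] ? 0 : -EIO;	// reply words from the PMU
 *	}
 */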
void
gt215_pmu_recv(struct nvkm_pmu *pmu)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 process, message, data0, data1;

	/* nothing to do if GET == PUT */
	u32 addr = nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0 = nvkm_rd32(device, 0x10a1c4);
	data1 = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wake process if it's waiting on a synchronous reply */
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}

	/* right now there are no other expected responses from the engine,
	 * so assume that any unexpected message is an error.
	 */
	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
		  (char)((process & 0x000000ff) >>  0),
		  (char)((process & 0x0000ff00) >>  8),
		  (char)((process & 0x00ff0000) >> 16),
		  (char)((process & 0xff000000) >> 24),
		  process, message, data0, data1);
}

void
gt215_pmu_intr(struct nvkm_pmu *pmu)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
				   stat & 0x00ffffff,
				   nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000080) {
		nvkm_info(subdev, "wr32 %06x %08x\n",
			  nvkm_rd32(device, 0x10a7a0),
			  nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
}

void
gt215_pmu_fini(struct nvkm_pmu *pmu)
{
	nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
	flush_work(&pmu->recv.work);
}

static void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;

	nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
	nvkm_rd32(device, 0x022210);
}

static bool
gt215_pmu_enabled(struct nvkm_pmu *pmu)
{
	return nvkm_rd32(pmu->subdev.device, 0x022210) & 0x00000001;
}
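/*
 * Illustrative only, an assumption rather than code from this file: the
 * enabled()/reset() hooks above let common PMU code pulse the engine gate
 * before firmware is (re)loaded, roughly:
 *
 *	if (pmu->func->enabled(pmu))
 *		pmu->func->reset(pmu);
 *	ret = pmu->func->init(pmu);
 *
 * Bit 0 of 0x022210 appears to gate the PMU; writing it low then high
 * pulses the gate, and the trailing nvkm_rd32() flushes the posted
 * writes before init continues.
 */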
int
gt215_pmu_init(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;
	int i;

	/* Inhibit interrupts, and wait for idle. */
	if (pmu->func->enabled(pmu)) {
		nvkm_wr32(device, 0x10a014, 0x0000ffff);
		nvkm_msec(device, 2000,
			if (!nvkm_rd32(device, 0x10a04c))
				break;
		);
	}

	pmu->func->reset(pmu);

	/* Wait for IMEM/DMEM scrubbing to be complete. */
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
			break;
	);

	/* upload data segment */
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < pmu->func->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);

	/* upload code segment, selecting the next 256-byte code page
	 * every 64 words
	 */
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < pmu->func->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
	}

	/* start it running */
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);

	/* wait for valid host->pmu ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4d0))
			break;
	) < 0)
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

	/* wait for valid pmu->host ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4dc))
			break;
	) < 0)
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
}

const struct nvkm_falcon_func
gt215_pmu_flcn = {
};

static const struct nvkm_pmu_func
gt215_pmu = {
	.flcn = &gt215_pmu_flcn,
	.code.data = gt215_pmu_code,
	.code.size = sizeof(gt215_pmu_code),
	.data.data = gt215_pmu_data,
	.data.size = sizeof(gt215_pmu_data),
	.enabled = gt215_pmu_enabled,
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
};

static const struct nvkm_pmu_fwif
gt215_pmu_fwif[] = {
	{ -1, gf100_pmu_nofw, &gt215_pmu },
	{}
};

int
gt215_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gt215_pmu_fwif, device, type, inst, ppmu);
}
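/*
 * Illustrative only, a hypothetical entry rather than code from this
 * file: constructors such as gt215_pmu_new() are typically wired up via
 * the per-chipset device tables, along the lines of:
 *
 *	.pmu = { 0x00000001, gt215_pmu_new },
 */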