/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "fuc/gt215.fuc3.h"

#include <subdev/timer.h>

int
gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	       u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	mutex_lock(&subdev->mutex);
	/* wait for a free slot in the fifo */
	addr = nvkm_rd32(device, 0x10a4a0);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x10a4b0);
		if (tmp != (addr ^ 8))
			break;
	) < 0) {
		mutex_unlock(&subdev->mutex);
		return -EBUSY;
	}

	/* we currently only support a single process at a time waiting
	 * on a synchronous reply, take the PMU mutex and tell the
	 * receive handler what we're waiting for
	 */
	if (reply) {
		pmu->recv.message = message;
		pmu->recv.process = process;
	}

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);

	/* write the packet */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wait for reply, if requested */
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
	}

	mutex_unlock(&subdev->mutex);
	return 0;
}
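
/*
 * Note on the fifo-full test above: PUT (0x10a4a0) and GET (0x10a4b0) are
 * 4-bit counters, while the ring itself only has eight slots (addr & 0x07).
 * Carrying the extra counter bit is the classic trick that lets a plain
 * equality test tell a full ring from an empty one.  A sketch of the two
 * predicates the code implies (illustrative only; these helpers are not
 * part of the driver):
 */
#if 0
static inline bool gt215_ring_full(u32 get, u32 put)
{
	return get == (put ^ 8);	/* slot indices equal, wrap bits differ */
}

static inline bool gt215_ring_empty(u32 get, u32 put)
{
	return get == put;		/* slot indices and wrap bits equal */
}
#endif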

void
gt215_pmu_recv(struct nvkm_pmu *pmu)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 process, message, data0, data1;

	/* nothing to do if GET == PUT */
	u32 addr = nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0 = nvkm_rd32(device, 0x10a1c4);
	data1 = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wake process if it's waiting on a synchronous reply */
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}

	/* right now there are no other expected responses from the engine,
	 * so assume that any unexpected message is an error.
	 */
	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
		  (char)((process & 0x000000ff) >>  0),
		  (char)((process & 0x0000ff00) >>  8),
		  (char)((process & 0x00ff0000) >> 16),
		  (char)((process & 0xff000000) >> 24),
		  process, message, data0, data1);
}

void
gt215_pmu_intr(struct nvkm_pmu *pmu)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
				   stat & 0x00ffffff,
				   nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000080) {
		nvkm_info(subdev, "wr32 %06x %08x\n",
			  nvkm_rd32(device, 0x10a7a0),
			  nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
}

void
gt215_pmu_fini(struct nvkm_pmu *pmu)
{
	nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
}

static void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;

	/* pulse the engine's enable bit; the final read posts the writes */
	nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
	nvkm_rd32(device, 0x022210);
}

static bool
gt215_pmu_enabled(struct nvkm_pmu *pmu)
{
	return nvkm_rd32(pmu->subdev.device, 0x022210) & 0x00000001;
}
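
/*
 * The uploads below go through indexed windows: writing 0x01000000 to the
 * index register (0x10a1c0 for data, 0x10a180 for code) appears to select
 * offset zero with auto-increment, so each subsequent write to the data
 * port lands one word further in.  The code segment is additionally paged
 * in 64-word (256-byte) blocks, which is why every 64th word the page
 * index (i >> 6) is written to 0x10a188 first.
 */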
int
gt215_pmu_init(struct nvkm_pmu *pmu)
{
	struct nvkm_device *device = pmu->subdev.device;
	int i;

	/* upload data segment */
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < pmu->func->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);

	/* upload code segment */
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < pmu->func->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
	}

	/* start it running */
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);

	/* wait for valid host->pmu ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4d0))
			break;
	) < 0)
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

	/* wait for valid pmu->host ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4dc))
			break;
	) < 0)
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
}

static const struct nvkm_pmu_func
gt215_pmu = {
	.code.data = gt215_pmu_code,
	.code.size = sizeof(gt215_pmu_code),
	.data.data = gt215_pmu_data,
	.data.size = sizeof(gt215_pmu_data),
	.enabled = gt215_pmu_enabled,
	.reset = gt215_pmu_reset,
	.init = gt215_pmu_init,
	.fini = gt215_pmu_fini,
	.intr = gt215_pmu_intr,
	.send = gt215_pmu_send,
	.recv = gt215_pmu_recv,
};

int
gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
}
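
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * caller might issue a synchronous request through gt215_pmu_send().  The
 * EXAMPLE_* identifiers are hypothetical placeholders; real process and
 * message IDs are defined by the loaded falcon firmware.
 */
#if 0
static int
example_pmu_query(struct nvkm_pmu *pmu, u32 data0, u32 data1, u32 *result)
{
	u32 reply[2];
	int ret;

	/* blocks until the matching reply arrives; returns -EBUSY if the
	 * host->pmu fifo never drains
	 */
	ret = gt215_pmu_send(pmu, reply, EXAMPLE_PROCESS, EXAMPLE_MESSAGE,
			     data0, data1);
	if (ret)
		return ret;

	*result = reply[0];
	return 0;
}
#endif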