/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/timer.h>

void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
	const struct nvkm_pmu_impl *impl = (void *)nv_oclass(pmu);
	if (impl->pgob)
		impl->pgob(pmu, enable);
}

static int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	      u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	/* wait for a free slot in the fifo */
	addr = nvkm_rd32(device, 0x10a4a0);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x10a4b0);
		if (tmp != (addr ^ 8))
			break;
	) < 0)
		return -EBUSY;

	/* we currently only support a single process at a time waiting
	 * on a synchronous reply, take the PMU mutex and tell the
	 * receive handler what we're waiting for
	 */
	if (reply) {
		mutex_lock(&subdev->mutex);
		pmu->recv.message = message;
		pmu->recv.process = process;
	}

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);

	/* write the packet */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wait for reply, if requested */
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
		mutex_unlock(&subdev->mutex);
	}

	return 0;
}
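#if 0
/* Illustrative sketch only: how a caller might issue a synchronous
 * request through the ->message pointer that _nvkm_pmu_init() points
 * at nvkm_pmu_send().  EXAMPLE_PROC and EXAMPLE_MSG are made-up
 * placeholders; real process/message IDs come from the PMU firmware
 * interface, not from this file.
 */
static int
example_pmu_query(struct nvkm_pmu *pmu, u32 *result)
{
	u32 reply[2];
	int ret;

	/* passing a reply buffer makes the call block until
	 * nvkm_pmu_recv() sees the matching response
	 */
	ret = pmu->message(pmu, reply, EXAMPLE_PROC, EXAMPLE_MSG, 0, 0);
	if (ret)
		return ret;

	*result = reply[0];
	return 0;
}
#endif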
static void
nvkm_pmu_recv(struct work_struct *work)
{
	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 process, message, data0, data1;

	/* nothing to do if GET == PUT */
	u32 addr = nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0 = nvkm_rd32(device, 0x10a1c4);
	data1 = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wake process if it's waiting on a synchronous reply */
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}

	/* right now there are no other expected responses from the engine,
	 * so assume that any unexpected message is an error.
	 */
	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
		  (char)((process & 0x000000ff) >>  0),
		  (char)((process & 0x0000ff00) >>  8),
		  (char)((process & 0x00ff0000) >> 16),
		  (char)((process & 0xff000000) >> 24),
		  process, message, data0, data1);
}
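#if 0
/* Illustrative sketch only: the ring arithmetic implied by the register
 * accesses in nvkm_pmu_send() and nvkm_pmu_recv() above (inferred from
 * this file, not a documented hardware interface).  Each ring appears
 * to have 8 slots of 4 dwords (16 bytes), while GET/PUT count modulo
 * 16; the extra index bit keeps "empty" and "full" distinguishable
 * without sacrificing a slot.
 */
static bool
example_fifo_empty(u32 get, u32 put)
{
	return get == put;		/* nvkm_pmu_recv(): GET == PUT */
}

static bool
example_fifo_full(u32 get, u32 put)
{
	return get == (put ^ 8);	/* nvkm_pmu_send() waits while true */
}

static u32
example_fifo_slot(u32 idx, u32 base)
{
	return ((idx & 0x07) << 4) + base; /* 16-byte slot within segment */
}
#endif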
static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = container_of(subdev, typeof(*pmu), subdev);
	struct nvkm_device *device = pmu->subdev.device;
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
				   stat & 0x00ffffff,
				   nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000080) {
		nvkm_info(subdev, "wr32 %06x %08x\n",
			  nvkm_rd32(device, 0x10a7a0),
			  nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
}

int
_nvkm_pmu_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_pmu *pmu = (void *)object;
	struct nvkm_device *device = pmu->subdev.device;

	nvkm_wr32(device, 0x10a014, 0x00000060);
	flush_work(&pmu->recv.work);

	return nvkm_subdev_fini(&pmu->subdev, suspend);
}

int
_nvkm_pmu_init(struct nvkm_object *object)
{
	const struct nvkm_pmu_impl *impl = (void *)object->oclass;
	struct nvkm_pmu *pmu = (void *)object;
	struct nvkm_device *device = pmu->subdev.device;
	int ret, i;

	ret = nvkm_subdev_init(&pmu->subdev);
	if (ret)
		return ret;

	nv_subdev(pmu)->intr = nvkm_pmu_intr;
	pmu->message = nvkm_pmu_send;
	pmu->pgob = nvkm_pmu_pgob;

	/* prevent previous ucode from running, wait for idle, reset */
	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
	nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x10a04c))
			break;
	);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
	nvkm_rd32(device, 0x000200);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
			break;
	);

	/* upload data segment */
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < impl->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, impl->data.data[i]);

	/* upload code segment */
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < impl->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, impl->code.data[i]);
	}

	/* start it running */
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);

	/* wait for valid host->pmu ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4d0))
			break;
	) < 0)
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

	/* wait for valid pmu->host ring configuration */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4dc))
			break;
	) < 0)
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
}

int
nvkm_pmu_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		 struct nvkm_oclass *oclass, int length, void **pobject)
{
	struct nvkm_pmu *pmu;
	int ret;

	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PMU",
				  "pmu", length, pobject);
	pmu = *pobject;
	if (ret)
		return ret;

	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
	init_waitqueue_head(&pmu->recv.wait);
	return 0;
}

int
_nvkm_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nvkm_pmu *pmu;
	int ret = nvkm_pmu_create(parent, engine, oclass, &pmu);
	*pobject = nv_object(pmu);
	return ret;
}
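#if 0
/* Illustrative sketch only: roughly how a chip-specific PMU class would
 * wire up the generic helpers in this file.  The handle value, the
 * example_pmu_code/example_pmu_data firmware arrays and the dtor name
 * are assumptions for illustration; see the real per-chip files for
 * the actual tables.
 */
struct nvkm_oclass *
example_pmu_oclass = &(struct nvkm_pmu_impl) {
	.base.handle = NV_SUBDEV(PMU, 0xa3),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = _nvkm_pmu_ctor,
		.dtor = _nvkm_pmu_dtor,
		.init = _nvkm_pmu_init,
		.fini = _nvkm_pmu_fini,
	},
	.code.data = example_pmu_code,
	.code.size = sizeof(example_pmu_code),
	.data.data = example_pmu_data,
	.data.size = sizeof(example_pmu_data),
}.base;
#endif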