/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/timer.h>

void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
	const struct nvkm_pmu_impl *impl = (void *)nv_oclass(pmu);
	if (impl->pgob)
		impl->pgob(pmu, enable);
}

static int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	      u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	/* wait for a free slot in the fifo */
	addr = nvkm_rd32(device, 0x10a4a0);
	if (!nv_wait_ne(pmu, 0x10a4b0, 0xffffffff, addr ^ 8))
		return -EBUSY;

	/* we currently only support a single process at a time waiting
	 * on a synchronous reply, take the PMU mutex and tell the
	 * receive handler what we're waiting for
	 */
	if (reply) {
		mutex_lock(&subdev->mutex);
		pmu->recv.message = message;
		pmu->recv.process = process;
	}

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);

	/* write the packet */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wait for reply, if requested */
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
		mutex_unlock(&subdev->mutex);
	}

	return 0;
}

static void
nvkm_pmu_recv(struct work_struct *work)
{
	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
	struct nvkm_device *device = pmu->subdev.device;
	u32 process, message, data0, data1;

	/* nothing to do if GET == PUT */
	u32 addr = nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
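	/* the reply mirrors the layout nvkm_pmu_send() writes: four
	 * successive reads of the data port return process, message and
	 * the two data words in that order.  bit 25 in 0x10a1c0 above
	 * (vs bit 24 on the send side) presumably selects read rather
	 * than write auto-increment through the port
	 */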
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0 = nvkm_rd32(device, 0x10a1c4);
	data1 = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

	/* release data segment access */
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* wake process if it's waiting on a synchronous reply */
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}

	/* right now there are no other expected responses from the engine,
	 * so assume that any unexpected message is an error.
	 */
	nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
		(char)((process & 0x000000ff) >> 0),
		(char)((process & 0x0000ff00) >> 8),
		(char)((process & 0x00ff0000) >> 16),
		(char)((process & 0xff000000) >> 24),
		process, message, data0, data1);
}

static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = container_of(subdev, typeof(*pmu), subdev);
	struct nvkm_device *device = pmu->subdev.device;
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
				 stat & 0x00ffffff,
				 nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000080) {
		nv_info(pmu, "wr32 0x%06x 0x%08x\n",
			nvkm_rd32(device, 0x10a7a0),
			nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	if (intr) {
		nv_error(pmu, "intr 0x%08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
}

int
_nvkm_pmu_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_pmu *pmu = (void *)object;
	struct nvkm_device *device = pmu->subdev.device;

	nvkm_wr32(device, 0x10a014, 0x00000060);
	flush_work(&pmu->recv.work);

	return nvkm_subdev_fini(&pmu->subdev, suspend);
}

int
_nvkm_pmu_init(struct nvkm_object *object)
{
	const struct nvkm_pmu_impl *impl = (void *)object->oclass;
	struct nvkm_pmu *pmu = (void *)object;
	struct nvkm_device *device = pmu->subdev.device;
	int ret, i;

	ret = nvkm_subdev_init(&pmu->subdev);
	if (ret)
		return ret;

	nv_subdev(pmu)->intr = nvkm_pmu_intr;
	pmu->message = nvkm_pmu_send;
	pmu->pgob = nvkm_pmu_pgob;

	/* prevent previous ucode from running, wait for idle, reset */
	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
	nv_wait(pmu, 0x10a04c, 0xffffffff, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
	nvkm_rd32(device, 0x000200);
	nv_wait(pmu, 0x10a10c, 0x00000006, 0x00000000);
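
	/* note: both segment uploads below drive an auto-incrementing
	 * port: a window register (0x10a1c0 for data, 0x10a180 for code)
	 * is primed with bit 24 set, then successive words are pushed
	 * through the matching port (0x10a1c4/0x10a184).  the code
	 * upload additionally writes i >> 6 to 0x10a188 every 64 words,
	 * presumably selecting the next 256-byte code block
	 */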
	/* upload data segment */
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < impl->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, impl->data.data[i]);

	/* upload code segment */
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < impl->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, impl->code.data[i]);
	}

	/* start it running */
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);

	/* wait for valid host->pmu ring configuration */
	if (!nv_wait_ne(pmu, 0x10a4d0, 0xffffffff, 0x00000000))
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

	/* wait for valid pmu->host ring configuration */
	if (!nv_wait_ne(pmu, 0x10a4dc, 0xffffffff, 0x00000000))
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
}

int
nvkm_pmu_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		 struct nvkm_oclass *oclass, int length, void **pobject)
{
	struct nvkm_pmu *pmu;
	int ret;

	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PMU",
				  "pmu", length, pobject);
	pmu = *pobject;
	if (ret)
		return ret;

	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
	init_waitqueue_head(&pmu->recv.wait);
	return 0;
}

int
_nvkm_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nvkm_pmu *pmu;
	int ret = nvkm_pmu_create(parent, engine, oclass, &pmu);
	*pobject = nv_object(pmu);
	return ret;
}
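
/* usage sketch (hypothetical): a client wanting a synchronous reply passes a
 * two-word buffer through the message hook installed by _nvkm_pmu_init();
 * PROC_FOO and MSG_BAR below are illustrative placeholders, not real
 * process/message IDs from the PMU ucode:
 *
 *	u32 reply[2];
 *	int ret = pmu->message(pmu, reply, PROC_FOO, MSG_BAR, 0, 0);
 *
 * on success, reply[0] and reply[1] hold the data0/data1 words from the
 * matching response handled in nvkm_pmu_recv()
 */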