/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/handle.h>
#include <core/notify.h>
#include <engine/dmaobj.h>

#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

/* Release a channel returned by nvkm_fifo_chan_inst()/nvkm_fifo_chan_chid(),
 * dropping the fifo lock that a successful lookup left held.  A NULL *pchan
 * means the lookup failed and the lock was already released, so there is
 * nothing to do.
 */
void
nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
		   struct nvkm_fifo_chan **pchan)
{
	struct nvkm_fifo_chan *chan = *pchan;
	if (likely(chan)) {
		*pchan = NULL;
		spin_unlock_irqrestore(&fifo->lock, flags);
	}
}

/* Look up a channel by instance address.  On success, returns with
 * fifo->lock held and the saved irq flags in *rflags; the caller must
 * release both via nvkm_fifo_chan_put().
 */
struct nvkm_fifo_chan *
nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&fifo->lock, flags);
	for (i = fifo->min; i < fifo->max; i++) {
		struct nvkm_fifo_chan *chan = (void *)fifo->channel[i];
		if (chan && chan->inst == inst) {
			*rflags = flags;
			return chan;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

/* Look up a channel by channel id, with the same locking contract as
 * nvkm_fifo_chan_inst().
 */
struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
{
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	if (fifo->channel[chid]) {
		*rflags = flags;
		return (void *)fifo->channel[chid];
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}
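
/* Example (editor's sketch, not part of the driver): fault/interrupt
 * handlers typically pair the lookup and put helpers like this, doing
 * their work while fifo->lock is held:
 *
 *	unsigned long flags;
 *	struct nvkm_fifo_chan *chan;
 *
 *	chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
 *	if (chan) {
 *		... report the fault against chan->chid ...
 *		nvkm_fifo_chan_put(fifo, flags, &chan);
 *	}
 */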

static int
nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
		     struct nvkm_notify *notify)
{
	if (size == 0) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = 0;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_event_func = {
	.ctor = nvkm_fifo_event_ctor,
};

int
nvkm_fifo_channel_create_(struct nvkm_object *parent,
			  struct nvkm_object *engine,
			  struct nvkm_oclass *oclass,
			  int bar, u32 addr, u32 size, u64 pushbuf,
			  u64 engmask, int len, void **ptr)
{
	struct nvkm_client *client = nvkm_client(parent);
	struct nvkm_handle *handle;
	struct nvkm_dmaobj *dmaobj;
	struct nvkm_fifo *fifo = (void *)engine;
	struct nvkm_fifo_base *base = (void *)parent;
	struct nvkm_fifo_chan *chan;
	struct nvkm_dmaeng *dmaeng;
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	unsigned long flags;
	int ret;

	/* create base object class */
	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
				  engmask, len, ptr);
	chan = *ptr;
	if (ret)
		return ret;

	/* validate dma object representing push buffer */
	if (pushbuf) {
		handle = nvkm_client_search(client, pushbuf);
		if (!handle)
			return -ENOENT;
		dmaobj = (void *)handle->object;

		dmaeng = (void *)dmaobj->base.engine;
		switch (dmaobj->base.oclass->handle) {
		case NV_DMA_FROM_MEMORY:
		case NV_DMA_IN_MEMORY:
			break;
		default:
			return -EINVAL;
		}

		ret = dmaeng->bind(dmaobj, &base->gpuobj, &chan->pushgpu);
		if (ret)
			return ret;
	}

	/* find a free fifo channel */
	spin_lock_irqsave(&fifo->lock, flags);
	for (chan->chid = fifo->min; chan->chid < fifo->max; chan->chid++) {
		if (!fifo->channel[chan->chid]) {
			fifo->channel[chan->chid] = nv_object(chan);
			break;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->chid == fifo->max) {
		nvkm_error(subdev, "no free channels\n");
		return -ENOSPC;
	}

	/* each channel owns a fixed-size window within the control BAR */
	chan->addr = nv_device_resource_start(device, bar) +
		     addr + size * chan->chid;
	chan->size = size;
	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
	return 0;
}

void
nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
{
	struct nvkm_fifo *fifo = (void *)nv_object(chan)->engine;
	unsigned long flags;

	if (chan->user)
		iounmap(chan->user);

	spin_lock_irqsave(&fifo->lock, flags);
	fifo->channel[chan->chid] = NULL;
	spin_unlock_irqrestore(&fifo->lock, flags);

	nvkm_gpuobj_del(&chan->pushgpu);
	nvkm_namedb_destroy(&chan->namedb);
}

void
_nvkm_fifo_channel_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	nvkm_fifo_channel_destroy(chan);
}

int
_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

u32
_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	if (unlikely(!chan->user)) {
		/* lazily map the channel's control window on first access */
		chan->user = ioremap(chan->addr, chan->size);
		if (WARN_ON_ONCE(chan->user == NULL))
			return 0;
	}
	return ioread32_native(chan->user + addr);
}

void
_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_fifo_chan *chan = (void *)object;
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (WARN_ON_ONCE(chan->user == NULL))
			return;
	}
	iowrite32_native(data, chan->user + addr);
}

int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	union {
		struct nvif_notify_uevent_req none;
	} *req = data;
	int ret;

	/* nvif_unvers() assigns ret as a side-effect, so despite
	 * appearances it is initialised before being returned below.
	 */
	if (nvif_unvers(req->none)) {
		notify->size  = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = 0;
	}

	return ret;
}

void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
	struct nvif_notify_uevent_rep rep = {
	};
	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
}

int
_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
			struct nvkm_event **event)
{
	struct nvkm_fifo *fifo = (void *)object->engine;
	switch (type) {
	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
		if (nv_mclass(object) >= G82_CHANNEL_DMA) {
			*event = &fifo->uevent;
			return 0;
		}
		break;
	default:
		break;
	}
	return -EINVAL;
}
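
/* Example (editor's sketch): chipset interrupt code signals the uevent when
 * the hardware raises its non-stall interrupt, along the lines of:
 *
 *	if (intr & NONSTALL_INTR_BIT) {	(the bit name is hypothetical)
 *		nvkm_fifo_uevent(fifo);
 *		... acknowledge the interrupt in PFIFO ...
 *	}
 *
 * which wakes any nvif notifiers registered through nvkm_fifo_uevent_ctor().
 */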

static int
nvkm_fifo_chid(struct nvkm_fifo *fifo, struct nvkm_object *object)
{
	int engidx = nv_hclass(fifo) & 0xff;

	while (object && object->parent) {
		if (nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
		    (nv_hclass(object->parent) & 0xff) == engidx)
			return nvkm_fifo_chan(object)->chid;
		object = object->parent;
	}

	return -1;
}

const char *
nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
{
	struct nvkm_fifo_chan *chan = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (chid >= fifo->min && chid <= fifo->max)
		chan = (void *)fifo->channel[chid];
	spin_unlock_irqrestore(&fifo->lock, flags);

	return nvkm_client_name(chan);
}

void
nvkm_fifo_destroy(struct nvkm_fifo *fifo)
{
	kfree(fifo->channel);
	nvkm_event_fini(&fifo->uevent);
	nvkm_event_fini(&fifo->cevent);
	nvkm_engine_destroy(&fifo->engine);
}

int
nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass,
		  int min, int max, int length, void **pobject)
{
	struct nvkm_fifo *fifo;
	int ret;

	ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
				  "fifo", length, pobject);
	fifo = *pobject;
	if (ret)
		return ret;

	fifo->min = min;
	fifo->max = max;
	/* sized max + 1 so index max is valid, even though the allocation
	 * loop above only ever hands out ids in [min, max)
	 */
	fifo->channel = kcalloc(max + 1, sizeof(*fifo->channel), GFP_KERNEL);
	if (!fifo->channel)
		return -ENOMEM;

	ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
	if (ret)
		return ret;

	fifo->chid = nvkm_fifo_chid;
	spin_lock_init(&fifo->lock);
	return 0;
}
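
/* Example (editor's sketch, assuming the nvkm_fifo_create() convenience
 * wrapper from <engine/fifo.h>): a chipset backend constructs its engine
 * around this base, picking the channel-id range its hardware supports,
 * roughly:
 *
 *	struct nv04_fifo *fifo;	(backend type is hypothetical)
 *	int ret;
 *
 *	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
 *	*pobject = nv_object(fifo);
 *	if (ret)
 *		return ret;
 *
 * and its class destructor ends up in nvkm_fifo_destroy() once the
 * hardware-specific state has been torn down.
 */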