/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/handle.h>
#include <core/notify.h>
#include <engine/dmaobj.h>

#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

static int
nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
                     struct nvkm_notify *notify)
{
        if (size == 0) {
                notify->size  = 0;
                notify->types = 1;
                notify->index = 0;
                return 0;
        }
        return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_event_func = {
        .ctor = nvkm_fifo_event_ctor,
};

int
nvkm_fifo_channel_create_(struct nvkm_object *parent,
                          struct nvkm_object *engine,
                          struct nvkm_oclass *oclass,
                          int bar, u32 addr, u32 size, u32 pushbuf,
                          u64 engmask, int len, void **ptr)
{
        struct nvkm_device *device = nv_device(engine);
        struct nvkm_fifo *priv = (void *)engine;
        struct nvkm_fifo_chan *chan;
        struct nvkm_dmaeng *dmaeng;
        unsigned long flags;
        int ret;

        /* create base object class */
        ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
                                  engmask, len, ptr);
        chan = *ptr;
        if (ret)
                return ret;

        /* validate dma object representing push buffer */
        chan->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
        if (!chan->pushdma)
                return -ENOENT;

        dmaeng = (void *)chan->pushdma->base.engine;
        switch (chan->pushdma->base.oclass->handle) {
        case NV_DMA_FROM_MEMORY:
        case NV_DMA_IN_MEMORY:
                break;
        default:
                return -EINVAL;
        }

        ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
        if (ret)
                return ret;

        /* find a free fifo channel */
        spin_lock_irqsave(&priv->lock, flags);
        for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
                if (!priv->channel[chan->chid]) {
                        priv->channel[chan->chid] = nv_object(chan);
                        break;
                }
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        if (chan->chid == priv->max) {
                nv_error(priv, "no free channels\n");
                return -ENOSPC;
        }

        chan->addr = nv_device_resource_start(device, bar) +
                     addr + size * chan->chid;
        chan->size = size;
        nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
        return 0;
}

void
nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
{
        struct nvkm_fifo *priv = (void *)nv_object(chan)->engine;
        unsigned long flags;
        if (chan->user)
                iounmap(chan->user);

        spin_lock_irqsave(&priv->lock, flags);
        priv->channel[chan->chid] = NULL;
        spin_unlock_irqrestore(&priv->lock, flags);

        nvkm_gpuobj_ref(NULL, &chan->pushgpu);
        nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma);
        nvkm_namedb_destroy(&chan->namedb);
}

void
_nvkm_fifo_channel_dtor(struct nvkm_object *object)
{
        struct nvkm_fifo_chan *chan = (void *)object;
        nvkm_fifo_channel_destroy(chan);
}

int
_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
        struct nvkm_fifo_chan *chan = (void *)object;
        *addr = chan->addr;
        *size = chan->size;
        return 0;
}

u32
_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
{
        struct nvkm_fifo_chan *chan = (void *)object;
        if (unlikely(!chan->user)) {
                /* lazily map the channel's user register window on first access */
                chan->user = ioremap(chan->addr, chan->size);
                if (WARN_ON_ONCE(chan->user == NULL))
                        return 0;
        }
        return ioread32_native(chan->user + addr);
}

void
_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
        struct nvkm_fifo_chan *chan = (void *)object;
        if (unlikely(!chan->user)) {
                /* lazily map the channel's user register window on first access */
                chan->user = ioremap(chan->addr, chan->size);
                if (WARN_ON_ONCE(chan->user == NULL))
                        return;
        }
        iowrite32_native(data, chan->user + addr);
}

int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
                      struct nvkm_notify *notify)
{
        union {
                struct nvif_notify_uevent_req none;
        } *req = data;
        int ret;

        /* nvif_unvers() validates the request size and assigns ret as a
         * side-effect, so ret is initialised before it is returned below.
         */
        if (nvif_unvers(req->none)) {
                notify->size  = sizeof(struct nvif_notify_uevent_rep);
                notify->types = 1;
                notify->index = 0;
        }

        return ret;
}

void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
        struct nvif_notify_uevent_rep rep = {
        };
        nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
}

int
_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
                        struct nvkm_event **event)
{
        struct nvkm_fifo *fifo = (void *)object->engine;
        switch (type) {
        case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
                if (nv_mclass(object) >= G82_CHANNEL_DMA) {
                        *event = &fifo->uevent;
                        return 0;
                }
                break;
        default:
                break;
        }
        return -EINVAL;
}

static int
nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object)
{
        int engidx = nv_hclass(priv) & 0xff;

        while (object && object->parent) {
                if (nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
                    (nv_hclass(object->parent) & 0xff) == engidx)
                        return nvkm_fifo_chan(object)->chid;
                object = object->parent;
        }

        return -1;
}

const char *
nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
{
        struct nvkm_fifo_chan *chan = NULL;
        unsigned long flags;

        spin_lock_irqsave(&fifo->lock, flags);
        if (chid >= fifo->min && chid <= fifo->max)
                chan = (void *)fifo->channel[chid];
        spin_unlock_irqrestore(&fifo->lock, flags);

        return nvkm_client_name(chan);
}

void
nvkm_fifo_destroy(struct nvkm_fifo *priv)
{
        kfree(priv->channel);
        nvkm_event_fini(&priv->uevent);
        nvkm_event_fini(&priv->cevent);
        nvkm_engine_destroy(&priv->base);
}

int
nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
                  struct nvkm_oclass *oclass,
                  int min, int max, int length, void **pobject)
{
        struct nvkm_fifo *priv;
        int ret;
ret; 261 262 ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO", 263 "fifo", length, pobject); 264 priv = *pobject; 265 if (ret) 266 return ret; 267 268 priv->min = min; 269 priv->max = max; 270 priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL); 271 if (!priv->channel) 272 return -ENOMEM; 273 274 ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent); 275 if (ret) 276 return ret; 277 278 priv->chid = nvkm_fifo_chid; 279 spin_lock_init(&priv->lock); 280 return 0; 281 } 282