1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include <engine/fifo.h>
25 
26 #include <core/client.h>
27 #include <core/handle.h>
28 #include <core/notify.h>
29 #include <engine/dmaobj.h>
30 
31 #include <nvif/class.h>
32 #include <nvif/event.h>
33 #include <nvif/unpack.h>
34 
35 static int
36 nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
37 		     struct nvkm_notify *notify)
38 {
39 	if (size == 0) {
40 		notify->size  = 0;
41 		notify->types = 1;
42 		notify->index = 0;
43 		return 0;
44 	}
45 	return -ENOSYS;
46 }
47 
/* Event operations for the channel-creation event (fifo->cevent);
 * only a constructor is needed since the event carries no payload.
 */
static const struct nvkm_event_func
nvkm_fifo_event_func = {
	.ctor = nvkm_fifo_event_ctor,
};
52 
/* Common constructor for a FIFO channel object.
 *
 * Creates the base namedb object, resolves and binds the push-buffer DMA
 * object (if one was supplied), claims a free channel id under the fifo
 * lock, and records the BAR address of the channel's user registers.
 *
 * Returns 0 on success, -ENOENT if the push-buffer handle cannot be
 * found, -EINVAL for an unsupported DMA class, -ENOSPC when no channel
 * id is free, or an error from namedb creation / dmaobj binding.
 */
int
nvkm_fifo_channel_create_(struct nvkm_object *parent,
			  struct nvkm_object *engine,
			  struct nvkm_oclass *oclass,
			  int bar, u32 addr, u32 size, u64 pushbuf,
			  u64 engmask, int len, void **ptr)
{
	struct nvkm_client *client = nvkm_client(parent);
	struct nvkm_handle *handle;
	struct nvkm_dmaobj *dmaobj;
	struct nvkm_fifo *fifo = (void *)engine;
	struct nvkm_fifo_chan *chan;
	struct nvkm_dmaeng *dmaeng;
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	unsigned long flags;
	int ret;

	/* create base object class */
	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
				  engmask, len, ptr);
	chan = *ptr;
	if (ret)
		return ret;

	/* validate dma object representing push buffer */
	if (pushbuf) {
		handle = nvkm_client_search(client, pushbuf);
		if (!handle)
			return -ENOENT;
		dmaobj = (void *)handle->object;

		dmaeng = (void *)dmaobj->base.engine;
		switch (dmaobj->base.oclass->handle) {
		case NV_DMA_FROM_MEMORY:
		case NV_DMA_IN_MEMORY:
			break;
		default:
			return -EINVAL;
		}

		ret = dmaeng->bind(dmaobj, parent, &chan->pushgpu);
		if (ret)
			return ret;
	}

	/* find a free fifo channel */
	spin_lock_irqsave(&fifo->lock, flags);
	for (chan->chid = fifo->min; chan->chid < fifo->max; chan->chid++) {
		if (!fifo->channel[chan->chid]) {
			fifo->channel[chan->chid] = nv_object(chan);
			break;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* NOTE(review): the pushgpu reference taken above is not dropped
	 * here on failure -- presumably the caller's destructor path
	 * (nvkm_fifo_channel_destroy) cleans it up; confirm.
	 */
	if (chan->chid == fifo->max) {
		nvkm_error(subdev, "no free channels\n");
		return -ENOSPC;
	}

	/* Each channel's user registers live at a fixed stride within
	 * the given BAR; the mapping itself is created lazily on first
	 * rd32/wr32 access.
	 */
	chan->addr = nv_device_resource_start(device, bar) +
		     addr + size * chan->chid;
	chan->size = size;
	/* notify listeners on fifo->cevent that a channel was created */
	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
	return 0;
}
120 
/* Common destructor for a FIFO channel: unmaps the user registers,
 * releases the channel id under the fifo lock, drops the push-buffer
 * reference and destroys the base namedb object.
 */
void
nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
{
	struct nvkm_fifo *fifo = (void *)nv_object(chan)->engine;
	unsigned long flags;

	/* undo the lazy ioremap done by _nvkm_fifo_channel_rd32/wr32 */
	if (chan->user)
		iounmap(chan->user);

	/* free the channel id claimed in nvkm_fifo_channel_create_() */
	spin_lock_irqsave(&fifo->lock, flags);
	fifo->channel[chan->chid] = NULL;
	spin_unlock_irqrestore(&fifo->lock, flags);

	nvkm_gpuobj_ref(NULL, &chan->pushgpu);
	nvkm_namedb_destroy(&chan->namedb);
}
137 
void
_nvkm_fifo_channel_dtor(struct nvkm_object *object)
{
	/* Object-layer destructor hook: forward to the common channel
	 * destructor.
	 */
	nvkm_fifo_channel_destroy((struct nvkm_fifo_chan *)object);
}
144 
145 int
146 _nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
147 {
148 	struct nvkm_fifo_chan *chan = (void *)object;
149 	*addr = chan->addr;
150 	*size = chan->size;
151 	return 0;
152 }
153 
154 u32
155 _nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
156 {
157 	struct nvkm_fifo_chan *chan = (void *)object;
158 	if (unlikely(!chan->user)) {
159 		chan->user = ioremap(chan->addr, chan->size);
160 		if (WARN_ON_ONCE(chan->user == NULL))
161 			return 0;
162 	}
163 	return ioread32_native(chan->user + addr);
164 }
165 
166 void
167 _nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
168 {
169 	struct nvkm_fifo_chan *chan = (void *)object;
170 	if (unlikely(!chan->user)) {
171 		chan->user = ioremap(chan->addr, chan->size);
172 		if (WARN_ON_ONCE(chan->user == NULL))
173 			return;
174 	}
175 	iowrite32_native(data, chan->user + addr);
176 }
177 
/* Constructor for non-stall interrupt (uevent) notify requests:
 * validates the request structure and sets up the reply size.
 */
int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	union {
		struct nvif_notify_uevent_req none;
	} *req = data;
	int ret;

	/* NOTE(review): 'ret' looks uninitialised here, but nvif_unvers()
	 * is a macro that assigns to 'ret' (and reads 'size') from the
	 * enclosing scope -- confirm against <nvif/unpack.h> before
	 * "fixing" this.
	 */
	if (nvif_unvers(req->none)) {
		notify->size  = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = 0;
	}

	return ret;
}
195 
196 void
197 nvkm_fifo_uevent(struct nvkm_fifo *fifo)
198 {
199 	struct nvif_notify_uevent_rep rep = {
200 	};
201 	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
202 }
203 
204 int
205 _nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
206 			struct nvkm_event **event)
207 {
208 	struct nvkm_fifo *fifo = (void *)object->engine;
209 	switch (type) {
210 	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
211 		if (nv_mclass(object) >= G82_CHANNEL_DMA) {
212 			*event = &fifo->uevent;
213 			return 0;
214 		}
215 		break;
216 	default:
217 		break;
218 	}
219 	return -EINVAL;
220 }
221 
222 static int
223 nvkm_fifo_chid(struct nvkm_fifo *fifo, struct nvkm_object *object)
224 {
225 	int engidx = nv_hclass(fifo) & 0xff;
226 
227 	while (object && object->parent) {
228 		if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
229 		    (nv_hclass(object->parent) & 0xff) == engidx)
230 			return nvkm_fifo_chan(object)->chid;
231 		object = object->parent;
232 	}
233 
234 	return -1;
235 }
236 
237 const char *
238 nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
239 {
240 	struct nvkm_fifo_chan *chan = NULL;
241 	unsigned long flags;
242 
243 	spin_lock_irqsave(&fifo->lock, flags);
244 	if (chid >= fifo->min && chid <= fifo->max)
245 		chan = (void *)fifo->channel[chid];
246 	spin_unlock_irqrestore(&fifo->lock, flags);
247 
248 	return nvkm_client_name(chan);
249 }
250 
251 void
252 nvkm_fifo_destroy(struct nvkm_fifo *fifo)
253 {
254 	kfree(fifo->channel);
255 	nvkm_event_fini(&fifo->uevent);
256 	nvkm_event_fini(&fifo->cevent);
257 	nvkm_engine_destroy(&fifo->engine);
258 }
259 
260 int
261 nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
262 		  struct nvkm_oclass *oclass,
263 		  int min, int max, int length, void **pobject)
264 {
265 	struct nvkm_fifo *fifo;
266 	int ret;
267 
268 	ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
269 				  "fifo", length, pobject);
270 	fifo = *pobject;
271 	if (ret)
272 		return ret;
273 
274 	fifo->min = min;
275 	fifo->max = max;
276 	fifo->channel = kzalloc(sizeof(*fifo->channel) * (max + 1), GFP_KERNEL);
277 	if (!fifo->channel)
278 		return -ENOMEM;
279 
280 	ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
281 	if (ret)
282 		return ret;
283 
284 	fifo->chid = nvkm_fifo_chid;
285 	spin_lock_init(&fifo->lock);
286 	return 0;
287 }
288