/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "runl.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/top.h>

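/* Find the channel whose instance address matches @inst.  On success, the
 * channel is returned with chan->cgrp->lock held and IRQs disabled; the
 * saved IRQ state is passed back through @pirqflags so the caller can
 * release the channel later (via nvkm_chan_put()).
 */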
struct nvkm_chan *
nvkm_runl_chan_get_inst(struct nvkm_runl *runl, u64 inst, unsigned long *pirqflags)
{
	struct nvkm_chid *chid = runl->chid;
	struct nvkm_chan *chan;
	unsigned long flags;
	int id;

	spin_lock_irqsave(&chid->lock, flags);
	for_each_set_bit(id, chid->used, chid->nr) {
		chan = chid->data[id];
		if (likely(chan)) {
			if (chan->inst->addr == inst) {
				/* Hand off to the caller with cgrp->lock held
				 * and the saved IRQ state in *pirqflags.
				 */
				spin_lock(&chan->cgrp->lock);
				*pirqflags = flags;
				spin_unlock(&chid->lock);
				return chan;
			}
		}
	}
	spin_unlock_irqrestore(&chid->lock, flags);
	return NULL;
}

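/* As above, but look the channel up by channel ID instead.  Unused IDs
 * return NULL; out-of-range IDs additionally trigger a WARN_ON().
 */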
struct nvkm_chan *
nvkm_runl_chan_get_chid(struct nvkm_runl *runl, int id, unsigned long *pirqflags)
{
	struct nvkm_chid *chid = runl->chid;
	struct nvkm_chan *chan;
	unsigned long flags;

	spin_lock_irqsave(&chid->lock, flags);
	if (!WARN_ON(id >= chid->nr)) {
		chan = chid->data[id];
		if (likely(chan)) {
			spin_lock(&chan->cgrp->lock);
			*pirqflags = flags;
			spin_unlock(&chid->lock);
			return chan;
		}
	}
	spin_unlock_irqrestore(&chid->lock, flags);
	return NULL;
}

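/* Destroy a runlist: free its engine entries, drop the channel/channel-group
 * ID allocator references, and unlink it from the FIFO.
 */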
void
nvkm_runl_del(struct nvkm_runl *runl)
{
	struct nvkm_engn *engn, *engt;

	list_for_each_entry_safe(engn, engt, &runl->engns, head) {
		list_del(&engn->head);
		kfree(engn);
	}

	nvkm_chid_unref(&runl->chid);
	nvkm_chid_unref(&runl->cgid);

	list_del(&runl->head);
	mutex_destroy(&runl->mutex);
	kfree(runl);
}

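/* Add engine @engi to a runlist.  The engine's MMU fault ID is looked up
 * via the TOP subdev where one exists, falling back to the FIFO's MMU
 * fault engine table otherwise.
 */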
struct nvkm_engn *
nvkm_runl_add(struct nvkm_runl *runl, int engi, const struct nvkm_engn_func *func,
	      enum nvkm_subdev_type type, int inst)
{
	struct nvkm_fifo *fifo = runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_engine *engine;
	struct nvkm_engn *engn;

	engine = nvkm_device_engine(device, type, inst);
	if (!engine) {
		RUNL_DEBUG(runl, "engn %d.%d[%s] not found", engi, inst, nvkm_subdev_type[type]);
		return NULL;
	}

	if (!(engn = kzalloc(sizeof(*engn), GFP_KERNEL)))
		return NULL;

	engn->func = func;
	engn->runl = runl;
	engn->id = engi;
	engn->engine = engine;
	engn->fault = -1;
	list_add_tail(&engn->head, &runl->engns);

	/* Lookup MMU engine ID for fault handling. */
	if (device->top)
		engn->fault = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);

	/* No TOP info (or no entry there) - check the MMU fault engine table. */
	if (engn->fault < 0 && fifo->func->mmu_fault) {
		const struct nvkm_enum *map = fifo->func->mmu_fault->engine;

		while (map->name) {
			if (map->data2 == engine->subdev.type && map->inst == engine->subdev.inst) {
				engn->fault = map->value;
				break;
			}
			map++;
		}
	}

	return engn;
}

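/* Locate a runlist, either by index (@runi >= 0), or by its register
 * offset (@addr) when no index is given.
 */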
struct nvkm_runl *
nvkm_runl_get(struct nvkm_fifo *fifo, int runi, u32 addr)
{
	struct nvkm_runl *runl;

	nvkm_runl_foreach(runl, fifo) {
		if ((runi >= 0 && runl->id == runi) || (runi < 0 && runl->addr == addr))
			return runl;
	}

	return NULL;
}

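/* Allocate and initialise a runlist.  If the FIFO has device-wide channel
 * and channel-group ID allocators, take references on those; otherwise,
 * allocate per-runlist allocators with @id_nr IDs each.
 */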
struct nvkm_runl *
nvkm_runl_new(struct nvkm_fifo *fifo, int runi, u32 addr, int id_nr)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_runl *runl;
	int ret;

	if (!(runl = kzalloc(sizeof(*runl), GFP_KERNEL)))
		return NULL;

	runl->func = fifo->func->runl;
	runl->fifo = fifo;
	runl->id = runi;
	runl->addr = addr;
	INIT_LIST_HEAD(&runl->engns);
	INIT_LIST_HEAD(&runl->cgrps);
	mutex_init(&runl->mutex);
	list_add_tail(&runl->head, &fifo->runls);

	if (!fifo->chid) {
		/* Per-runlist channel-group/channel ID allocators. */
		if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, id_nr, 0, id_nr, &runl->cgid)) ||
		    (ret = nvkm_chid_new(&nvkm_chan_event, subdev, id_nr, 0, id_nr, &runl->chid))) {
			RUNL_ERROR(runl, "cgid/chid: %d", ret);
			nvkm_runl_del(runl);
			return NULL;
		}
	} else {
		/* Share the device-wide allocators. */
		runl->cgid = nvkm_chid_ref(fifo->cgid);
		runl->chid = nvkm_chid_ref(fifo->chid);
	}

	return runl;
}