/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "qmgr.h"

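/*
 * Lock the queue and latch the current tail pointer into queue->position,
 * which msg_queue_pop() then advances as data is consumed.
 */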
static void
msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	struct nvkm_falcon *falcon = priv->falcon;

	mutex_lock(&queue->mutex);
	queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
}

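/*
 * Unlock the queue, optionally committing the consumed position back to the
 * falcon's tail register so the producer can reuse that space.
 */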
static void
msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
		bool commit)
{
	struct nvkm_falcon *falcon = priv->falcon;

	if (commit)
		nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);

	mutex_unlock(&queue->mutex);
}

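/* A queue is empty when its head and tail registers are equal. */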
static bool
msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
{
	struct nvkm_falcon *falcon = priv->falcon;
	u32 head = nvkm_falcon_rd32(falcon, queue->head_reg);
	u32 tail = nvkm_falcon_rd32(falcon, queue->tail_reg);

	return head == tail;
}

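/*
 * Copy up to @size bytes from the queue's DMEM window into @data and return
 * the number of bytes read.  If the write pointer has wrapped past our read
 * position, reading restarts from the beginning of the queue's DMEM region.
 */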
static int
msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	      void *data, u32 size)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 head, tail, available;

	head = nvkm_falcon_rd32(falcon, queue->head_reg);
	/* has the buffer looped? */
	if (head < queue->position)
		queue->position = queue->offset;

	tail = queue->position;

	available = head - tail;

	if (available == 0) {
		nvkm_warn(subdev, "no message data available\n");
		return 0;
	}

	if (size > available) {
		nvkm_warn(subdev, "message data smaller than read request\n");
		size = available;
	}

	nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
	return size;
}

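/*
 * Read one complete message (header plus body) into @hdr.  Returns a
 * positive byte count on success, 0 if the queue is empty, or a negative
 * error code; the new tail position is only committed back to the falcon
 * when no error occurred.
 */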
static int
msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
	       struct nv_falcon_msg *hdr)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	int ret;

	msg_queue_open(priv, queue);

	if (msg_queue_empty(priv, queue)) {
		ret = 0;
		goto close;
	}

	ret = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
	if (ret >= 0 && ret != HDR_SIZE)
		ret = -EINVAL;
	if (ret < 0) {
		nvkm_error(subdev, "failed to read message header: %d\n", ret);
		goto close;
	}

	if (hdr->size > MSG_BUF_SIZE) {
		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
		ret = -ENOSPC;
		goto close;
	}

	if (hdr->size > HDR_SIZE) {
		u32 read_size = hdr->size - HDR_SIZE;

		ret = msg_queue_pop(priv, queue, (hdr + 1), read_size);
		if (ret >= 0 && ret != read_size)
			ret = -EINVAL;
		if (ret < 0) {
			nvkm_error(subdev, "failed to read message: %d\n", ret);
			goto close;
		}
	}

close:
	msg_queue_close(priv, queue, (ret >= 0));
	return ret;
}

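/*
 * Dispatch a received message to the sequence it acknowledges: run the
 * sequence's callback (if any), then either release the sequence (async
 * case) or complete it so a synchronous waiter is woken.
 */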
static int
msgqueue_msg_handle(struct nvkm_msgqueue *priv,
		    struct nvkm_falcon_msgq *msgq,
		    struct nv_falcon_msg *hdr)
{
	const struct nvkm_subdev *subdev = priv->falcon->owner;
	struct nvkm_falcon_qmgr_seq *seq;

	seq = &msgq->qmgr->seq.id[hdr->seq_id];
	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
		nvkm_error(subdev, "msg for unknown sequence %d\n", seq->id);
		return -EINVAL;
	}

	if (seq->state == SEQ_STATE_USED) {
		if (seq->callback)
			seq->result = seq->callback(seq->priv, hdr);
	}

	if (seq->async) {
		nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
		return 0;
	}

	complete_all(&seq->done);
	return 0;
}

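/*
 * Read and process the firmware's INIT message.  The queues are not set up
 * yet at this point, so the message is fetched straight from DMEM using the
 * raw tail register and handed to the unit-specific init callback.
 */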
static int
msgqueue_handle_init_msg(struct nvkm_msgqueue *priv)
{
	struct nvkm_falcon *falcon = priv->falcon;
	const struct nvkm_subdev *subdev = falcon->owner;
	const u32 tail_reg = falcon->func->msgq.tail;
	u8 msg_buffer[MSG_BUF_SIZE];
	struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
	u32 tail;

	/*
	 * Read the message directly from DMEM: the queues are not initialized
	 * yet, so we cannot rely on msg_queue_read().
	 */
	tail = nvkm_falcon_rd32(falcon, tail_reg);
	nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);

	if (hdr->size > MSG_BUF_SIZE) {
		nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
		return -ENOSPC;
	}

	nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
			      (hdr + 1));

	tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
	nvkm_falcon_wr32(falcon, tail_reg, tail);

	return priv->func->init_func->init_callback(priv, hdr);
}

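/*
 * Entry point for the message worker: the first message received must be the
 * INIT message; once it has been handled, every pending message is read from
 * @queue and dispatched to its sequence handler.
 */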
void
nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
			   struct nvkm_msgqueue_queue *queue)
{
	/*
	 * We are invoked from a worker thread, so normally we have plenty of
	 * stack space to work with.
	 */
	u8 msg_buffer[MSG_BUF_SIZE];
	struct nv_falcon_msg *hdr = (void *)msg_buffer;
	int ret;

	/* the first message we receive must be the init message */
	if (!priv->init_msg_received) {
		ret = msgqueue_handle_init_msg(priv);
		if (!ret)
			priv->init_msg_received = true;
	} else {
		while (msg_queue_read(priv, queue, hdr) > 0)
			msgqueue_msg_handle(priv, queue, hdr);
	}
}

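/*
 * Record the head/tail register offsets and DMEM offset for message queue
 * @index on this falcon.
 */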
void
nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
		      u32 index, u32 offset, u32 size)
{
	const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;

	msgq->head_reg = func->msgq.head + index * func->msgq.stride;
	msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
	msgq->offset = offset;

	FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
		  index, msgq->offset, size);
}

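/* Destroy a message queue and clear the caller's pointer to it. */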
void
nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
{
	struct nvkm_falcon_msgq *msgq = *pmsgq;

	if (msgq) {
		kfree(*pmsgq);
		*pmsgq = NULL;
	}
}

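/* Allocate and initialise a message queue object attached to @qmgr. */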
int
nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
		     struct nvkm_falcon_msgq **pmsgq)
{
	struct nvkm_falcon_msgq *msgq;

	if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
		return -ENOMEM;

	msgq->qmgr = qmgr;
	msgq->name = name;
	mutex_init(&msgq->mutex);
	return 0;
}