1 /*
2  * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "qmgr.h"
24 
25 static void
nvkm_falcon_msgq_open(struct nvkm_falcon_msgq * msgq)26 nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
27 {
28 	spin_lock(&msgq->lock);
29 	msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
30 }
31 
32 static void
nvkm_falcon_msgq_close(struct nvkm_falcon_msgq * msgq,bool commit)33 nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
34 {
35 	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
36 
37 	if (commit)
38 		nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
39 
40 	spin_unlock(&msgq->lock);
41 }
42 
43 bool
nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq * msgq)44 nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
45 {
46 	u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
47 	u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
48 	return head == tail;
49 }
50 
51 static int
nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq * msgq,void * data,u32 size)52 nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
53 {
54 	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
55 	u32 head, tail, available;
56 
57 	head = nvkm_falcon_rd32(falcon, msgq->head_reg);
58 	/* has the buffer looped? */
59 	if (head < msgq->position)
60 		msgq->position = msgq->offset;
61 
62 	tail = msgq->position;
63 
64 	available = head - tail;
65 	if (size > available) {
66 		FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
67 			  size, available);
68 		return -EINVAL;
69 	}
70 
71 	nvkm_falcon_pio_rd(falcon, 0, DMEM, tail, data, 0, size);
72 	msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
73 	return 0;
74 }
75 
/* Pull the next message from the queue into 'hdr'.
 *
 * Returns 1 if a message was read, 0 if the queue was empty, or a
 * negative error code on failure.  The consumed tail position is only
 * committed back to the falcon when no error occurred (ret >= 0).
 */
static int
nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
	int ret = 0;

	nvkm_falcon_msgq_open(msgq);

	/* Nothing pending: ret stays 0 and close commits (a no-op, since
	 * the position was not advanced). */
	if (nvkm_falcon_msgq_empty(msgq))
		goto close;

	/* Read the fixed-size header first; it carries the total size. */
	ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
	if (ret) {
		FLCNQ_ERR(msgq, "failed to read message header");
		goto close;
	}

	/* Callers stage messages in a MSG_BUF_SIZE buffer (see
	 * nvkm_falcon_msgq_recv()); refuse anything that won't fit. */
	if (hdr->size > MSG_BUF_SIZE) {
		FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
		ret = -ENOSPC;
		goto close;
	}

	/* Payload, if any, lands directly after the header in memory. */
	if (hdr->size > HDR_SIZE) {
		u32 read_size = hdr->size - HDR_SIZE;

		ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
		if (ret) {
			FLCNQ_ERR(msgq, "failed to read message data");
			goto close;
		}
	}

	ret = 1;
close:
	nvkm_falcon_msgq_close(msgq, (ret >= 0));
	return ret;
}
113 
114 static int
nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq * msgq,struct nvfw_falcon_msg * hdr)115 nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
116 {
117 	struct nvkm_falcon_qmgr_seq *seq;
118 
119 	seq = &msgq->qmgr->seq.id[hdr->seq_id];
120 	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
121 		FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
122 		return -EINVAL;
123 	}
124 
125 	if (seq->state == SEQ_STATE_USED) {
126 		if (seq->callback)
127 			seq->result = seq->callback(seq->priv, hdr);
128 	}
129 
130 	if (seq->async) {
131 		nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
132 		return 0;
133 	}
134 
135 	complete_all(&seq->done);
136 	return 0;
137 }
138 
139 void
nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq * msgq)140 nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
141 {
142 	/*
143 	 * We are invoked from a worker thread, so normally we have plenty of
144 	 * stack space to work with.
145 	 */
146 	u8 msg_buffer[MSG_BUF_SIZE];
147 	struct nvfw_falcon_msg *hdr = (void *)msg_buffer;
148 
149 	while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
150 		nvkm_falcon_msgq_exec(msgq, hdr);
151 }
152 
/* Read the falcon's init message into 'data' (expected to be exactly
 * 'size' bytes).
 *
 * The queue's register offsets are seeded here straight from the falcon
 * function table, and msgq->offset from the current tail — presumably
 * because the queue has not been through nvkm_falcon_msgq_init() yet at
 * this point (TODO confirm against callers).  Returns 0 on success or a
 * negative error code.
 */
int
nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
			      void *data, u32 size)
{
	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
	struct nvfw_falcon_msg *hdr = data;
	int ret;

	msgq->head_reg = falcon->func->msgq.head;
	msgq->tail_reg = falcon->func->msgq.tail;
	msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);

	nvkm_falcon_msgq_open(msgq);
	ret = nvkm_falcon_msgq_pop(msgq, data, size);
	/* The message's self-reported size must match what the caller
	 * expected to receive. */
	if (ret == 0 && hdr->size != size) {
		FLCN_ERR(falcon, "unexpected init message size %d vs %d",
			 hdr->size, size);
		ret = -EINVAL;
	}
	/* Only commit the new tail if the message was consumed cleanly. */
	nvkm_falcon_msgq_close(msgq, ret == 0);
	return ret;
}
175 
176 void
nvkm_falcon_msgq_init(struct nvkm_falcon_msgq * msgq,u32 index,u32 offset,u32 size)177 nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
178 		      u32 index, u32 offset, u32 size)
179 {
180 	const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;
181 
182 	msgq->head_reg = func->msgq.head + index * func->msgq.stride;
183 	msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
184 	msgq->offset = offset;
185 
186 	FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
187 		  index, msgq->offset, size);
188 }
189 
190 void
nvkm_falcon_msgq_del(struct nvkm_falcon_msgq ** pmsgq)191 nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
192 {
193 	struct nvkm_falcon_msgq *msgq = *pmsgq;
194 	if (msgq) {
195 		kfree(*pmsgq);
196 		*pmsgq = NULL;
197 	}
198 }
199 
200 int
nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr * qmgr,const char * name,struct nvkm_falcon_msgq ** pmsgq)201 nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
202 		     struct nvkm_falcon_msgq **pmsgq)
203 {
204 	struct nvkm_falcon_msgq *msgq = *pmsgq;
205 
206 	if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
207 		return -ENOMEM;
208 
209 	msgq->qmgr = qmgr;
210 	msgq->name = name;
211 	spin_lock_init(&msgq->lock);
212 	return 0;
213 }
214