// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_REG_TYPE		1

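/*
 * In-memory layout of one 64-bit GCE instruction. The unions overlay
 * the argument fields, whose meaning depends on the opcode stored in
 * the top byte: an immediate value or mask, an I/O offset, an event
 * number or a destination register index.
 */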
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};

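/*
 * Looks up the idx-th entry of the client's "mediatek,gce-client-reg"
 * phandle list, whose three argument cells are subsys, offset and size.
 * An illustrative binding entry (the values are hypothetical):
 *
 *	mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x100>;
 */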
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)\n",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

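/*
 * Typical client setup (an illustrative sketch; the channel index and
 * timeout are caller-specific):
 *
 *	struct cmdq_client *cl;
 *
 *	cl = cmdq_mbox_create(dev, 0, 2000);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 */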
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

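/*
 * Allocates a command packet and DMA-maps its buffer for the GCE. A
 * minimal usage sketch (PAGE_SIZE is a common buffer size choice):
 *
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *	if (IS_ERR(pkt))
 *		return PTR_ERR(pkt);
 */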
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer (pkt->buf_size) is used up, the
		 * really required size (pkt->cmd_buf_size) keeps growing, so
		 * that the user knows how much memory is ultimately needed
		 * after appending all commands and flushing the command
		 * packet. Therefore, the user can call cmdq_pkt_create()
		 * again with the really required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			__func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

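/*
 * A masked write is encoded as a MASK instruction carrying the inverted
 * mask, followed by a WRITE whose offset has CMDQ_WRITE_ENABLE_MASK set
 * so the GCE applies the preceding mask. A full 0xffffffff mask
 * degenerates into a plain write.
 */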
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}

	return cmdq_pkt_write(pkt, subsys, offset_mask, value);
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

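/*
 * The WFE opcode also backs the event helpers below: with
 * CMDQ_WFE_OPTION the GCE thread blocks until the hardware event fires
 * and then clears it, while cmdq_pkt_clear_event() and
 * cmdq_pkt_set_event() only update the event value without waiting.
 */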
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);

int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset |= CMDQ_POLL_ENABLE_MASK;

	return cmdq_pkt_poll(pkt, subsys, offset, value);
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

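/*
 * Writes an immediate value into one of the GCE internal registers;
 * dst_t = CMDQ_REG_TYPE marks the destination as a register index
 * rather than a subsys address.
 */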
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);

int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_finalize);

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

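/*
 * End-to-end usage sketch (illustrative, error handling trimmed; "cl"
 * is a client as created above):
 *
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_write_mask(pkt, subsys, offset, value, mask);
 *	cmdq_pkt_finalize(pkt);
 *	cmdq_pkt_flush(pkt);
 *	cmdq_pkt_destroy(pkt);
 */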
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send the next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	cmplt->err = data.sta != CMDQ_CB_NORMAL;
	complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

MODULE_LICENSE("GPL v2");