// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_REG_TYPE		1
#define CMDQ_JUMP_RELATIVE	1

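/* In-memory layout of one 64-bit GCE instruction: argument words plus opcode. */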
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
		struct {
			u16 arg_c;
			u16 src_reg;
		};
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};

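/*
 * Parse the idx-th "mediatek,gce-client-reg" phandle of @dev and fill
 * @client_reg with the GCE subsys ID, register offset and size encoded
 * in its three arguments.
 */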
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)\n",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

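/*
 * Allocate a command packet with a @size-byte buffer and map it for DMA
 * so the GCE hardware can fetch the appended commands.
 */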
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

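/* Copy one instruction into the packet buffer and advance the used size. */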
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * the real required size (pkt->cmd_buf_size) keeps being
		 * increased, so that the user knows how much memory should
		 * ultimately be allocated after appending all commands and
		 * flushing the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

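/*
 * A masked write is a MASK instruction carrying ~mask followed by a WRITE
 * whose offset has CMDQ_WRITE_ENABLE_MASK set, which tells the GCE to apply
 * the previously programmed mask.
 */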
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

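/*
 * The *_s variants address registers through GCE internal registers:
 * @high_addr_reg_idx selects the internal register holding the high bits
 * of the physical address, while @addr_low supplies the low 16 bits.
 */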
int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
		    u16 reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_READ_S;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.reg_dst = reg_idx;
	inst.src_reg = addr_low;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);

int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
		     u16 addr_low, u16 src_reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);

int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
			  u16 addr_low, u16 src_reg_idx, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.mask = 0;
	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);

int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
			   u16 addr_low, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);

int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
				u16 addr_low, u32 value, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);

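/* Append a WFE instruction that waits for @event and, if @clear is set, clears it. */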
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
	struct cmdq_instruction inst = { {0} };
	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION | clear_option;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);

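/*
 * POLL blocks the GCE thread until the register at @subsys/@offset reads
 * back @value (optionally under a mask, see cmdq_pkt_poll_mask()).
 */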
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

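/* LOGIC assign: load immediate @value into GCE internal register @reg_idx. */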
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);

int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_JUMP;
	inst.offset = CMDQ_JUMP_RELATIVE;
	inst.value = addr >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump);

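/* Terminate the packet: an EOC that raises the completion IRQ, then a JUMP to the end. */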
int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_finalize);

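/* Completion callback: hand the buffer back to the CPU, then run the client's callback. */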
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}

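/* Synchronous flush: issue the packet via cmdq_pkt_flush_async() and wait for completion. */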
int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

MODULE_LICENSE("GPL v2");