// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_REG_TYPE		1
#define CMDQ_JUMP_RELATIVE	0
#define CMDQ_JUMP_ABSOLUTE	1

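/*
 * A GCE instruction is 64 bits wide: a 32-bit argument (value/mask or the
 * arg_c/src_reg pair), a 16-bit offset/event/destination-register field,
 * an 8-bit subsys field (or sop plus type bits for the *_s commands) and
 * an 8-bit opcode.  The unions below mirror that encoding.
 */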
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
		struct {
			u16 arg_c;
			u16 src_reg;
		};
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};

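/*
 * Look up the idx-th "mediatek,gce-client-reg" phandle on @dev and fill
 * @client_reg with the subsys, offset and size cells of that specifier.
 */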
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

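/*
 * Allocate a cmdq_client and bind it to GCE mailbox channel @index of @dev.
 * Returns an ERR_PTR() on failure.
 */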
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

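/*
 * Allocate a command packet with a @size-byte command buffer and map that
 * buffer DMA_TO_DEVICE so the GCE can fetch the commands from it.
 */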
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

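/*
 * Copy one CMDQ_INST_SIZE (8-byte) instruction into the packet buffer and
 * advance pkt->cmd_buf_size.  Returns -ENOMEM once the pre-allocated buffer
 * is full.
 */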
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * the real required size (pkt->cmd_buf_size) keeps growing, so
		 * that after appending all commands and flushing the command
		 * packet the user knows how much memory is ultimately needed.
		 * Therefore, the user can call cmdq_pkt_create() again with
		 * the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %zu is too small!\n",
			  __func__, pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		/*
		 * A partial write is encoded as a MASK instruction followed
		 * by a WRITE whose lowest offset bit asks the GCE to apply
		 * that mask.
		 */
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

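/*
 * The *_s commands address registers through a full physical address rather
 * than a subsys/offset pair: @high_addr_reg_idx names the internal GCE
 * register holding the high bits of the address, @addr_low supplies the low
 * 16 bits, and data is moved via an internal register or an immediate value.
 */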
int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
		    u16 reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_READ_S;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.reg_dst = reg_idx;
	inst.src_reg = addr_low;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);

int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
		     u16 addr_low, u16 src_reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);

int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
			  u16 addr_low, u16 src_reg_idx, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.mask = 0;
	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);

int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
			   u16 addr_low, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);

int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
				u16 addr_low, u32 value, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);

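/*
 * Wait until hardware event @event is asserted before executing the following
 * commands; when @clear is set the event is also cleared once it arrives.
 */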
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
	struct cmdq_instruction inst = { {0} };
	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION | clear_option;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);

int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* as with masked writes, the lowest offset bit enables the mask */
	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

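/*
 * Load the immediate @value into internal GCE register @reg_idx, so that
 * later commands can use it as an address, value or mask.
 */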
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);

int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_JUMP;
	inst.offset = CMDQ_JUMP_ABSOLUTE;
	inst.value = addr >>
		     cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump);

int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS >>
		     cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_finalize);

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt)
{
	int err;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send the next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

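/*
 * Typical client usage (illustrative sketch only; error handling is omitted
 * and MY_SUBSYS/MY_REG/MY_PKT_SIZE are placeholder values a client defines):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, MY_PKT_SIZE);
 *
 *	cmdq_pkt_write(pkt, MY_SUBSYS, MY_REG, 0x1234);
 *	cmdq_pkt_finalize(pkt);
 *	cmdq_pkt_flush_async(pkt);
 *
 * Once the GCE has executed the packet, it can be reused or released with
 * cmdq_pkt_destroy(), and the channel with cmdq_mbox_destroy().
 */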
MODULE_LICENSE("GPL v2");