// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
				<< 32 | CMDQ_EOC_IRQ_EN)

struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
	};
	union {
		u16 offset;
		u16 event;
	};
	u8 subsys;
	u8 op;
};

int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)\n",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);
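
/*
 * Usage sketch (illustrative only, not part of this driver): how a client
 * driver might obtain a GCE mailbox channel and a command packet with the
 * helpers above. "my_dev", the channel index 0 and the PAGE_SIZE buffer are
 * hypothetical choices; both helpers return ERR_PTR() values on failure.
 *
 *	struct cmdq_client *cl;
 *	struct cmdq_pkt *pkt;
 *
 *	cl = cmdq_mbox_create(my_dev, 0, CMDQ_NO_TIMEOUT);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 *
 *	pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *	if (IS_ERR(pkt)) {
 *		cmdq_mbox_destroy(cl);
 *		return PTR_ERR(pkt);
 *	}
 */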

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * keep increasing the really required size
		 * (pkt->cmd_buf_size), so that the user knows how much memory
		 * is ultimately needed after appending all commands and
		 * flushing the command packet. The user can then call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
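
/*
 * Usage sketch (illustrative only, continuing the sketch above): appending
 * commands to a packet. The subsys/offset pair would normally come from
 * cmdq_dev_get_client_reg() and the "mediatek,gce-client-reg" DT property;
 * the register offset 0x40, the value/mask pair and the "event" number are
 * hypothetical placeholders, and event must be below CMDQ_MAX_EVENT.
 *
 *	struct cmdq_client_reg creg;
 *	int err;
 *
 *	err = cmdq_dev_get_client_reg(my_dev, &creg, 0);
 *	if (err < 0)
 *		return err;
 *
 *	err = cmdq_pkt_write_mask(pkt, creg.subsys, creg.offset + 0x40,
 *				  0x1, 0x1);
 *	if (err < 0)
 *		return err;
 *
 *	err = cmdq_pkt_wfe(pkt, event);
 */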

static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

MODULE_LICENSE("GPL v2");
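
/*
 * Usage sketch (illustrative only, completing the sketches above):
 * submitting and releasing a packet. cmdq_pkt_flush() blocks until the GCE
 * has executed the packet (signalled through the EOC callback);
 * cmdq_pkt_flush_async() returns immediately and invokes the supplied
 * callback from the mailbox completion path, so in that case the packet must
 * stay alive until the callback has run. "my_dev" is hypothetical.
 *
 *	err = cmdq_pkt_flush(pkt);
 *	if (err < 0)
 *		dev_err(my_dev, "cmdq flush failed: %d\n", err);
 *
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 */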