1ce0925e8STomas Winkler // SPDX-License-Identifier: GPL-2.0
2ce0925e8STomas Winkler /*
3ce0925e8STomas Winkler * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
4ce0925e8STomas Winkler */
5ce0925e8STomas Winkler #include <linux/dma-mapping.h>
6ce0925e8STomas Winkler #include <linux/mei.h>
7ce0925e8STomas Winkler
8ce0925e8STomas Winkler #include "mei_dev.h"
9ce0925e8STomas Winkler
10ce0925e8STomas Winkler /**
11ce0925e8STomas Winkler * mei_dmam_dscr_alloc() - allocate a managed coherent buffer
12ce0925e8STomas Winkler * for the dma descriptor
13ce0925e8STomas Winkler * @dev: mei_device
14ce0925e8STomas Winkler * @dscr: dma descriptor
15ce0925e8STomas Winkler *
16ce0925e8STomas Winkler * Return:
17ce0925e8STomas Winkler * * 0 - on success or zero allocation request
18ce0925e8STomas Winkler * * -EINVAL - if size is not power of 2
19ce0925e8STomas Winkler * * -ENOMEM - of allocation has failed
20ce0925e8STomas Winkler */
mei_dmam_dscr_alloc(struct mei_device * dev,struct mei_dma_dscr * dscr)21ce0925e8STomas Winkler static int mei_dmam_dscr_alloc(struct mei_device *dev,
22ce0925e8STomas Winkler struct mei_dma_dscr *dscr)
23ce0925e8STomas Winkler {
24ce0925e8STomas Winkler if (!dscr->size)
25ce0925e8STomas Winkler return 0;
26ce0925e8STomas Winkler
27ce0925e8STomas Winkler if (WARN_ON(!is_power_of_2(dscr->size)))
28ce0925e8STomas Winkler return -EINVAL;
29ce0925e8STomas Winkler
30ce0925e8STomas Winkler if (dscr->vaddr)
31ce0925e8STomas Winkler return 0;
32ce0925e8STomas Winkler
33ce0925e8STomas Winkler dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
34ce0925e8STomas Winkler GFP_KERNEL);
35ce0925e8STomas Winkler if (!dscr->vaddr)
36ce0925e8STomas Winkler return -ENOMEM;
37ce0925e8STomas Winkler
38ce0925e8STomas Winkler return 0;
39ce0925e8STomas Winkler }
40ce0925e8STomas Winkler
41ce0925e8STomas Winkler /**
42ce0925e8STomas Winkler * mei_dmam_dscr_free() - free a managed coherent buffer
43ce0925e8STomas Winkler * from the dma descriptor
44ce0925e8STomas Winkler * @dev: mei_device
45ce0925e8STomas Winkler * @dscr: dma descriptor
46ce0925e8STomas Winkler */
mei_dmam_dscr_free(struct mei_device * dev,struct mei_dma_dscr * dscr)47ce0925e8STomas Winkler static void mei_dmam_dscr_free(struct mei_device *dev,
48ce0925e8STomas Winkler struct mei_dma_dscr *dscr)
49ce0925e8STomas Winkler {
50ce0925e8STomas Winkler if (!dscr->vaddr)
51ce0925e8STomas Winkler return;
52ce0925e8STomas Winkler
53ce0925e8STomas Winkler dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
54ce0925e8STomas Winkler dscr->vaddr = NULL;
55ce0925e8STomas Winkler }
56ce0925e8STomas Winkler
57ce0925e8STomas Winkler /**
58ce0925e8STomas Winkler * mei_dmam_ring_free() - free dma ring buffers
59ce0925e8STomas Winkler * @dev: mei device
60ce0925e8STomas Winkler */
mei_dmam_ring_free(struct mei_device * dev)61ce0925e8STomas Winkler void mei_dmam_ring_free(struct mei_device *dev)
62ce0925e8STomas Winkler {
63ce0925e8STomas Winkler int i;
64ce0925e8STomas Winkler
65ce0925e8STomas Winkler for (i = 0; i < DMA_DSCR_NUM; i++)
66ce0925e8STomas Winkler mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
67ce0925e8STomas Winkler }
68ce0925e8STomas Winkler
69ce0925e8STomas Winkler /**
70ce0925e8STomas Winkler * mei_dmam_ring_alloc() - allocate dma ring buffers
71ce0925e8STomas Winkler * @dev: mei device
72ce0925e8STomas Winkler *
73ce0925e8STomas Winkler * Return: -ENOMEM on allocation failure 0 otherwise
74ce0925e8STomas Winkler */
mei_dmam_ring_alloc(struct mei_device * dev)75ce0925e8STomas Winkler int mei_dmam_ring_alloc(struct mei_device *dev)
76ce0925e8STomas Winkler {
77ce0925e8STomas Winkler int i;
78ce0925e8STomas Winkler
79ce0925e8STomas Winkler for (i = 0; i < DMA_DSCR_NUM; i++)
80ce0925e8STomas Winkler if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
81ce0925e8STomas Winkler goto err;
82ce0925e8STomas Winkler
83ce0925e8STomas Winkler return 0;
84ce0925e8STomas Winkler
85ce0925e8STomas Winkler err:
86ce0925e8STomas Winkler mei_dmam_ring_free(dev);
87ce0925e8STomas Winkler return -ENOMEM;
88ce0925e8STomas Winkler }
89ce0925e8STomas Winkler
90ce0925e8STomas Winkler /**
91ce0925e8STomas Winkler * mei_dma_ring_is_allocated() - check if dma ring is allocated
92ce0925e8STomas Winkler * @dev: mei device
93ce0925e8STomas Winkler *
94ce0925e8STomas Winkler * Return: true if dma ring is allocated
95ce0925e8STomas Winkler */
mei_dma_ring_is_allocated(struct mei_device * dev)96ce0925e8STomas Winkler bool mei_dma_ring_is_allocated(struct mei_device *dev)
97ce0925e8STomas Winkler {
98ce0925e8STomas Winkler return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
99ce0925e8STomas Winkler }
1002513eb0dSTomas Winkler
/**
 * mei_dma_ring_ctrl() - retrieve the dma ring control block
 * @dev: mei_device
 *
 * Return: pointer to the ring control block,
 *         NULL if the control descriptor is not allocated
 */
static inline
struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
{
	return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
}
1062513eb0dSTomas Winkler
1072513eb0dSTomas Winkler /**
1082513eb0dSTomas Winkler * mei_dma_ring_reset() - reset the dma control block
1092513eb0dSTomas Winkler * @dev: mei device
1102513eb0dSTomas Winkler */
mei_dma_ring_reset(struct mei_device * dev)1112513eb0dSTomas Winkler void mei_dma_ring_reset(struct mei_device *dev)
1122513eb0dSTomas Winkler {
1132513eb0dSTomas Winkler struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
1142513eb0dSTomas Winkler
1152513eb0dSTomas Winkler if (!ctrl)
1162513eb0dSTomas Winkler return;
1172513eb0dSTomas Winkler
1182513eb0dSTomas Winkler memset(ctrl, 0, sizeof(*ctrl));
1192513eb0dSTomas Winkler }
1206316321fSTomas Winkler
1216316321fSTomas Winkler /**
1226316321fSTomas Winkler * mei_dma_copy_from() - copy from dma ring into buffer
1236316321fSTomas Winkler * @dev: mei device
1246316321fSTomas Winkler * @buf: data buffer
1256316321fSTomas Winkler * @offset: offset in slots.
1266316321fSTomas Winkler * @n: number of slots to copy.
1276316321fSTomas Winkler */
mei_dma_copy_from(struct mei_device * dev,unsigned char * buf,u32 offset,u32 n)1286316321fSTomas Winkler static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
1296316321fSTomas Winkler u32 offset, u32 n)
1306316321fSTomas Winkler {
1316316321fSTomas Winkler unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;
1326316321fSTomas Winkler
1336316321fSTomas Winkler size_t b_offset = offset << 2;
1346316321fSTomas Winkler size_t b_n = n << 2;
1356316321fSTomas Winkler
1366316321fSTomas Winkler memcpy(buf, dbuf + b_offset, b_n);
1376316321fSTomas Winkler
1386316321fSTomas Winkler return b_n;
1396316321fSTomas Winkler }
1406316321fSTomas Winkler
1416316321fSTomas Winkler /**
142*c30362ccSTomas Winkler * mei_dma_copy_to() - copy to a buffer to the dma ring
143*c30362ccSTomas Winkler * @dev: mei device
144*c30362ccSTomas Winkler * @buf: data buffer
145*c30362ccSTomas Winkler * @offset: offset in slots.
146*c30362ccSTomas Winkler * @n: number of slots to copy.
147*c30362ccSTomas Winkler */
mei_dma_copy_to(struct mei_device * dev,unsigned char * buf,u32 offset,u32 n)148*c30362ccSTomas Winkler static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
149*c30362ccSTomas Winkler u32 offset, u32 n)
150*c30362ccSTomas Winkler {
151*c30362ccSTomas Winkler unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;
152*c30362ccSTomas Winkler
153*c30362ccSTomas Winkler size_t b_offset = offset << 2;
154*c30362ccSTomas Winkler size_t b_n = n << 2;
155*c30362ccSTomas Winkler
156*c30362ccSTomas Winkler memcpy(hbuf + b_offset, buf, b_n);
157*c30362ccSTomas Winkler
158*c30362ccSTomas Winkler return b_n;
159*c30362ccSTomas Winkler }
160*c30362ccSTomas Winkler
/**
 * mei_dma_ring_read() - read data from the ring
 * @dev: mei device
 * @buf: buffer to read into: may be NULL in case of dropping the data.
 * @len: length to read.
 */
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 dbuf_depth;
	u32 rd_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "reading from dma %u bytes\n", len);

	if (!len)
		return;

	/* depth in slots; size is a power of 2 so depth-1 is a valid mask */
	dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
	/* the index is free-running; mask it into the ring */
	rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
	slots = mei_data2slots(len);

	/* if buf is NULL we drop the packet by advancing the pointer.*/
	if (!buf)
		goto out;

	/* copy in two chunks when the data wraps past the ring end */
	if (rd_idx + slots > dbuf_depth) {
		buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
		rem = slots - (dbuf_depth - rd_idx);
		rd_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_from(dev, buf, rd_idx, rem);
out:
	/* advance the unmasked free-running index; masked at use time */
	WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
}
201*c30362ccSTomas Winkler
mei_dma_ring_hbuf_depth(struct mei_device * dev)202*c30362ccSTomas Winkler static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
203*c30362ccSTomas Winkler {
204*c30362ccSTomas Winkler return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
205*c30362ccSTomas Winkler }
206*c30362ccSTomas Winkler
207*c30362ccSTomas Winkler /**
208*c30362ccSTomas Winkler * mei_dma_ring_empty_slots() - calaculate number of empty slots in dma ring
209*c30362ccSTomas Winkler * @dev: mei_device
210*c30362ccSTomas Winkler *
211*c30362ccSTomas Winkler * Return: number of empty slots
212*c30362ccSTomas Winkler */
mei_dma_ring_empty_slots(struct mei_device * dev)213*c30362ccSTomas Winkler u32 mei_dma_ring_empty_slots(struct mei_device *dev)
214*c30362ccSTomas Winkler {
215*c30362ccSTomas Winkler struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
216*c30362ccSTomas Winkler u32 wr_idx, rd_idx, hbuf_depth, empty;
217*c30362ccSTomas Winkler
218*c30362ccSTomas Winkler if (!mei_dma_ring_is_allocated(dev))
219*c30362ccSTomas Winkler return 0;
220*c30362ccSTomas Winkler
221*c30362ccSTomas Winkler if (WARN_ON(!ctrl))
222*c30362ccSTomas Winkler return 0;
223*c30362ccSTomas Winkler
224*c30362ccSTomas Winkler /* easier to work in slots */
225*c30362ccSTomas Winkler hbuf_depth = mei_dma_ring_hbuf_depth(dev);
226*c30362ccSTomas Winkler rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
227*c30362ccSTomas Winkler wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);
228*c30362ccSTomas Winkler
229*c30362ccSTomas Winkler if (rd_idx > wr_idx)
230*c30362ccSTomas Winkler empty = rd_idx - wr_idx;
231*c30362ccSTomas Winkler else
232*c30362ccSTomas Winkler empty = hbuf_depth - (wr_idx - rd_idx);
233*c30362ccSTomas Winkler
234*c30362ccSTomas Winkler return empty;
235*c30362ccSTomas Winkler }
236*c30362ccSTomas Winkler
/**
 * mei_dma_ring_write - write data to dma ring host buffer
 *
 * @dev: mei_device
 * @buf: data will be written
 * @len: data length
 */
void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 hbuf_depth;
	u32 wr_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
	/* depth in slots; size is a power of 2 so depth-1 is a valid mask */
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	/* the index is free-running; mask it into the ring */
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
	slots = mei_data2slots(len);

	/* copy in two chunks when the data wraps past the ring end */
	if (wr_idx + slots > hbuf_depth) {
		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
		rem = slots - (hbuf_depth - wr_idx);
		wr_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_to(dev, buf, wr_idx, rem);

	/* advance the unmasked free-running index; masked at use time */
	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
}
270