// SPDX-License-Identifier: GPL-2.0-or-later
/*
    User DMA

    Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004 Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

 */

#include "ivtv-driver.h"
#include "ivtv-udma.h"

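/*
 * Work out the page-aligned layout of the user buffer [first, first + size):
 * the page-aligned start address, the offset into the first page, the number
 * of bytes used in the last page (tail) and the number of pages spanned.
 */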
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
	dma_page->uaddr = first & PAGE_MASK;
	dma_page->offset = first & ~PAGE_MASK;
	dma_page->tail = 1 + ((first+size-1) & ~PAGE_MASK);
	dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->last = ((first+size-1) & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->page_count = dma_page->last - dma_page->first + 1;
	if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset;
}

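/*
 * Fill the scatterlist, starting at index map_offset, with the pinned user
 * pages described by dma_page.  Highmem pages are copied into low-memory
 * bounce pages first and the bounce page is placed in the scatterlist instead.
 * Returns the next free scatterlist index, or -1 if a bounce page could not
 * be allocated.
 */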
int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
	int i, offset;
	unsigned long flags;

	if (map_offset < 0)
		return map_offset;

	offset = dma_page->offset;

	/* Fill SG Array with new values */
	for (i = 0; i < dma_page->page_count; i++) {
		unsigned int len = (i == dma_page->page_count - 1) ?
			dma_page->tail : PAGE_SIZE - offset;

		if (PageHighMem(dma->map[map_offset])) {
			void *src;

			if (dma->bouncemap[map_offset] == NULL)
				dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
			if (dma->bouncemap[map_offset] == NULL)
				return -1;
			local_irq_save(flags);
			src = kmap_atomic(dma->map[map_offset]) + offset;
			memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
			kunmap_atomic(src);
			local_irq_restore(flags);
			sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
		}
		else {
			sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
		}
		offset = 0;
		map_offset++;
	}
	return map_offset;
}

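/*
 * Convert the DMA-mapped scatterlist into the little-endian SG array handed
 * to the hardware: each entry carries the mapped bus address as source, the
 * device offset as destination and the segment length.  Once 'split' bytes
 * have been consumed the destination switches to buffer_offset_2, so one
 * transfer can target two separate device offsets.
 */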
void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split) {
	int i;
	struct scatterlist *sg;

	for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
		dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
		dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
		dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
		buffer_offset += sg_dma_len(sg);

		split -= sg_dma_len(sg);
		if (split == 0)
			buffer_offset = buffer_offset_2;
	}
}

/* User DMA Buffers */
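/*
 * DMA-map the SG array itself so the device can fetch the descriptors;
 * guarded by SG_handle so the mapping is only created once.
 */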
void ivtv_udma_alloc(struct ivtv *itv)
{
	if (itv->udma.SG_handle == 0) {
		/* Map DMA Page Array Buffer */
		itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
						     itv->udma.SGarray,
						     sizeof(itv->udma.SGarray),
						     DMA_TO_DEVICE);
		ivtv_udma_sync_for_cpu(itv);
	}
}

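/*
 * Prepare a user buffer for DMA to the device address ivtv_dest_addr: pin the
 * user pages, build and DMA-map the scatterlist (bouncing highmem pages), fill
 * the hardware SG array and tag its last entry with the interrupt bit.
 * Returns the number of pinned pages, or a negative error code.
 */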
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		    void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
				user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Pin user pages for DMA Xfer */
	err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
				      dma->map, 0);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				err, user_dma.page_count);
		if (err >= 0) {
			unpin_user_pages(dma->map, err);
			return -EINVAL;
		}
		return err;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		IVTV_DEBUG_WARN("%s: could not allocate bounce buffers for highmem userspace buffers\n",
				__func__);
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
				    dma->page_count, DMA_TO_DEVICE);
	if (!dma->SG_length) {
		IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}

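/*
 * Undo ivtv_udma_setup(): unmap the scatterlist, sync the SG array back for
 * the CPU and unpin the user pages.
 */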
void ivtv_udma_unmap(struct ivtv *itv)
{
	struct ivtv_user_dma *dma = &itv->udma;

	IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

	/* Nothing to free */
	if (dma->page_count == 0)
		return;

	/* Unmap Scatterlist */
	if (dma->SG_length) {
		dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
			     DMA_TO_DEVICE);
		dma->SG_length = 0;
	}
	/* sync DMA */
	ivtv_udma_sync_for_cpu(itv);

	unpin_user_pages(dma->map, dma->page_count);
	dma->page_count = 0;
}

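/*
 * Release all user DMA resources: the mapping of the SG array, any
 * scatterlist that is still mapped, and the highmem bounce pages.
 */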
void ivtv_udma_free(struct ivtv *itv)
{
	int i;

	/* Unmap SG Array */
	if (itv->udma.SG_handle) {
		dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
				 sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
	}

	/* Unmap Scatterlist */
	if (itv->udma.SG_length) {
		dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
			     itv->udma.page_count, DMA_TO_DEVICE);
	}

	for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
		if (itv->udma.bouncemap[i])
			__free_page(itv->udma.bouncemap[i]);
	}
}

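/*
 * Start the transfer: point the DMA engine at the mapped SG array, set the
 * transfer-start bit and update the driver's DMA state flags.
 */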
void ivtv_udma_start(struct ivtv *itv)
{
	IVTV_DEBUG_DMA("start UDMA\n");
	write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	set_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}

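/*
 * Start user DMA right away if no DMA is currently in progress, otherwise
 * mark it as pending.
 */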
void ivtv_udma_prepare(struct ivtv *itv)
{
	unsigned long flags;

	spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		ivtv_udma_start(itv);
	else
		set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}