// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * @File    ctvmem.c
 *
 * @Brief
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 *
 * @Author Liu Chun
 * @Date Apr 1 2008
 */

#include "ctvmem.h"
#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

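/*
 * Each page-table page (PTP) holds CT_PAGE_SIZE / sizeof(void *)
 * pointer-sized entries, so a single PTP maps CT_ADDRS_PER_PAGE bytes
 * of device virtual address space.
 */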
#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)

/*
 * Find or create a vm block based on the requested @size.
 * @size must be page aligned.
 */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		dev_err(atc->card->dev,
			"Not enough device virtual memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
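	/* First-fit scan of the address-ordered free list */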
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from the unused list to the used list directly */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}

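	/* Split: carve the request off the front of the larger free block */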
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

 out:
	mutex_unlock(&vm->lock);
	return block;
}

static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

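	/* Find the insertion point in the address-ordered free list */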
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			/* Adjacent to the following block: merge into it */
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}

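	/* Walk backward and coalesce with the preceding block when adjacent */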
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		/* Merge only exactly adjacent blocks; a gap between them
		 * is address space that is still in use. */
		if ((pre_ent->addr + pre_ent->size) != entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}

/* Map host memory (kmalloced/vmalloced) to device logical addresses. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned int i, pages;
	unsigned long *ptp;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);

	block = get_vm_block(vm, size, atc);
	if (block == NULL) {
		dev_err(atc->card->dev,
			"No virtual memory block big enough to allocate!\n");
		return NULL;
	}

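	/* Fill the device page table: one entry per CT_PAGE_SIZE page,
	 * holding the host physical address of that page. */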
	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;

		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

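	/* Keep the caller's original size; put_vm_block() re-aligns it on free */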
	block->size = size;
	return block;
}

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* No PTE cleanup is needed; just return the block to the free pool */
	put_vm_block(vm, block);
}

/*
 * Return the host physical address of the @index-th device
 * page-table page on success, or ~0UL on failure.
 * The first ~0UL returned marks the end of the PTP list.
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
}

int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  snd_dma_pci_data(pci),
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* Release any page table pages allocated so far */
		ct_vm_destroy(vm);
		return -ENOMEM;
	}
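	/* Only the address range covered by the allocated PTPs is usable */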
	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);
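	/* Seed the free list with a single block spanning the whole space */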
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}

/*
 * The caller must ensure that no mapped pages are still in use
 * by the hardware before calling this function.
 */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}