// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

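/*
 * Summary of the handle scheme used below (derived from the code in this
 * file): hl_mmap_mem_buf_alloc() builds a handle as
 * ((idr_id | mem_id) << PAGE_SHIFT), so the handle is page aligned and can be
 * used directly as an mmap offset. Lookups shift the handle back by
 * PAGE_SHIFT and take lower_32_bits() of the result as the IDR key; the
 * mem_id type bits are expected to live above bit 31 so that this recovers
 * the raw IDR id.
 */
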
/**
 * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
 * the buffer descriptor.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Find the buffer in the store and return a pointer to its descriptor.
 * Increase buffer refcount. If not found - return NULL.
 */
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev, "Buff get failed, no match to handle %#llx\n", handle);
		return NULL;
	}
	kref_get(&buf->refcount);
	spin_unlock(&mmg->lock);
	return buf;
}

/**
 * hl_mmap_mem_buf_destroy - destroy the unused buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Internal function, used as a final step of buffer release. Shall be invoked
 * only when the buffer is no longer in use (removed from idr). Will call the
 * release callback (if applicable), and free the memory.
 */
static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
{
	if (buf->behavior->release)
		buf->behavior->release(buf);

	kfree(buf);
}

/**
 * hl_mmap_mem_buf_release - release buffer
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used as a kref release callback, when the last user of
 * the buffer is released. Shall not be called from an interrupt context, as
 * it removes the buffer from the idr and destroys it.
 */
static void hl_mmap_mem_buf_release(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

	spin_lock(&buf->mmg->lock);
	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&buf->mmg->lock);

	hl_mmap_mem_buf_destroy(buf);
}

/**
 * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used for kref put by handle. Assumes mmg lock is taken.
 * Will remove the buffer from idr, without destroying it.
 */
static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
}

/**
 * hl_mmap_mem_buf_put - decrease the reference to the buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context.
 */
int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
{
	return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
}

/**
 * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
 * given handle.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context. Return -EINVAL if handle was
 * not found, else return the put outcome (0 or 1).
 */
int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev,
			"Buff put failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

	if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
		spin_unlock(&mmg->lock);
		hl_mmap_mem_buf_destroy(buf);
		return 1;
	}

	spin_unlock(&mmg->lock);
	return 0;
}

/**
 * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
 *
 * @mmg: parent unified memory manager
 * @behavior: behavior object describing this buffer's polymorphic behavior
 * @gfp: gfp flags to use for the memory allocations
 * @args: additional args passed to behavior->alloc
 *
 * Allocate and register a new memory buffer inside the given memory manager.
 * Return the pointer to the new buffer on success or NULL on failure.
 */
struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
		      struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
		      void *args)
{
	struct hl_mmap_mem_buf *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), gfp);
	if (!buf)
		return NULL;

	spin_lock(&mmg->lock);
	rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
	spin_unlock(&mmg->lock);
	if (rc < 0) {
		dev_err(mmg->dev,
			"%s: Failed to allocate IDR for a new buffer, rc=%d\n",
			behavior->topic, rc);
		goto free_buf;
	}

	buf->mmg = mmg;
	buf->behavior = behavior;
	buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
	kref_init(&buf->refcount);

	rc = buf->behavior->alloc(buf, gfp, args);
	if (rc) {
		dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
			behavior->topic, rc);
		goto remove_idr;
	}

	return buf;

remove_idr:
	spin_lock(&mmg->lock);
	idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&mmg->lock);
free_buf:
	kfree(buf);
	return NULL;
}
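
/*
 * Usage sketch (illustrative only, not part of this file's API): a client is
 * expected to define a static behavior object with its own alloc/release/mmap
 * callbacks and a unique mem_id, and then allocate buffers through the
 * manager. The example_* names below are hypothetical placeholders.
 *
 *	static struct hl_mmap_mem_buf_behavior example_behavior = {
 *		.topic = "EXAMPLE",
 *		.mem_id = HL_MMAP_TYPE_CB,
 *		.alloc = example_alloc,
 *		.release = example_release,
 *		.mmap = example_mmap,
 *	};
 *
 *	buf = hl_mmap_mem_buf_alloc(mmg, &example_behavior, GFP_KERNEL, &args);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	hl_mmap_mem_buf_put(buf);
 */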

/**
 * hl_mmap_mem_buf_vm_close - handle mmap close
 *
 * @vma: the vma object for which mmap was closed.
 *
 * Put the memory buffer if it is no longer mapped.
 */
static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
{
	struct hl_mmap_mem_buf *buf =
		(struct hl_mmap_mem_buf *)vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		buf->real_mapped_size = new_mmap_size;
		return;
	}

	atomic_set(&buf->mmap, 0);
	hl_mmap_mem_buf_put(buf);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
	.close = hl_mmap_mem_buf_vm_close
};

/**
 * hl_mem_mgr_mmap - map the given buffer to the user
 *
 * @mmg: unified memory manager
 * @vma: the vma object for which mmap was requested.
 * @args: additional args passed to behavior->mmap
 *
 * Map the buffer specified by the vma->vm_pgoff to the given vma.
 */
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
		    void *args)
{
	struct hl_mmap_mem_buf *buf;
	u64 user_mem_size;
	u64 handle;
	int rc;

	/* We use the page offset to hold the buffer handle and thus we need
	 * to clear it before doing the mmap itself
	 */
	handle = vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = 0;

	/* Reference was taken here */
	buf = hl_mmap_mem_buf_get(mmg, handle);
	if (!buf) {
		dev_err(mmg->dev,
			"Memory mmap failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_mem_size = vma->vm_end - vma->vm_start;
	if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
		dev_err(mmg->dev,
			"%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
			buf->behavior->topic, user_mem_size, buf->mappable_size);
		rc = -EINVAL;
		goto put_mem;
	}

#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
	if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#else
	if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#endif
		dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
			buf->behavior->topic, vma->vm_start);

		rc = -EINVAL;
		goto put_mem;
	}

	if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
		dev_err(mmg->dev,
			"%s: Memory mmap failed, already mapped to user\n",
			buf->behavior->topic);
		rc = -EINVAL;
		goto put_mem;
	}

	vma->vm_ops = &hl_mmap_mem_buf_vm_ops;

	/* Note: We're transferring the memory reference to vma->vm_private_data here. */

	vma->vm_private_data = buf;

	rc = buf->behavior->mmap(buf, vma, args);
	if (rc) {
		atomic_set(&buf->mmap, 0);
		goto put_mem;
	}

	buf->real_mapped_size = buf->mappable_size;
	vma->vm_pgoff = handle >> PAGE_SHIFT;

	return 0;

put_mem:
	hl_mmap_mem_buf_put(buf);
	return rc;
}
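
/*
 * Dispatch sketch (illustrative, assuming a typical device mmap handler): the
 * driver's file_operations .mmap callback is expected to route to
 * hl_mem_mgr_mmap() once it has identified vma->vm_pgoff as a memory manager
 * handle, e.g.:
 *
 *	static int example_device_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct example_fpriv *fpriv = filp->private_data;
 *
 *		return hl_mem_mgr_mmap(&fpriv->mem_mgr, vma, NULL);
 *	}
 *
 * example_device_mmap(), example_fpriv and its mem_mgr field are hypothetical
 * names; the real driver is expected to select the manager (or another mmap
 * path) based on the HL_MMAP_TYPE_* bits embedded in the offset.
 */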

/**
 * hl_mem_mgr_init - initialize unified memory manager
 *
 * @dev: owner device pointer
 * @mmg: structure to initialize
 *
 * Initialize an instance of unified memory manager
 */
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
{
	mmg->dev = dev;
	spin_lock_init(&mmg->lock);
	idr_init(&mmg->handles);
}

/**
 * hl_mem_mgr_fini - release unified memory manager
 *
 * @mmg: parent unified memory manager
 *
 * Release the unified memory manager by putting all buffers that are still
 * alive. Shall not be called from an interrupt context.
 */
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
{
	struct hl_mmap_mem_buf *buf;
	struct idr *idp;
	const char *topic;
	u32 id;

	idp = &mmg->handles;

	idr_for_each_entry(idp, buf, id) {
		topic = buf->behavior->topic;
		if (hl_mmap_mem_buf_put(buf) != 1)
			dev_err(mmg->dev,
				"%s: Buff handle %u for CTX is still alive\n",
				topic, id);
	}
}

/**
 * hl_mem_mgr_idr_destroy() - destroy memory manager IDR.
 * @mmg: parent unified memory manager
 *
 * Destroy the memory manager IDR.
 * Shall be called when IDR is empty and no memory buffers are in use.
 */
void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg)
{
	if (!idr_is_empty(&mmg->handles))
		dev_crit(mmg->dev, "memory manager IDR is destroyed while it is not empty!\n");

	idr_destroy(&mmg->handles);
}
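
/*
 * Lifecycle note (a summary of the API above, not additional functionality):
 * a manager instance is expected to go through hl_mem_mgr_init(), then any
 * number of hl_mmap_mem_buf_alloc()/hl_mem_mgr_mmap()/..._put() calls, then
 * hl_mem_mgr_fini() to drop the remaining buffers, and finally
 * hl_mem_mgr_idr_destroy() once the IDR is known to be empty.
 */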