1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2667ce33eSRob Clark /*
3667ce33eSRob Clark * Copyright (C) 2016 Red Hat
4667ce33eSRob Clark * Author: Rob Clark <robdclark@gmail.com>
5667ce33eSRob Clark */
6667ce33eSRob Clark
7667ce33eSRob Clark #include "msm_drv.h"
895d1deb0SRob Clark #include "msm_fence.h"
9667ce33eSRob Clark #include "msm_gem.h"
10667ce33eSRob Clark #include "msm_mmu.h"
11667ce33eSRob Clark
12ee546cd3SJordan Crouse static void
msm_gem_address_space_destroy(struct kref * kref)13ee546cd3SJordan Crouse msm_gem_address_space_destroy(struct kref *kref)
14ee546cd3SJordan Crouse {
15ee546cd3SJordan Crouse struct msm_gem_address_space *aspace = container_of(kref,
16ee546cd3SJordan Crouse struct msm_gem_address_space, kref);
17ee546cd3SJordan Crouse
18ee546cd3SJordan Crouse drm_mm_takedown(&aspace->mm);
19ee546cd3SJordan Crouse if (aspace->mmu)
20ee546cd3SJordan Crouse aspace->mmu->funcs->destroy(aspace->mmu);
2125faf2f2SRob Clark put_pid(aspace->pid);
22ee546cd3SJordan Crouse kfree(aspace);
23ee546cd3SJordan Crouse }
24ee546cd3SJordan Crouse
25ee546cd3SJordan Crouse
msm_gem_address_space_put(struct msm_gem_address_space * aspace)26ee546cd3SJordan Crouse void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
27ee546cd3SJordan Crouse {
28ee546cd3SJordan Crouse if (aspace)
29ee546cd3SJordan Crouse kref_put(&aspace->kref, msm_gem_address_space_destroy);
30ee546cd3SJordan Crouse }
31ee546cd3SJordan Crouse
32933415e2SJordan Crouse struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space * aspace)33933415e2SJordan Crouse msm_gem_address_space_get(struct msm_gem_address_space *aspace)
34933415e2SJordan Crouse {
35933415e2SJordan Crouse if (!IS_ERR_OR_NULL(aspace))
36933415e2SJordan Crouse kref_get(&aspace->kref);
37933415e2SJordan Crouse
38933415e2SJordan Crouse return aspace;
39933415e2SJordan Crouse }
40933415e2SJordan Crouse
417ad0e8cfSJordan Crouse /* Actually unmap memory for the vma */
msm_gem_vma_purge(struct msm_gem_vma * vma)42fc2f0756SRob Clark void msm_gem_vma_purge(struct msm_gem_vma *vma)
43667ce33eSRob Clark {
44fc2f0756SRob Clark struct msm_gem_address_space *aspace = vma->aspace;
452ee4b5d2SRob Clark unsigned size = vma->node.size;
467ad0e8cfSJordan Crouse
477ad0e8cfSJordan Crouse /* Don't do anything if the memory isn't mapped */
487ad0e8cfSJordan Crouse if (!vma->mapped)
497ad0e8cfSJordan Crouse return;
507ad0e8cfSJordan Crouse
5170dc51b4SJordan Crouse aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
527ad0e8cfSJordan Crouse
537ad0e8cfSJordan Crouse vma->mapped = false;
54667ce33eSRob Clark }
55667ce33eSRob Clark
/* Map and pin vma: */
int
msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	/* An iova must already have been allocated (msm_gem_vma_init) */
	if (GEM_WARN_ON(!vma->iova))
		return -EINVAL;

	/* Idempotent: already mapped means nothing left to do */
	if (vma->mapped)
		return 0;

	/*
	 * NOTE(review): mapped is set *before* the actual MMU map and rolled
	 * back on failure below — presumably so concurrent observers treat an
	 * in-progress map as mapped; confirm against the locking scheme in
	 * the callers before reordering.
	 */
	vma->mapped = true;

	/* No aspace: nothing to program into a pagetable */
	if (!aspace)
		return 0;

	/*
	 * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
	 * a lock across map/unmap which is also used in the job_run()
	 * path, as this can cause deadlock in job_run() vs shrinker/
	 * reclaim.
	 *
	 * Revisit this if we can come up with a scheme to pre-alloc pages
	 * for the pgtable in map/unmap ops.
	 */
	ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);

	if (ret) {
		/* Map failed: undo the optimistic flag set above */
		vma->mapped = false;
	}

	return ret;
}
92667ce33eSRob Clark
/* Close an iova. Warn if it is still in use */
void msm_gem_vma_close(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;

	/* Caller should have purged (unmapped) the vma first */
	GEM_WARN_ON(vma->mapped);

	/* aspace->lock protects the drm_mm allocator state */
	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	/* Drop the reference taken when the iova was allocated */
	msm_gem_address_space_put(aspace);
}
1097ad0e8cfSJordan Crouse
msm_gem_vma_new(struct msm_gem_address_space * aspace)110fc2f0756SRob Clark struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
111fc2f0756SRob Clark {
112fc2f0756SRob Clark struct msm_gem_vma *vma;
113fc2f0756SRob Clark
114fc2f0756SRob Clark vma = kzalloc(sizeof(*vma), GFP_KERNEL);
115fc2f0756SRob Clark if (!vma)
116fc2f0756SRob Clark return NULL;
117fc2f0756SRob Clark
118fc2f0756SRob Clark vma->aspace = aspace;
119fc2f0756SRob Clark
120fc2f0756SRob Clark return vma;
121fc2f0756SRob Clark }
122fc2f0756SRob Clark
/* Initialize a new vma and allocate an iova for it */
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	/* vma must have been created against an address space */
	if (GEM_WARN_ON(!aspace))
		return -EINVAL;

	/* Refuse to re-init a vma that already holds an iova */
	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	/* aspace->lock protects the drm_mm allocator state */
	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
					  size, PAGE_SIZE, 0,
					  range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start;
	vma->mapped = false;

	/* Hold the aspace alive while this vma owns a range in it;
	 * dropped in msm_gem_vma_close().
	 */
	kref_get(&aspace->kref);

	return 0;
}
152667ce33eSRob Clark
153667ce33eSRob Clark struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu * mmu,const char * name,u64 va_start,u64 size)154ccac7ce3SJordan Crouse msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
155ccac7ce3SJordan Crouse u64 va_start, u64 size)
156667ce33eSRob Clark {
157667ce33eSRob Clark struct msm_gem_address_space *aspace;
158ccac7ce3SJordan Crouse
159ccac7ce3SJordan Crouse if (IS_ERR(mmu))
160ccac7ce3SJordan Crouse return ERR_CAST(mmu);
161667ce33eSRob Clark
162667ce33eSRob Clark aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
163667ce33eSRob Clark if (!aspace)
164667ce33eSRob Clark return ERR_PTR(-ENOMEM);
165667ce33eSRob Clark
1660e08270aSSushmita Susheelendra spin_lock_init(&aspace->lock);
167667ce33eSRob Clark aspace->name = name;
168ccac7ce3SJordan Crouse aspace->mmu = mmu;
169a636a0ffSRob Clark aspace->va_start = va_start;
170a636a0ffSRob Clark aspace->va_size = size;
171667ce33eSRob Clark
1722ee4b5d2SRob Clark drm_mm_init(&aspace->mm, va_start, size);
173c2052a4eSJonathan Marek
174c2052a4eSJonathan Marek kref_init(&aspace->kref);
175c2052a4eSJonathan Marek
176c2052a4eSJonathan Marek return aspace;
177c2052a4eSJonathan Marek }
178