1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/pm_domain.h>
26 #include <linux/pm_runtime.h>
27 
28 #include "mock_engine.h"
29 #include "mock_context.h"
30 #include "mock_request.h"
31 #include "mock_gem_device.h"
32 #include "mock_gem_object.h"
33 #include "mock_gtt.h"
34 #include "mock_uncore.h"
35 
36 void mock_device_flush(struct drm_i915_private *i915)
37 {
38 	struct intel_engine_cs *engine;
39 	enum intel_engine_id id;
40 
41 	lockdep_assert_held(&i915->drm.struct_mutex);
42 
43 	for_each_engine(engine, i915, id)
44 		mock_engine_flush(engine);
45 
46 	i915_gem_retire_requests(i915);
47 }
48 
/*
 * drm_driver.release callback for the mock device: tear down everything
 * mock_gem_device() constructed, in reverse order of construction.
 */
static void mock_device_release(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Flush outstanding requests and mark the contexts lost under the lock. */
	mutex_lock(&i915->drm.struct_mutex);
	mock_device_flush(i915);
	i915_gem_contexts_lost(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	/* Stop the background workers before freeing the state they touch. */
	cancel_delayed_work_sync(&i915->gt.retire_work);
	cancel_delayed_work_sync(&i915->gt.idle_work);
	i915_gem_drain_workqueue(i915);

	mutex_lock(&i915->drm.struct_mutex);
	for_each_engine(engine, i915, id)
		mock_engine_free(engine);
	i915_gem_contexts_fini(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	/* Let deferred object frees complete before tearing down the GGTT. */
	drain_workqueue(i915->wq);
	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
	mock_fini_ggtt(i915);
	i915_gem_timeline_fini(&i915->gt.global_timeline);
	mutex_unlock(&i915->drm.struct_mutex);

	destroy_workqueue(i915->wq);

	/* Slab caches, destroyed in reverse order of their creation. */
	kmem_cache_destroy(i915->priorities);
	kmem_cache_destroy(i915->dependencies);
	kmem_cache_destroy(i915->requests);
	kmem_cache_destroy(i915->vmas);
	kmem_cache_destroy(i915->objects);

	i915_gemfs_fini(i915);

	drm_dev_fini(&i915->drm);
	/*
	 * Drop the last reference on the fake pci_dev; release_dev() then
	 * frees the single allocation holding both pdev and the i915.
	 */
	put_device(&i915->drm.pdev->dev);
}
91 
/* Minimal GEM-only drm_driver used to back the mock device. */
static struct drm_driver mock_driver = {
	.name = "mock",
	.driver_features = DRIVER_GEM,
	.release = mock_device_release,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
};
100 
/*
 * Final .release for the fake PCI device: the pci_dev (and the i915
 * allocated in the same block right after it) is simply kfree'd.
 */
static void release_dev(struct device *dev)
{
	kfree(to_pci_dev(dev));
}
107 
/*
 * Intentionally empty stand-in for the retire worker: the mock device
 * retires requests explicitly via mock_device_flush(), so the scheduled
 * delayed work must do nothing.
 */
static void mock_retire_work_handler(struct work_struct *work)
{
}
111 
/*
 * Intentionally empty stand-in for the idle worker; it exists only so
 * INIT_DELAYED_WORK() has a valid handler to install.
 */
static void mock_idle_work_handler(struct work_struct *work)
{
}
115 
/* Runtime-resume for the fake device: delegate to the generic PM helper. */
static int pm_domain_resume(struct device *dev)
{
	return pm_generic_runtime_resume(dev);
}
120 
/* Runtime-suspend for the fake device: delegate to the generic PM helper. */
static int pm_domain_suspend(struct device *dev)
{
	return pm_generic_runtime_suspend(dev);
}
125 
/*
 * PM domain attached to the fake device so runtime-PM calls have
 * well-defined (generic) suspend/resume behaviour without real hardware.
 */
static struct dev_pm_domain pm_domain = {
	.ops = {
		.runtime_suspend = pm_domain_suspend,
		.runtime_resume = pm_domain_resume,
	},
};
132 
/*
 * Construct a mock GEM device: a fake PCI device backing a
 * drm_i915_private with mock engines, GGTT, contexts and slab caches,
 * so selftests can run without real hardware.
 *
 * Returns the new device on success, or NULL on any failure (specific
 * error codes are not propagated to the caller).
 */
struct drm_i915_private *mock_gem_device(void)
{
	struct drm_i915_private *i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct pci_dev *pdev;
	int err;

	/*
	 * One allocation holds both the fake pci_dev and the i915 private
	 * data placed immediately after it; release_dev() frees both.
	 */
	pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
	if (!pdev)
		goto err;

	device_initialize(&pdev->dev);
	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
	pdev->dev.release = release_dev;
	dev_set_name(&pdev->dev, "mock");
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	/* hack to disable iommu for the fake device; force identity mapping */
	pdev->dev.archdata.iommu = (void *)-1;
#endif

	/*
	 * Give the fake device a working runtime-PM setup (generic
	 * suspend/resume) and take a wakeref so it stays "awake".
	 */
	dev_pm_domain_set(&pdev->dev, &pm_domain);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	WARN_ON(pm_runtime_get_sync(&pdev->dev));

	/* The i915 lives in the same allocation, right after the pci_dev. */
	i915 = (struct drm_i915_private *)(pdev + 1);
	pci_set_drvdata(pdev, i915);

	err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
	if (err) {
		pr_err("Failed to initialise mock GEM device: err=%d\n", err);
		goto put_device;
	}
	i915->drm.pdev = pdev;
	i915->drm.dev_private = i915;

	/* Using the global GTT may ask questions about KMS users, so prepare */
	drm_mode_config_init(&i915->drm);

	/* Presumably -1 marks this as matching no real GEN — TODO confirm. */
	mkwrite_device_info(i915)->gen = -1;

	/* Advertise all page sizes so huge-page paths can be exercised. */
	mkwrite_device_info(i915)->page_sizes =
		I915_GTT_PAGE_SIZE_4K |
		I915_GTT_PAGE_SIZE_64K |
		I915_GTT_PAGE_SIZE_2M;

	spin_lock_init(&i915->mm.object_stat_lock);
	mock_uncore_init(i915);

	init_waitqueue_head(&i915->gpu_error.wait_queue);
	init_waitqueue_head(&i915->gpu_error.reset_queue);

	i915->wq = alloc_ordered_workqueue("mock", 0);
	if (!i915->wq)
		goto put_device;

	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
	init_llist_head(&i915->mm.free_list);
	INIT_LIST_HEAD(&i915->mm.unbound_list);
	INIT_LIST_HEAD(&i915->mm.bound_list);

	mock_init_contexts(i915);

	/* Install no-op workers; the mock device retires explicitly. */
	INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
	INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);

	i915->gt.awake = true;

	/* Slab caches mirroring the real driver's, but with mock types. */
	i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
	if (!i915->objects)
		goto err_wq;

	i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!i915->vmas)
		goto err_objects;

	i915->requests = KMEM_CACHE(mock_request,
				    SLAB_HWCACHE_ALIGN |
				    SLAB_RECLAIM_ACCOUNT |
				    SLAB_TYPESAFE_BY_RCU);
	if (!i915->requests)
		goto err_vmas;

	i915->dependencies = KMEM_CACHE(i915_dependency,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT);
	if (!i915->dependencies)
		goto err_requests;

	i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
	if (!i915->priorities)
		goto err_dependencies;

	mutex_lock(&i915->drm.struct_mutex);
	INIT_LIST_HEAD(&i915->gt.timelines);
	err = i915_gem_timeline_init__global(i915);
	if (err) {
		mutex_unlock(&i915->drm.struct_mutex);
		goto err_priorities;
	}

	/*
	 * NOTE(review): the error paths below (err_priorities onwards) do
	 * not appear to unwind the GGTT/global timeline initialised here —
	 * confirm whether a mock_fini_ggtt()/timeline_fini() is needed.
	 */
	mock_init_ggtt(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	/* A single mock render engine is enough for the selftests. */
	mkwrite_device_info(i915)->ring_mask = BIT(0);
	i915->engine[RCS] = mock_engine(i915, "mock", RCS);
	if (!i915->engine[RCS])
		goto err_priorities;

	i915->kernel_context = mock_context(i915, NULL);
	if (!i915->kernel_context)
		goto err_engine;

	i915->preempt_context = mock_context(i915, NULL);
	if (!i915->preempt_context)
		goto err_kernel_context;

	/* gemfs is best-effort: warn on failure but keep the device. */
	WARN_ON(i915_gemfs_init(i915));

	return i915;

err_kernel_context:
	i915_gem_context_put(i915->kernel_context);
err_engine:
	for_each_engine(engine, i915, id)
		mock_engine_free(engine);
err_priorities:
	kmem_cache_destroy(i915->priorities);
err_dependencies:
	kmem_cache_destroy(i915->dependencies);
err_requests:
	kmem_cache_destroy(i915->requests);
err_vmas:
	kmem_cache_destroy(i915->vmas);
err_objects:
	kmem_cache_destroy(i915->objects);
err_wq:
	destroy_workqueue(i915->wq);
put_device:
	put_device(&pdev->dev);
err:
	return NULL;
}
279