/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "mock_engine.h"
#include "mock_context.h"
#include "mock_request.h"
#include "mock_gem_device.h"
#include "mock_gem_object.h"
#include "mock_gtt.h"
#include "mock_uncore.h"

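/*
 * Flush all mock engines and retire any outstanding requests. The caller
 * must hold struct_mutex, and no requests may remain active afterwards.
 */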
void mock_device_flush(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id)
		mock_engine_flush(engine);

	i915_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests);
}

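/*
 * drm_driver.release callback for the mock device. Teardown runs in roughly
 * the reverse order of construction: flush and lose the contexts, cancel the
 * stub retire/idle workers, free the engines and contexts, drain the
 * workqueues, release the mock GGTT, then the slab caches and finally the
 * fake PCI device.
 */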
static void mock_device_release(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	mutex_lock(&i915->drm.struct_mutex);
	mock_device_flush(i915);
	i915_gem_contexts_lost(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	cancel_delayed_work_sync(&i915->gt.retire_work);
	cancel_delayed_work_sync(&i915->gt.idle_work);
	i915_gem_drain_workqueue(i915);

	mutex_lock(&i915->drm.struct_mutex);
	for_each_engine(engine, i915, id)
		mock_engine_free(engine);
	i915_gem_contexts_fini(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drain_workqueue(i915->wq);
	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
	mock_fini_ggtt(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	WARN_ON(!list_empty(&i915->gt.timelines));

	destroy_workqueue(i915->wq);

	kmem_cache_destroy(i915->priorities);
	kmem_cache_destroy(i915->dependencies);
	kmem_cache_destroy(i915->requests);
	kmem_cache_destroy(i915->vmas);
	kmem_cache_destroy(i915->objects);

	i915_gemfs_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	drm_dev_fini(&i915->drm);
	put_device(&i915->drm.pdev->dev);
}

static struct drm_driver mock_driver = {
	.name = "mock",
	.driver_features = DRIVER_GEM,
	.release = mock_device_release,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
};

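/* Release callback for the fake PCI device; frees the pdev + i915 block. */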
static void release_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	kfree(pdev);
}

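/*
 * The retire/idle workers are intentionally no-ops: the selftests drive
 * request retirement explicitly, and the stubs merely give
 * cancel_delayed_work_sync() in mock_device_release() valid work items to
 * cancel.
 */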
static void mock_retire_work_handler(struct work_struct *work)
{
}

static void mock_idle_work_handler(struct work_struct *work)
{
}

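/*
 * Minimal PM domain for the fake device: route runtime PM callbacks to the
 * generic helpers so that pm_runtime_get/put on the mock device behave
 * sanely without a real bus implementation behind it.
 */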
static int pm_domain_resume(struct device *dev)
{
	return pm_generic_runtime_resume(dev);
}

static int pm_domain_suspend(struct device *dev)
{
	return pm_generic_runtime_suspend(dev);
}

static struct dev_pm_domain pm_domain = {
	.ops = {
		.runtime_suspend = pm_domain_suspend,
		.runtime_resume = pm_domain_resume,
	},
};

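/*
 * mock_gem_device - create a fake drm_i915_private for the GEM selftests
 *
 * Builds an i915 device around a fake PCI device: a mock uncore, mock GGTT,
 * a single mock RCS engine and a kernel context, so GEM code can be
 * exercised without real hardware. Returns the new device, or NULL on
 * failure.
 *
 * A caller would typically follow the pattern below (a sketch only;
 * exercise_mock_device() stands in for whatever the selftest actually runs,
 * and the final reference drop ends up in mock_device_release()):
 *
 *	struct drm_i915_private *i915 = mock_gem_device();
 *	if (!i915)
 *		return -ENOMEM;
 *	err = exercise_mock_device(i915);
 *	drm_dev_unref(&i915->drm);
 *	return err;
 */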
struct drm_i915_private *mock_gem_device(void)
{
	struct drm_i915_private *i915;
	struct pci_dev *pdev;
	int err;

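	/*
	 * The fake pci_dev and the drm_i915_private share one allocation:
	 * i915 lives immediately after the pdev (see pdev + 1 below) and
	 * both are freed together by release_dev().
	 */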
	pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
	if (!pdev)
		goto err;

	device_initialize(&pdev->dev);
	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
	pdev->dev.release = release_dev;
	dev_set_name(&pdev->dev, "mock");
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	/* hack to disable iommu for the fake device; force identity mapping */
	pdev->dev.archdata.iommu = (void *)-1;
#endif

	dev_pm_domain_set(&pdev->dev, &pm_domain);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	if (pm_runtime_enabled(&pdev->dev))
		WARN_ON(pm_runtime_get_sync(&pdev->dev));

	i915 = (struct drm_i915_private *)(pdev + 1);
	pci_set_drvdata(pdev, i915);

	err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
	if (err) {
		pr_err("Failed to initialise mock GEM device: err=%d\n", err);
		goto put_device;
	}
	i915->drm.pdev = pdev;
	i915->drm.dev_private = i915;

	/* Using the global GTT may ask questions about KMS users, so prepare */
	drm_mode_config_init(&i915->drm);

	mkwrite_device_info(i915)->gen = -1;

	mkwrite_device_info(i915)->page_sizes =
		I915_GTT_PAGE_SIZE_4K |
		I915_GTT_PAGE_SIZE_64K |
		I915_GTT_PAGE_SIZE_2M;

	mock_uncore_init(i915);
	i915_gem_init__mm(i915);

	init_waitqueue_head(&i915->gpu_error.wait_queue);
	init_waitqueue_head(&i915->gpu_error.reset_queue);

	i915->wq = alloc_ordered_workqueue("mock", 0);
	if (!i915->wq)
		goto err_drv;

	mock_init_contexts(i915);

	INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
	INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);

	i915->gt.awake = true;

	i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
	if (!i915->objects)
		goto err_wq;

	i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!i915->vmas)
		goto err_objects;

	i915->requests = KMEM_CACHE(mock_request,
				    SLAB_HWCACHE_ALIGN |
				    SLAB_RECLAIM_ACCOUNT |
				    SLAB_TYPESAFE_BY_RCU);
	if (!i915->requests)
		goto err_vmas;

	i915->dependencies = KMEM_CACHE(i915_dependency,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT);
	if (!i915->dependencies)
		goto err_requests;

	i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
	if (!i915->priorities)
		goto err_dependencies;

	INIT_LIST_HEAD(&i915->gt.timelines);
	INIT_LIST_HEAD(&i915->gt.active_rings);
	INIT_LIST_HEAD(&i915->gt.closed_vma);

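	/*
	 * Under struct_mutex, bring up the mock GGTT and a single mock
	 * render engine (RCS) along with its kernel context.
	 */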
	mutex_lock(&i915->drm.struct_mutex);

	mock_init_ggtt(i915);

	mkwrite_device_info(i915)->ring_mask = BIT(0);
	i915->kernel_context = mock_context(i915, NULL);
	if (!i915->kernel_context)
		goto err_unlock;

	i915->engine[RCS] = mock_engine(i915, "mock", RCS);
	if (!i915->engine[RCS])
		goto err_context;

	mutex_unlock(&i915->drm.struct_mutex);

	WARN_ON(i915_gemfs_init(i915));

	return i915;

err_context:
	i915_gem_contexts_fini(i915);
err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	kmem_cache_destroy(i915->priorities);
err_dependencies:
	kmem_cache_destroy(i915->dependencies);
err_requests:
	kmem_cache_destroy(i915->requests);
err_vmas:
	kmem_cache_destroy(i915->vmas);
err_objects:
	kmem_cache_destroy(i915->objects);
err_wq:
	destroy_workqueue(i915->wq);
err_drv:
	drm_mode_config_cleanup(&i915->drm);
	drm_dev_fini(&i915->drm);
put_device:
	put_device(&pdev->dev);
err:
	return NULL;
}