1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/pm_domain.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/iommu.h>
28 
29 #include <drm/drm_managed.h>
30 
31 #include "gt/intel_gt.h"
32 #include "gt/intel_gt_requests.h"
33 #include "gt/mock_engine.h"
34 #include "intel_memory_region.h"
35 #include "intel_region_ttm.h"
36 
37 #include "mock_request.h"
38 #include "mock_gem_device.h"
39 #include "mock_gtt.h"
40 #include "mock_uncore.h"
41 #include "mock_region.h"
42 
43 #include "gem/selftests/mock_context.h"
44 #include "gem/selftests/mock_gem_object.h"
45 
46 void mock_device_flush(struct drm_i915_private *i915)
47 {
48 	struct intel_gt *gt = to_gt(i915);
49 	struct intel_engine_cs *engine;
50 	enum intel_engine_id id;
51 
52 	do {
53 		for_each_engine(engine, gt, id)
54 			mock_engine_flush(engine);
55 	} while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT,
56 						  NULL));
57 }
58 
/*
 * drm_driver.release callback: unwind everything mock_gem_device() set up.
 *
 * i915->do_release is only set once mock_gem_device() has fully succeeded,
 * so a partially-constructed device (torn down by its error paths instead)
 * skips straight to freeing the module parameters.
 */
static void mock_device_release(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	if (!i915->do_release)
		goto out;

	/* Retire all outstanding mock requests before dismantling the GT */
	mock_device_flush(i915);
	intel_gt_driver_remove(to_gt(i915));

	/* Ensure no deferred work is still queued before destroying the wqs */
	i915_gem_drain_workqueue(i915);

	mock_fini_ggtt(to_gt(i915)->ggtt);
	destroy_workqueue(i915->unordered_wq);
	destroy_workqueue(i915->wq);

	/* Release in reverse order of setup in mock_gem_device() */
	intel_region_ttm_device_fini(i915);
	intel_gt_driver_late_release_all(i915);
	intel_memory_regions_driver_release(i915);

	drm_mode_config_cleanup(&i915->drm);

out:
	i915_params_free(&i915->params);
}
84 
/*
 * Minimal drm_driver for the mock device; .release unwinds the state
 * built by mock_gem_device() once the last drm_dev reference is dropped.
 */
static const struct drm_driver mock_driver = {
	.name = "mock",
	.driver_features = DRIVER_GEM,
	.release = mock_device_release,
};
90 
/* device.release callback: free the pci_dev kzalloc'ed in mock_gem_device() */
static void release_dev(struct device *dev)
{
	kfree(to_pci_dev(dev));
}
97 
/* Runtime-PM resume hook: defer entirely to the generic implementation */
static int pm_domain_resume(struct device *dev)
{
	return pm_generic_runtime_resume(dev);
}
102 
/* Runtime-PM suspend hook: defer entirely to the generic implementation */
static int pm_domain_suspend(struct device *dev)
{
	return pm_generic_runtime_suspend(dev);
}
107 
/*
 * PM domain attached to the fake device so runtime-PM callbacks are routed
 * to the generic helpers rather than a real bus implementation.
 */
static struct dev_pm_domain pm_domain = {
	.ops = {
		.runtime_suspend = pm_domain_suspend,
		.runtime_resume = pm_domain_resume,
	},
};
114 
115 static void mock_gt_probe(struct drm_i915_private *i915)
116 {
117 	i915->gt[0] = &i915->gt0;
118 	i915->gt[0]->name = "Mock GT";
119 }
120 
/**
 * mock_gem_device - create a fake i915 device for selftests
 *
 * Builds a self-contained drm_i915_private around a hand-rolled pci_dev,
 * with a single mock engine (RCS0), a mock ggtt and SMEM-only memory
 * regions, so GEM code can be exercised without real hardware.
 *
 * Returns the new device on success, or NULL on failure; on failure all
 * partially-initialised state is unwound before returning.
 */
struct drm_i915_private *mock_gem_device(void)
{
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
	struct drm_i915_private *i915;
	struct intel_device_info *i915_info;
	struct pci_dev *pdev;
	unsigned int i;
	int ret;

	/* Fake PCI device, freed via release_dev() when its refcount drops */
	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return NULL;
	device_initialize(&pdev->dev);
	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
	pdev->dev.release = release_dev;
	dev_set_name(&pdev->dev, "mock");
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	/* HACK to disable iommu for the fake device; force identity mapping */
	pdev->dev.iommu = &fake_iommu;
#endif
	/* Own devres group so mock_destroy_device() can release everything */
	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
		put_device(&pdev->dev);
		return NULL;
	}

	/* i915 itself is devm-allocated; freed when the group is released */
	i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915)) {
		pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
		devres_release_group(&pdev->dev, NULL);
		put_device(&pdev->dev);

		return NULL;
	}

	pci_set_drvdata(pdev, i915);

	/* Route runtime PM through the generic helpers and hold a wakeref */
	dev_pm_domain_set(&pdev->dev, &pm_domain);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	if (pm_runtime_enabled(&pdev->dev))
		WARN_ON(pm_runtime_get_sync(&pdev->dev));


	i915_params_copy(&i915->params, &i915_modparams);

	intel_runtime_pm_init_early(&i915->runtime_pm);
	/* wakeref tracking has significant overhead */
	i915->runtime_pm.no_wakeref_tracking = true;

	/* Using the global GTT may ask questions about KMS users, so prepare */
	drm_mode_config_init(&i915->drm);

	RUNTIME_INFO(i915)->graphics.ip.ver = -1;

	RUNTIME_INFO(i915)->page_sizes =
		I915_GTT_PAGE_SIZE_4K |
		I915_GTT_PAGE_SIZE_64K |
		I915_GTT_PAGE_SIZE_2M;

	RUNTIME_INFO(i915)->memory_regions = REGION_SMEM;

	/* simply use legacy cache level for mock device */
	i915_info = (struct intel_device_info *)INTEL_INFO(i915);
	i915_info->max_pat_index = 3;
	for (i = 0; i < I915_MAX_CACHE_LEVEL; i++)
		i915_info->cachelevel_to_pat[i] = i;

	intel_memory_regions_hw_probe(i915);

	spin_lock_init(&i915->gpu_error.lock);

	i915_gem_init__mm(i915);
	intel_root_gt_init_early(i915);
	mock_uncore_init(&i915->uncore, i915);
	atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
	to_gt(i915)->awake = -ENODEV;
	mock_gt_probe(i915);

	ret = intel_region_ttm_device_init(i915);
	if (ret)
		goto err_ttm;

	i915->wq = alloc_ordered_workqueue("mock", 0);
	if (!i915->wq)
		goto err_drv;

	i915->unordered_wq = alloc_workqueue("mock-unordered", 0, 0);
	if (!i915->unordered_wq)
		goto err_wq;

	mock_init_contexts(i915);

	/* allocate the ggtt */
	ret = intel_gt_assign_ggtt(to_gt(i915));
	if (ret)
		goto err_unlock;

	mock_init_ggtt(to_gt(i915));
	to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);

	/* Expose exactly one engine: the mock render engine on RCS0 */
	RUNTIME_INFO(i915)->platform_engine_mask = BIT(0);
	to_gt(i915)->info.engine_mask = BIT(0);

	to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
	if (!to_gt(i915)->engine[RCS0])
		goto err_unlock;

	if (mock_engine_init(to_gt(i915)->engine[RCS0]))
		goto err_context;

	__clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags);
	intel_engines_driver_register(i915);

	/* From here on, mock_device_release() performs the full teardown */
	i915->do_release = true;
	ida_init(&i915->selftest.mock_region_instances);

	return i915;

	/*
	 * Error unwind, in reverse order of setup above.
	 * NOTE(review): mock_init_ggtt() is not explicitly undone on the
	 * err_context/err_unlock paths — presumably covered by the devres
	 * group released in mock_destroy_device(); verify.
	 */
err_context:
	intel_gt_driver_remove(to_gt(i915));
err_unlock:
	destroy_workqueue(i915->unordered_wq);
err_wq:
	destroy_workqueue(i915->wq);
err_drv:
	intel_region_ttm_device_fini(i915);
err_ttm:
	intel_gt_driver_late_release_all(i915);
	intel_memory_regions_driver_release(i915);
	drm_mode_config_cleanup(&i915->drm);
	mock_destroy_device(i915);

	return NULL;
}
260 
261 void mock_destroy_device(struct drm_i915_private *i915)
262 {
263 	struct device *dev = i915->drm.dev;
264 
265 	devres_release_group(dev, NULL);
266 	put_device(dev);
267 }
268