1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/pm_domain.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/iommu.h>
28 
29 #include <drm/drm_managed.h>
30 
31 #include "gt/intel_gt.h"
32 #include "gt/intel_gt_requests.h"
33 #include "gt/mock_engine.h"
34 #include "intel_memory_region.h"
35 #include "intel_region_ttm.h"
36 
37 #include "mock_request.h"
38 #include "mock_gem_device.h"
39 #include "mock_gtt.h"
40 #include "mock_uncore.h"
41 #include "mock_region.h"
42 
43 #include "gem/selftests/mock_context.h"
44 #include "gem/selftests/mock_gem_object.h"
45 
46 void mock_device_flush(struct drm_i915_private *i915)
47 {
48 	struct intel_gt *gt = to_gt(i915);
49 	struct intel_engine_cs *engine;
50 	enum intel_engine_id id;
51 
52 	do {
53 		for_each_engine(engine, gt, id)
54 			mock_engine_flush(engine);
55 	} while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT,
56 						  NULL));
57 }
58 
/*
 * drm_driver.release callback for the mock device.
 *
 * do_release is only set at the very end of a successful mock_gem_device(),
 * so a partially constructed device (destroyed on an init error path, which
 * already unwound its own state) skips the teardown and only frees the
 * copied module parameters.
 */
static void mock_device_release(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	if (!i915->do_release)
		goto out;

	/* Retire everything before pulling the GT driver down */
	mock_device_flush(i915);
	intel_gt_driver_remove(to_gt(i915));

	/* Drain pending work and deferred object frees before freeing state */
	i915_gem_drain_workqueue(i915);
	i915_gem_drain_freed_objects(i915);

	mock_fini_ggtt(to_gt(i915)->ggtt);
	destroy_workqueue(i915->wq);

	intel_region_ttm_device_fini(i915);
	intel_gt_driver_late_release(to_gt(i915));
	intel_memory_regions_driver_release(i915);

	drm_mode_config_cleanup(&i915->drm);

out:
	i915_params_free(&i915->params);
}
84 
/*
 * Minimal DRM driver backing the mock device: only GEM is advertised, and
 * all teardown happens in the release hook once the last reference drops.
 */
static const struct drm_driver mock_driver = {
	.name = "mock",
	.driver_features = DRIVER_GEM,
	.release = mock_device_release,
};
90 
/*
 * Device release callback for the fake PCI device: frees the pci_dev that
 * mock_gem_device() kzalloc'ed, once the final reference is dropped.
 */
static void release_dev(struct device *dev)
{
	kfree(to_pci_dev(dev));
}
97 
/* Runtime-PM resume for the fake device: defer to the generic helper */
static int pm_domain_resume(struct device *dev)
{
	return pm_generic_runtime_resume(dev);
}
102 
/* Runtime-PM suspend for the fake device: defer to the generic helper */
static int pm_domain_suspend(struct device *dev)
{
	return pm_generic_runtime_suspend(dev);
}
107 
/*
 * PM domain attached to the fake PCI device so that runtime-PM callbacks
 * from the driver core are routed to the generic helpers rather than a
 * (nonexistent) bus implementation.
 */
static struct dev_pm_domain pm_domain = {
	.ops = {
		.runtime_suspend = pm_domain_suspend,
		.runtime_resume = pm_domain_resume,
	},
};
114 
/*
 * mock_gem_device - build a dummy i915 device with no hardware backing
 *
 * Constructs a fake PCI device, wires up runtime PM, probes a system-memory
 * region only, sets up a mock GGTT and a single mock render engine so that
 * the GEM/GT selftests can run without real hardware.
 *
 * Returns the new device, or NULL on failure.  On failure everything
 * initialised so far is unwound here (do_release stays false, so
 * mock_device_release() will skip its full teardown).
 */
struct drm_i915_private *mock_gem_device(void)
{
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
	struct drm_i915_private *i915;
	struct pci_dev *pdev;
	int ret;

	/* Fake PCI device; freed by release_dev() on the final put_device() */
	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return NULL;
	device_initialize(&pdev->dev);
	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
	pdev->dev.release = release_dev;
	dev_set_name(&pdev->dev, "mock");
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	/* HACK to disable iommu for the fake device; force identity mapping */
	pdev->dev.iommu = &fake_iommu;
#endif
	/*
	 * Open a devres group so mock_destroy_device() can release every
	 * devm-managed resource (including the drm_dev allocation below).
	 */
	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
		put_device(&pdev->dev);
		return NULL;
	}

	i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915)) {
		pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
		devres_release_group(&pdev->dev, NULL);
		put_device(&pdev->dev);

		return NULL;
	}

	pci_set_drvdata(pdev, i915);

	/* Route runtime PM through the generic helpers and take a wakeref */
	dev_pm_domain_set(&pdev->dev, &pm_domain);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	if (pm_runtime_enabled(&pdev->dev))
		WARN_ON(pm_runtime_get_sync(&pdev->dev));


	i915_params_copy(&i915->params, &i915_modparams);

	intel_runtime_pm_init_early(&i915->runtime_pm);
	/* wakeref tracking has significant overhead */
	i915->runtime_pm.no_wakeref_tracking = true;

	/* Using the global GTT may ask questions about KMS users, so prepare */
	drm_mode_config_init(&i915->drm);

	/* NOTE(review): ver == -1 appears to flag a mock device — confirm */
	mkwrite_device_info(i915)->graphics.ver = -1;

	mkwrite_device_info(i915)->page_sizes =
		I915_GTT_PAGE_SIZE_4K |
		I915_GTT_PAGE_SIZE_64K |
		I915_GTT_PAGE_SIZE_2M;

	/* System memory only; there is no fake local memory to probe */
	mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
	intel_memory_regions_hw_probe(i915);

	spin_lock_init(&i915->gpu_error.lock);

	i915_gem_init__mm(i915);
	intel_gt_init_early(to_gt(i915), i915);
	__intel_gt_init_early(to_gt(i915), i915);
	mock_uncore_init(&i915->uncore, i915);
	/* Pin the GT permanently awake; there is no hw to power manage */
	atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
	to_gt(i915)->awake = -ENODEV;

	ret = intel_region_ttm_device_init(i915);
	if (ret)
		goto err_ttm;

	i915->wq = alloc_ordered_workqueue("mock", 0);
	if (!i915->wq)
		goto err_drv;

	mock_init_contexts(i915);

	/* allocate the ggtt */
	ret = intel_gt_assign_ggtt(to_gt(i915));
	if (ret)
		goto err_unlock;

	mock_init_ggtt(to_gt(i915));
	to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);

	/* Expose exactly one engine: a mock render engine on RCS0 */
	mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
	to_gt(i915)->info.engine_mask = BIT(0);

	to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
	if (!to_gt(i915)->engine[RCS0])
		goto err_unlock;

	if (mock_engine_init(to_gt(i915)->engine[RCS0]))
		goto err_context;

	__clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags);
	intel_engines_driver_register(i915);

	/* Fully constructed: allow mock_device_release() full teardown */
	i915->do_release = true;
	ida_init(&i915->selftest.mock_region_instances);

	return i915;

	/* Unwind ladder: each label undoes the steps above its goto site */
err_context:
	intel_gt_driver_remove(to_gt(i915));
err_unlock:
	destroy_workqueue(i915->wq);
err_drv:
	intel_region_ttm_device_fini(i915);
err_ttm:
	intel_gt_driver_late_release(to_gt(i915));
	intel_memory_regions_driver_release(i915);
	drm_mode_config_cleanup(&i915->drm);
	mock_destroy_device(i915);

	return NULL;
}
239 
240 void mock_destroy_device(struct drm_i915_private *i915)
241 {
242 	struct device *dev = i915->drm.dev;
243 
244 	devres_release_group(dev, NULL);
245 	put_device(dev);
246 }
247