/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "mock_engine.h"
#include "mock_context.h"
#include "mock_request.h"
#include "mock_gem_device.h"
#include "mock_gem_object.h"
#include "mock_gtt.h"
#include "mock_uncore.h"

void mock_device_flush(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id)
		mock_engine_flush(engine);

	i915_gem_retire_requests(i915);
}

static void mock_device_release(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	mutex_lock(&i915->drm.struct_mutex);
	mock_device_flush(i915);
	i915_gem_contexts_lost(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	cancel_delayed_work_sync(&i915->gt.retire_work);
	cancel_delayed_work_sync(&i915->gt.idle_work);
	i915_gem_drain_workqueue(i915);

	mutex_lock(&i915->drm.struct_mutex);
	for_each_engine(engine, i915, id)
		mock_engine_free(engine);
	i915_gem_contexts_fini(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drain_workqueue(i915->wq);
	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
	mock_fini_ggtt(i915);
	i915_gem_timeline_fini(&i915->gt.global_timeline);
	mutex_unlock(&i915->drm.struct_mutex);

	destroy_workqueue(i915->wq);

	kmem_cache_destroy(i915->priorities);
	kmem_cache_destroy(i915->dependencies);
	kmem_cache_destroy(i915->requests);
	kmem_cache_destroy(i915->vmas);
	kmem_cache_destroy(i915->objects);

	drm_dev_fini(&i915->drm);
	put_device(&i915->drm.pdev->dev);
}

static struct drm_driver mock_driver = {
	.name = "mock",
	.driver_features = DRIVER_GEM,
	.release = mock_device_release,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
};

static void release_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	kfree(pdev);
}

static void mock_retire_work_handler(struct work_struct *work)
{
}

static void mock_idle_work_handler(struct work_struct *work)
{
}

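/*
 * Minimal runtime-PM hooks for the mock device: there is no hardware
 * behind it, so suspend and resume simply defer to the generic helpers.
 * This is just enough for the pm_runtime_get_sync() call in
 * mock_gem_device() below to succeed.
 */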
static int pm_domain_resume(struct device *dev)
{
	return pm_generic_runtime_resume(dev);
}

static int pm_domain_suspend(struct device *dev)
{
	return pm_generic_runtime_suspend(dev);
}

static struct dev_pm_domain pm_domain = {
	.ops = {
		.runtime_suspend = pm_domain_suspend,
		.runtime_resume = pm_domain_resume,
	},
};

struct drm_i915_private *mock_gem_device(void)
{
	struct drm_i915_private *i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct pci_dev *pdev;
	int err;

	pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
	if (!pdev)
		goto err;

	device_initialize(&pdev->dev);
	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
	pdev->dev.release = release_dev;
	dev_set_name(&pdev->dev, "mock");
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	dev_pm_domain_set(&pdev->dev, &pm_domain);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	WARN_ON(pm_runtime_get_sync(&pdev->dev));

	i915 = (struct drm_i915_private *)(pdev + 1);
	pci_set_drvdata(pdev, i915);

	err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
	if (err) {
		pr_err("Failed to initialise mock GEM device: err=%d\n", err);
		goto put_device;
	}
	i915->drm.pdev = pdev;
	i915->drm.dev_private = i915;

	/* Using the global GTT may ask questions about KMS users, so prepare */
	drm_mode_config_init(&i915->drm);

	mkwrite_device_info(i915)->gen = -1;

	spin_lock_init(&i915->mm.object_stat_lock);
	mock_uncore_init(i915);

	init_waitqueue_head(&i915->gpu_error.wait_queue);
	init_waitqueue_head(&i915->gpu_error.reset_queue);

	i915->wq = alloc_ordered_workqueue("mock", 0);
	if (!i915->wq)
		goto put_device;

	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
	init_llist_head(&i915->mm.free_list);
	INIT_LIST_HEAD(&i915->mm.unbound_list);
	INIT_LIST_HEAD(&i915->mm.bound_list);

	mock_init_contexts(i915);

	INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
	INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);

	i915->gt.awake = true;

	i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
	if (!i915->objects)
		goto err_wq;

	i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!i915->vmas)
		goto err_objects;

	i915->requests = KMEM_CACHE(mock_request,
				    SLAB_HWCACHE_ALIGN |
				    SLAB_RECLAIM_ACCOUNT |
				    SLAB_TYPESAFE_BY_RCU);
	if (!i915->requests)
		goto err_vmas;

	i915->dependencies = KMEM_CACHE(i915_dependency,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT);
	if (!i915->dependencies)
		goto err_requests;

	i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
	if (!i915->priorities)
		goto err_dependencies;

	mutex_lock(&i915->drm.struct_mutex);
	INIT_LIST_HEAD(&i915->gt.timelines);
	err = i915_gem_timeline_init__global(i915);
	if (err) {
		mutex_unlock(&i915->drm.struct_mutex);
		goto err_priorities;
	}

	mock_init_ggtt(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	mkwrite_device_info(i915)->ring_mask = BIT(0);
	i915->engine[RCS] = mock_engine(i915, "mock", RCS);
	if (!i915->engine[RCS])
		goto err_priorities;

	i915->kernel_context = mock_context(i915, NULL);
	if (!i915->kernel_context)
		goto err_engine;

	return i915;

err_engine:
	for_each_engine(engine, i915, id)
		mock_engine_free(engine);
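	/*
	 * Error unwind: destroy the kmem caches and the workqueue in
	 * reverse order of creation before dropping the device reference.
	 */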
err_priorities:
	kmem_cache_destroy(i915->priorities);
err_dependencies:
	kmem_cache_destroy(i915->dependencies);
err_requests:
	kmem_cache_destroy(i915->requests);
err_vmas:
	kmem_cache_destroy(i915->vmas);
err_objects:
	kmem_cache_destroy(i915->objects);
err_wq:
	destroy_workqueue(i915->wq);
put_device:
	put_device(&pdev->dev);
err:
	return NULL;
}
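
/*
 * Example usage, as a minimal sketch: a mock selftest entry point
 * typically creates the device, runs its subtests against it and then
 * drops the final reference, which ends up in mock_device_release()
 * above. "mock_foo_selftests" and "igt_foo" are hypothetical names for
 * illustration only.
 *
 *	int mock_foo_selftests(void)
 *	{
 *		static const struct i915_subtest tests[] = {
 *			SUBTEST(igt_foo),
 *		};
 *		struct drm_i915_private *i915;
 *		int err;
 *
 *		i915 = mock_gem_device();
 *		if (!i915)
 *			return -ENOMEM;
 *
 *		err = i915_subtests(tests, i915);
 *
 *		drm_dev_unref(&i915->drm);
 *		return err;
 *	}
 */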