/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* XXX kill agp_type! */
/*
 * Translate an i915 cache level into the AGP memory type passed to the
 * intel-gtt helpers when writing GTT PTEs for an object.
 *
 * Note the deliberate fallthrough from I915_CACHE_LLC_MLC to I915_CACHE_LLC:
 * on pre-gen6 hardware the extra MLC level does not exist, so the object is
 * simply mapped as cached.
 */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		if (INTEL_INFO(dev)->gen >= 6)
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fallthrough and request the PTE simply
		 * as cached.
		 */
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}

/*
 * Re-establish the GTT mappings for every object on the GTT list.
 *
 * The driver's portion of the GTT is first pointed at scratch pages, then
 * each bound object is clflushed (the CPU caches may be stale with respect
 * to memory) and its PTEs rewritten at its existing gtt_space offset,
 * preserving each object's current cache_level. Finishes with a chipset
 * flush so the new PTEs are visible to the GPU.
 *
 * NOTE(review): presumably called on resume, when the GTT contents have
 * been lost — confirm against the caller; not visible from this file.
 */
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_rebind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}

/*
 * Write GTT PTEs for @obj at its allocated gtt_space offset, using the AGP
 * memory type derived from the object's current cache_level.
 *
 * When mm.gtt->needs_dmar is set (presumably a DMA-remapping/IOMMU
 * configuration — TODO confirm), the backing pages are first mapped into a
 * scatterlist via intel_gtt_map_memory(), and obj->sg_list/obj->num_sg are
 * filled in for later use by rebind/unbind. Otherwise the page array is
 * inserted directly.
 *
 * Returns 0 on success, or the negative error from intel_gtt_map_memory().
 */
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
	int ret;

	if (dev_priv->mm.gtt->needs_dmar) {
		ret = intel_gtt_map_memory(obj->pages,
					   obj->base.size >> PAGE_SHIFT,
					   &obj->sg_list,
					   &obj->num_sg);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);

	return 0;
}

/*
 * Rewrite the GTT PTEs for an already-bound object with @cache_level.
 *
 * Unlike i915_gem_gtt_bind_object() this cannot fail: on needs_dmar
 * configurations it reuses the scatterlist created at bind time (hence the
 * BUG_ON if none exists) rather than mapping the pages again.
 */
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
				enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	if (dev_priv->mm.gtt->needs_dmar) {
		BUG_ON(!obj->sg_list);

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);
}

/*
 * Tear down the GTT mapping for @obj: point its PTE range back at scratch
 * pages and, if a scatterlist was created at bind time, unmap and drop it
 * (sg_list is NULLed so a later rebind path cannot reuse a stale mapping).
 */
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	if (obj->sg_list) {
		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
		obj->sg_list = NULL;
	}
}