1b908be54SMatthew Auld // SPDX-License-Identifier: MIT
2b908be54SMatthew Auld /*
3b908be54SMatthew Auld  * Copyright © 2019 Intel Corporation
4b908be54SMatthew Auld  */
5b908be54SMatthew Auld 
6b908be54SMatthew Auld #include "intel_memory_region.h"
7b908be54SMatthew Auld #include "gem/i915_gem_region.h"
8b908be54SMatthew Auld #include "gem/i915_gem_lmem.h"
9b908be54SMatthew Auld #include "i915_drv.h"
10b908be54SMatthew Auld 
/**
 * i915_gem_object_lmem_io_map - Map part of an lmem object into CPU space.
 * @obj: The object to map. Must be contiguous (enforced by the GEM_BUG_ON
 * below), since the returned mapping is a single linear span.
 * @n: Index of the first page to map. NOTE(review): assumed to be a page
 * index as consumed by i915_gem_object_get_dma_address() — confirm against
 * callers.
 * @size: Size of the mapping in bytes.
 *
 * Return: A write-combined __iomem pointer into the object's backing store
 * within its memory region's io mapping.
 */
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
			    unsigned long n,
			    unsigned long size)
{
	resource_size_t offset;

	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));

	/*
	 * Translate the page's DMA address into an offset relative to the
	 * start of the region, which is what the region's iomap expects.
	 */
	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}
254bc91dbdSAnusha Srivatsa 
262e53d7c1SThomas Hellström /**
272e53d7c1SThomas Hellström  * i915_gem_object_is_lmem - Whether the object is resident in
282e53d7c1SThomas Hellström  * lmem
292e53d7c1SThomas Hellström  * @obj: The object to check.
302e53d7c1SThomas Hellström  *
312e53d7c1SThomas Hellström  * Even if an object is allowed to migrate and change memory region,
322e53d7c1SThomas Hellström  * this function checks whether it will always be present in lmem when
332e53d7c1SThomas Hellström  * valid *or* if that's not the case, whether it's currently resident in lmem.
342e53d7c1SThomas Hellström  * For migratable and evictable objects, the latter only makes sense when
352e53d7c1SThomas Hellström  * the object is locked.
362e53d7c1SThomas Hellström  *
372e53d7c1SThomas Hellström  * Return: Whether the object migratable but resident in lmem, or not
382e53d7c1SThomas Hellström  * migratable and will be present in lmem when valid.
392e53d7c1SThomas Hellström  */
40b908be54SMatthew Auld bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
41b908be54SMatthew Auld {
422e53d7c1SThomas Hellström 	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
43b75947e6SMatthew Auld 
442e53d7c1SThomas Hellström #ifdef CONFIG_LOCKDEP
452e53d7c1SThomas Hellström 	if (i915_gem_object_migratable(obj) &&
462e53d7c1SThomas Hellström 	    i915_gem_object_evictable(obj))
472e53d7c1SThomas Hellström 		assert_object_held(obj);
482e53d7c1SThomas Hellström #endif
49b75947e6SMatthew Auld 	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
50b75947e6SMatthew Auld 		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
51b908be54SMatthew Auld }
52b908be54SMatthew Auld 
530ff37575SThomas Hellström /**
540ff37575SThomas Hellström  * __i915_gem_object_is_lmem - Whether the object is resident in
550ff37575SThomas Hellström  * lmem while in the fence signaling critical path.
560ff37575SThomas Hellström  * @obj: The object to check.
570ff37575SThomas Hellström  *
580ff37575SThomas Hellström  * This function is intended to be called from within the fence signaling
5991160c83SMatthew Brost  * path where the fence, or a pin, keeps the object from being migrated. For
6091160c83SMatthew Brost  * example during gpu reset or similar.
610ff37575SThomas Hellström  *
620ff37575SThomas Hellström  * Return: Whether the object is resident in lmem.
630ff37575SThomas Hellström  */
640ff37575SThomas Hellström bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
650ff37575SThomas Hellström {
660ff37575SThomas Hellström 	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
670ff37575SThomas Hellström 
680ff37575SThomas Hellström #ifdef CONFIG_LOCKDEP
6991160c83SMatthew Brost 	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
70*2dfa597dSThomas Hellström 		    i915_gem_object_evictable(obj));
710ff37575SThomas Hellström #endif
720ff37575SThomas Hellström 	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
730ff37575SThomas Hellström 		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
740ff37575SThomas Hellström }
750ff37575SThomas Hellström 
76d22632c8SMatthew Auld /**
77d22632c8SMatthew Auld  * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
78d22632c8SMatthew Auld  * minimum page size for the backing pages.
79d22632c8SMatthew Auld  * @i915: The i915 instance.
80d22632c8SMatthew Auld  * @size: The size in bytes for the object. Note that we need to round the size
81d22632c8SMatthew Auld  * up depending on the @page_size. The final object size can be fished out from
82d22632c8SMatthew Auld  * the drm GEM object.
83d22632c8SMatthew Auld  * @page_size: The requested minimum page size in bytes for this object. This is
84d22632c8SMatthew Auld  * useful if we need something bigger than the regions min_page_size due to some
85d22632c8SMatthew Auld  * hw restriction, or in some very specialised cases where it needs to be
86d22632c8SMatthew Auld  * smaller, where the internal fragmentation cost is too great when rounding up
87d22632c8SMatthew Auld  * the object size.
88d22632c8SMatthew Auld  * @flags: The optional BO allocation flags.
89d22632c8SMatthew Auld  *
90d22632c8SMatthew Auld  * Note that this interface assumes you know what you are doing when forcing the
91d22632c8SMatthew Auld  * @page_size. If this is smaller than the regions min_page_size then it can
92d22632c8SMatthew Auld  * never be inserted into any GTT, otherwise it might lead to undefined
93d22632c8SMatthew Auld  * behaviour.
94d22632c8SMatthew Auld  *
95d22632c8SMatthew Auld  * Return: The object pointer, which might be an ERR_PTR in the case of failure.
96d22632c8SMatthew Auld  */
97d22632c8SMatthew Auld struct drm_i915_gem_object *
98d22632c8SMatthew Auld __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
99d22632c8SMatthew Auld 				      resource_size_t size,
100d22632c8SMatthew Auld 				      resource_size_t page_size,
101d22632c8SMatthew Auld 				      unsigned int flags)
102d22632c8SMatthew Auld {
103d22632c8SMatthew Auld 	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
104d22632c8SMatthew Auld 					     size, page_size, flags);
105d22632c8SMatthew Auld }
106d22632c8SMatthew Auld 
107b908be54SMatthew Auld struct drm_i915_gem_object *
1087acbbc7cSDaniele Ceraolo Spurio i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
1097acbbc7cSDaniele Ceraolo Spurio 				      const void *data, size_t size)
1107acbbc7cSDaniele Ceraolo Spurio {
1117acbbc7cSDaniele Ceraolo Spurio 	struct drm_i915_gem_object *obj;
1127acbbc7cSDaniele Ceraolo Spurio 	void *map;
1137acbbc7cSDaniele Ceraolo Spurio 
1147acbbc7cSDaniele Ceraolo Spurio 	obj = i915_gem_object_create_lmem(i915,
1157acbbc7cSDaniele Ceraolo Spurio 					  round_up(size, PAGE_SIZE),
1167acbbc7cSDaniele Ceraolo Spurio 					  I915_BO_ALLOC_CONTIGUOUS);
1177acbbc7cSDaniele Ceraolo Spurio 	if (IS_ERR(obj))
1187acbbc7cSDaniele Ceraolo Spurio 		return obj;
1197acbbc7cSDaniele Ceraolo Spurio 
1207acbbc7cSDaniele Ceraolo Spurio 	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1217acbbc7cSDaniele Ceraolo Spurio 	if (IS_ERR(map)) {
1227acbbc7cSDaniele Ceraolo Spurio 		i915_gem_object_put(obj);
1237acbbc7cSDaniele Ceraolo Spurio 		return map;
1247acbbc7cSDaniele Ceraolo Spurio 	}
1257acbbc7cSDaniele Ceraolo Spurio 
1267acbbc7cSDaniele Ceraolo Spurio 	memcpy(map, data, size);
1277acbbc7cSDaniele Ceraolo Spurio 
1287acbbc7cSDaniele Ceraolo Spurio 	i915_gem_object_unpin_map(obj);
1297acbbc7cSDaniele Ceraolo Spurio 
1307acbbc7cSDaniele Ceraolo Spurio 	return obj;
1317acbbc7cSDaniele Ceraolo Spurio }
1327acbbc7cSDaniele Ceraolo Spurio 
1337acbbc7cSDaniele Ceraolo Spurio struct drm_i915_gem_object *
134b908be54SMatthew Auld i915_gem_object_create_lmem(struct drm_i915_private *i915,
135b908be54SMatthew Auld 			    resource_size_t size,
136b908be54SMatthew Auld 			    unsigned int flags)
137b908be54SMatthew Auld {
138b908be54SMatthew Auld 	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
139d22632c8SMatthew Auld 					     size, 0, flags);
140b908be54SMatthew Auld }
141