// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

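/**
 * i915_gem_object_lmem_io_map - Map a range of a contiguous lmem object
 * into CPU address space.
 * @obj: The object to map. Must be contiguous.
 * @n: The page offset into the object at which the mapping starts.
 * @size: The size in bytes of the mapping.
 *
 * Return: A write-combined I/O pointer into the object's backing store.
 */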
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
			    unsigned long n,
			    unsigned long size)
{
	resource_size_t offset;

	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));

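	/* Convert page n's DMA address into an offset from the region start. */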
	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}

/**
 * i915_gem_object_is_lmem - Whether the object is resident in lmem
 * @obj: The object to check.
 *
 * Even if an object is allowed to migrate and change memory region,
 * this function checks whether it will always be present in lmem when
 * valid *or*, if that's not the case, whether it's currently resident in lmem.
 * For migratable and evictable objects, the latter only makes sense when
 * the object is locked.
 *
 * Return: Whether the object is migratable but currently resident in lmem,
 * or not migratable and will be present in lmem when valid.
 */
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
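	/*
	 * An object that can both migrate and be evicted may change memory
	 * region at any time, so the result is only stable if the caller
	 * holds the object lock.
	 */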
	if (i915_gem_object_migratable(obj) &&
	    i915_gem_object_evictable(obj))
		assert_object_held(obj);
#endif
	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
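
/*
 * Example (illustrative only, not taken from an in-tree caller): checking
 * residency of a migratable object under the object lock:
 *
 *	if (!i915_gem_object_lock(obj, ww)) {
 *		lmem = i915_gem_object_is_lmem(obj);
 *		i915_gem_object_unlock(obj);
 *	}
 */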

/**
 * __i915_gem_object_is_lmem - Whether the object is resident in
 * lmem while in the fence signaling critical path.
 * @obj: The object to check.
 *
 * This function is intended to be called from within the fence signaling
 * path, where the fence, or a pin, keeps the object from being migrated, for
 * example during GPU reset or similar.
 *
 * Return: Whether the object is resident in lmem.
 */
bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
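	/*
	 * If all fences have already signaled, nothing prevents an evictable
	 * object from being migrated, so an unlocked check here would be racy.
	 */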
	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
		    i915_gem_object_evictable(obj));
#endif
	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}

/**
 * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
 * minimum page size for the backing pages.
 * @i915: The i915 instance.
 * @size: The size in bytes for the object. Note that we need to round the size
 * up depending on the @page_size. The final object size can be fished out from
 * the drm GEM object.
 * @page_size: The requested minimum page size in bytes for this object. This is
 * useful if we need something bigger than the region's min_page_size due to some
 * hw restriction, or in some very specialised cases where it needs to be
 * smaller, where the internal fragmentation cost is too great when rounding up
 * the object size.
 * @flags: The optional BO allocation flags.
 *
 * Note that this interface assumes you know what you are doing when forcing the
 * @page_size. If this is smaller than the region's min_page_size then it can
 * never be inserted into any GTT, otherwise it might lead to undefined
 * behaviour.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of failure.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
				      resource_size_t size,
				      resource_size_t page_size,
				      unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
					     size, page_size, flags);
}
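
/*
 * Example (illustrative only): forcing 64K backing pages when some piece of
 * hw requires it, regardless of the region's min_page_size:
 *
 *	obj = __i915_gem_object_create_lmem_with_ps(i915, size, SZ_64K, 0);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */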

struct drm_i915_gem_object *
i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
				      const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	void *map;

	obj = i915_gem_object_create_lmem(i915,
					  round_up(size, PAGE_SIZE),
					  I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return obj;

	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		i915_gem_object_put(obj);
		return map;
	}

	memcpy(map, data, size);

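	/* Make sure the copy has landed before dropping the mapping. */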
	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	return obj;
}

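/**
 * i915_gem_object_create_lmem - Create lmem object using the region's
 * default min_page_size for the backing pages.
 * @i915: The i915 instance.
 * @size: The size in bytes for the object.
 * @flags: The optional BO allocation flags.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of failure.
 */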
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
			    resource_size_t size,
			    unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
					     size, 0, flags);
}