/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef I915_SCATTERLIST_H
#define I915_SCATTERLIST_H

#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#include "i915_gem.h"

struct drm_mm_node;
struct ttm_resource;

/*
 * Optimised SGL iterator for GEM objects
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;
		dma_addr_t dma;
	};
	unsigned int curr;
	unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
		s.sgp = NULL;
	} else if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		if (dma) {
			s.dma = sg_dma_address(s.sgp);
			s.max += sg_dma_len(s.sgp);
		} else {
			s.pfn = page_to_pfn(sg_page(s.sgp));
			s.max += s.sgp->length;
		}
	}

	return s;
}

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline int __sg_dma_page_count(const struct scatterlist *sg)
{
	return sg_dma_len(sg) >> PAGE_SHIFT;
}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg@+1). If that element is a chain pointer,
 *   follow it; otherwise return the pointer to that next element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}

/**
 * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
 * @__dp:	Device address (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 * @__step:	step size
 */
#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step)		\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
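
/*
 * Usage sketch (illustrative comment only, assuming "st" points to an
 * already dma-mapped sg_table and write_pte() is a hypothetical helper):
 *
 *	struct sgt_iter iter;
 *	dma_addr_t daddr;
 *
 *	__for_each_sgt_daddr(daddr, iter, st, PAGE_SIZE)
 *		write_pte(daddr);
 */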

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp:	page pointer (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
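
/*
 * Usage sketch (illustrative comment only, assuming "st" points to a
 * populated sg_table of struct pages):
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *
 *	for_each_sgt_page(page, iter, st)
 *		set_page_dirty(page);
 */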

/**
 * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist
 * @sg: The scatterlist
 *
 * Return: An unsigned int with segment sizes logically or'ed together.
 * A caller can use this information to determine what hardware page table
 * entry sizes can be used to map the memory represented by the scatterlist.
 */
static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg && sg_dma_len(sg)) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE));
		page_sizes |= sg_dma_len(sg);
		sg = __sg_next(sg);
	}

	return page_sizes;
}
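
/*
 * Example (illustrative comment only): for a mapping made up of two 64K
 * segments and one 4K segment, i915_sg_dma_sizes() returns SZ_64K | SZ_4K,
 * so a caller can test "page_sizes & SZ_64K" to decide whether 64K page
 * table entries are usable for part of the mapping.
 */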

static inline unsigned int i915_sg_segment_size(struct device *dev)
{
	size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev));

	/*
	 * For Xen PV guests, pages aren't contiguous in DMA (machine) address
	 * space.  The DMA API takes care of that both in dma_alloc_* (by
	 * calling into the hypervisor to make the pages contiguous) and in
	 * dma_map_* (by bounce buffering).  But i915 ignores the coherency
	 * aspects of the DMA API and thus can't cope with bounce buffering
	 * actually happening, so add a hack here to force small allocations
	 * and mappings when running in PV mode on Xen.
	 *
	 * Note this will still break if bounce buffering is required for other
	 * reasons, like confidential computing hypervisors or PCIe root ports
	 * with addressing limitations.
	 */
	if (xen_pv_domain())
		max = PAGE_SIZE;
	return round_down(max, PAGE_SIZE);
}

bool i915_sg_trim(struct sg_table *orig_st);

/**
 * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt
 */
struct i915_refct_sgt_ops {
	/**
	 * @release: Free the memory of the struct i915_refct_sgt
	 */
	void (*release)(struct kref *ref);
};

/**
 * struct i915_refct_sgt - A refcounted scatter-gather table
 * @kref: struct kref for refcounting
 * @table: struct sg_table holding the scatter-gather table itself. Note that
 * @table->sgl = NULL can be used to determine whether a scatter-gather table
 * is present or not.
 * @size: The size in bytes of the underlying memory buffer
 * @ops: The operations structure.
 */
struct i915_refct_sgt {
	struct kref kref;
	struct sg_table table;
	size_t size;
	const struct i915_refct_sgt_ops *ops;
};

/**
 * i915_refct_sgt_put - Put a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to put.
 */
static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
{
	if (rsgt)
		kref_put(&rsgt->kref, rsgt->ops->release);
}

/**
 * i915_refct_sgt_get - Get a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to get.
 */
static inline struct i915_refct_sgt *
i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
{
	kref_get(&rsgt->kref);
	return rsgt;
}
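
/*
 * Refcounting sketch (illustrative comment only; "shared_rsgt" is a
 * hypothetical, already initialized struct i915_refct_sgt):
 *
 *	struct i915_refct_sgt *rsgt = i915_refct_sgt_get(shared_rsgt);
 *
 *	... use rsgt->table ...
 *
 *	i915_refct_sgt_put(rsgt);
 */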

/**
 * __i915_refct_sgt_init - Initialize a refcounted sg-table with a custom
 * operations structure
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: Size in bytes of the underlying memory buffer.
 * @ops: A customized operations structure in case the refcounted sg-table
 * is embedded into another structure.
 */
static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
					 size_t size,
					 const struct i915_refct_sgt_ops *ops)
{
	kref_init(&rsgt->kref);
	rsgt->table.sgl = NULL;
	rsgt->size = size;
	rsgt->ops = ops;
}
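
/*
 * Embedding sketch (illustrative comment only; "my_buf" and its release
 * hook are hypothetical): a container embeds the refcounted sg-table and
 * supplies a release operation that frees the whole container.
 *
 *	struct my_buf {
 *		struct i915_refct_sgt rsgt;
 *	};
 *
 *	static void my_buf_release(struct kref *ref)
 *	{
 *		struct i915_refct_sgt *rsgt =
 *			container_of(ref, typeof(*rsgt), kref);
 *		struct my_buf *buf = container_of(rsgt, typeof(*buf), rsgt);
 *
 *		sg_free_table(&rsgt->table);
 *		kfree(buf);
 *	}
 *
 *	static const struct i915_refct_sgt_ops my_buf_ops = {
 *		.release = my_buf_release,
 *	};
 *
 *	__i915_refct_sgt_init(&buf->rsgt, size, &my_buf_ops);
 */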

void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);

struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start,
					      u32 page_alignment);

struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start,
						     u32 page_alignment);

#endif