/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef I915_SCATTERLIST_H
#define I915_SCATTERLIST_H

#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

#include "i915_gem.h"

struct drm_mm_node;

/*
 * Optimised SGL iterator for GEM objects
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;
		dma_addr_t dma;
	};
	unsigned int curr;
	unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
		s.sgp = NULL;
	} else if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		if (dma) {
			s.dma = sg_dma_address(s.sgp);
			s.max += sg_dma_len(s.sgp);
		} else {
			s.pfn = page_to_pfn(sg_page(s.sgp));
			s.max += s.sgp->length;
		}
	}

	return s;
}

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline int __sg_dma_page_count(const struct scatterlist *sg)
{
	return sg_dma_len(sg) >> PAGE_SHIFT;
}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg@+1). If that's a chain pointer, follow it;
 *   otherwise just return the pointer to the current element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}

/**
 * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
 * @__dp: Device address (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 * @__step: step size
 */
#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step)		\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp: page pointer (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
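
/*
 * Usage sketch (illustration only, not part of the upstream header): count
 * the CPU pages backing an sg_table with for_each_sgt_page(). The helper
 * name below is made up for this example; real callers typically walk the
 * "pages" sg_table of a GEM object in the same way.
 */
static inline unsigned int i915_sgt_example_count_pages(struct sg_table *sgt)
{
	struct sgt_iter iter;
	struct page *page;
	unsigned int count = 0;

	/* Visits one struct page per PAGE_SIZE chunk described by @sgt. */
	for_each_sgt_page(page, iter, sgt)
		count++;

	return count;
}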

/**
 * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist
 * @sg: The scatterlist
 *
 * Return: An unsigned int with segment sizes logically or'ed together.
 * A caller can use this information to determine what hardware page table
 * entry sizes can be used to map the memory represented by the scatterlist.
 */
static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg && sg_dma_len(sg)) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE));
		page_sizes |= sg_dma_len(sg);
		sg = __sg_next(sg);
	}

	return page_sizes;
}

static inline unsigned int i915_sg_segment_size(void)
{
	unsigned int size = swiotlb_max_segment();

	if (size == 0)
		size = UINT_MAX;

	size = rounddown(size, PAGE_SIZE);
	/* swiotlb_max_segment_size can return 1 byte when it means one page. */
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return size;
}

bool i915_sg_trim(struct sg_table *orig_st);

struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
				      u64 region_start);
#endif
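
/*
 * Usage sketch for i915_sg_dma_sizes() (illustration only, not from the
 * upstream header): the OR of all DMA segment lengths records which length
 * bits occur in the mapping, which a caller can compare against the GTT
 * page sizes the hardware supports, e.g.
 *
 *	unsigned int sizes = i915_sg_dma_sizes(pages->sgl);
 *
 *	if (sizes & I915_GTT_PAGE_SIZE_64K)
 *		; // at least one segment length has the 64K bit set
 *
 * "pages" and the I915_GTT_PAGE_SIZE_64K check are placeholders here; the
 * supported-page-size masks live elsewhere in the driver.
 */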