/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef I915_SCATTERLIST_H
#define I915_SCATTERLIST_H

#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

#include "i915_gem.h"

/*
 * Optimised SGL iterator for GEM objects
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;
		dma_addr_t dma;
	};
	unsigned int curr;
	unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
		s.sgp = NULL;
	} else if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		if (dma) {
			s.dma = sg_dma_address(s.sgp);
			s.max += sg_dma_len(s.sgp);
		} else {
			s.pfn = page_to_pfn(sg_page(s.sgp));
			s.max += s.sgp->length;
		}
	}

	return s;
}

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline int __sg_dma_page_count(const struct scatterlist *sg)
{
	return sg_dma_len(sg) >> PAGE_SHIFT;
}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg@+1). If that's a chain pointer, follow it;
 *   otherwise just return the pointer to the current element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}

/**
 * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
 * @__dp: Device address (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 * @__step: step size
 */
#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step)		\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp: page pointer (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
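
/*
 * Illustrative usage sketch (an editorial addition, not part of the upstream
 * header): minimal, hypothetical helpers showing how the iterators above are
 * consumed. The helper names and the PAGE_SIZE step are assumptions made for
 * this example; real callers live elsewhere in the i915 GEM code.
 */
static inline unsigned long i915_sgt_example_sum_pfns(struct sg_table *sgt)
{
	struct sgt_iter iter;
	struct page *page;
	unsigned long sum = 0;

	/* Visit each PAGE_SIZE chunk of the backing store as a struct page. */
	for_each_sgt_page(page, iter, sgt)
		sum += page_to_pfn(page);

	return sum;
}

static inline dma_addr_t i915_sgt_example_first_daddr(struct sg_table *sgt)
{
	struct sgt_iter iter;
	dma_addr_t daddr;

	/* Step through the DMA-mapped range one page at a time. */
	__for_each_sgt_daddr(daddr, iter, sgt, PAGE_SIZE)
		return daddr;

	/* Empty or unmapped table: no device address to report. */
	return 0;
}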

static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
		page_sizes |= sg->length;
		sg = __sg_next(sg);
	}

	return page_sizes;
}

static inline unsigned int i915_sg_segment_size(void)
{
	unsigned int size = swiotlb_max_segment();

	if (size == 0)
		size = UINT_MAX;

	size = rounddown(size, PAGE_SIZE);
	/* swiotlb_max_segment can return 1 byte when it means one page. */
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return size;
}

bool i915_sg_trim(struct sg_table *orig_st);

#endif