/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>

/*
 * iova structure: one contiguous range of allocated I/O virtual address
 * space, tracked as an inclusive [pfn_lo, pfn_hi] pair of page frame
 * numbers and linked into the owning domain's rbtree via @node.
 */
struct iova {
	struct rb_node node;
	unsigned long pfn_hi; /* Highest allocated pfn */
	unsigned long pfn_lo; /* Lowest allocated pfn */
};

/* Opaque per-CPU cache types; defined in the iova implementation. */
struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

/*
 * Per-size-class cache of recently freed IOVA ranges: a global "depot"
 * of magazines protected by @lock, plus per-CPU caches for lock-free
 * fast-path allocation. One instance per power-of-two size class
 * (see iova_domain::rcaches).
 */
struct iova_rcache {
	spinlock_t lock;			/* protects depot and depot_size */
	unsigned long depot_size;		/* number of magazines in @depot */
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};

/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t iova_rbtree_lock;	/* Lock to protect update of rbtree */
	struct rb_root rbroot;		/* iova domain rbtree root */
	struct rb_node *cached32_node;	/* Save last alloced node */
	unsigned long granule;		/* pfn granularity for this domain */
	unsigned long start_pfn;	/* Lower limit for this domain */
	unsigned long dma_32bit_pfn;	/* presumably set from init_iova_domain()'s
					 * pfn_32bit argument: upper pfn bound for
					 * 32-bit-addressable allocations — confirm
					 * against iova.c */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
};

/* Number of pfns spanned by @iova (bounds are inclusive, hence the +1). */
static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

/*
 * Bit shift of the domain's granule; equals log2(granule) provided the
 * granule is a power of two (assumed — __ffs() only finds the lowest
 * set bit).
 */
static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

/* Mask covering the intra-granule offset bits (granule assumed power of two). */
static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

/* Byte offset of @iova within its containing granule. */
static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}

/* Round @size up to a multiple of the domain's granule. */
static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

/* DMA address of the first page of @iova (lowest pfn scaled by the granule). */
static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

/* Pfn (in domain granule units) containing DMA address @iova. */
static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}

#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
/*
 * !CONFIG_IOMMU_IOVA stubs: allocation entry points report failure
 * (NULL / 0 / -ENOTSUPP); teardown and free paths are silent no-ops so
 * callers need no conditional compilation.
 */
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn,
				    unsigned long pfn_32bit)
{
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
#endif

#endif