/*
 * Copyright (c) 2012-2015, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HOST1X_DEV_H
#define HOST1X_DEV_H

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "cdma.h"
#include "channel.h"
#include "intr.h"
#include "job.h"
#include "syncpt.h"

struct host1x_syncpt;
struct host1x_syncpt_base;
struct host1x_channel;
struct host1x_cdma;
struct host1x_job;
struct push_buffer;
struct output;
struct dentry;

/* hardware channel setup and job submission */
struct host1x_channel_ops {
	int (*init)(struct host1x_channel *channel, struct host1x *host,
		    unsigned int id);
	int (*submit)(struct host1x_job *job);
};

/* command DMA (CDMA) control, including timeout recovery */
struct host1x_cdma_ops {
	void (*start)(struct host1x_cdma *cdma);
	void (*stop)(struct host1x_cdma *cdma);
	void (*flush)(struct host1x_cdma *cdma);
	int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
	void (*timeout_destroy)(struct host1x_cdma *cdma);
	void (*freeze)(struct host1x_cdma *cdma);
	void (*resume)(struct host1x_cdma *cdma, u32 getptr);
	void (*timeout_cpu_incr)(struct host1x_cdma *cdma, u32 getptr,
				 u32 syncpt_incrs, u32 syncval, u32 nr_slots);
};

/* push buffer setup */
struct host1x_pushbuffer_ops {
	void (*init)(struct push_buffer *pb);
};

/* debugfs output hooks */
struct host1x_debug_ops {
	void (*debug_init)(struct dentry *de);
	void (*show_channel_cdma)(struct host1x *host,
				  struct host1x_channel *ch,
				  struct output *o);
	void (*show_channel_fifo)(struct host1x *host,
				  struct host1x_channel *ch,
				  struct output *o);
	void (*show_mlocks)(struct host1x *host, struct output *output);
};

/* syncpoint register access */
struct host1x_syncpt_ops {
	void (*restore)(struct host1x_syncpt *syncpt);
	void (*restore_wait_base)(struct host1x_syncpt *syncpt);
	void (*load_wait_base)(struct host1x_syncpt *syncpt);
	u32 (*load)(struct host1x_syncpt *syncpt);
	int (*cpu_incr)(struct host1x_syncpt *syncpt);
	void (*assign_to_channel)(struct host1x_syncpt *syncpt,
				  struct host1x_channel *channel);
	void (*enable_protection)(struct host1x *host);
};

/* syncpoint threshold interrupt management */
struct host1x_intr_ops {
	int (*init_host_sync)(struct host1x *host, u32 cpm,
			      void (*syncpt_thresh_work)(struct work_struct *work));
	void (*set_syncpt_threshold)(
		struct host1x *host, unsigned int id, u32 thresh);
	void (*enable_syncpt_intr)(struct host1x *host, unsigned int id);
	void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
	void (*disable_all_syncpt_intrs)(struct host1x *host);
	int (*free_syncpt_irq)(struct host1x *host);
};

/* stream ID register mapping for one memory client */
struct host1x_sid_entry {
	unsigned int base;
	unsigned int offset;
	unsigned int limit;
};

struct host1x_info {
	unsigned int nb_channels; /* host1x: number of channels supported */
	unsigned int nb_pts; /* host1x: number of syncpoints supported */
	unsigned int nb_bases; /* host1x: number of syncpoint bases supported */
	unsigned int nb_mlocks; /* host1x: number of mlocks supported */
	int (*init)(struct host1x *host1x); /* initialize per SoC ops */
	unsigned int sync_offset; /* offset of syncpoint registers */
	u64 dma_mask; /* mask of addressable memory */
	bool has_hypervisor; /* has hypervisor registers */
	unsigned int num_sid_entries; /* number of entries in sid_table */
	const struct host1x_sid_entry *sid_table;
};

struct host1x {
	const struct host1x_info *info;

	void __iomem *regs;
	void __iomem *hv_regs; /* hypervisor region */
	struct host1x_syncpt *syncpt;
	struct host1x_syncpt_base *bases;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	struct iommu_group *group;
	struct iommu_domain *domain;
	struct iova_domain iova;
	dma_addr_t iova_end;

	struct mutex intr_mutex;
	int intr_syncpt_irq;

	const struct host1x_syncpt_ops *syncpt_op;
	const struct host1x_intr_ops *intr_op;
	const struct host1x_channel_ops *channel_op;
	const struct host1x_cdma_ops *cdma_op;
	const struct host1x_pushbuffer_ops *cdma_pb_op;
	const struct host1x_debug_ops *debug_op;

	struct host1x_syncpt *nop_sp;

	struct mutex syncpt_mutex;

	struct host1x_channel_list channel_list;

	struct dentry *debugfs;

	struct mutex devices_lock;
	struct list_head devices;

	struct list_head list;
};

void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r);
void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
u32 host1x_sync_readl(struct host1x *host1x, u32 r);
void host1x_ch_writel(struct host1x_channel *ch, u32 r, u32 v);
u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);

static inline void host1x_hw_syncpt_restore(struct host1x *host,
					    struct host1x_syncpt *sp)
{
	host->syncpt_op->restore(sp);
}

static inline void host1x_hw_syncpt_restore_wait_base(struct host1x *host,
						      struct host1x_syncpt *sp)
{
	host->syncpt_op->restore_wait_base(sp);
}

static inline void host1x_hw_syncpt_load_wait_base(struct host1x *host,
						   struct host1x_syncpt *sp)
{
	host->syncpt_op->load_wait_base(sp);
}

static inline u32 host1x_hw_syncpt_load(struct host1x *host,
					struct host1x_syncpt *sp)
{
	return host->syncpt_op->load(sp);
}

static inline int host1x_hw_syncpt_cpu_incr(struct host1x *host,
					    struct host1x_syncpt *sp)
{
	return host->syncpt_op->cpu_incr(sp);
}

static inline void host1x_hw_syncpt_assign_to_channel(
	struct host1x *host, struct host1x_syncpt *sp,
	struct host1x_channel *ch)
{
	host->syncpt_op->assign_to_channel(sp, ch);
}

static inline void host1x_hw_syncpt_enable_protection(struct host1x *host)
{
	host->syncpt_op->enable_protection(host);
}

static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
			void (*syncpt_thresh_work)(struct work_struct *))
{
	return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work);
}

static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
						       unsigned int id,
						       u32 thresh)
{
	host->intr_op->set_syncpt_threshold(host, id, thresh);
}

static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
						     unsigned int id)
{
	host->intr_op->enable_syncpt_intr(host, id);
}
static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
						      unsigned int id)
{
	host->intr_op->disable_syncpt_intr(host, id);
}

static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host)
{
	host->intr_op->disable_all_syncpt_intrs(host);
}

static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
{
	return host->intr_op->free_syncpt_irq(host);
}

static inline int host1x_hw_channel_init(struct host1x *host,
					 struct host1x_channel *channel,
					 unsigned int id)
{
	return host->channel_op->init(channel, host, id);
}

static inline int host1x_hw_channel_submit(struct host1x *host,
					   struct host1x_job *job)
{
	return host->channel_op->submit(job);
}

static inline void host1x_hw_cdma_start(struct host1x *host,
					struct host1x_cdma *cdma)
{
	host->cdma_op->start(cdma);
}

static inline void host1x_hw_cdma_stop(struct host1x *host,
				       struct host1x_cdma *cdma)
{
	host->cdma_op->stop(cdma);
}

static inline void host1x_hw_cdma_flush(struct host1x *host,
					struct host1x_cdma *cdma)
{
	host->cdma_op->flush(cdma);
}

static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
					      struct host1x_cdma *cdma,
					      unsigned int syncpt)
{
	return host->cdma_op->timeout_init(cdma, syncpt);
}

static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
						  struct host1x_cdma *cdma)
{
	host->cdma_op->timeout_destroy(cdma);
}

static inline void host1x_hw_cdma_freeze(struct host1x *host,
					 struct host1x_cdma *cdma)
{
	host->cdma_op->freeze(cdma);
}

static inline void host1x_hw_cdma_resume(struct host1x *host,
					 struct host1x_cdma *cdma, u32 getptr)
{
	host->cdma_op->resume(cdma, getptr);
}

static inline void host1x_hw_cdma_timeout_cpu_incr(struct host1x *host,
						   struct host1x_cdma *cdma,
						   u32 getptr,
						   u32 syncpt_incrs,
						   u32 syncval, u32 nr_slots)
{
	host->cdma_op->timeout_cpu_incr(cdma, getptr, syncpt_incrs, syncval,
					nr_slots);
}

static inline void host1x_hw_pushbuffer_init(struct host1x *host,
					     struct push_buffer *pb)
{
	host->cdma_pb_op->init(pb);
}

static inline void host1x_hw_debug_init(struct host1x *host, struct dentry *de)
{
	if (host->debug_op && host->debug_op->debug_init)
		host->debug_op->debug_init(de);
}

static inline void host1x_hw_show_channel_cdma(struct host1x *host,
					       struct host1x_channel *channel,
					       struct output *o)
{
	host->debug_op->show_channel_cdma(host, channel, o);
}

static inline void host1x_hw_show_channel_fifo(struct host1x *host,
					       struct host1x_channel *channel,
					       struct output *o)
{
	host->debug_op->show_channel_fifo(host, channel, o);
}

static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
{
	host->debug_op->show_mlocks(host, o);
}

extern struct platform_driver tegra_mipi_driver;

#endif
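
/*
 * Illustrative sketch: the host1x_hw_*() wrappers above dispatch through the
 * ops tables, which a per-SoC backend is expected to install from its
 * host1x_info::init hook during probe.  The identifiers host1xNN_* and the
 * syncpt_* hook functions below are placeholder assumptions, shown only to
 * make the indirection concrete; they are not defined in this header.
 *
 *	static const struct host1x_syncpt_ops host1xNN_syncpt_ops = {
 *		.restore = syncpt_restore,
 *		.restore_wait_base = syncpt_restore_wait_base,
 *		.load_wait_base = syncpt_load_wait_base,
 *		.load = syncpt_load,
 *		.cpu_incr = syncpt_cpu_incr,
 *		.assign_to_channel = syncpt_assign_to_channel,
 *		.enable_protection = syncpt_enable_protection,
 *	};
 *
 *	static int host1xNN_init(struct host1x *host)
 *	{
 *		host->channel_op = &host1xNN_channel_ops;
 *		host->cdma_op = &host1xNN_cdma_ops;
 *		host->cdma_pb_op = &host1xNN_pushbuffer_ops;
 *		host->syncpt_op = &host1xNN_syncpt_ops;
 *		host->intr_op = &host1xNN_intr_ops;
 *		host->debug_op = &host1xNN_debug_ops;
 *
 *		return 0;
 *	}
 *
 * With such a backend in place, a call like host1x_hw_syncpt_load(host, sp)
 * resolves to host->syncpt_op->load(sp), i.e. the SoC-specific register read.
 */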