/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"
#include "common.xml.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

struct etnaviv_chip_identity {
	u32 model;
	u32 revision;
	u32 product_id;
	u32 customer_id;
	u32 eco_id;

	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;
	u32 minor_features1;
	u32 minor_features2;
	u32 minor_features3;
	u32 minor_features4;
	u32 minor_features5;
	u32 minor_features6;
	u32 minor_features7;
	u32 minor_features8;
	u32 minor_features9;
	u32 minor_features10;
	u32 minor_features11;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Buffer size */
	u32 buffer_size;

	/* Number of varyings */
	u8 varyings_count;
};

enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};

struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};

struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;

#define ETNA_NR_EVENTS 30

struct etnaviv_gpu {
	struct drm_device *drm;
	struct thermal_cooling_device *cooling;
	struct device *dev;
	struct mutex lock;
	struct etnaviv_chip_identity identity;
	enum etnaviv_sec_mode sec_mode;
	struct workqueue_struct *wq;
	struct drm_gpu_scheduler sched;
	bool initialized;
	bool fe_running;

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf buffer;
	int exec_state;

	/* event management: */
	DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
	struct etnaviv_event event[ETNA_NR_EVENTS];
	struct completion event_free;
	spinlock_t event_spinlock;

	u32 idle_mask;

	/* Fencing support */
	struct mutex fence_lock;
	struct idr fence_idr;
	u32 next_fence;
	u32 completed_fence;
	wait_queue_head_t fence_event;
	u64 fence_context;
	spinlock_t fence_spinlock;

	/* worker for handling 'sync' points: */
	struct work_struct sync_point_work;
	int sync_point_event;

	/* hang detection */
	u32 hangcheck_dma_addr;
	u32 hangcheck_fence;

	void __iomem *mmio;
	int irq;

	struct etnaviv_iommu_context *mmu_context;
	unsigned int flush_seq;

	/* Power Control: */
	struct clk *clk_bus;
	struct clk *clk_reg;
	struct clk *clk_core;
	struct clk *clk_shader;

	unsigned int freq_scale;
	unsigned long base_rate_core;
	unsigned long base_rate_shader;
};

static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + reg);
}

static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
{
	/* Power registers in GC300 < 2.0 are offset by 0x100 */
	if (gpu->identity.model == chipModel_GC300 &&
	    gpu->identity.revision < 0x2000)
		reg += 0x100;

	return reg;
}

static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));
}

static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));
}

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */
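
/*
 * Illustrative sketch only, not part of the upstream header: one way a
 * caller could combine the gpu_read() accessor and the per-core idle_mask
 * declared above to check whether the GPU reports itself idle, in the same
 * spirit as etnaviv_gpu_wait_idle(). The register name VIVS_HI_IDLE_STATE
 * (from the driver's generated register headers) and the helper name below
 * are assumptions made for illustration and are kept compiled out.
 */
#if 0
static bool example_gpu_is_idle(struct etnaviv_gpu *gpu)
{
	/* Read the idle status bits and keep only the pipes this core has */
	u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & gpu->idle_mask;

	/* All relevant idle bits set means the GPU has drained its work */
	return idle == gpu->idle_mask;
}
#endif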