/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "i915_reg.h"

struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;

enum forcewake_domain_id {
        FW_DOMAIN_ID_RENDER = 0,
        FW_DOMAIN_ID_BLITTER,
        FW_DOMAIN_ID_MEDIA,
        FW_DOMAIN_ID_MEDIA_VDBOX0,
        FW_DOMAIN_ID_MEDIA_VDBOX1,
        FW_DOMAIN_ID_MEDIA_VDBOX2,
        FW_DOMAIN_ID_MEDIA_VDBOX3,
        FW_DOMAIN_ID_MEDIA_VEBOX0,
        FW_DOMAIN_ID_MEDIA_VEBOX1,

        FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
        FORCEWAKE_RENDER        = BIT(FW_DOMAIN_ID_RENDER),
        FORCEWAKE_BLITTER       = BIT(FW_DOMAIN_ID_BLITTER),
        FORCEWAKE_MEDIA         = BIT(FW_DOMAIN_ID_MEDIA),
        FORCEWAKE_MEDIA_VDBOX0  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
        FORCEWAKE_MEDIA_VDBOX1  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
        FORCEWAKE_MEDIA_VDBOX2  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
        FORCEWAKE_MEDIA_VDBOX3  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
        FORCEWAKE_MEDIA_VEBOX0  = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
        FORCEWAKE_MEDIA_VEBOX1  = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),

        FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1
};

struct intel_uncore_funcs {
        void (*force_wake_get)(struct intel_uncore *uncore,
                               enum forcewake_domains domains);
        void (*force_wake_put)(struct intel_uncore *uncore,
                               enum forcewake_domains domains);

        u8 (*mmio_readb)(struct intel_uncore *uncore,
                         i915_reg_t r, bool trace);
        u16 (*mmio_readw)(struct intel_uncore *uncore,
                          i915_reg_t r, bool trace);
        u32 (*mmio_readl)(struct intel_uncore *uncore,
                          i915_reg_t r, bool trace);
        u64 (*mmio_readq)(struct intel_uncore *uncore,
                          i915_reg_t r, bool trace);

        void (*mmio_writeb)(struct intel_uncore *uncore,
                            i915_reg_t r, u8 val, bool trace);
        void (*mmio_writew)(struct intel_uncore *uncore,
                            i915_reg_t r, u16 val, bool trace);
        void (*mmio_writel)(struct intel_uncore *uncore,
                            i915_reg_t r, u32 val, bool trace);
};

struct intel_forcewake_range {
        u32 start;
        u32 end;

        enum forcewake_domains domains;
};

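/*
 * Illustrative sketch, not taken from the driver: a platform's forcewake
 * table maps MMIO offset ranges onto the domains that must be awake before
 * a register in that range may be touched. The real tables live in
 * intel_uncore.c; the offsets and domain assignments below are hypothetical
 * placeholders.
 *
 *      static const struct intel_forcewake_range sketch_fw_ranges[] = {
 *              { 0x2000, 0x3fff, FORCEWAKE_RENDER },
 *              { 0x8000, 0x9fff, FORCEWAKE_BLITTER },
 *              { 0xa000, 0xbfff, FORCEWAKE_MEDIA },
 *      };
 *
 * A lookup resolves a register offset against the fw_domains_table below and
 * returns the matching domain mask, see intel_uncore_forcewake_for_reg().
 */
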
struct intel_uncore {
        void __iomem *regs;

        struct intel_runtime_pm *rpm;

        spinlock_t lock; /** lock is also taken in irq contexts. */

        unsigned int flags;
#define UNCORE_HAS_FORCEWAKE            BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED   BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED        BIT(2)
#define UNCORE_HAS_FIFO                 BIT(3)

        const struct intel_forcewake_range *fw_domains_table;
        unsigned int fw_domains_table_entries;

        struct notifier_block pmic_bus_access_nb;
        struct intel_uncore_funcs funcs;

        unsigned int fifo_count;

        enum forcewake_domains fw_domains;
        enum forcewake_domains fw_domains_active;
        enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */

        struct intel_uncore_forcewake_domain {
                enum forcewake_domain_id id;
                enum forcewake_domains mask;
                unsigned int wake_count;
                bool active;
                struct hrtimer timer;
                u32 __iomem *reg_set;
                u32 __iomem *reg_ack;
        } fw_domain[FW_DOMAIN_ID_COUNT];

        struct {
                unsigned int count;

                int saved_mmio_check;
                int saved_mmio_debug;
        } user_forcewake;

        int unclaimed_mmio_check;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
        for (tmp__ = (mask__); \
             tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)

#define for_each_fw_domain(domain__, uncore__, tmp__) \
        for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)

static inline struct intel_uncore *
forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
{
        return container_of(d, struct intel_uncore, fw_domain[d->id]);
}

static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
        return uncore->flags & UNCORE_HAS_FORCEWAKE;
}

static inline bool
intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
{
        return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
{
        return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_fifo(const struct intel_uncore *uncore)
{
        return uncore->flags & UNCORE_HAS_FIFO;
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
void intel_uncore_init_early(struct intel_uncore *uncore);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);

void assert_forcewakes_inactive(struct intel_uncore *uncore);
void assert_forcewakes_active(struct intel_uncore *uncore,
                              enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
                               i915_reg_t reg, unsigned int op);
#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

void intel_uncore_forcewake_get(struct intel_uncore *uncore,
                                enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
                                enum forcewake_domains domains);

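/*
 * Illustrative sketch, not a driver API: hold forcewake explicitly across a
 * burst of register accesses so the hardware cannot power down between them,
 * instead of paying the wake latency on every individual access. The helper
 * name is hypothetical; the wakeup is narrowed to the domains that actually
 * guard @reg (FORCEWAKE_ALL would also work, at a higher power cost).
 */
static inline void
sketch_forcewake_section(struct intel_uncore *uncore, i915_reg_t reg)
{
        enum forcewake_domains fw =
                intel_uncore_forcewake_for_reg(uncore, reg,
                                               FW_REG_READ | FW_REG_WRITE);

        intel_uncore_forcewake_get(uncore, fw);
        /* ... access @reg here, e.g. via the accessors defined below ... */
        intel_uncore_forcewake_put(uncore, fw);
}
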
/* Like above but the caller must manage the uncore.lock itself.
 * Must be used with I915_READ_FW and friends.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
                                        enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
                                        enum forcewake_domains domains);

void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);

int __intel_wait_for_register(struct intel_uncore *uncore,
                              i915_reg_t reg,
                              u32 mask,
                              u32 value,
                              unsigned int fast_timeout_us,
                              unsigned int slow_timeout_ms,
                              u32 *out_value);
static inline int
intel_wait_for_register(struct intel_uncore *uncore,
                        i915_reg_t reg,
                        u32 mask,
                        u32 value,
                        unsigned int timeout_ms)
{
        return __intel_wait_for_register(uncore, reg, mask, value, 2,
                                         timeout_ms, NULL);
}

int __intel_wait_for_register_fw(struct intel_uncore *uncore,
                                 i915_reg_t reg,
                                 u32 mask,
                                 u32 value,
                                 unsigned int fast_timeout_us,
                                 unsigned int slow_timeout_ms,
                                 u32 *out_value);
static inline int
intel_wait_for_register_fw(struct intel_uncore *uncore,
                           i915_reg_t reg,
                           u32 mask,
                           u32 value,
                           unsigned int timeout_ms)
{
        return __intel_wait_for_register_fw(uncore, reg, mask, value,
                                            2, timeout_ms, NULL);
}

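/*
 * Illustrative sketch, not a driver API: the typical use of the helpers above
 * is polling a status register until a busy bit clears. The helper name and
 * the 10ms timeout are hypothetical.
 */
static inline int
sketch_wait_until_idle(struct intel_uncore *uncore, i915_reg_t reg, u32 busy_bit)
{
        /* mask == busy_bit, expected value == 0: wait for the bit to clear */
        return intel_wait_for_register(uncore, reg, busy_bit, 0, 10);
}
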
/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
                                            i915_reg_t reg) \
{ \
        return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
                                           i915_reg_t reg, u##x__ val) \
{ \
        write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

#define __uncore_read(name__, x__, s__, trace__) \
static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
                                           i915_reg_t reg) \
{ \
        return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
}

#define __uncore_write(name__, x__, s__, trace__) \
static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
                                         i915_reg_t reg, u##x__ val) \
{ \
        uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
}

__uncore_read(read8, 8, b, true)
__uncore_read(read16, 16, w, true)
__uncore_read(read, 32, l, true)
__uncore_read(read16_notrace, 16, w, false)
__uncore_read(read_notrace, 32, l, false)

__uncore_write(write8, 8, b, true)
__uncore_write(write16, 16, w, true)
__uncore_write(write, 32, l, true)
__uncore_write(write_notrace, 32, l, false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * uncore->funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
__uncore_read(read64, 64, q, true)

static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
                         i915_reg_t lower_reg, i915_reg_t upper_reg)
{
        u32 upper, lower, old_upper, loop = 0;
        upper = intel_uncore_read(uncore, upper_reg);
        do {
                old_upper = upper;
                lower = intel_uncore_read(uncore, lower_reg);
                upper = intel_uncore_read(uncore, upper_reg);
        } while (upper != old_upper && loop++ < 2);
        return (u64)upper << 32 | lower;
}

#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))

#undef __uncore_read
#undef __uncore_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&uncore->lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&uncore->lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the uncore->lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))

static inline void intel_uncore_rmw(struct intel_uncore *uncore,
                                    i915_reg_t reg, u32 clear, u32 set)
{
        u32 val;

        val = intel_uncore_read(uncore, reg);
        val &= ~clear;
        val |= set;
        intel_uncore_write(uncore, reg, val);
}

static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
                                       i915_reg_t reg, u32 clear, u32 set)
{
        u32 val;

        val = intel_uncore_read_fw(uncore, reg);
        val &= ~clear;
        val |= set;
        intel_uncore_write_fw(uncore, reg, val);
}

#define raw_reg_read(base, reg) \
        readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
        writel(value, base + i915_mmio_reg_offset(reg))

#endif /* !__INTEL_UNCORE_H__ */