/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef __INTEL_GT_MCR__
#define __INTEL_GT_MCR__

#include "intel_gt_types.h"

void intel_gt_mcr_init(struct intel_gt *gt);
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags);
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags);
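
/*
 * Locking sketch (illustrative only, not part of the driver): one way a
 * caller might batch several steered accesses under a single MCR lock
 * acquisition.  This assumes the "_fw" variants expect the caller to already
 * hold the MCR lock and any required forcewake; MCR_REG_EXAMPLE is a
 * placeholder for a real i915_mcr_reg_t definition.
 *
 *	unsigned long flags;
 *
 *	intel_gt_mcr_lock(gt, &flags);
 *	intel_gt_mcr_multicast_write_fw(gt, MCR_REG_EXAMPLE, val);
 *	intel_gt_mcr_unlock(gt, flags);
 */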

u32 intel_gt_mcr_read(struct intel_gt *gt,
		      i915_mcr_reg_t reg,
		      int group, int instance);
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg);
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg);
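
/*
 * Read sketch (illustrative): intel_gt_mcr_read() performs a read steered to
 * one specific group/instance, while the _any variants let the steering code
 * pick any instance that will not return a terminated (garbage) value.
 * MCR_REG_EXAMPLE is a placeholder register, not a real definition.
 *
 *	u32 val;
 *
 *	val = intel_gt_mcr_read(gt, MCR_REG_EXAMPLE, group, instance);
 *	val = intel_gt_mcr_read_any(gt, MCR_REG_EXAMPLE);
 */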

void intel_gt_mcr_unicast_write(struct intel_gt *gt,
				i915_mcr_reg_t reg, u32 value,
				int group, int instance);
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
				  i915_mcr_reg_t reg, u32 value);
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt,
				     i915_mcr_reg_t reg, u32 value);
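
/*
 * Write sketch (illustrative): a unicast write lands on a single
 * group/instance, while a multicast write is broadcast to every instance of
 * the register.  MCR_REG_EXAMPLE and val are placeholders.
 *
 *	intel_gt_mcr_unicast_write(gt, MCR_REG_EXAMPLE, val, group, instance);
 *	intel_gt_mcr_multicast_write(gt, MCR_REG_EXAMPLE, val);
 */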

u32 intel_gt_mcr_multicast_rmw(struct intel_gt *gt, i915_mcr_reg_t reg,
			       u32 clear, u32 set);
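
/*
 * Read-modify-write sketch (illustrative): clear the bits in @clear, then set
 * the bits in @set across all instances of the register; the return value is
 * assumed here to be the old value read back before modification.
 * MCR_REG_EXAMPLE and EXAMPLE_FIELD_MASK are placeholders.
 *
 *	old = intel_gt_mcr_multicast_rmw(gt, MCR_REG_EXAMPLE,
 *					 EXAMPLE_FIELD_MASK, new_bits);
 */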

void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
					     i915_mcr_reg_t reg,
					     u8 *group, u8 *instance);

void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
				  bool dump_table);

void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
				  unsigned int *group, unsigned int *instance);

int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
			      i915_mcr_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms);
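
/*
 * Wait sketch (illustrative): poll a steered register until
 * (read & mask) == value, first busy-waiting up to fast_timeout_us and then
 * sleeping up to slow_timeout_ms; 0 is returned on success and a negative
 * error code on timeout.  EXAMPLE_STATUS_MASK/EXAMPLE_STATUS_READY are
 * placeholder values, not real definitions.
 *
 *	err = intel_gt_mcr_wait_for_reg(gt, MCR_REG_EXAMPLE,
 *					EXAMPLE_STATUS_MASK,
 *					EXAMPLE_STATUS_READY,
 *					100, 10);
 */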

/*
 * Helper for the for_each_ss_steering loop.  On pre-Xe_HP platforms, subslice
 * presence is determined by using the group/instance as direct lookups in the
 * slice/subslice topology.  On Xe_HP and beyond, the steering is unrelated to
 * the topology, so we look up the DSS ID directly in "slice 0."
 */
#define _HAS_SS(ss_, gt_, group_, instance_) ( \
	GRAPHICS_VER_FULL((gt_)->i915) >= IP_VER(12, 50) ? \
		intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \
		intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_))

/*
 * Loop over each subslice/DSS and determine the group and instance IDs that
 * should be used to steer MCR accesses toward this DSS.
 */
#define for_each_ss_steering(ss_, gt_, group_, instance_) \
	for (ss_ = 0, intel_gt_mcr_get_ss_steering(gt_, 0, &group_, &instance_); \
	     ss_ < I915_MAX_SS_FUSE_BITS; \
	     ss_++, intel_gt_mcr_get_ss_steering(gt_, ss_, &group_, &instance_)) \
		for_each_if(_HAS_SS(ss_, gt_, group_, instance_))
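
/*
 * Loop usage sketch (illustrative): iterate over each present DSS and issue
 * a steered read for it.  The dss/group/instance variables are caller-owned
 * and MCR_REG_EXAMPLE is a placeholder register.
 *
 *	unsigned int dss, group, instance;
 *
 *	for_each_ss_steering(dss, gt, group, instance)
 *		val = intel_gt_mcr_read(gt, MCR_REG_EXAMPLE, group, instance);
 */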

#endif /* __INTEL_GT_MCR__ */