// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_regs.h"

static u32 read_reference_ts_freq(struct intel_uncore *uncore)
{
	u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000000 / (frac_freq + 1);
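	/*
	 * Illustrative example (hypothetical register values): a divider
	 * field of 23 and a denominator field of 1 would give
	 * base_freq = 24 * 1000000 Hz and frac_freq = 1000000 / 2 Hz,
	 * i.e. 24.5 MHz in total.
	 */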

	return base_freq + frac_freq;
}

static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;
	u32 f25_mhz = 25000000;
	u32 f38_4_mhz = 38400000;
	u32 crystal_clock =
		(rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
		GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
{
	u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
	u32 freq = 0;

	/*
	 * Note that on gen11+, the clock frequency may be reconfigured.
	 * We do not reconfigure it, and we assume nobody else does.
	 *
	 * First figure out the reference frequency. There are 2 ways
	 * we can compute the frequency, either through the
	 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
	 * tells us which one we should use.
	 */
	if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
		freq = read_reference_ts_freq(uncore);
	} else {
		u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);

		freq = gen11_get_crystal_clock_freq(uncore, c0);

		/*
		 * Now figure out how the command stream's timestamp
		 * register increments from this frequency (it might
		 * increment only every few clock cycles).
		 */
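		/*
		 * For example, a shift parameter of 0 divides the crystal
		 * frequency by 8 (one timestamp tick every 8 crystal
		 * clocks), while a shift parameter of 3 leaves it
		 * unchanged.
		 */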
		freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
			      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
	}

	return freq;
}

static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
{
	u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
	u32 freq = 0;

	if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
		freq = read_reference_ts_freq(uncore);
	} else {
		freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;

		/*
		 * Now figure out how the command stream's timestamp
		 * register increments from this frequency (it might
		 * increment only every few clock cycles).
		 */
		freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
			      CTC_SHIFT_PARAMETER_SHIFT);
	}

	return freq;
}

static u32 gen6_read_clock_frequency(struct intel_uncore *uncore)
{
	/*
	 * PRMs say:
	 *
	 *     "The PCU TSC counts 10ns increments; this timestamp
	 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
	 *      rolling over every 1.5 hours)."
	 */
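	/* 80 ns per tick corresponds to 1000000000 / 80 = 12500000 Hz. */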
	return 12500000;
}

static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
{
	/*
	 * 63:32 increments every 1000 ns
	 * 31:0 mbz
	 */
	return 1000000000 / 1000;
}

static u32 g4x_read_clock_frequency(struct intel_uncore *uncore)
{
	/*
	 * 63:20 increments every 1/4 ns
	 * 19:0 mbz
	 *
	 * -> 63:32 increments every 1024 ns
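	 *    (bit 32 is 12 bits above bit 20, so 2^12 * 0.25 ns = 1024 ns)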
	 */
	return 1000000000 / 1024;
}

static u32 gen4_read_clock_frequency(struct intel_uncore *uncore)
{
	/*
	 * PRMs say:
	 *
	 *     "The value in this register increments once every 16
	 *      hclks." (through the “Clocking Configuration”
	 *      (“CLKCFG”) MCHBAR register)
	 *
	 * Testing on actual hardware has shown there is no /16.
	 */
	return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000;
}

static u32 read_clock_frequency(struct intel_uncore *uncore)
{
	if (GRAPHICS_VER(uncore->i915) >= 11)
		return gen11_read_clock_frequency(uncore);
	else if (GRAPHICS_VER(uncore->i915) >= 9)
		return gen9_read_clock_frequency(uncore);
	else if (GRAPHICS_VER(uncore->i915) >= 6)
		return gen6_read_clock_frequency(uncore);
	else if (GRAPHICS_VER(uncore->i915) == 5)
		return gen5_read_clock_frequency(uncore);
	else if (IS_G4X(uncore->i915))
		return g4x_read_clock_frequency(uncore);
	else if (GRAPHICS_VER(uncore->i915) == 4)
		return gen4_read_clock_frequency(uncore);
	else
		return 0;
}

void intel_gt_init_clock_frequency(struct intel_gt *gt)
{
	gt->clock_frequency = read_clock_frequency(gt->uncore);

	/* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
	if (GRAPHICS_VER(gt->i915) == 11)
		gt->clock_period_ns = NSEC_PER_SEC / 13750000;
	else if (gt->clock_frequency)
		gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);

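	/*
	 * As a rough guide (assuming a 19.2 MHz GT clock), clock_period_ns
	 * works out to 1000000000 / 19200000 rounded up, i.e. 53 ns, and
	 * the wrap value below is how long S32_MAX ticks of that period
	 * last, expressed in milliseconds.
	 */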
	GT_TRACE(gt,
		 "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
		 gt->clock_frequency / 1000,
		 gt->clock_period_ns,
		 div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
			 USEC_PER_SEC));
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
	if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
		dev_err(gt->i915->drm.dev,
			"GT clock frequency changed, was %uHz, now %uHz!\n",
			gt->clock_frequency,
			read_clock_frequency(gt->uncore));
	}
}
#endif

static u64 div_u64_roundup(u64 nom, u32 den)
{
	return div_u64(nom + den - 1, den);
}

u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
	return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
}

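/*
 * A PM interval unit corresponds to 16 GT clock cycles, hence the factor of
 * 16 here and the matching divide by 16 in intel_gt_ns_to_pm_interval().
 */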
u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
{
	return intel_gt_clock_interval_to_ns(gt, 16 * count);
}

u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
	return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
}

u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
{
	u64 val;

	/*
	 * Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
	 * 8300) freezing up around GPU hangs. Looks as if even
	 * scheduling/timer interrupts start misbehaving if the RPS
	 * EI/thresholds are "bad", leading to a very sluggish or even
	 * frozen machine.
	 */
	val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
	if (GRAPHICS_VER(gt->i915) == 6)
		val = div_u64_roundup(val, 25) * 25;

	return val;
}