// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include "sja1105.h"

#define SJA1105_TAS_CLKSRC_DISABLED	0
#define SJA1105_TAS_CLKSRC_STANDALONE	1
#define SJA1105_TAS_CLKSRC_AS6802	2
#define SJA1105_TAS_CLKSRC_PTP		3
#define SJA1105_TAS_MAX_DELTA		BIT(19)
#define SJA1105_GATE_MASK		GENMASK_ULL(SJA1105_NUM_TC - 1, 0)

/* This is not a preprocessor macro because the "ns" argument may or may not
 * be s64 at the caller side. Keeping it a function ensures the argument is
 * properly type-cast to s64 before div_s64.
 */
static s64 ns_to_sja1105_delta(s64 ns)
{
	return div_s64(ns, 200);
}
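
/* Worked example (illustrative): the schedule advances in ticks of 200 ns,
 * as per the division above, so a tc-taprio interval of 300000 ns becomes a
 * delta of 300000 / 200 = 1500 ticks in the Schedule Table.
 */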

/* Lo and behold: the egress scheduler from hell.
 *
 * At the hardware level, the Time-Aware Shaper holds a global linear array of
 * all schedule entries for all ports. These are the Gate Control List (GCL)
 * entries, let's call them "timeslots" for short. This linear array of
 * timeslots is held in BLK_IDX_SCHEDULE.
 *
 * Then there are a maximum of 8 "execution threads" inside the switch, which
 * iterate cyclically through the "schedule". Each "cycle" has an entry point
 * and an exit point, both being timeslot indices in the schedule table. The
 * hardware calls each cycle a "subschedule".
 *
 * Subschedule (cycle) i starts when
 *   ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta.
 *
 * The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from
 *   k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to
 *   k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i]
 *
 * For each schedule entry (timeslot) k, the engine executes the gate control
 * list entry for the duration of BLK_IDX_SCHEDULE[k].delta.
 *
 *         +---------+
 *         |         | BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
 *         +---------+
 *              |
 *              +-----------------+
 *                                | .actsubsch
 *  BLK_IDX_SCHEDULE_ENTRY_POINTS v
 *                 +-------+-------+
 *                 |cycle 0|cycle 1|
 *                 +-------+-------+
 *                   |  |      |  |
 *  +----------------+  |      |  +-------------------------------------+
 *  |   .subschindx     |      |             .subschindx                |
 *  |                   |      +---------------+                        |
 *  |          .address |        .address      |                        |
 *  |                   |                      |                        |
 *  |                   |                      |                        |
 *  |  BLK_IDX_SCHEDULE v                      v                        |
 *  |              +-------+-------+-------+-------+-------+------+     |
 *  |              |entry 0|entry 1|entry 2|entry 3|entry 4|entry5|     |
 *  |              +-------+-------+-------+-------+-------+------+     |
 *  |                                  ^                    ^  ^  ^     |
 *  |                                  |                    |  |  |     |
 *  |        +-------------------------+                    |  |  |     |
 *  |        |              +-------------------------------+  |  |     |
 *  |        |              |              +-------------------+  |     |
 *  |        |              |              |                      |     |
 *  | +---------------------------------------------------------------+ |
 *  | |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]| |
 *  | +---------------------------------------------------------------+ |
 *  |        ^              ^                BLK_IDX_SCHEDULE_PARAMS    |
 *  |        |              |                                           |
 *  +--------+              +-------------------------------------------+
 *
 *  In the above picture there are two subschedules (cycles):
 *
 *  - cycle 0: iterates the schedule table from 0 to 2 (and back)
 *  - cycle 1: iterates the schedule table from 3 to 5 (and back)
 *
 *  All other possible execution threads must be marked as unused by making
 *  their "subschedule end index" (subscheind) equal to the last valid
 *  subschedule's end index (in this case 5).
 */
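
/* As a concrete reading of the picture above (values purely illustrative),
 * the two pictured cycles would be dimensioned as follows:
 *
 *   BLK_IDX_SCHEDULE_ENTRY_POINTS[0] = { .subschindx = 0, .address = 0 };
 *   BLK_IDX_SCHEDULE_ENTRY_POINTS[1] = { .subschindx = 1, .address = 3 };
 *   BLK_IDX_SCHEDULE_PARAMS.subscheind = { 2, 5, 5, 5, 5, 5, 5, 5 };
 *
 * i.e. execution thread 0 stops at entry 2, thread 1 stops at entry 5, and
 * threads 2..7 are parked by reusing the last valid end index (5).
 */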
static int sja1105_init_scheduling(struct sja1105_private *priv)
{
	struct sja1105_schedule_entry_points_entry *schedule_entry_points;
	struct sja1105_schedule_entry_points_params_entry
					*schedule_entry_points_params;
	struct sja1105_schedule_params_entry *schedule_params;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	struct sja1105_schedule_entry *schedule;
	struct sja1105_table *table;
	int schedule_start_idx;
	s64 entry_point_delta;
	int schedule_end_idx;
	int num_entries = 0;
	int num_cycles = 0;
	int cycle = 0;
	int i, k = 0;
	int port;

	/* Discard previous Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Entry Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Figure out the dimensioning of the problem */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		if (tas_data->offload[port]) {
			num_entries += tas_data->offload[port]->num_entries;
			num_cycles++;
		}
	}

	/* Nothing to do */
	if (!num_cycles)
		return 0;

	/* Pre-allocate space in the static config tables */

	/* Schedule Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
	table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_entries;
	schedule = table->entries;

	/* Schedule Entry Points Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		/* Previously allocated memory will be freed automatically in
		 * sja1105_static_config_free. This is true for all early
		 * returns below.
		 */
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
	schedule_entry_points_params = table->entries;

	/* Schedule Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
	table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
	schedule_params = table->entries;

	/* Schedule Entry Points Table */
	table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
	table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_cycles;
	schedule_entry_points = table->entries;

	/* Finally start populating the static config tables */
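	/* actsubsch appears to hold the index of the last active subschedule
	 * (execution thread), hence num_cycles - 1: threads 0..num_cycles - 1
	 * run, the rest stay parked.
	 */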
	schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_STANDALONE;
	schedule_entry_points_params->actsubsch = num_cycles - 1;

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		const struct tc_taprio_qopt_offload *offload;

		offload = tas_data->offload[port];
		if (!offload)
			continue;

		schedule_start_idx = k;
		schedule_end_idx = k + offload->num_entries - 1;
		/* TODO this is the base time for the port's subschedule,
		 * relative to PTPSCHTM. But as we're using the standalone
		 * clock source and not PTP clock as time reference, there's
		 * little point in even trying to put more logic into this,
		 * like preserving the phases between the subschedules of
		 * different ports. We'll get all of that when switching to the
		 * PTP clock source.
		 */
		entry_point_delta = 1;

		schedule_entry_points[cycle].subschindx = cycle;
		schedule_entry_points[cycle].delta = entry_point_delta;
		schedule_entry_points[cycle].address = schedule_start_idx;

		/* The subschedule end indices need to be
		 * monotonically increasing.
		 */
		for (i = cycle; i < 8; i++)
			schedule_params->subscheind[i] = schedule_end_idx;

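		/* Note the inverted convention: the tc-taprio gate_mask has
		 * bits set for gates that are open, while resmedia ("reserved
		 * media") marks the traffic classes whose gates stay closed,
		 * hence the complement of the gate mask below.
		 */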
		for (i = 0; i < offload->num_entries; i++, k++) {
			s64 delta_ns = offload->entries[i].interval;

			schedule[k].delta = ns_to_sja1105_delta(delta_ns);
			schedule[k].destports = BIT(port);
			schedule[k].resmedia_en = true;
			schedule[k].resmedia = SJA1105_GATE_MASK &
					~offload->entries[i].gate_mask;
		}
		cycle++;
	}

	return 0;
}

/* Let there be 2 port subschedules, each executing an arbitrary number of gate
 * open/close events cyclically.
 * No two of those gate events may ever occur at the exact same time,
 * otherwise the switch is known to act in exotically strange ways.
 * However the hardware doesn't bother performing these integrity checks.
 * So here we are with the task of validating whether the new @admin offload
 * has any conflict with the already established TAS configuration in
 * tas_data->offload. We already know the other ports are in harmony with one
 * another, otherwise we wouldn't have saved them.
 * Each gate event executes periodically, with a period of @cycle_time and a
 * phase given by its cycle's @base_time plus its offset within the cycle
 * (which in turn is given by the length of the events prior to it).
 * There are two aspects to possible collisions:
 * - Collisions within one cycle's (actually the longest cycle's) time frame.
 *   For that, we need to compare every pair from the Cartesian product of the
 *   two schedules' event occurrences within one cycle time.
 * - Collisions in the future. Events may not collide within one cycle time,
 *   but if two port schedules don't have the same periodicity (aka the cycle
 *   times aren't multiples of one another), they surely will some time in the
 *   future (actually they will collide an infinite amount of times).
 */
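
/* Worked example (numbers illustrative): with an established cycle time of
 * 200 us and a new one of 100 us, 200000 % 100000 == 0, so the pair passes
 * the divisibility check. Base times of 450 us and 130 us reduce to
 * rbt1 = 450000 % 200000 = 50000 and rbt2 = 130000 % 100000 = 30000, and all
 * occurrences of every GCL entry pair up to
 * stop_time = 200000 + 50000 = 250000 ns are compared for equality.
 */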
static bool
sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
			    const struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	const struct tc_taprio_qopt_offload *offload;
	s64 max_cycle_time, min_cycle_time;
	s64 delta1, delta2;
	s64 rbt1, rbt2;
	s64 stop_time;
	s64 t1, t2;
	int i, j;
	s32 rem;

	offload = tas_data->offload[port];
	if (!offload)
		return false;

	/* Check if the two cycle times are multiples of one another.
	 * If they aren't, then they will surely collide.
	 */
	max_cycle_time = max(offload->cycle_time, admin->cycle_time);
	min_cycle_time = min(offload->cycle_time, admin->cycle_time);
	div_s64_rem(max_cycle_time, min_cycle_time, &rem);
	if (rem)
		return true;

	/* Calculate the "reduced" base time of each of the two cycles
	 * (transposed back as close to 0 as possible) by taking the
	 * remainder of the base time modulo the cycle time.
	 */
	div_s64_rem(offload->base_time, offload->cycle_time, &rem);
	rbt1 = rem;

	div_s64_rem(admin->base_time, admin->cycle_time, &rem);
	rbt2 = rem;

	stop_time = max_cycle_time + max(rbt1, rbt2);
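
	/* Because the smaller cycle time divides the larger one, the combined
	 * pattern of both schedules repeats with a period of max_cycle_time.
	 * Scanning one such period past the later reduced base time therefore
	 * covers every distinct phase alignment.
	 */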

	/* delta1 is the relative base time of each GCL entry within
	 * the established ports' TAS config.
	 */
	for (i = 0, delta1 = 0;
	     i < offload->num_entries;
	     delta1 += offload->entries[i].interval, i++) {
		/* delta2 is the relative base time of each GCL entry
		 * within the newly added TAS config.
		 */
		for (j = 0, delta2 = 0;
		     j < admin->num_entries;
		     delta2 += admin->entries[j].interval, j++) {
			/* t1 follows all possible occurrences of the
			 * established ports' GCL entry i within the
			 * first cycle time.
			 */
			for (t1 = rbt1 + delta1;
			     t1 <= stop_time;
			     t1 += offload->cycle_time) {
				/* t2 follows all possible occurrences
				 * of the newly added GCL entry j
				 * within the first cycle time.
				 */
				for (t2 = rbt2 + delta2;
				     t2 <= stop_time;
				     t2 += admin->cycle_time) {
					if (t1 == t2) {
						dev_warn(priv->ds->dev,
							 "GCL entry %d collides with entry %d of port %d\n",
							 j, i, port);
						return true;
					}
				}
			}
		}
	}

	return false;
}

int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
			    struct tc_taprio_qopt_offload *admin)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_tas_data *tas_data = &priv->tas_data;
	int other_port, rc, i;

	/* Can't change an already configured port (must delete qdisc first).
	 * Can't delete the qdisc from an unconfigured port.
	 */
	if (!!tas_data->offload[port] == admin->enable)
		return -EINVAL;

	if (!admin->enable) {
		taprio_offload_free(tas_data->offload[port]);
		tas_data->offload[port] = NULL;

		rc = sja1105_init_scheduling(priv);
		if (rc < 0)
			return rc;

		return sja1105_static_config_reload(priv);
	}

	/* The cycle time extension is the amount of time the last cycle from
	 * the old OPER needs to be extended in order to phase-align with the
	 * base time of the ADMIN when that becomes the new OPER.
	 * But of course our switch needs to be reset to switch-over between
	 * the ADMIN and the OPER configs - so much for a seamless transition.
	 * So don't add insult to injury and just say we don't support cycle
	 * time extension.
	 */
	if (admin->cycle_time_extension)
		return -ENOTSUPP;

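	/* A base time below one 200 ns tick truncates to a schedule delta of
	 * zero, which the hardware does not accept, hence the -ERANGE.
	 */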
	if (!ns_to_sja1105_delta(admin->base_time)) {
		dev_err(ds->dev, "A base time of zero is not hardware-allowed\n");
		return -ERANGE;
	}

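	/* Each interval must fit in the Schedule Table delta field: between
	 * one 200 ns tick and just under SJA1105_TAS_MAX_DELTA ticks, i.e.
	 * just under 2^19 * 200 ns ~= 104.86 ms.
	 */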
	for (i = 0; i < admin->num_entries; i++) {
		s64 delta_ns = admin->entries[i].interval;
		s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
		bool too_long, too_short;

		too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
		too_short = (delta_cycles == 0);
		if (too_long || too_short) {
			dev_err(priv->ds->dev,
				"Interval %lld too %s for GCL entry %d\n",
				delta_ns, too_long ? "long" : "short", i);
			return -ERANGE;
		}
	}

	for (other_port = 0; other_port < SJA1105_NUM_PORTS; other_port++) {
		if (other_port == port)
			continue;

		if (sja1105_tas_check_conflicts(priv, other_port, admin))
			return -ERANGE;
	}

	tas_data->offload[port] = taprio_offload_get(admin);

	rc = sja1105_init_scheduling(priv);
	if (rc < 0)
		return rc;

	return sja1105_static_config_reload(priv);
}
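
/* Usage sketch (illustrative, not part of the driver): the hook above is
 * reached through the tc-taprio full offload flag. Interface name, base time
 * and intervals below are made up for the example:
 *
 *   tc qdisc add dev swp2 parent root taprio \
 *           num_tc 8 \
 *           map 0 1 2 3 4 5 6 7 \
 *           queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
 *           base-time 200 \
 *           sched-entry S 01 300000 \
 *           sched-entry S 02 300000 \
 *           flags 2
 *
 * Each sched-entry interval and the base time must survive the 200 ns tick
 * checks above, or the command fails with -ERANGE.
 */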

void sja1105_tas_setup(struct dsa_switch *ds)
{
}

void sja1105_tas_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct tc_taprio_qopt_offload *offload;
	int port;

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		offload = priv->tas_data.offload[port];
		if (!offload)
			continue;

		taprio_offload_free(offload);
	}
}