/*
 * Resource Director Technology(RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Check whether MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and maximum bandwidth values specified by
 * the hardware. The allocated bandwidth percentage is rounded to the next
 * control step available on the hardware.
 */
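/*
 * Illustrative example (the numbers are made up, not read from real
 * hardware): with min_bw = 10, bw_gran = 10 and default_ctrl = 100, an
 * input of "37" passes the range check and is rounded up to 40, while
 * "5" and "150" are rejected as out of range.
 */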
static bool bw_validate_amd(char *buf, unsigned long *data,
			    struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if (bw < r->membw.min_bw || bw > r->default_ctrl) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}

int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
		 struct rdt_domain *d)
{
	unsigned long bw_val;

	if (d->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate_amd(data->buf, &bw_val, r))
		return -EINVAL;

	d->new_ctrl = bw_val;
	d->have_new_ctrl = true;

	return 0;
}

/*
 * Check whether MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and max bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded to the next
 * control step available on the hardware.
 */
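/*
 * Note: when the MBA software controller is in use (is_mba_sc()), the
 * value written is treated as a bandwidth in MBps rather than a
 * percentage, which is why the range check below is skipped in that case.
 */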
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/*
	 * Only linear delay values are supported for current Intel SKUs.
	 */
	if (!r->membw.delay_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
	    !is_mba_sc(r)) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}

int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
		   struct rdt_domain *d)
{
	unsigned long bw_val;

	if (d->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;
	d->new_ctrl = bw_val;
	d->have_new_ctrl = true;

	return 0;
}

/*
 * Check whether a cache bit mask is valid. The SDM says:
 *	Please note that all (and only) contiguous '1' combinations
 *	are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
 * Additionally Haswell requires at least two bits set.
 */
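/*
 * Example, assuming a hypothetical cbm_len of 16: "ffff" and "00f0" are
 * accepted, "0f0f" is rejected because its set bits are not contiguous,
 * and on Haswell (min_cbm_bits == 2) a single-bit mask such as "0010" is
 * rejected as well.
 */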
bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if (val == 0 || val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
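	/*
	 * first_bit..zero_bit-1 is the lowest run of set bits. If any
	 * further bit is set at or above zero_bit, the mask is not one
	 * contiguous block of ones and must be rejected.
	 */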

	if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}

/*
 * Check whether a cache bit mask is valid. AMD allows non-contiguous
 * bitmasks
 */
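/*
 * For example, a non-contiguous mask such as "f0f" that Intel would
 * reject is accepted here; the only requirement is that the value does
 * not exceed default_ctrl (the all-ones mask).
 */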
bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if (val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	*data = val;
	return true;
}

/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
	      struct rdt_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	u32 cbm_val;

	if (d->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!r->cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	d->new_ctrl = cbm_val;
	d->have_new_ctrl = true;

	return 0;
}

/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
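/*
 * For example, for an L3 resource with two cache domains the line might
 * be "0=ffff;1=00ff", or "0=100;1=50" for MB. The leading "L3:"/"MB:"
 * prefix has already been stripped off by rdtgroup_schemata_write().
 */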
static int parse_line(char *line, struct rdt_resource *r,
		      struct rdtgroup *rdtgrp)
{
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    r->rid == RDT_RESOURCE_MBA) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, r, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				/*
				 * We are in pseudo-locking setup mode and
				 * have just parsed a valid CBM that should
				 * be pseudo-locked. Only one pseudo-locked
				 * region is allowed per resource group and
				 * domain, so do the required initialization
				 * for this single region and return.
				 */
				rdtgrp->plr->r = r;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = d->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}

int update_domains(struct rdt_resource *r, int closid)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	bool mba_sc;
	u32 *dc;
	int cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.low = closid;
	msr_param.high = msr_param.low + 1;
	msr_param.res = r;
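	/*
	 * msr_param describes the half-open index range [low, high), so
	 * only the control MSR(s) belonging to this one closid get written.
	 */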

	mba_sc = is_mba_sc(r);
	list_for_each_entry(d, &r->domains, list) {
		dc = !mba_sc ? d->ctrl_val : d->mbps_val;
		if (d->have_new_ctrl && d->new_ctrl != dc[closid]) {
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
			dc[closid] = d->new_ctrl;
		}
	}

	/*
	 * Avoid writing the control msr with control values when
	 * the MBA software controller is enabled.
	 */
	if (cpumask_empty(cpu_mask) || mba_sc)
		goto done;
	cpu = get_cpu();
	/* Update resource control MSRs on this CPU if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update resource control MSRs on the other CPUs in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

done:
	free_cpumask_var(cpu_mask);

	return 0;
}

static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct rdt_resource *r;

	for_each_alloc_enabled_rdt_resource(r) {
		if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
			return parse_line(tok, r, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}

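/*
 * The write handler below accepts one or more newline-terminated lines of
 * the form "<resource>:<id>=<value>[;<id>=<value>...]", for example
 * (illustrative values):
 *
 *	L3:0=7f0;1=3ff
 *	MB:0=50;1=100
 */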
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	struct rdt_domain *dom;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	cpus_read_lock();
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		cpus_read_unlock();
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to pseudo-locked region allowed. It has to be removed
	 * and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

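	/*
	 * Clear any control values staged by an earlier (possibly failed)
	 * write, so that only domains named in this request are updated.
	 */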
	for_each_alloc_enabled_rdt_resource(r) {
		list_for_each_entry(dom, &r->domains, list)
			dom->have_new_ctrl = false;
	}

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		ret = update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdtgroup_kn_unlock(of->kn);
	cpus_read_unlock();
	return ret ?: nbytes;
}

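/*
 * Print one schemata line for resource @r: the resource name followed by
 * an "id=value" pair per domain, separated by ';'. When the MBA software
 * controller is enabled the MBps values are shown instead of the raw
 * control values.
 */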
static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
{
	struct rdt_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	seq_printf(s, "%*s:", max_name_width, r->name);
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");

		ctrl_val = (!is_mba_sc(r) ? dom->ctrl_val[closid] :
			    dom->mbps_val[closid]);
		seq_printf(s, r->format_str, dom->id, max_data_width,
			   ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			for_each_alloc_enabled_rdt_resource(r)
				seq_printf(s, "%s:uninitialized\n", r->name);
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->r->name,
					   rdtgrp->plr->d->id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			for_each_alloc_enabled_rdt_resource(r) {
				if (closid < r->num_closid)
					show_doms(s, r, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}

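/*
 * Read one monitoring event for domain @d by sending an IPI to a CPU in
 * that domain; mon_event_count() fills in rr->val, which the caller then
 * checks for the RMID_VAL_ERROR / RMID_VAL_UNAVAIL flags.
 */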
void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
		    struct rdtgroup *rdtgrp, int evtid, int first)
{
	/*
	 * Set up the parameters that the IPI handler uses to read the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->d = d;
	rr->val = 0;
	rr->first = first;

	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
}

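/*
 * Show handler for the per-domain monitoring files: decode the resource,
 * domain and event id packed into the kernfs node's priv pointer, read
 * the counter via mon_event_read() and print the value scaled by
 * r->mon_scale (the hardware's counter-to-bytes factor).
 */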
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	r = &rdt_resources_all[resid];
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, d, rdtgrp, evtid, false);

	if (rr.val & RMID_VAL_ERROR)
		seq_puts(m, "Error\n");
	else if (rr.val & RMID_VAL_UNAVAIL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val * r->mon_scale);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}