xref: /openbmc/linux/mm/damon/sysfs.c (revision 0bcba960)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DAMON sysfs Interface
4  *
5  * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
6  */
7 
8 #include <linux/damon.h>
9 #include <linux/kobject.h>
10 #include <linux/pid.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 
/*
 * Serializes updates of the DAMON sysfs directory hierarchy; taken with
 * mutex_trylock() from the nr_* file store handlers (see nr_schemes_store()).
 */
static DEFINE_MUTEX(damon_sysfs_lock);
15 
16 /*
17  * unsigned long range directory
18  */
19 
/*
 * A sysfs directory exposing an unsigned long [min, max] range as two files.
 */
struct damon_sysfs_ul_range {
	struct kobject kobj;	/* embedded kobject backing the directory */
	unsigned long min;	/* value shown/stored via the "min" file */
	unsigned long max;	/* value shown/stored via the "max" file */
};
25 
26 static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
27 		unsigned long min,
28 		unsigned long max)
29 {
30 	struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
31 			GFP_KERNEL);
32 
33 	if (!range)
34 		return NULL;
35 	range->kobj = (struct kobject){};
36 	range->min = min;
37 	range->max = max;
38 
39 	return range;
40 }
41 
42 static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
43 		char *buf)
44 {
45 	struct damon_sysfs_ul_range *range = container_of(kobj,
46 			struct damon_sysfs_ul_range, kobj);
47 
48 	return sysfs_emit(buf, "%lu\n", range->min);
49 }
50 
51 static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
52 		const char *buf, size_t count)
53 {
54 	struct damon_sysfs_ul_range *range = container_of(kobj,
55 			struct damon_sysfs_ul_range, kobj);
56 	unsigned long min;
57 	int err;
58 
59 	err = kstrtoul(buf, 0, &min);
60 	if (err)
61 		return -EINVAL;
62 
63 	range->min = min;
64 	return count;
65 }
66 
67 static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
68 		char *buf)
69 {
70 	struct damon_sysfs_ul_range *range = container_of(kobj,
71 			struct damon_sysfs_ul_range, kobj);
72 
73 	return sysfs_emit(buf, "%lu\n", range->max);
74 }
75 
76 static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
77 		const char *buf, size_t count)
78 {
79 	struct damon_sysfs_ul_range *range = container_of(kobj,
80 			struct damon_sysfs_ul_range, kobj);
81 	unsigned long max;
82 	int err;
83 
84 	err = kstrtoul(buf, 0, &max);
85 	if (err)
86 		return -EINVAL;
87 
88 	range->max = max;
89 	return count;
90 }
91 
/* kobject release callback: frees the containing damon_sysfs_ul_range. */
static void damon_sysfs_ul_range_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
}

/* "min" file, owner read/write (0600). */
static struct kobj_attribute damon_sysfs_ul_range_min_attr =
		__ATTR_RW_MODE(min, 0600);

/* "max" file, owner read/write (0600). */
static struct kobj_attribute damon_sysfs_ul_range_max_attr =
		__ATTR_RW_MODE(max, 0600);

/* Default files of every ul_range directory. */
static struct attribute *damon_sysfs_ul_range_attrs[] = {
	&damon_sysfs_ul_range_min_attr.attr,
	&damon_sysfs_ul_range_max_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ul_range);

/* kobject type for ul_range directories. */
static struct kobj_type damon_sysfs_ul_range_ktype = {
	.release = damon_sysfs_ul_range_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ul_range_groups,
};
115 
116 /*
117  * schemes/stats directory
118  */
119 
/*
 * schemes/<N>/stats directory: read-only scheme statistics counters.
 */
struct damon_sysfs_stats {
	struct kobject kobj;		/* embedded kobject backing the dir */
	unsigned long nr_tried;		/* shown via "nr_tried" */
	unsigned long sz_tried;		/* shown via "sz_tried" */
	unsigned long nr_applied;	/* shown via "nr_applied" */
	unsigned long sz_applied;	/* shown via "sz_applied" */
	unsigned long qt_exceeds;	/* shown via "qt_exceeds" */
};
128 
129 static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
130 {
131 	return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
132 }
133 
134 static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
135 		char *buf)
136 {
137 	struct damon_sysfs_stats *stats = container_of(kobj,
138 			struct damon_sysfs_stats, kobj);
139 
140 	return sysfs_emit(buf, "%lu\n", stats->nr_tried);
141 }
142 
143 static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
144 		char *buf)
145 {
146 	struct damon_sysfs_stats *stats = container_of(kobj,
147 			struct damon_sysfs_stats, kobj);
148 
149 	return sysfs_emit(buf, "%lu\n", stats->sz_tried);
150 }
151 
152 static ssize_t nr_applied_show(struct kobject *kobj,
153 		struct kobj_attribute *attr, char *buf)
154 {
155 	struct damon_sysfs_stats *stats = container_of(kobj,
156 			struct damon_sysfs_stats, kobj);
157 
158 	return sysfs_emit(buf, "%lu\n", stats->nr_applied);
159 }
160 
161 static ssize_t sz_applied_show(struct kobject *kobj,
162 		struct kobj_attribute *attr, char *buf)
163 {
164 	struct damon_sysfs_stats *stats = container_of(kobj,
165 			struct damon_sysfs_stats, kobj);
166 
167 	return sysfs_emit(buf, "%lu\n", stats->sz_applied);
168 }
169 
170 static ssize_t qt_exceeds_show(struct kobject *kobj,
171 		struct kobj_attribute *attr, char *buf)
172 {
173 	struct damon_sysfs_stats *stats = container_of(kobj,
174 			struct damon_sysfs_stats, kobj);
175 
176 	return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
177 }
178 
/* kobject release callback: frees the containing damon_sysfs_stats. */
static void damon_sysfs_stats_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
}

/* Read-only stat files, owner readable (0400). */
static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
		__ATTR_RO_MODE(nr_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
		__ATTR_RO_MODE(sz_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
		__ATTR_RO_MODE(nr_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
		__ATTR_RO_MODE(sz_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
		__ATTR_RO_MODE(qt_exceeds, 0400);

/* Default files of every stats directory. */
static struct attribute *damon_sysfs_stats_attrs[] = {
	&damon_sysfs_stats_nr_tried_attr.attr,
	&damon_sysfs_stats_sz_tried_attr.attr,
	&damon_sysfs_stats_nr_applied_attr.attr,
	&damon_sysfs_stats_sz_applied_attr.attr,
	&damon_sysfs_stats_qt_exceeds_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_stats);

/* kobject type for stats directories. */
static struct kobj_type damon_sysfs_stats_ktype = {
	.release = damon_sysfs_stats_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_stats_groups,
};
214 
215 /*
216  * watermarks directory
217  */
218 
/*
 * watermarks directory: DAMOS watermark parameters of a scheme.
 */
struct damon_sysfs_watermarks {
	struct kobject kobj;		/* embedded kobject backing the dir */
	enum damos_wmark_metric metric;	/* which metric the thresholds apply to */
	unsigned long interval_us;	/* metric check interval, microseconds */
	unsigned long high;		/* high watermark threshold */
	unsigned long mid;		/* middle watermark threshold */
	unsigned long low;		/* low watermark threshold */
};
227 
228 static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
229 		enum damos_wmark_metric metric, unsigned long interval_us,
230 		unsigned long high, unsigned long mid, unsigned long low)
231 {
232 	struct damon_sysfs_watermarks *watermarks = kmalloc(
233 			sizeof(*watermarks), GFP_KERNEL);
234 
235 	if (!watermarks)
236 		return NULL;
237 	watermarks->kobj = (struct kobject){};
238 	watermarks->metric = metric;
239 	watermarks->interval_us = interval_us;
240 	watermarks->high = high;
241 	watermarks->mid = mid;
242 	watermarks->low = low;
243 	return watermarks;
244 }
245 
/*
 * Keyword per watermark metric, indexed by the enum value.
 * Should match with enum damos_wmark_metric.
 */
static const char * const damon_sysfs_wmark_metric_strs[] = {
	"none",
	"free_mem_rate",
};
251 
252 static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
253 		char *buf)
254 {
255 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
256 			struct damon_sysfs_watermarks, kobj);
257 
258 	return sysfs_emit(buf, "%s\n",
259 			damon_sysfs_wmark_metric_strs[watermarks->metric]);
260 }
261 
262 static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
263 		const char *buf, size_t count)
264 {
265 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
266 			struct damon_sysfs_watermarks, kobj);
267 	enum damos_wmark_metric metric;
268 
269 	for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
270 		if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
271 			watermarks->metric = metric;
272 			return count;
273 		}
274 	}
275 	return -EINVAL;
276 }
277 
278 static ssize_t interval_us_show(struct kobject *kobj,
279 		struct kobj_attribute *attr, char *buf)
280 {
281 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
282 			struct damon_sysfs_watermarks, kobj);
283 
284 	return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
285 }
286 
287 static ssize_t interval_us_store(struct kobject *kobj,
288 		struct kobj_attribute *attr, const char *buf, size_t count)
289 {
290 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
291 			struct damon_sysfs_watermarks, kobj);
292 	int err = kstrtoul(buf, 0, &watermarks->interval_us);
293 
294 	if (err)
295 		return -EINVAL;
296 	return count;
297 }
298 
299 static ssize_t high_show(struct kobject *kobj,
300 		struct kobj_attribute *attr, char *buf)
301 {
302 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
303 			struct damon_sysfs_watermarks, kobj);
304 
305 	return sysfs_emit(buf, "%lu\n", watermarks->high);
306 }
307 
308 static ssize_t high_store(struct kobject *kobj,
309 		struct kobj_attribute *attr, const char *buf, size_t count)
310 {
311 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
312 			struct damon_sysfs_watermarks, kobj);
313 	int err = kstrtoul(buf, 0, &watermarks->high);
314 
315 	if (err)
316 		return -EINVAL;
317 	return count;
318 }
319 
320 static ssize_t mid_show(struct kobject *kobj,
321 		struct kobj_attribute *attr, char *buf)
322 {
323 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
324 			struct damon_sysfs_watermarks, kobj);
325 
326 	return sysfs_emit(buf, "%lu\n", watermarks->mid);
327 }
328 
329 static ssize_t mid_store(struct kobject *kobj,
330 		struct kobj_attribute *attr, const char *buf, size_t count)
331 {
332 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
333 			struct damon_sysfs_watermarks, kobj);
334 	int err = kstrtoul(buf, 0, &watermarks->mid);
335 
336 	if (err)
337 		return -EINVAL;
338 	return count;
339 }
340 
341 static ssize_t low_show(struct kobject *kobj,
342 		struct kobj_attribute *attr, char *buf)
343 {
344 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
345 			struct damon_sysfs_watermarks, kobj);
346 
347 	return sysfs_emit(buf, "%lu\n", watermarks->low);
348 }
349 
350 static ssize_t low_store(struct kobject *kobj,
351 		struct kobj_attribute *attr, const char *buf, size_t count)
352 {
353 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
354 			struct damon_sysfs_watermarks, kobj);
355 	int err = kstrtoul(buf, 0, &watermarks->low);
356 
357 	if (err)
358 		return -EINVAL;
359 	return count;
360 }
361 
/* kobject release callback: frees the containing damon_sysfs_watermarks. */
static void damon_sysfs_watermarks_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
}

/* Watermark parameter files, owner read/write (0600). */
static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
		__ATTR_RW_MODE(metric, 0600);

static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
		__ATTR_RW_MODE(interval_us, 0600);

static struct kobj_attribute damon_sysfs_watermarks_high_attr =
		__ATTR_RW_MODE(high, 0600);

static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
		__ATTR_RW_MODE(mid, 0600);

static struct kobj_attribute damon_sysfs_watermarks_low_attr =
		__ATTR_RW_MODE(low, 0600);

/* Default files of every watermarks directory. */
static struct attribute *damon_sysfs_watermarks_attrs[] = {
	&damon_sysfs_watermarks_metric_attr.attr,
	&damon_sysfs_watermarks_interval_us_attr.attr,
	&damon_sysfs_watermarks_high_attr.attr,
	&damon_sysfs_watermarks_mid_attr.attr,
	&damon_sysfs_watermarks_low_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_watermarks);

/* kobject type for watermarks directories. */
static struct kobj_type damon_sysfs_watermarks_ktype = {
	.release = damon_sysfs_watermarks_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_watermarks_groups,
};
397 
398 /*
399  * scheme/weights directory
400  */
401 
/*
 * scheme/weights directory: prioritization weights of a scheme's quota.
 * The files expose these as "*_permil" — presumably per-mil weights;
 * confirm against the DAMOS quota users.
 */
struct damon_sysfs_weights {
	struct kobject kobj;		/* embedded kobject backing the dir */
	unsigned int sz;		/* shown/stored via "sz_permil" */
	unsigned int nr_accesses;	/* shown/stored via "nr_accesses_permil" */
	unsigned int age;		/* shown/stored via "age_permil" */
};
408 
409 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
410 		unsigned int nr_accesses, unsigned int age)
411 {
412 	struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
413 			GFP_KERNEL);
414 
415 	if (!weights)
416 		return NULL;
417 	weights->kobj = (struct kobject){};
418 	weights->sz = sz;
419 	weights->nr_accesses = nr_accesses;
420 	weights->age = age;
421 	return weights;
422 }
423 
424 static ssize_t sz_permil_show(struct kobject *kobj,
425 		struct kobj_attribute *attr, char *buf)
426 {
427 	struct damon_sysfs_weights *weights = container_of(kobj,
428 			struct damon_sysfs_weights, kobj);
429 
430 	return sysfs_emit(buf, "%u\n", weights->sz);
431 }
432 
433 static ssize_t sz_permil_store(struct kobject *kobj,
434 		struct kobj_attribute *attr, const char *buf, size_t count)
435 {
436 	struct damon_sysfs_weights *weights = container_of(kobj,
437 			struct damon_sysfs_weights, kobj);
438 	int err = kstrtouint(buf, 0, &weights->sz);
439 
440 	if (err)
441 		return -EINVAL;
442 	return count;
443 }
444 
445 static ssize_t nr_accesses_permil_show(struct kobject *kobj,
446 		struct kobj_attribute *attr, char *buf)
447 {
448 	struct damon_sysfs_weights *weights = container_of(kobj,
449 			struct damon_sysfs_weights, kobj);
450 
451 	return sysfs_emit(buf, "%u\n", weights->nr_accesses);
452 }
453 
454 static ssize_t nr_accesses_permil_store(struct kobject *kobj,
455 		struct kobj_attribute *attr, const char *buf, size_t count)
456 {
457 	struct damon_sysfs_weights *weights = container_of(kobj,
458 			struct damon_sysfs_weights, kobj);
459 	int err = kstrtouint(buf, 0, &weights->nr_accesses);
460 
461 	if (err)
462 		return -EINVAL;
463 	return count;
464 }
465 
466 static ssize_t age_permil_show(struct kobject *kobj,
467 		struct kobj_attribute *attr, char *buf)
468 {
469 	struct damon_sysfs_weights *weights = container_of(kobj,
470 			struct damon_sysfs_weights, kobj);
471 
472 	return sysfs_emit(buf, "%u\n", weights->age);
473 }
474 
475 static ssize_t age_permil_store(struct kobject *kobj,
476 		struct kobj_attribute *attr, const char *buf, size_t count)
477 {
478 	struct damon_sysfs_weights *weights = container_of(kobj,
479 			struct damon_sysfs_weights, kobj);
480 	int err = kstrtouint(buf, 0, &weights->age);
481 
482 	if (err)
483 		return -EINVAL;
484 	return count;
485 }
486 
/* kobject release callback: frees the containing damon_sysfs_weights. */
static void damon_sysfs_weights_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
}

/* Weight files, owner read/write (0600). */
static struct kobj_attribute damon_sysfs_weights_sz_attr =
		__ATTR_RW_MODE(sz_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
		__ATTR_RW_MODE(nr_accesses_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_age_attr =
		__ATTR_RW_MODE(age_permil, 0600);

/* Default files of every weights directory. */
static struct attribute *damon_sysfs_weights_attrs[] = {
	&damon_sysfs_weights_sz_attr.attr,
	&damon_sysfs_weights_nr_accesses_attr.attr,
	&damon_sysfs_weights_age_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_weights);

/* kobject type for weights directories. */
static struct kobj_type damon_sysfs_weights_ktype = {
	.release = damon_sysfs_weights_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_weights_groups,
};
514 
515 /*
516  * quotas directory
517  */
518 
/*
 * quotas directory: time/size quotas of a scheme, plus a "weights" child dir.
 */
struct damon_sysfs_quotas {
	struct kobject kobj;		/* embedded kobject backing the dir */
	struct damon_sysfs_weights *weights;	/* "weights" child directory */
	unsigned long ms;		/* time quota, shown via "ms" */
	unsigned long sz;		/* size quota, shown via "bytes" */
	unsigned long reset_interval_ms; /* quota reset interval, "reset_interval_ms" */
};
526 
527 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
528 {
529 	return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
530 }
531 
532 static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
533 {
534 	struct damon_sysfs_weights *weights;
535 	int err;
536 
537 	weights = damon_sysfs_weights_alloc(0, 0, 0);
538 	if (!weights)
539 		return -ENOMEM;
540 
541 	err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
542 			&quotas->kobj, "weights");
543 	if (err)
544 		kobject_put(&weights->kobj);
545 	else
546 		quotas->weights = weights;
547 	return err;
548 }
549 
/* Drop the "weights" child directory of a quotas directory. */
static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
{
	kobject_put(&quotas->weights->kobj);
}
554 
555 static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
556 		char *buf)
557 {
558 	struct damon_sysfs_quotas *quotas = container_of(kobj,
559 			struct damon_sysfs_quotas, kobj);
560 
561 	return sysfs_emit(buf, "%lu\n", quotas->ms);
562 }
563 
564 static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
565 		const char *buf, size_t count)
566 {
567 	struct damon_sysfs_quotas *quotas = container_of(kobj,
568 			struct damon_sysfs_quotas, kobj);
569 	int err = kstrtoul(buf, 0, &quotas->ms);
570 
571 	if (err)
572 		return -EINVAL;
573 	return count;
574 }
575 
576 static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
577 		char *buf)
578 {
579 	struct damon_sysfs_quotas *quotas = container_of(kobj,
580 			struct damon_sysfs_quotas, kobj);
581 
582 	return sysfs_emit(buf, "%lu\n", quotas->sz);
583 }
584 
585 static ssize_t bytes_store(struct kobject *kobj,
586 		struct kobj_attribute *attr, const char *buf, size_t count)
587 {
588 	struct damon_sysfs_quotas *quotas = container_of(kobj,
589 			struct damon_sysfs_quotas, kobj);
590 	int err = kstrtoul(buf, 0, &quotas->sz);
591 
592 	if (err)
593 		return -EINVAL;
594 	return count;
595 }
596 
597 static ssize_t reset_interval_ms_show(struct kobject *kobj,
598 		struct kobj_attribute *attr, char *buf)
599 {
600 	struct damon_sysfs_quotas *quotas = container_of(kobj,
601 			struct damon_sysfs_quotas, kobj);
602 
603 	return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
604 }
605 
606 static ssize_t reset_interval_ms_store(struct kobject *kobj,
607 		struct kobj_attribute *attr, const char *buf, size_t count)
608 {
609 	struct damon_sysfs_quotas *quotas = container_of(kobj,
610 			struct damon_sysfs_quotas, kobj);
611 	int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);
612 
613 	if (err)
614 		return -EINVAL;
615 	return count;
616 }
617 
/* kobject release callback: frees the containing damon_sysfs_quotas. */
static void damon_sysfs_quotas_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
}

/* Quota files, owner read/write (0600). */
static struct kobj_attribute damon_sysfs_quotas_ms_attr =
		__ATTR_RW_MODE(ms, 0600);

static struct kobj_attribute damon_sysfs_quotas_sz_attr =
		__ATTR_RW_MODE(bytes, 0600);

static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
		__ATTR_RW_MODE(reset_interval_ms, 0600);

/* Default files of every quotas directory. */
static struct attribute *damon_sysfs_quotas_attrs[] = {
	&damon_sysfs_quotas_ms_attr.attr,
	&damon_sysfs_quotas_sz_attr.attr,
	&damon_sysfs_quotas_reset_interval_ms_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_quotas);

/* kobject type for quotas directories. */
static struct kobj_type damon_sysfs_quotas_ktype = {
	.release = damon_sysfs_quotas_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_quotas_groups,
};
645 
646 /*
647  * access_pattern directory
648  */
649 
/*
 * access_pattern directory: three ul_range child dirs describing the target
 * access pattern of a scheme.
 */
struct damon_sysfs_access_pattern {
	struct kobject kobj;			/* embedded kobject backing the dir */
	struct damon_sysfs_ul_range *sz;	/* "sz" child directory */
	struct damon_sysfs_ul_range *nr_accesses; /* "nr_accesses" child directory */
	struct damon_sysfs_ul_range *age;	/* "age" child directory */
};
656 
657 static
658 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
659 {
660 	struct damon_sysfs_access_pattern *access_pattern =
661 		kmalloc(sizeof(*access_pattern), GFP_KERNEL);
662 
663 	if (!access_pattern)
664 		return NULL;
665 	access_pattern->kobj = (struct kobject){};
666 	return access_pattern;
667 }
668 
669 static int damon_sysfs_access_pattern_add_range_dir(
670 		struct damon_sysfs_access_pattern *access_pattern,
671 		struct damon_sysfs_ul_range **range_dir_ptr,
672 		char *name)
673 {
674 	struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
675 	int err;
676 
677 	if (!range)
678 		return -ENOMEM;
679 	err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
680 			&access_pattern->kobj, name);
681 	if (err)
682 		kobject_put(&range->kobj);
683 	else
684 		*range_dir_ptr = range;
685 	return err;
686 }
687 
688 static int damon_sysfs_access_pattern_add_dirs(
689 		struct damon_sysfs_access_pattern *access_pattern)
690 {
691 	int err;
692 
693 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
694 			&access_pattern->sz, "sz");
695 	if (err)
696 		goto put_sz_out;
697 
698 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
699 			&access_pattern->nr_accesses, "nr_accesses");
700 	if (err)
701 		goto put_nr_accesses_sz_out;
702 
703 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
704 			&access_pattern->age, "age");
705 	if (err)
706 		goto put_age_nr_accesses_sz_out;
707 	return 0;
708 
709 put_age_nr_accesses_sz_out:
710 	kobject_put(&access_pattern->age->kobj);
711 	access_pattern->age = NULL;
712 put_nr_accesses_sz_out:
713 	kobject_put(&access_pattern->nr_accesses->kobj);
714 	access_pattern->nr_accesses = NULL;
715 put_sz_out:
716 	kobject_put(&access_pattern->sz->kobj);
717 	access_pattern->sz = NULL;
718 	return err;
719 }
720 
/* Drop the three range child directories of an access_pattern directory. */
static void damon_sysfs_access_pattern_rm_dirs(
		struct damon_sysfs_access_pattern *access_pattern)
{
	kobject_put(&access_pattern->sz->kobj);
	kobject_put(&access_pattern->nr_accesses->kobj);
	kobject_put(&access_pattern->age->kobj);
}

/* kobject release callback: frees the containing damon_sysfs_access_pattern. */
static void damon_sysfs_access_pattern_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
}

/* No files of its own; only the range child directories. */
static struct attribute *damon_sysfs_access_pattern_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);

/* kobject type for access_pattern directories. */
static struct kobj_type damon_sysfs_access_pattern_ktype = {
	.release = damon_sysfs_access_pattern_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_access_pattern_groups,
};
744 
745 /*
746  * scheme directory
747  */
748 
/*
 * scheme directory: one DAMON-based operation scheme, with its action file
 * and four child directories.
 */
struct damon_sysfs_scheme {
	struct kobject kobj;		/* embedded kobject backing the dir */
	enum damos_action action;	/* shown/stored via the "action" file */
	struct damon_sysfs_access_pattern *access_pattern; /* "access_pattern" dir */
	struct damon_sysfs_quotas *quotas;		/* "quotas" dir */
	struct damon_sysfs_watermarks *watermarks;	/* "watermarks" dir */
	struct damon_sysfs_stats *stats;		/* "stats" dir */
};

/*
 * Keyword per DAMOS action, indexed by the enum value.
 * This should match with enum damos_action.
 */
static const char * const damon_sysfs_damos_action_strs[] = {
	"willneed",
	"cold",
	"pageout",
	"hugepage",
	"nohugepage",
	"lru_prio",
	"stat",
};
768 
769 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
770 		enum damos_action action)
771 {
772 	struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
773 				GFP_KERNEL);
774 
775 	if (!scheme)
776 		return NULL;
777 	scheme->kobj = (struct kobject){};
778 	scheme->action = action;
779 	return scheme;
780 }
781 
/*
 * Create and populate the "access_pattern" child directory of a scheme.
 *
 * A single kobject_put() on the error path is sufficient: the release
 * callback frees the struct, and any range dirs added before the failure
 * are children that get dropped with it — NOTE(review): presumed from the
 * kobject hierarchy; confirm child cleanup semantics.
 */
static int damon_sysfs_scheme_set_access_pattern(
		struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_access_pattern *access_pattern;
	int err;

	access_pattern = damon_sysfs_access_pattern_alloc();
	if (!access_pattern)
		return -ENOMEM;
	err = kobject_init_and_add(&access_pattern->kobj,
			&damon_sysfs_access_pattern_ktype, &scheme->kobj,
			"access_pattern");
	if (err)
		goto out;
	err = damon_sysfs_access_pattern_add_dirs(access_pattern);
	if (err)
		goto out;
	scheme->access_pattern = access_pattern;
	return 0;

out:
	kobject_put(&access_pattern->kobj);
	return err;
}
806 
/*
 * Create and populate the "quotas" child directory of a scheme.
 * On any failure the quotas kobject is dropped, which frees it via the
 * release callback.
 */
static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
	int err;

	if (!quotas)
		return -ENOMEM;
	err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
			&scheme->kobj, "quotas");
	if (err)
		goto out;
	err = damon_sysfs_quotas_add_dirs(quotas);
	if (err)
		goto out;
	scheme->quotas = quotas;
	return 0;

out:
	kobject_put(&quotas->kobj);
	return err;
}
828 
829 static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
830 {
831 	struct damon_sysfs_watermarks *watermarks =
832 		damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
833 	int err;
834 
835 	if (!watermarks)
836 		return -ENOMEM;
837 	err = kobject_init_and_add(&watermarks->kobj,
838 			&damon_sysfs_watermarks_ktype, &scheme->kobj,
839 			"watermarks");
840 	if (err)
841 		kobject_put(&watermarks->kobj);
842 	else
843 		scheme->watermarks = watermarks;
844 	return err;
845 }
846 
847 static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
848 {
849 	struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
850 	int err;
851 
852 	if (!stats)
853 		return -ENOMEM;
854 	err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
855 			&scheme->kobj, "stats");
856 	if (err)
857 		kobject_put(&stats->kobj);
858 	else
859 		scheme->stats = stats;
860 	return err;
861 }
862 
/*
 * Create all child directories of a scheme directory.
 *
 * Each error path unwinds, in reverse creation order, only the child
 * directories that were successfully created before the failure; the
 * failing step's own cleanup is done by its set_*() helper.
 */
static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
{
	int err;

	err = damon_sysfs_scheme_set_access_pattern(scheme);
	if (err)
		return err;
	err = damon_sysfs_scheme_set_quotas(scheme);
	if (err)
		goto put_access_pattern_out;
	err = damon_sysfs_scheme_set_watermarks(scheme);
	if (err)
		goto put_quotas_access_pattern_out;
	err = damon_sysfs_scheme_set_stats(scheme);
	if (err)
		goto put_watermarks_quotas_access_pattern_out;
	return 0;

put_watermarks_quotas_access_pattern_out:
	kobject_put(&scheme->watermarks->kobj);
	scheme->watermarks = NULL;
put_quotas_access_pattern_out:
	kobject_put(&scheme->quotas->kobj);
	scheme->quotas = NULL;
put_access_pattern_out:
	kobject_put(&scheme->access_pattern->kobj);
	scheme->access_pattern = NULL;
	return err;
}

/*
 * Drop all child directories of a scheme directory.  Grandchild dirs of
 * access_pattern and quotas are dropped first via their rm_dirs helpers.
 */
static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
{
	damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
	kobject_put(&scheme->access_pattern->kobj);
	damon_sysfs_quotas_rm_dirs(scheme->quotas);
	kobject_put(&scheme->quotas->kobj);
	kobject_put(&scheme->watermarks->kobj);
	kobject_put(&scheme->stats->kobj);
}
902 
903 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
904 		char *buf)
905 {
906 	struct damon_sysfs_scheme *scheme = container_of(kobj,
907 			struct damon_sysfs_scheme, kobj);
908 
909 	return sysfs_emit(buf, "%s\n",
910 			damon_sysfs_damos_action_strs[scheme->action]);
911 }
912 
913 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
914 		const char *buf, size_t count)
915 {
916 	struct damon_sysfs_scheme *scheme = container_of(kobj,
917 			struct damon_sysfs_scheme, kobj);
918 	enum damos_action action;
919 
920 	for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
921 		if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
922 			scheme->action = action;
923 			return count;
924 		}
925 	}
926 	return -EINVAL;
927 }
928 
/* kobject release callback: frees the containing damon_sysfs_scheme. */
static void damon_sysfs_scheme_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
}

/* "action" file, owner read/write (0600). */
static struct kobj_attribute damon_sysfs_scheme_action_attr =
		__ATTR_RW_MODE(action, 0600);

/* Default files of every scheme directory. */
static struct attribute *damon_sysfs_scheme_attrs[] = {
	&damon_sysfs_scheme_action_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme);

/* kobject type for scheme directories. */
static struct kobj_type damon_sysfs_scheme_ktype = {
	.release = damon_sysfs_scheme_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_groups,
};
948 
949 /*
950  * schemes directory
951  */
952 
/*
 * schemes directory: a variable-sized set of scheme child directories,
 * resized through the "nr_schemes" file.
 */
struct damon_sysfs_schemes {
	struct kobject kobj;		/* embedded kobject backing the dir */
	struct damon_sysfs_scheme **schemes_arr; /* nr scheme dirs, or NULL */
	int nr;				/* number of valid schemes_arr entries */
};

/* Allocate a zero-initialized damon_sysfs_schemes; NULL on failure. */
static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
}
963 
/*
 * Drop all scheme child directories and free the array holding them.
 * Safe to call when no directories exist (nr == 0, schemes_arr == NULL).
 */
static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
{
	struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
	int i;

	for (i = 0; i < schemes->nr; i++) {
		/* remove grandchild dirs first, then the scheme dir itself */
		damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
		kobject_put(&schemes_arr[i]->kobj);
	}
	schemes->nr = 0;
	kfree(schemes_arr);
	schemes->schemes_arr = NULL;
}

/*
 * Replace the scheme child directories with nr_schemes fresh ones named
 * "0" .. "nr_schemes - 1", each defaulting to the DAMOS_STAT action.
 *
 * Existing directories are removed first.  On failure, all directories
 * created so far (schemes->nr of them) are removed, then the partially
 * constructed scheme's kobject is dropped separately, since it was never
 * counted in schemes->nr.
 */
static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
		int nr_schemes)
{
	struct damon_sysfs_scheme **schemes_arr, *scheme;
	int err, i;

	damon_sysfs_schemes_rm_dirs(schemes);
	if (!nr_schemes)
		return 0;

	schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!schemes_arr)
		return -ENOMEM;
	schemes->schemes_arr = schemes_arr;

	for (i = 0; i < nr_schemes; i++) {
		scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
		if (!scheme) {
			damon_sysfs_schemes_rm_dirs(schemes);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&scheme->kobj,
				&damon_sysfs_scheme_ktype, &schemes->kobj,
				"%d", i);
		if (err)
			goto out;
		err = damon_sysfs_scheme_add_dirs(scheme);
		if (err)
			goto out;

		/* only count the scheme once it is fully constructed */
		schemes_arr[i] = scheme;
		schemes->nr++;
	}
	return 0;

out:
	damon_sysfs_schemes_rm_dirs(schemes);
	kobject_put(&scheme->kobj);
	return err;
}
1020 
/* Show the current number of scheme directories. */
static ssize_t nr_schemes_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_schemes *schemes = container_of(kobj,
			struct damon_sysfs_schemes, kobj);

	return sysfs_emit(buf, "%d\n", schemes->nr);
}

/*
 * Resize the set of scheme directories to the written count.
 * Returns -EBUSY without blocking if another sysfs update holds
 * damon_sysfs_lock; the parse error from kstrtoint is passed through.
 */
static ssize_t nr_schemes_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_schemes *schemes = container_of(kobj,
			struct damon_sysfs_schemes, kobj);
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_schemes_add_dirs(schemes, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;
	return count;
}
1050 
/* kobject release callback: frees the containing damon_sysfs_schemes. */
static void damon_sysfs_schemes_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
}

/* "nr_schemes" file, owner read/write (0600). */
static struct kobj_attribute damon_sysfs_schemes_nr_attr =
		__ATTR_RW_MODE(nr_schemes, 0600);

/* Default files of every schemes directory. */
static struct attribute *damon_sysfs_schemes_attrs[] = {
	&damon_sysfs_schemes_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_schemes);

/* kobject type for schemes directories. */
static struct kobj_type damon_sysfs_schemes_ktype = {
	.release = damon_sysfs_schemes_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_schemes_groups,
};
1070 
1071 /*
1072  * init region directory
1073  */
1074 
/*
 * init region directory: one initial monitoring target [start, end) region.
 */
struct damon_sysfs_region {
	struct kobject kobj;	/* embedded kobject backing the directory */
	unsigned long start;	/* shown/stored via the "start" file */
	unsigned long end;	/* shown/stored via the "end" file */
};
1080 
1081 static struct damon_sysfs_region *damon_sysfs_region_alloc(
1082 		unsigned long start,
1083 		unsigned long end)
1084 {
1085 	struct damon_sysfs_region *region = kmalloc(sizeof(*region),
1086 			GFP_KERNEL);
1087 
1088 	if (!region)
1089 		return NULL;
1090 	region->kobj = (struct kobject){};
1091 	region->start = start;
1092 	region->end = end;
1093 	return region;
1094 }
1095 
1096 static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
1097 		char *buf)
1098 {
1099 	struct damon_sysfs_region *region = container_of(kobj,
1100 			struct damon_sysfs_region, kobj);
1101 
1102 	return sysfs_emit(buf, "%lu\n", region->start);
1103 }
1104 
1105 static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
1106 		const char *buf, size_t count)
1107 {
1108 	struct damon_sysfs_region *region = container_of(kobj,
1109 			struct damon_sysfs_region, kobj);
1110 	int err = kstrtoul(buf, 0, &region->start);
1111 
1112 	if (err)
1113 		return -EINVAL;
1114 	return count;
1115 }
1116 
1117 static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
1118 		char *buf)
1119 {
1120 	struct damon_sysfs_region *region = container_of(kobj,
1121 			struct damon_sysfs_region, kobj);
1122 
1123 	return sysfs_emit(buf, "%lu\n", region->end);
1124 }
1125 
1126 static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
1127 		const char *buf, size_t count)
1128 {
1129 	struct damon_sysfs_region *region = container_of(kobj,
1130 			struct damon_sysfs_region, kobj);
1131 	int err = kstrtoul(buf, 0, &region->end);
1132 
1133 	if (err)
1134 		return -EINVAL;
1135 	return count;
1136 }
1137 
1138 static void damon_sysfs_region_release(struct kobject *kobj)
1139 {
1140 	kfree(container_of(kobj, struct damon_sysfs_region, kobj));
1141 }
1142 
/* "start" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_region_start_attr =
		__ATTR_RW_MODE(start, 0600);

/* "end" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_region_end_attr =
		__ATTR_RW_MODE(end, 0600);

/* Files of a region directory. */
static struct attribute *damon_sysfs_region_attrs[] = {
	&damon_sysfs_region_start_attr.attr,
	&damon_sysfs_region_end_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_region);

/* kobject type for a region directory; release frees the wrapper. */
static struct kobj_type damon_sysfs_region_ktype = {
	.release = damon_sysfs_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_region_groups,
};
1161 
1162 /*
1163  * init_regions directory
1164  */
1165 
/* Wrapper for the init_regions directory. */
struct damon_sysfs_regions {
	struct kobject kobj;			/* embedded sysfs directory object */
	struct damon_sysfs_region **regions_arr;	/* child region wrappers */
	int nr;					/* number of entries in regions_arr */
};
1171 
1172 static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
1173 {
1174 	return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
1175 }
1176 
/*
 * Remove all region sub-directories of @regions.
 *
 * Drops the kobject reference of each child (freeing it via the ktype
 * release callback once the refcount hits zero) and frees the pointer
 * array.  Safe to call when the directory is already empty.
 */
static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
{
	struct damon_sysfs_region **regions_arr = regions->regions_arr;
	int i;

	for (i = 0; i < regions->nr; i++)
		kobject_put(&regions_arr[i]->kobj);
	regions->nr = 0;
	kfree(regions_arr);
	regions->regions_arr = NULL;
}
1188 
/*
 * Replace the region sub-directories of @regions with @nr_regions fresh
 * ones, each initialized to the [0, 0) range.
 *
 * Existing children are removed first.  On failure the directory is left
 * empty.  Returns 0 on success, negative error code otherwise.  Callers
 * serialize via damon_sysfs_lock.
 */
static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
		int nr_regions)
{
	struct damon_sysfs_region **regions_arr, *region;
	int err, i;

	damon_sysfs_regions_rm_dirs(regions);
	if (!nr_regions)
		return 0;

	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!regions_arr)
		return -ENOMEM;
	regions->regions_arr = regions_arr;

	for (i = 0; i < nr_regions; i++) {
		region = damon_sysfs_region_alloc(0, 0);
		if (!region) {
			damon_sysfs_regions_rm_dirs(regions);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&region->kobj,
				&damon_sysfs_region_ktype, &regions->kobj,
				"%d", i);
		if (err) {
			/* kobject is initialized even on failure; put it */
			kobject_put(&region->kobj);
			damon_sysfs_regions_rm_dirs(regions);
			return err;
		}

		regions_arr[i] = region;
		regions->nr++;
	}
	return 0;
}
1226 
1227 static ssize_t nr_regions_show(struct kobject *kobj,
1228 		struct kobj_attribute *attr, char *buf)
1229 {
1230 	struct damon_sysfs_regions *regions = container_of(kobj,
1231 			struct damon_sysfs_regions, kobj);
1232 
1233 	return sysfs_emit(buf, "%d\n", regions->nr);
1234 }
1235 
1236 static ssize_t nr_regions_store(struct kobject *kobj,
1237 		struct kobj_attribute *attr, const char *buf, size_t count)
1238 {
1239 	struct damon_sysfs_regions *regions = container_of(kobj,
1240 			struct damon_sysfs_regions, kobj);
1241 	int nr, err = kstrtoint(buf, 0, &nr);
1242 
1243 	if (err)
1244 		return err;
1245 	if (nr < 0)
1246 		return -EINVAL;
1247 
1248 	if (!mutex_trylock(&damon_sysfs_lock))
1249 		return -EBUSY;
1250 	err = damon_sysfs_regions_add_dirs(regions, nr);
1251 	mutex_unlock(&damon_sysfs_lock);
1252 	if (err)
1253 		return err;
1254 
1255 	return count;
1256 }
1257 
/* Free the regions directory wrapper when its kobject refcount drops to 0. */
static void damon_sysfs_regions_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
}
1262 
/* "nr_regions" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_regions_nr_attr =
		__ATTR_RW_MODE(nr_regions, 0600);

/* Files of a regions directory. */
static struct attribute *damon_sysfs_regions_attrs[] = {
	&damon_sysfs_regions_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_regions);

/* kobject type for a regions directory; release frees the wrapper. */
static struct kobj_type damon_sysfs_regions_ktype = {
	.release = damon_sysfs_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_regions_groups,
};
1277 
1278 /*
1279  * target directory
1280  */
1281 
/* Wrapper for one monitoring target directory. */
struct damon_sysfs_target {
	struct kobject kobj;			/* embedded sysfs directory object */
	struct damon_sysfs_regions *regions;	/* "regions" sub-directory */
	int pid;				/* pid of the target process */
};
1287 
1288 static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
1289 {
1290 	return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
1291 }
1292 
/*
 * Create the "regions" sub-directory under @target.
 *
 * On success, @target->regions points to the new wrapper.  On failure the
 * partially-initialized kobject is put (which frees the wrapper) and a
 * negative error code is returned.
 */
static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
{
	struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
	int err;

	if (!regions)
		return -ENOMEM;

	err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
			&target->kobj, "regions");
	if (err)
		kobject_put(&regions->kobj);
	else
		target->regions = regions;
	return err;
}
1309 
/*
 * Empty and remove the "regions" sub-directory of @target, dropping the
 * reference that keeps its wrapper alive.
 */
static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
{
	damon_sysfs_regions_rm_dirs(target->regions);
	kobject_put(&target->regions->kobj);
}
1315 
1316 static ssize_t pid_target_show(struct kobject *kobj,
1317 		struct kobj_attribute *attr, char *buf)
1318 {
1319 	struct damon_sysfs_target *target = container_of(kobj,
1320 			struct damon_sysfs_target, kobj);
1321 
1322 	return sysfs_emit(buf, "%d\n", target->pid);
1323 }
1324 
1325 static ssize_t pid_target_store(struct kobject *kobj,
1326 		struct kobj_attribute *attr, const char *buf, size_t count)
1327 {
1328 	struct damon_sysfs_target *target = container_of(kobj,
1329 			struct damon_sysfs_target, kobj);
1330 	int err = kstrtoint(buf, 0, &target->pid);
1331 
1332 	if (err)
1333 		return -EINVAL;
1334 	return count;
1335 }
1336 
/* Free the target directory wrapper when its kobject refcount drops to 0. */
static void damon_sysfs_target_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_target, kobj));
}
1341 
/* "pid_target" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_target_pid_attr =
		__ATTR_RW_MODE(pid_target, 0600);

/* Files of a target directory. */
static struct attribute *damon_sysfs_target_attrs[] = {
	&damon_sysfs_target_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_target);

/* kobject type for a target directory; release frees the wrapper. */
static struct kobj_type damon_sysfs_target_ktype = {
	.release = damon_sysfs_target_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_target_groups,
};
1356 
1357 /*
1358  * targets directory
1359  */
1360 
/* Wrapper for the targets directory. */
struct damon_sysfs_targets {
	struct kobject kobj;			/* embedded sysfs directory object */
	struct damon_sysfs_target **targets_arr;	/* child target wrappers */
	int nr;					/* number of entries in targets_arr */
};
1366 
1367 static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
1368 {
1369 	return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
1370 }
1371 
/*
 * Remove all target sub-directories of @targets.
 *
 * Each child's own sub-directories ("regions") are removed before its
 * reference is dropped, then the pointer array is freed.
 */
static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
{
	struct damon_sysfs_target **targets_arr = targets->targets_arr;
	int i;

	for (i = 0; i < targets->nr; i++) {
		damon_sysfs_target_rm_dirs(targets_arr[i]);
		kobject_put(&targets_arr[i]->kobj);
	}
	targets->nr = 0;
	kfree(targets_arr);
	targets->targets_arr = NULL;
}
1385 
/*
 * Replace the target sub-directories of @targets with @nr_targets fresh
 * ones, each containing an empty "regions" sub-directory.
 *
 * Existing children are removed first.  On failure the directory is left
 * empty and a negative error code is returned.  Callers serialize via
 * damon_sysfs_lock.
 */
static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
		int nr_targets)
{
	struct damon_sysfs_target **targets_arr, *target;
	int err, i;

	damon_sysfs_targets_rm_dirs(targets);
	if (!nr_targets)
		return 0;

	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!targets_arr)
		return -ENOMEM;
	targets->targets_arr = targets_arr;

	for (i = 0; i < nr_targets; i++) {
		target = damon_sysfs_target_alloc();
		if (!target) {
			damon_sysfs_targets_rm_dirs(targets);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&target->kobj,
				&damon_sysfs_target_ktype, &targets->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_target_add_dirs(target);
		if (err)
			goto out;

		targets_arr[i] = target;
		targets->nr++;
	}
	return 0;

out:
	/*
	 * The failed target is not yet in targets_arr, so rm_dirs() won't
	 * touch it; drop its reference separately.
	 */
	damon_sysfs_targets_rm_dirs(targets);
	kobject_put(&target->kobj);
	return err;
}
1429 
1430 static ssize_t nr_targets_show(struct kobject *kobj,
1431 		struct kobj_attribute *attr, char *buf)
1432 {
1433 	struct damon_sysfs_targets *targets = container_of(kobj,
1434 			struct damon_sysfs_targets, kobj);
1435 
1436 	return sysfs_emit(buf, "%d\n", targets->nr);
1437 }
1438 
1439 static ssize_t nr_targets_store(struct kobject *kobj,
1440 		struct kobj_attribute *attr, const char *buf, size_t count)
1441 {
1442 	struct damon_sysfs_targets *targets = container_of(kobj,
1443 			struct damon_sysfs_targets, kobj);
1444 	int nr, err = kstrtoint(buf, 0, &nr);
1445 
1446 	if (err)
1447 		return err;
1448 	if (nr < 0)
1449 		return -EINVAL;
1450 
1451 	if (!mutex_trylock(&damon_sysfs_lock))
1452 		return -EBUSY;
1453 	err = damon_sysfs_targets_add_dirs(targets, nr);
1454 	mutex_unlock(&damon_sysfs_lock);
1455 	if (err)
1456 		return err;
1457 
1458 	return count;
1459 }
1460 
/* Free the targets directory wrapper when its kobject refcount drops to 0. */
static void damon_sysfs_targets_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
}
1465 
/* "nr_targets" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_targets_nr_attr =
		__ATTR_RW_MODE(nr_targets, 0600);

/* Files of a targets directory. */
static struct attribute *damon_sysfs_targets_attrs[] = {
	&damon_sysfs_targets_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_targets);

/* kobject type for a targets directory; release frees the wrapper. */
static struct kobj_type damon_sysfs_targets_ktype = {
	.release = damon_sysfs_targets_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_targets_groups,
};
1480 
1481 /*
1482  * intervals directory
1483  */
1484 
/* Wrapper for the monitoring intervals directory. */
struct damon_sysfs_intervals {
	struct kobject kobj;		/* embedded sysfs directory object */
	unsigned long sample_us;	/* sampling interval, microseconds */
	unsigned long aggr_us;		/* aggregation interval, microseconds */
	unsigned long update_us;	/* ops update interval, microseconds */
};
1491 
1492 static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
1493 		unsigned long sample_us, unsigned long aggr_us,
1494 		unsigned long update_us)
1495 {
1496 	struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
1497 			GFP_KERNEL);
1498 
1499 	if (!intervals)
1500 		return NULL;
1501 
1502 	intervals->kobj = (struct kobject){};
1503 	intervals->sample_us = sample_us;
1504 	intervals->aggr_us = aggr_us;
1505 	intervals->update_us = update_us;
1506 	return intervals;
1507 }
1508 
1509 static ssize_t sample_us_show(struct kobject *kobj,
1510 		struct kobj_attribute *attr, char *buf)
1511 {
1512 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1513 			struct damon_sysfs_intervals, kobj);
1514 
1515 	return sysfs_emit(buf, "%lu\n", intervals->sample_us);
1516 }
1517 
1518 static ssize_t sample_us_store(struct kobject *kobj,
1519 		struct kobj_attribute *attr, const char *buf, size_t count)
1520 {
1521 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1522 			struct damon_sysfs_intervals, kobj);
1523 	unsigned long us;
1524 	int err = kstrtoul(buf, 0, &us);
1525 
1526 	if (err)
1527 		return -EINVAL;
1528 
1529 	intervals->sample_us = us;
1530 	return count;
1531 }
1532 
1533 static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
1534 		char *buf)
1535 {
1536 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1537 			struct damon_sysfs_intervals, kobj);
1538 
1539 	return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
1540 }
1541 
1542 static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
1543 		const char *buf, size_t count)
1544 {
1545 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1546 			struct damon_sysfs_intervals, kobj);
1547 	unsigned long us;
1548 	int err = kstrtoul(buf, 0, &us);
1549 
1550 	if (err)
1551 		return -EINVAL;
1552 
1553 	intervals->aggr_us = us;
1554 	return count;
1555 }
1556 
1557 static ssize_t update_us_show(struct kobject *kobj,
1558 		struct kobj_attribute *attr, char *buf)
1559 {
1560 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1561 			struct damon_sysfs_intervals, kobj);
1562 
1563 	return sysfs_emit(buf, "%lu\n", intervals->update_us);
1564 }
1565 
1566 static ssize_t update_us_store(struct kobject *kobj,
1567 		struct kobj_attribute *attr, const char *buf, size_t count)
1568 {
1569 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1570 			struct damon_sysfs_intervals, kobj);
1571 	unsigned long us;
1572 	int err = kstrtoul(buf, 0, &us);
1573 
1574 	if (err)
1575 		return -EINVAL;
1576 
1577 	intervals->update_us = us;
1578 	return count;
1579 }
1580 
/* Free the intervals directory wrapper when its kobject refcount drops to 0. */
static void damon_sysfs_intervals_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
}
1585 
/* "sample_us" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
		__ATTR_RW_MODE(sample_us, 0600);

/* "aggr_us" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
		__ATTR_RW_MODE(aggr_us, 0600);

/* "update_us" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
		__ATTR_RW_MODE(update_us, 0600);

/* Files of an intervals directory. */
static struct attribute *damon_sysfs_intervals_attrs[] = {
	&damon_sysfs_intervals_sample_us_attr.attr,
	&damon_sysfs_intervals_aggr_us_attr.attr,
	&damon_sysfs_intervals_update_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals);

/* kobject type for an intervals directory; release frees the wrapper. */
static struct kobj_type damon_sysfs_intervals_ktype = {
	.release = damon_sysfs_intervals_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_groups,
};
1608 
1609 /*
1610  * monitoring_attrs directory
1611  */
1612 
/* Wrapper for the monitoring_attrs directory. */
struct damon_sysfs_attrs {
	struct kobject kobj;			/* embedded sysfs directory object */
	struct damon_sysfs_intervals *intervals;	/* "intervals" sub-directory */
	struct damon_sysfs_ul_range *nr_regions_range;	/* "nr_regions" sub-directory */
};
1618 
1619 static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
1620 {
1621 	struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
1622 
1623 	if (!attrs)
1624 		return NULL;
1625 	attrs->kobj = (struct kobject){};
1626 	return attrs;
1627 }
1628 
/*
 * Create the "intervals" and "nr_regions" sub-directories under @attrs,
 * with default values (5ms sampling, 100ms aggregation, 60s ops update,
 * and 10..1000 regions).
 *
 * Returns 0 on success.  On failure, already-created children are put
 * (freeing them) and a negative error code is returned.
 */
static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
{
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
	int err;

	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
	if (!intervals)
		return -ENOMEM;

	err = kobject_init_and_add(&intervals->kobj,
			&damon_sysfs_intervals_ktype, &attrs->kobj,
			"intervals");
	if (err)
		goto put_intervals_out;
	attrs->intervals = intervals;

	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
	if (!nr_regions_range) {
		err = -ENOMEM;
		goto put_intervals_out;
	}

	err = kobject_init_and_add(&nr_regions_range->kobj,
			&damon_sysfs_ul_range_ktype, &attrs->kobj,
			"nr_regions");
	if (err)
		goto put_nr_regions_intervals_out;
	attrs->nr_regions_range = nr_regions_range;
	return 0;

put_nr_regions_intervals_out:
	kobject_put(&nr_regions_range->kobj);
	attrs->nr_regions_range = NULL;
put_intervals_out:
	kobject_put(&intervals->kobj);
	attrs->intervals = NULL;
	return err;
}
1668 
/* Drop the references to both sub-directories of @attrs, removing them. */
static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
{
	kobject_put(&attrs->nr_regions_range->kobj);
	kobject_put(&attrs->intervals->kobj);
}
1674 
/* Free the monitoring_attrs wrapper when its kobject refcount drops to 0. */
static void damon_sysfs_attrs_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
}
1679 
/* monitoring_attrs has no files of its own; only sub-directories. */
static struct attribute *damon_sysfs_attrs_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_attrs);

/* kobject type for a monitoring_attrs directory; release frees the wrapper. */
static struct kobj_type damon_sysfs_attrs_ktype = {
	.release = damon_sysfs_attrs_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_attrs_groups,
};
1690 
1691 /*
1692  * context directory
1693  */
1694 
/*
 * User-visible names of the monitoring operation sets.  The order (and
 * hence the index of each string) must match enum damon_ops_id.
 */
static const char * const damon_sysfs_ops_strs[] = {
	"vaddr",
	"fvaddr",
	"paddr",
};
1701 
/* Wrapper for one DAMON context directory. */
struct damon_sysfs_context {
	struct kobject kobj;			/* embedded sysfs directory object */
	enum damon_ops_id ops_id;		/* selected monitoring operations set */
	struct damon_sysfs_attrs *attrs;	/* "monitoring_attrs" sub-directory */
	struct damon_sysfs_targets *targets;	/* "targets" sub-directory */
	struct damon_sysfs_schemes *schemes;	/* "schemes" sub-directory */
};
1709 
1710 static struct damon_sysfs_context *damon_sysfs_context_alloc(
1711 		enum damon_ops_id ops_id)
1712 {
1713 	struct damon_sysfs_context *context = kmalloc(sizeof(*context),
1714 				GFP_KERNEL);
1715 
1716 	if (!context)
1717 		return NULL;
1718 	context->kobj = (struct kobject){};
1719 	context->ops_id = ops_id;
1720 	return context;
1721 }
1722 
/*
 * Create the "monitoring_attrs" sub-directory (including its children)
 * under @context.  On failure the wrapper's reference is dropped and a
 * negative error code is returned.
 */
static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
{
	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
	int err;

	if (!attrs)
		return -ENOMEM;
	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
			&context->kobj, "monitoring_attrs");
	if (err)
		goto out;
	err = damon_sysfs_attrs_add_dirs(attrs);
	if (err)
		goto out;
	context->attrs = attrs;
	return 0;

out:
	kobject_put(&attrs->kobj);
	return err;
}
1744 
/*
 * Create the "targets" sub-directory under @context.  On failure the
 * wrapper's reference is dropped and a negative error code is returned.
 */
static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
{
	struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
	int err;

	if (!targets)
		return -ENOMEM;
	err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
			&context->kobj, "targets");
	if (err) {
		kobject_put(&targets->kobj);
		return err;
	}
	context->targets = targets;
	return 0;
}
1761 
/*
 * Create the "schemes" sub-directory under @context.  On failure the
 * wrapper's reference is dropped and a negative error code is returned.
 */
static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
{
	struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
	int err;

	if (!schemes)
		return -ENOMEM;
	err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
			&context->kobj, "schemes");
	if (err) {
		kobject_put(&schemes->kobj);
		return err;
	}
	context->schemes = schemes;
	return 0;
}
1778 
/*
 * Create all sub-directories of a context directory: "monitoring_attrs",
 * "targets" and "schemes".  On failure, already-created sub-directories
 * are unwound in reverse order and a negative error code is returned.
 */
static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
{
	int err;

	err = damon_sysfs_context_set_attrs(context);
	if (err)
		return err;

	err = damon_sysfs_context_set_targets(context);
	if (err)
		goto put_attrs_out;

	err = damon_sysfs_context_set_schemes(context);
	if (err)
		goto put_targets_attrs_out;
	return 0;

put_targets_attrs_out:
	kobject_put(&context->targets->kobj);
	context->targets = NULL;
put_attrs_out:
	kobject_put(&context->attrs->kobj);
	context->attrs = NULL;
	return err;
}
1804 
/*
 * Remove all sub-directories of @context, emptying each child before
 * dropping its reference.
 */
static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
{
	damon_sysfs_attrs_rm_dirs(context->attrs);
	kobject_put(&context->attrs->kobj);
	damon_sysfs_targets_rm_dirs(context->targets);
	kobject_put(&context->targets->kobj);
	damon_sysfs_schemes_rm_dirs(context->schemes);
	kobject_put(&context->schemes->kobj);
}
1814 
1815 static ssize_t avail_operations_show(struct kobject *kobj,
1816 		struct kobj_attribute *attr, char *buf)
1817 {
1818 	enum damon_ops_id id;
1819 	int len = 0;
1820 
1821 	for (id = 0; id < NR_DAMON_OPS; id++) {
1822 		if (!damon_is_registered_ops(id))
1823 			continue;
1824 		len += sysfs_emit_at(buf, len, "%s\n",
1825 				damon_sysfs_ops_strs[id]);
1826 	}
1827 	return len;
1828 }
1829 
1830 static ssize_t operations_show(struct kobject *kobj,
1831 		struct kobj_attribute *attr, char *buf)
1832 {
1833 	struct damon_sysfs_context *context = container_of(kobj,
1834 			struct damon_sysfs_context, kobj);
1835 
1836 	return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
1837 }
1838 
1839 static ssize_t operations_store(struct kobject *kobj,
1840 		struct kobj_attribute *attr, const char *buf, size_t count)
1841 {
1842 	struct damon_sysfs_context *context = container_of(kobj,
1843 			struct damon_sysfs_context, kobj);
1844 	enum damon_ops_id id;
1845 
1846 	for (id = 0; id < NR_DAMON_OPS; id++) {
1847 		if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
1848 			context->ops_id = id;
1849 			return count;
1850 		}
1851 	}
1852 	return -EINVAL;
1853 }
1854 
/* Free the context directory wrapper when its kobject refcount drops to 0. */
static void damon_sysfs_context_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_context, kobj));
}
1859 
/* "avail_operations" file: read only, owner only. */
static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
		__ATTR_RO_MODE(avail_operations, 0400);

/* "operations" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_context_operations_attr =
		__ATTR_RW_MODE(operations, 0600);

/* Files of a context directory. */
static struct attribute *damon_sysfs_context_attrs[] = {
	&damon_sysfs_context_avail_operations_attr.attr,
	&damon_sysfs_context_operations_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_context);

/* kobject type for a context directory; release frees the wrapper. */
static struct kobj_type damon_sysfs_context_ktype = {
	.release = damon_sysfs_context_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_context_groups,
};
1878 
1879 /*
1880  * contexts directory
1881  */
1882 
/* Wrapper for the contexts directory. */
struct damon_sysfs_contexts {
	struct kobject kobj;			/* embedded sysfs directory object */
	struct damon_sysfs_context **contexts_arr;	/* child context wrappers */
	int nr;					/* number of entries in contexts_arr */
};
1888 
1889 static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1890 {
1891 	return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
1892 }
1893 
/*
 * Remove all context sub-directories of @contexts, emptying each child
 * before dropping its reference, then free the pointer array.
 */
static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
{
	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
	int i;

	for (i = 0; i < contexts->nr; i++) {
		damon_sysfs_context_rm_dirs(contexts_arr[i]);
		kobject_put(&contexts_arr[i]->kobj);
	}
	contexts->nr = 0;
	kfree(contexts_arr);
	contexts->contexts_arr = NULL;
}
1907 
/*
 * Replace the context sub-directories of @contexts with @nr_contexts fresh
 * ones, each defaulting to the vaddr operations set and containing fully
 * populated sub-directories.
 *
 * Existing children are removed first.  On failure the directory is left
 * empty and a negative error code is returned.  Callers serialize via
 * damon_sysfs_lock.
 */
static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
		int nr_contexts)
{
	struct damon_sysfs_context **contexts_arr, *context;
	int err, i;

	damon_sysfs_contexts_rm_dirs(contexts);
	if (!nr_contexts)
		return 0;

	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!contexts_arr)
		return -ENOMEM;
	contexts->contexts_arr = contexts_arr;

	for (i = 0; i < nr_contexts; i++) {
		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
		if (!context) {
			damon_sysfs_contexts_rm_dirs(contexts);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&context->kobj,
				&damon_sysfs_context_ktype, &contexts->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_context_add_dirs(context);
		if (err)
			goto out;

		contexts_arr[i] = context;
		contexts->nr++;
	}
	return 0;

out:
	/*
	 * The failed context is not yet in contexts_arr, so rm_dirs() won't
	 * touch it; drop its reference separately.
	 */
	damon_sysfs_contexts_rm_dirs(contexts);
	kobject_put(&context->kobj);
	return err;
}
1951 
1952 static ssize_t nr_contexts_show(struct kobject *kobj,
1953 		struct kobj_attribute *attr, char *buf)
1954 {
1955 	struct damon_sysfs_contexts *contexts = container_of(kobj,
1956 			struct damon_sysfs_contexts, kobj);
1957 
1958 	return sysfs_emit(buf, "%d\n", contexts->nr);
1959 }
1960 
1961 static ssize_t nr_contexts_store(struct kobject *kobj,
1962 		struct kobj_attribute *attr, const char *buf, size_t count)
1963 {
1964 	struct damon_sysfs_contexts *contexts = container_of(kobj,
1965 			struct damon_sysfs_contexts, kobj);
1966 	int nr, err;
1967 
1968 	err = kstrtoint(buf, 0, &nr);
1969 	if (err)
1970 		return err;
1971 	/* TODO: support multiple contexts per kdamond */
1972 	if (nr < 0 || 1 < nr)
1973 		return -EINVAL;
1974 
1975 	if (!mutex_trylock(&damon_sysfs_lock))
1976 		return -EBUSY;
1977 	err = damon_sysfs_contexts_add_dirs(contexts, nr);
1978 	mutex_unlock(&damon_sysfs_lock);
1979 	if (err)
1980 		return err;
1981 
1982 	return count;
1983 }
1984 
/* Free the contexts directory wrapper when its kobject refcount drops to 0. */
static void damon_sysfs_contexts_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
}
1989 
/* "nr_contexts" file: read/write, owner only. */
static struct kobj_attribute damon_sysfs_contexts_nr_attr
		= __ATTR_RW_MODE(nr_contexts, 0600);

/* Files of a contexts directory. */
static struct attribute *damon_sysfs_contexts_attrs[] = {
	&damon_sysfs_contexts_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);

/* kobject type for a contexts directory; release frees the wrapper. */
static struct kobj_type damon_sysfs_contexts_ktype = {
	.release = damon_sysfs_contexts_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_contexts_groups,
};
2004 
2005 /*
2006  * kdamond directory
2007  */
2008 
/* Wrapper for one kdamond directory. */
struct damon_sysfs_kdamond {
	struct kobject kobj;			/* embedded sysfs directory object */
	struct damon_sysfs_contexts *contexts;	/* "contexts" sub-directory */
	/* DAMON context of this kdamond; NULL until one is created */
	struct damon_ctx *damon_ctx;
};
2014 
2015 static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
2016 {
2017 	return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
2018 }
2019 
/*
 * Create the "contexts" sub-directory under @kdamond.  On failure the
 * wrapper's reference is dropped and a negative error code is returned.
 */
static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_sysfs_contexts *contexts;
	int err;

	contexts = damon_sysfs_contexts_alloc();
	if (!contexts)
		return -ENOMEM;

	err = kobject_init_and_add(&contexts->kobj,
			&damon_sysfs_contexts_ktype, &kdamond->kobj,
			"contexts");
	if (err) {
		kobject_put(&contexts->kobj);
		return err;
	}
	kdamond->contexts = contexts;

	return err;
}
2040 
/*
 * Empty and remove the "contexts" sub-directory of @kdamond, dropping the
 * reference that keeps its wrapper alive.
 */
static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
{
	damon_sysfs_contexts_rm_dirs(kdamond->contexts);
	kobject_put(&kdamond->contexts->kobj);
}
2046 
2047 static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
2048 {
2049 	bool running;
2050 
2051 	mutex_lock(&ctx->kdamond_lock);
2052 	running = ctx->kdamond != NULL;
2053 	mutex_unlock(&ctx->kdamond_lock);
2054 	return running;
2055 }
2056 
2057 /*
2058  * enum damon_sysfs_cmd - Commands for a specific kdamond.
2059  */
2060 enum damon_sysfs_cmd {
2061 	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
2062 	DAMON_SYSFS_CMD_ON,
2063 	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
2064 	DAMON_SYSFS_CMD_OFF,
2065 	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
2066 	DAMON_SYSFS_CMD_COMMIT,
2067 	/*
2068 	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
2069 	 * files.
2070 	 */
2071 	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
2072 	/*
2073 	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
2074 	 */
2075 	NR_DAMON_SYSFS_CMDS,
2076 };
2077 
/*
 * User-visible keywords of the commands.  The order (and hence the index
 * of each string) must match enum damon_sysfs_cmd.
 */
static const char * const damon_sysfs_cmd_strs[] = {
	"on",
	"off",
	"commit",
	"update_schemes_stats",
};
2085 
2086 /*
2087  * struct damon_sysfs_cmd_request - A request to the DAMON callback.
2088  * @cmd:	The command that needs to be handled by the callback.
2089  * @kdamond:	The kobject wrapper that associated to the kdamond thread.
2090  *
2091  * This structure represents a sysfs command request that need to access some
2092  * DAMON context-internal data.  Because DAMON context-internal data can be
2093  * safely accessed from DAMON callbacks without additional synchronization, the
2094  * request will be handled by the DAMON callback.  None-``NULL`` @kdamond means
2095  * the request is valid.
2096  */
2097 struct damon_sysfs_cmd_request {
2098 	enum damon_sysfs_cmd cmd;
2099 	struct damon_sysfs_kdamond *kdamond;
2100 };
2101 
2102 /* Current DAMON callback request.  Protected by damon_sysfs_lock. */
2103 static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
2104 
/*
 * Show "on" if this kdamond's DAMON context exists and has a running
 * kdamond thread, "off" otherwise.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx = kdamond->damon_ctx;
	bool running;

	if (!ctx)
		running = false;
	else
		running = damon_sysfs_ctx_running(ctx);

	return sysfs_emit(buf, "%s\n", running ?
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
}
2122 
2123 static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
2124 		struct damon_sysfs_attrs *sys_attrs)
2125 {
2126 	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
2127 	struct damon_sysfs_ul_range *sys_nr_regions =
2128 		sys_attrs->nr_regions_range;
2129 
2130 	return damon_set_attrs(ctx, sys_intervals->sample_us,
2131 			sys_intervals->aggr_us, sys_intervals->update_us,
2132 			sys_nr_regions->min, sys_nr_regions->max);
2133 }
2134 
/*
 * Destroy all monitoring targets of @ctx.  For operation sets that monitor
 * specific processes, the pid reference taken when the target was added is
 * dropped first.
 */
static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	damon_for_each_target_safe(t, next, ctx) {
		if (damon_target_has_pid(ctx))
			put_pid(t->pid);
		damon_destroy_target(t);
	}
}
2145 
/*
 * Apply the user-specified initial monitoring regions from @sysfs_regions
 * to target @t.
 *
 * The regions must be given in ascending address order without overlap
 * (adjacent regions are allowed), and each region's start must not exceed
 * its end; -EINVAL is returned otherwise.  Returns 0 on success, or a
 * negative error code.
 */
static int damon_sysfs_set_regions(struct damon_target *t,
		struct damon_sysfs_regions *sysfs_regions)
{
	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	int i, err = -EINVAL;

	if (!ranges)
		return -ENOMEM;
	for (i = 0; i < sysfs_regions->nr; i++) {
		struct damon_sysfs_region *sys_region =
			sysfs_regions->regions_arr[i];

		if (sys_region->start > sys_region->end)
			goto out;

		ranges[i].start = sys_region->start;
		ranges[i].end = sys_region->end;
		if (i == 0)
			continue;
		/* reject overlap with the previous region */
		if (ranges[i - 1].end > ranges[i].start)
			goto out;
	}
	err = damon_set_regions(t, ranges, sysfs_regions->nr);
out:
	kfree(ranges);
	return err;

}
2175 
/*
 * Create a new monitoring target for @ctx from @sys_target and set its
 * initial regions.
 *
 * Note: on failure, ALL targets of @ctx are destroyed, not only the one
 * being added here.
 */
static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
		struct damon_ctx *ctx)
{
	struct damon_target *t = damon_new_target();
	int err = -EINVAL;

	if (!t)
		return -ENOMEM;
	if (damon_target_has_pid(ctx)) {
		/* takes a pid reference; dropped by destroy_targets */
		t->pid = find_get_pid(sys_target->pid);
		if (!t->pid)
			goto destroy_targets_out;
	}
	damon_add_target(ctx, t);
	err = damon_sysfs_set_regions(t, sys_target->regions);
	if (err)
		goto destroy_targets_out;
	return 0;

destroy_targets_out:
	damon_sysfs_destroy_targets(ctx);
	return err;
}
2199 
2200 /*
2201  * Search a target in a context that corresponds to the sysfs target input.
2202  *
2203  * Return: pointer to the target if found, NULL if not found, or negative
2204  * error code if the search failed.
2205  */
2206 static struct damon_target *damon_sysfs_existing_target(
2207 		struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
2208 {
2209 	struct pid *pid;
2210 	struct damon_target *t;
2211 
2212 	if (!damon_target_has_pid(ctx)) {
2213 		/* Up to only one target for paddr could exist */
2214 		damon_for_each_target(t, ctx)
2215 			return t;
2216 		return NULL;
2217 	}
2218 
2219 	/* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
2220 	pid = find_get_pid(sys_target->pid);
2221 	if (!pid)
2222 		return ERR_PTR(-EINVAL);
2223 	damon_for_each_target(t, ctx) {
2224 		if (t->pid == pid) {
2225 			put_pid(pid);
2226 			return t;
2227 		}
2228 	}
2229 	put_pid(pid);
2230 	return NULL;
2231 }
2232 
2233 static int damon_sysfs_set_targets(struct damon_ctx *ctx,
2234 		struct damon_sysfs_targets *sysfs_targets)
2235 {
2236 	int i, err;
2237 
2238 	/* Multiple physical address space monitoring targets makes no sense */
2239 	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
2240 		return -EINVAL;
2241 
2242 	for (i = 0; i < sysfs_targets->nr; i++) {
2243 		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
2244 		struct damon_target *t = damon_sysfs_existing_target(st, ctx);
2245 
2246 		if (IS_ERR(t))
2247 			return PTR_ERR(t);
2248 		if (!t)
2249 			err = damon_sysfs_add_target(st, ctx);
2250 		else
2251 			err = damon_sysfs_set_regions(t, st->regions);
2252 		if (err)
2253 			return err;
2254 	}
2255 	return 0;
2256 }
2257 
2258 static struct damos *damon_sysfs_mk_scheme(
2259 		struct damon_sysfs_scheme *sysfs_scheme)
2260 {
2261 	struct damon_sysfs_access_pattern *pattern =
2262 		sysfs_scheme->access_pattern;
2263 	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
2264 	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
2265 	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
2266 	struct damos_quota quota = {
2267 		.ms = sysfs_quotas->ms,
2268 		.sz = sysfs_quotas->sz,
2269 		.reset_interval = sysfs_quotas->reset_interval_ms,
2270 		.weight_sz = sysfs_weights->sz,
2271 		.weight_nr_accesses = sysfs_weights->nr_accesses,
2272 		.weight_age = sysfs_weights->age,
2273 	};
2274 	struct damos_watermarks wmarks = {
2275 		.metric = sysfs_wmarks->metric,
2276 		.interval = sysfs_wmarks->interval_us,
2277 		.high = sysfs_wmarks->high,
2278 		.mid = sysfs_wmarks->mid,
2279 		.low = sysfs_wmarks->low,
2280 	};
2281 
2282 	return damon_new_scheme(pattern->sz->min, pattern->sz->max,
2283 			pattern->nr_accesses->min, pattern->nr_accesses->max,
2284 			pattern->age->min, pattern->age->max,
2285 			sysfs_scheme->action, &quota, &wmarks);
2286 }
2287 
2288 static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
2289 		struct damon_sysfs_schemes *sysfs_schemes)
2290 {
2291 	int i;
2292 
2293 	for (i = 0; i < sysfs_schemes->nr; i++) {
2294 		struct damos *scheme, *next;
2295 
2296 		scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
2297 		if (!scheme) {
2298 			damon_for_each_scheme_safe(scheme, next, ctx)
2299 				damon_destroy_scheme(scheme);
2300 			return -ENOMEM;
2301 		}
2302 		damon_add_scheme(ctx, scheme);
2303 	}
2304 	return 0;
2305 }
2306 
2307 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
2308 {
2309 	struct damon_target *t, *next;
2310 
2311 	if (ctx->ops.id != DAMON_OPS_VADDR && ctx->ops.id != DAMON_OPS_FVADDR)
2312 		return;
2313 
2314 	mutex_lock(&ctx->kdamond_lock);
2315 	damon_for_each_target_safe(t, next, ctx) {
2316 		put_pid(t->pid);
2317 		damon_destroy_target(t);
2318 	}
2319 	mutex_unlock(&ctx->kdamond_lock);
2320 }
2321 
/*
 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
 * @kdamond:	The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes stats of specific kdamond and updates the
 * related values for sysfs files.  This function should be called from DAMON
 * callbacks while holding ``damon_sysfs_lock``, to safely access the DAMON
 * contexts-internal data and DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx = kdamond->damon_ctx;
	struct damon_sysfs_schemes *sysfs_schemes;
	struct damos *scheme;
	int schemes_idx = 0;

	if (!ctx)
		return -EINVAL;
	/* Only a single context per kdamond is supported for now */
	sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_stats *sysfs_stats;

		/*
		 * NOTE(review): assumes ctx's scheme list was built from this
		 * sysfs array in the same order (see damon_sysfs_set_schemes)
		 */
		sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
		sysfs_stats->nr_tried = scheme->stat.nr_tried;
		sysfs_stats->sz_tried = scheme->stat.sz_tried;
		sysfs_stats->nr_applied = scheme->stat.nr_applied;
		sysfs_stats->sz_applied = scheme->stat.sz_applied;
		sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
	}
	return 0;
}
2353 
2354 static inline bool damon_sysfs_kdamond_running(
2355 		struct damon_sysfs_kdamond *kdamond)
2356 {
2357 	return kdamond->damon_ctx &&
2358 		damon_sysfs_ctx_running(kdamond->damon_ctx);
2359 }
2360 
2361 static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
2362 		struct damon_sysfs_context *sys_ctx)
2363 {
2364 	int err;
2365 
2366 	err = damon_select_ops(ctx, sys_ctx->ops_id);
2367 	if (err)
2368 		return err;
2369 	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2370 	if (err)
2371 		return err;
2372 	err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2373 	if (err)
2374 		return err;
2375 	return damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2376 }
2377 
2378 /*
2379  * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
2380  * @kdamond:	The kobject wrapper for the associated kdamond.
2381  *
2382  * If the sysfs input is wrong, the kdamond will be terminated.
2383  */
2384 static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
2385 {
2386 	if (!damon_sysfs_kdamond_running(kdamond))
2387 		return -EINVAL;
2388 	/* TODO: Support multiple contexts per kdamond */
2389 	if (kdamond->contexts->nr != 1)
2390 		return -EINVAL;
2391 
2392 	return damon_sysfs_apply_inputs(kdamond->damon_ctx,
2393 			kdamond->contexts->contexts_arr[0]);
2394 }
2395 
/*
 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
 * @c:	The DAMON context of the callback.
 *
 * This function is periodically called back from the kdamond thread for @c.
 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
 */
static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
{
	struct damon_sysfs_kdamond *kdamond;
	int err = 0;

	/* avoid deadlock due to concurrent state_store('off') */
	if (!mutex_trylock(&damon_sysfs_lock))
		return 0;
	kdamond = damon_sysfs_cmd_request.kdamond;
	/* Handle the request only if it targets this callback's context */
	if (!kdamond || kdamond->damon_ctx != c)
		goto out;
	switch (damon_sysfs_cmd_request.cmd) {
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
		err = damon_sysfs_upd_schemes_stats(kdamond);
		break;
	case DAMON_SYSFS_CMD_COMMIT:
		err = damon_sysfs_commit_input(kdamond);
		break;
	default:
		/* on/off commands are handled without this callback */
		break;
	}
	/* Mark the request as invalid now. */
	damon_sysfs_cmd_request.kdamond = NULL;
out:
	mutex_unlock(&damon_sysfs_lock);
	return err;
}
2430 
2431 static struct damon_ctx *damon_sysfs_build_ctx(
2432 		struct damon_sysfs_context *sys_ctx)
2433 {
2434 	struct damon_ctx *ctx = damon_new_ctx();
2435 	int err;
2436 
2437 	if (!ctx)
2438 		return ERR_PTR(-ENOMEM);
2439 
2440 	err = damon_sysfs_apply_inputs(ctx, sys_ctx);
2441 	if (err) {
2442 		damon_destroy_ctx(ctx);
2443 		return ERR_PTR(err);
2444 	}
2445 
2446 	ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
2447 	ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
2448 	ctx->callback.before_terminate = damon_sysfs_before_terminate;
2449 	return ctx;
2450 }
2451 
2452 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
2453 {
2454 	struct damon_ctx *ctx;
2455 	int err;
2456 
2457 	if (kdamond->damon_ctx &&
2458 			damon_sysfs_ctx_running(kdamond->damon_ctx))
2459 		return -EBUSY;
2460 	if (damon_sysfs_cmd_request.kdamond == kdamond)
2461 		return -EBUSY;
2462 	/* TODO: support multiple contexts per kdamond */
2463 	if (kdamond->contexts->nr != 1)
2464 		return -EINVAL;
2465 
2466 	if (kdamond->damon_ctx)
2467 		damon_destroy_ctx(kdamond->damon_ctx);
2468 	kdamond->damon_ctx = NULL;
2469 
2470 	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
2471 	if (IS_ERR(ctx))
2472 		return PTR_ERR(ctx);
2473 	err = damon_start(&ctx, 1, false);
2474 	if (err) {
2475 		damon_destroy_ctx(ctx);
2476 		return err;
2477 	}
2478 	kdamond->damon_ctx = ctx;
2479 	return err;
2480 }
2481 
/* Stop the kdamond thread of @kdamond, keeping its context for inspection. */
static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	/*
	 * To allow users show final monitoring results of already turned-off
	 * DAMON, we free kdamond->damon_ctx in next
	 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
	 */
	return damon_stop(&kdamond->damon_ctx, 1);
}
2493 
/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd:	The command to handle.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  For commands
 * that need to access running DAMON context-internal data, it requests
 * handling of the command to the DAMON callback
 * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled,
 * or the context is completed.
 *
 * The caller must hold ``damon_sysfs_lock``.  The lock is temporarily
 * dropped while waiting for the callback and re-taken before returning.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	bool need_wait = true;

	/* Handle commands that doesn't access DAMON context-internal data */
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	default:
		break;
	}

	/* Pass the command to DAMON callback for safe DAMON context access */
	if (damon_sysfs_cmd_request.kdamond)
		return -EBUSY;	/* another request is still pending */
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;	/* no kdamond thread to run the callback */
	damon_sysfs_cmd_request.cmd = cmd;
	damon_sysfs_cmd_request.kdamond = kdamond;

	/*
	 * wait until damon_sysfs_cmd_request_callback() handles the request
	 * from kdamond context
	 */
	mutex_unlock(&damon_sysfs_lock);
	while (need_wait) {
		schedule_timeout_idle(msecs_to_jiffies(100));
		/* the callback may hold the lock; retry rather than block */
		if (!mutex_trylock(&damon_sysfs_lock))
			continue;
		if (!damon_sysfs_cmd_request.kdamond) {
			/* damon_sysfs_cmd_request_callback() handled */
			need_wait = false;
		} else if (!damon_sysfs_kdamond_running(kdamond)) {
			/* kdamond has already finished */
			need_wait = false;
			damon_sysfs_cmd_request.kdamond = NULL;
		}
		mutex_unlock(&damon_sysfs_lock);
	}
	/* re-take the lock, as the caller expects it held on return */
	mutex_lock(&damon_sysfs_lock);
	return 0;
}
2552 
2553 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
2554 		const char *buf, size_t count)
2555 {
2556 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2557 			struct damon_sysfs_kdamond, kobj);
2558 	enum damon_sysfs_cmd cmd;
2559 	ssize_t ret = -EINVAL;
2560 
2561 	if (!mutex_trylock(&damon_sysfs_lock))
2562 		return -EBUSY;
2563 	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
2564 		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
2565 			ret = damon_sysfs_handle_cmd(cmd, kdamond);
2566 			break;
2567 		}
2568 	}
2569 	mutex_unlock(&damon_sysfs_lock);
2570 	if (!ret)
2571 		ret = count;
2572 	return ret;
2573 }
2574 
2575 static ssize_t pid_show(struct kobject *kobj,
2576 		struct kobj_attribute *attr, char *buf)
2577 {
2578 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2579 			struct damon_sysfs_kdamond, kobj);
2580 	struct damon_ctx *ctx;
2581 	int pid;
2582 
2583 	if (!mutex_trylock(&damon_sysfs_lock))
2584 		return -EBUSY;
2585 	ctx = kdamond->damon_ctx;
2586 	if (!ctx) {
2587 		pid = -1;
2588 		goto out;
2589 	}
2590 	mutex_lock(&ctx->kdamond_lock);
2591 	if (!ctx->kdamond)
2592 		pid = -1;
2593 	else
2594 		pid = ctx->kdamond->pid;
2595 	mutex_unlock(&ctx->kdamond_lock);
2596 out:
2597 	mutex_unlock(&damon_sysfs_lock);
2598 	return sysfs_emit(buf, "%d\n", pid);
2599 }
2600 
2601 static void damon_sysfs_kdamond_release(struct kobject *kobj)
2602 {
2603 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2604 			struct damon_sysfs_kdamond, kobj);
2605 
2606 	if (kdamond->damon_ctx)
2607 		damon_destroy_ctx(kdamond->damon_ctx);
2608 	kfree(kdamond);
2609 }
2610 
/* "state" file: reads state and accepts commands (root read/write) */
static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

/* "pid" file: pid of the kdamond thread, or -1 (root read-only) */
static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

/* Files of each kdamond directory */
static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

static struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};
2629 
/*
 * kdamonds directory
 */

struct damon_sysfs_kdamonds {
	struct kobject kobj;	/* the "kdamonds" sysfs directory */
	struct damon_sysfs_kdamond **kdamonds_arr;	/* child kdamonds */
	int nr;			/* number of entries in kdamonds_arr */
};
2639 
2640 static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
2641 {
2642 	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
2643 }
2644 
2645 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
2646 {
2647 	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
2648 	int i;
2649 
2650 	for (i = 0; i < kdamonds->nr; i++) {
2651 		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
2652 		kobject_put(&kdamonds_arr[i]->kobj);
2653 	}
2654 	kdamonds->nr = 0;
2655 	kfree(kdamonds_arr);
2656 	kdamonds->kdamonds_arr = NULL;
2657 }
2658 
2659 static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
2660 		int nr_kdamonds)
2661 {
2662 	int nr_running_ctxs = 0;
2663 	int i;
2664 
2665 	for (i = 0; i < nr_kdamonds; i++) {
2666 		struct damon_ctx *ctx = kdamonds[i]->damon_ctx;
2667 
2668 		if (!ctx)
2669 			continue;
2670 		mutex_lock(&ctx->kdamond_lock);
2671 		if (ctx->kdamond)
2672 			nr_running_ctxs++;
2673 		mutex_unlock(&ctx->kdamond_lock);
2674 	}
2675 	return nr_running_ctxs;
2676 }
2677 
/*
 * Replace the kdamond directories under @kdamonds with @nr_kdamonds fresh
 * ones.  Existing directories are removed first.  Fails with -EBUSY if any
 * kdamond is running or has a pending sysfs command request.
 */
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	/* don't tear down directories of running contexts */
	if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	/* nor of a kdamond with an in-flight command request */
	for (i = 0; i < kdamonds->nr; i++) {
		if (damon_sysfs_cmd_request.kdamond ==
				kdamonds->kdamonds_arr[i])
			return -EBUSY;
	}

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	/*
	 * The failing kdamond is not yet counted in kdamonds->nr, so it
	 * needs its own kobject_put() after removing the already-added ones.
	 */
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}
2730 
2731 static ssize_t nr_kdamonds_show(struct kobject *kobj,
2732 		struct kobj_attribute *attr, char *buf)
2733 {
2734 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2735 			struct damon_sysfs_kdamonds, kobj);
2736 
2737 	return sysfs_emit(buf, "%d\n", kdamonds->nr);
2738 }
2739 
2740 static ssize_t nr_kdamonds_store(struct kobject *kobj,
2741 		struct kobj_attribute *attr, const char *buf, size_t count)
2742 {
2743 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2744 			struct damon_sysfs_kdamonds, kobj);
2745 	int nr, err;
2746 
2747 	err = kstrtoint(buf, 0, &nr);
2748 	if (err)
2749 		return err;
2750 	if (nr < 0)
2751 		return -EINVAL;
2752 
2753 	if (!mutex_trylock(&damon_sysfs_lock))
2754 		return -EBUSY;
2755 	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2756 	mutex_unlock(&damon_sysfs_lock);
2757 	if (err)
2758 		return err;
2759 
2760 	return count;
2761 }
2762 
2763 static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2764 {
2765 	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2766 }
2767 
/* "nr_kdamonds" file: number of kdamond directories (root read/write) */
static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

/* Files of the kdamonds directory */
static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

static struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};
2782 
/*
 * damon user interface directory
 */

struct damon_sysfs_ui_dir {
	struct kobject kobj;			/* the "admin" directory */
	struct damon_sysfs_kdamonds *kdamonds;	/* child "kdamonds" dir */
};
2791 
2792 static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2793 {
2794 	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2795 }
2796 
2797 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2798 {
2799 	struct damon_sysfs_kdamonds *kdamonds;
2800 	int err;
2801 
2802 	kdamonds = damon_sysfs_kdamonds_alloc();
2803 	if (!kdamonds)
2804 		return -ENOMEM;
2805 
2806 	err = kobject_init_and_add(&kdamonds->kobj,
2807 			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2808 			"kdamonds");
2809 	if (err) {
2810 		kobject_put(&kdamonds->kobj);
2811 		return err;
2812 	}
2813 	ui_dir->kdamonds = kdamonds;
2814 	return err;
2815 }
2816 
2817 static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2818 {
2819 	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2820 }
2821 
/* The "admin" directory has no files of its own, only subdirectories */
static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

static struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};
2832 
/* Build the DAMON sysfs hierarchy under the mm kobject at boot. */
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	/* the "damon" directory under mm_kobj */
	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	/* the "admin" directory under "damon" */
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	return 0;

out:
	/* this put frees @admin via the ktype's release callback */
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);
2863