xref: /openbmc/linux/mm/damon/sysfs.c (revision 8ab59da2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DAMON sysfs Interface
4  *
5  * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
6  */
7 
8 #include <linux/damon.h>
9 #include <linux/kobject.h>
10 #include <linux/pid.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 
/* Serializes modifications of the DAMON sysfs interface's internal state. */
static DEFINE_MUTEX(damon_sysfs_lock);
15 
16 /*
17  * unsigned long range directory
18  */
19 
20 struct damon_sysfs_ul_range {
21 	struct kobject kobj;
22 	unsigned long min;
23 	unsigned long max;
24 };
25 
26 static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
27 		unsigned long min,
28 		unsigned long max)
29 {
30 	struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
31 			GFP_KERNEL);
32 
33 	if (!range)
34 		return NULL;
35 	range->kobj = (struct kobject){};
36 	range->min = min;
37 	range->max = max;
38 
39 	return range;
40 }
41 
42 static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
43 		char *buf)
44 {
45 	struct damon_sysfs_ul_range *range = container_of(kobj,
46 			struct damon_sysfs_ul_range, kobj);
47 
48 	return sysfs_emit(buf, "%lu\n", range->min);
49 }
50 
51 static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
52 		const char *buf, size_t count)
53 {
54 	struct damon_sysfs_ul_range *range = container_of(kobj,
55 			struct damon_sysfs_ul_range, kobj);
56 	unsigned long min;
57 	int err;
58 
59 	err = kstrtoul(buf, 0, &min);
60 	if (err)
61 		return err;
62 
63 	range->min = min;
64 	return count;
65 }
66 
67 static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
68 		char *buf)
69 {
70 	struct damon_sysfs_ul_range *range = container_of(kobj,
71 			struct damon_sysfs_ul_range, kobj);
72 
73 	return sysfs_emit(buf, "%lu\n", range->max);
74 }
75 
76 static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
77 		const char *buf, size_t count)
78 {
79 	struct damon_sysfs_ul_range *range = container_of(kobj,
80 			struct damon_sysfs_ul_range, kobj);
81 	unsigned long max;
82 	int err;
83 
84 	err = kstrtoul(buf, 0, &max);
85 	if (err)
86 		return err;
87 
88 	range->max = max;
89 	return count;
90 }
91 
/* Free the ul_range object once its kobject's refcount drops to zero. */
static void damon_sysfs_ul_range_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
}

/* Files of a ul_range directory: 'min' and 'max', owner read/write. */
static struct kobj_attribute damon_sysfs_ul_range_min_attr =
		__ATTR_RW_MODE(min, 0600);

static struct kobj_attribute damon_sysfs_ul_range_max_attr =
		__ATTR_RW_MODE(max, 0600);

static struct attribute *damon_sysfs_ul_range_attrs[] = {
	&damon_sysfs_ul_range_min_attr.attr,
	&damon_sysfs_ul_range_max_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ul_range);

/* kobj_type shared by every ul_range directory. */
static struct kobj_type damon_sysfs_ul_range_ktype = {
	.release = damon_sysfs_ul_range_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ul_range_groups,
};
115 
116 /*
117  * schemes/stats directory
118  */
119 
120 struct damon_sysfs_stats {
121 	struct kobject kobj;
122 	unsigned long nr_tried;
123 	unsigned long sz_tried;
124 	unsigned long nr_applied;
125 	unsigned long sz_applied;
126 	unsigned long qt_exceeds;
127 };
128 
129 static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
130 {
131 	return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
132 }
133 
134 static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
135 		char *buf)
136 {
137 	struct damon_sysfs_stats *stats = container_of(kobj,
138 			struct damon_sysfs_stats, kobj);
139 
140 	return sysfs_emit(buf, "%lu\n", stats->nr_tried);
141 }
142 
143 static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
144 		char *buf)
145 {
146 	struct damon_sysfs_stats *stats = container_of(kobj,
147 			struct damon_sysfs_stats, kobj);
148 
149 	return sysfs_emit(buf, "%lu\n", stats->sz_tried);
150 }
151 
152 static ssize_t nr_applied_show(struct kobject *kobj,
153 		struct kobj_attribute *attr, char *buf)
154 {
155 	struct damon_sysfs_stats *stats = container_of(kobj,
156 			struct damon_sysfs_stats, kobj);
157 
158 	return sysfs_emit(buf, "%lu\n", stats->nr_applied);
159 }
160 
161 static ssize_t sz_applied_show(struct kobject *kobj,
162 		struct kobj_attribute *attr, char *buf)
163 {
164 	struct damon_sysfs_stats *stats = container_of(kobj,
165 			struct damon_sysfs_stats, kobj);
166 
167 	return sysfs_emit(buf, "%lu\n", stats->sz_applied);
168 }
169 
170 static ssize_t qt_exceeds_show(struct kobject *kobj,
171 		struct kobj_attribute *attr, char *buf)
172 {
173 	struct damon_sysfs_stats *stats = container_of(kobj,
174 			struct damon_sysfs_stats, kobj);
175 
176 	return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
177 }
178 
/* Free the stats object once its kobject's refcount drops to zero. */
static void damon_sysfs_stats_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
}

/* Read-only counter files of a stats directory. */
static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
		__ATTR_RO_MODE(nr_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
		__ATTR_RO_MODE(sz_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
		__ATTR_RO_MODE(nr_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
		__ATTR_RO_MODE(sz_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
		__ATTR_RO_MODE(qt_exceeds, 0400);

static struct attribute *damon_sysfs_stats_attrs[] = {
	&damon_sysfs_stats_nr_tried_attr.attr,
	&damon_sysfs_stats_sz_tried_attr.attr,
	&damon_sysfs_stats_nr_applied_attr.attr,
	&damon_sysfs_stats_sz_applied_attr.attr,
	&damon_sysfs_stats_qt_exceeds_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_stats);

/* kobj_type shared by every stats directory. */
static struct kobj_type damon_sysfs_stats_ktype = {
	.release = damon_sysfs_stats_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_stats_groups,
};
214 
215 /*
216  * watermarks directory
217  */
218 
219 struct damon_sysfs_watermarks {
220 	struct kobject kobj;
221 	enum damos_wmark_metric metric;
222 	unsigned long interval_us;
223 	unsigned long high;
224 	unsigned long mid;
225 	unsigned long low;
226 };
227 
228 static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
229 		enum damos_wmark_metric metric, unsigned long interval_us,
230 		unsigned long high, unsigned long mid, unsigned long low)
231 {
232 	struct damon_sysfs_watermarks *watermarks = kmalloc(
233 			sizeof(*watermarks), GFP_KERNEL);
234 
235 	if (!watermarks)
236 		return NULL;
237 	watermarks->kobj = (struct kobject){};
238 	watermarks->metric = metric;
239 	watermarks->interval_us = interval_us;
240 	watermarks->high = high;
241 	watermarks->mid = mid;
242 	watermarks->low = low;
243 	return watermarks;
244 }
245 
246 /* Should match with enum damos_wmark_metric */
247 static const char * const damon_sysfs_wmark_metric_strs[] = {
248 	"none",
249 	"free_mem_rate",
250 };
251 
252 static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
253 		char *buf)
254 {
255 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
256 			struct damon_sysfs_watermarks, kobj);
257 
258 	return sysfs_emit(buf, "%s\n",
259 			damon_sysfs_wmark_metric_strs[watermarks->metric]);
260 }
261 
262 static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
263 		const char *buf, size_t count)
264 {
265 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
266 			struct damon_sysfs_watermarks, kobj);
267 	enum damos_wmark_metric metric;
268 
269 	for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
270 		if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
271 			watermarks->metric = metric;
272 			return count;
273 		}
274 	}
275 	return -EINVAL;
276 }
277 
278 static ssize_t interval_us_show(struct kobject *kobj,
279 		struct kobj_attribute *attr, char *buf)
280 {
281 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
282 			struct damon_sysfs_watermarks, kobj);
283 
284 	return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
285 }
286 
287 static ssize_t interval_us_store(struct kobject *kobj,
288 		struct kobj_attribute *attr, const char *buf, size_t count)
289 {
290 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
291 			struct damon_sysfs_watermarks, kobj);
292 	int err = kstrtoul(buf, 0, &watermarks->interval_us);
293 
294 	return err ? err : count;
295 }
296 
297 static ssize_t high_show(struct kobject *kobj,
298 		struct kobj_attribute *attr, char *buf)
299 {
300 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
301 			struct damon_sysfs_watermarks, kobj);
302 
303 	return sysfs_emit(buf, "%lu\n", watermarks->high);
304 }
305 
306 static ssize_t high_store(struct kobject *kobj,
307 		struct kobj_attribute *attr, const char *buf, size_t count)
308 {
309 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
310 			struct damon_sysfs_watermarks, kobj);
311 	int err = kstrtoul(buf, 0, &watermarks->high);
312 
313 	return err ? err : count;
314 }
315 
316 static ssize_t mid_show(struct kobject *kobj,
317 		struct kobj_attribute *attr, char *buf)
318 {
319 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
320 			struct damon_sysfs_watermarks, kobj);
321 
322 	return sysfs_emit(buf, "%lu\n", watermarks->mid);
323 }
324 
325 static ssize_t mid_store(struct kobject *kobj,
326 		struct kobj_attribute *attr, const char *buf, size_t count)
327 {
328 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
329 			struct damon_sysfs_watermarks, kobj);
330 	int err = kstrtoul(buf, 0, &watermarks->mid);
331 
332 	return err ? err : count;
333 }
334 
335 static ssize_t low_show(struct kobject *kobj,
336 		struct kobj_attribute *attr, char *buf)
337 {
338 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
339 			struct damon_sysfs_watermarks, kobj);
340 
341 	return sysfs_emit(buf, "%lu\n", watermarks->low);
342 }
343 
344 static ssize_t low_store(struct kobject *kobj,
345 		struct kobj_attribute *attr, const char *buf, size_t count)
346 {
347 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
348 			struct damon_sysfs_watermarks, kobj);
349 	int err = kstrtoul(buf, 0, &watermarks->low);
350 
351 	return err ? err : count;
352 }
353 
/* Free the watermarks object once its kobject's refcount drops to zero. */
static void damon_sysfs_watermarks_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
}

/* Files of a watermarks directory, all owner read/write. */
static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
		__ATTR_RW_MODE(metric, 0600);

static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
		__ATTR_RW_MODE(interval_us, 0600);

static struct kobj_attribute damon_sysfs_watermarks_high_attr =
		__ATTR_RW_MODE(high, 0600);

static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
		__ATTR_RW_MODE(mid, 0600);

static struct kobj_attribute damon_sysfs_watermarks_low_attr =
		__ATTR_RW_MODE(low, 0600);

static struct attribute *damon_sysfs_watermarks_attrs[] = {
	&damon_sysfs_watermarks_metric_attr.attr,
	&damon_sysfs_watermarks_interval_us_attr.attr,
	&damon_sysfs_watermarks_high_attr.attr,
	&damon_sysfs_watermarks_mid_attr.attr,
	&damon_sysfs_watermarks_low_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_watermarks);

/* kobj_type shared by every watermarks directory. */
static struct kobj_type damon_sysfs_watermarks_ktype = {
	.release = damon_sysfs_watermarks_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_watermarks_groups,
};
389 
390 /*
391  * scheme/weights directory
392  */
393 
394 struct damon_sysfs_weights {
395 	struct kobject kobj;
396 	unsigned int sz;
397 	unsigned int nr_accesses;
398 	unsigned int age;
399 };
400 
401 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
402 		unsigned int nr_accesses, unsigned int age)
403 {
404 	struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
405 			GFP_KERNEL);
406 
407 	if (!weights)
408 		return NULL;
409 	weights->kobj = (struct kobject){};
410 	weights->sz = sz;
411 	weights->nr_accesses = nr_accesses;
412 	weights->age = age;
413 	return weights;
414 }
415 
416 static ssize_t sz_permil_show(struct kobject *kobj,
417 		struct kobj_attribute *attr, char *buf)
418 {
419 	struct damon_sysfs_weights *weights = container_of(kobj,
420 			struct damon_sysfs_weights, kobj);
421 
422 	return sysfs_emit(buf, "%u\n", weights->sz);
423 }
424 
425 static ssize_t sz_permil_store(struct kobject *kobj,
426 		struct kobj_attribute *attr, const char *buf, size_t count)
427 {
428 	struct damon_sysfs_weights *weights = container_of(kobj,
429 			struct damon_sysfs_weights, kobj);
430 	int err = kstrtouint(buf, 0, &weights->sz);
431 
432 	return err ? err : count;
433 }
434 
435 static ssize_t nr_accesses_permil_show(struct kobject *kobj,
436 		struct kobj_attribute *attr, char *buf)
437 {
438 	struct damon_sysfs_weights *weights = container_of(kobj,
439 			struct damon_sysfs_weights, kobj);
440 
441 	return sysfs_emit(buf, "%u\n", weights->nr_accesses);
442 }
443 
444 static ssize_t nr_accesses_permil_store(struct kobject *kobj,
445 		struct kobj_attribute *attr, const char *buf, size_t count)
446 {
447 	struct damon_sysfs_weights *weights = container_of(kobj,
448 			struct damon_sysfs_weights, kobj);
449 	int err = kstrtouint(buf, 0, &weights->nr_accesses);
450 
451 	return err ? err : count;
452 }
453 
454 static ssize_t age_permil_show(struct kobject *kobj,
455 		struct kobj_attribute *attr, char *buf)
456 {
457 	struct damon_sysfs_weights *weights = container_of(kobj,
458 			struct damon_sysfs_weights, kobj);
459 
460 	return sysfs_emit(buf, "%u\n", weights->age);
461 }
462 
463 static ssize_t age_permil_store(struct kobject *kobj,
464 		struct kobj_attribute *attr, const char *buf, size_t count)
465 {
466 	struct damon_sysfs_weights *weights = container_of(kobj,
467 			struct damon_sysfs_weights, kobj);
468 	int err = kstrtouint(buf, 0, &weights->age);
469 
470 	return err ? err : count;
471 }
472 
/* Free the weights object once its kobject's refcount drops to zero. */
static void damon_sysfs_weights_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
}

/* Files of a weights directory, all owner read/write. */
static struct kobj_attribute damon_sysfs_weights_sz_attr =
		__ATTR_RW_MODE(sz_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
		__ATTR_RW_MODE(nr_accesses_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_age_attr =
		__ATTR_RW_MODE(age_permil, 0600);

static struct attribute *damon_sysfs_weights_attrs[] = {
	&damon_sysfs_weights_sz_attr.attr,
	&damon_sysfs_weights_nr_accesses_attr.attr,
	&damon_sysfs_weights_age_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_weights);

/* kobj_type shared by every weights directory. */
static struct kobj_type damon_sysfs_weights_ktype = {
	.release = damon_sysfs_weights_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_weights_groups,
};
500 
501 /*
502  * quotas directory
503  */
504 
505 struct damon_sysfs_quotas {
506 	struct kobject kobj;
507 	struct damon_sysfs_weights *weights;
508 	unsigned long ms;
509 	unsigned long sz;
510 	unsigned long reset_interval_ms;
511 };
512 
513 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
514 {
515 	return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
516 }
517 
518 static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
519 {
520 	struct damon_sysfs_weights *weights;
521 	int err;
522 
523 	weights = damon_sysfs_weights_alloc(0, 0, 0);
524 	if (!weights)
525 		return -ENOMEM;
526 
527 	err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
528 			&quotas->kobj, "weights");
529 	if (err)
530 		kobject_put(&weights->kobj);
531 	else
532 		quotas->weights = weights;
533 	return err;
534 }
535 
536 static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
537 {
538 	kobject_put(&quotas->weights->kobj);
539 }
540 
541 static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
542 		char *buf)
543 {
544 	struct damon_sysfs_quotas *quotas = container_of(kobj,
545 			struct damon_sysfs_quotas, kobj);
546 
547 	return sysfs_emit(buf, "%lu\n", quotas->ms);
548 }
549 
550 static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
551 		const char *buf, size_t count)
552 {
553 	struct damon_sysfs_quotas *quotas = container_of(kobj,
554 			struct damon_sysfs_quotas, kobj);
555 	int err = kstrtoul(buf, 0, &quotas->ms);
556 
557 	if (err)
558 		return -EINVAL;
559 	return count;
560 }
561 
562 static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
563 		char *buf)
564 {
565 	struct damon_sysfs_quotas *quotas = container_of(kobj,
566 			struct damon_sysfs_quotas, kobj);
567 
568 	return sysfs_emit(buf, "%lu\n", quotas->sz);
569 }
570 
571 static ssize_t bytes_store(struct kobject *kobj,
572 		struct kobj_attribute *attr, const char *buf, size_t count)
573 {
574 	struct damon_sysfs_quotas *quotas = container_of(kobj,
575 			struct damon_sysfs_quotas, kobj);
576 	int err = kstrtoul(buf, 0, &quotas->sz);
577 
578 	if (err)
579 		return -EINVAL;
580 	return count;
581 }
582 
583 static ssize_t reset_interval_ms_show(struct kobject *kobj,
584 		struct kobj_attribute *attr, char *buf)
585 {
586 	struct damon_sysfs_quotas *quotas = container_of(kobj,
587 			struct damon_sysfs_quotas, kobj);
588 
589 	return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
590 }
591 
592 static ssize_t reset_interval_ms_store(struct kobject *kobj,
593 		struct kobj_attribute *attr, const char *buf, size_t count)
594 {
595 	struct damon_sysfs_quotas *quotas = container_of(kobj,
596 			struct damon_sysfs_quotas, kobj);
597 	int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);
598 
599 	if (err)
600 		return -EINVAL;
601 	return count;
602 }
603 
/* Free the quotas object once its kobject's refcount drops to zero. */
static void damon_sysfs_quotas_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
}

/* Files of a quotas directory, all owner read/write. */
static struct kobj_attribute damon_sysfs_quotas_ms_attr =
		__ATTR_RW_MODE(ms, 0600);

static struct kobj_attribute damon_sysfs_quotas_sz_attr =
		__ATTR_RW_MODE(bytes, 0600);

static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
		__ATTR_RW_MODE(reset_interval_ms, 0600);

static struct attribute *damon_sysfs_quotas_attrs[] = {
	&damon_sysfs_quotas_ms_attr.attr,
	&damon_sysfs_quotas_sz_attr.attr,
	&damon_sysfs_quotas_reset_interval_ms_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_quotas);

/* kobj_type shared by every quotas directory. */
static struct kobj_type damon_sysfs_quotas_ktype = {
	.release = damon_sysfs_quotas_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_quotas_groups,
};
631 
632 /*
633  * access_pattern directory
634  */
635 
636 struct damon_sysfs_access_pattern {
637 	struct kobject kobj;
638 	struct damon_sysfs_ul_range *sz;
639 	struct damon_sysfs_ul_range *nr_accesses;
640 	struct damon_sysfs_ul_range *age;
641 };
642 
643 static
644 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
645 {
646 	struct damon_sysfs_access_pattern *access_pattern =
647 		kmalloc(sizeof(*access_pattern), GFP_KERNEL);
648 
649 	if (!access_pattern)
650 		return NULL;
651 	access_pattern->kobj = (struct kobject){};
652 	return access_pattern;
653 }
654 
655 static int damon_sysfs_access_pattern_add_range_dir(
656 		struct damon_sysfs_access_pattern *access_pattern,
657 		struct damon_sysfs_ul_range **range_dir_ptr,
658 		char *name)
659 {
660 	struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
661 	int err;
662 
663 	if (!range)
664 		return -ENOMEM;
665 	err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
666 			&access_pattern->kobj, name);
667 	if (err)
668 		kobject_put(&range->kobj);
669 	else
670 		*range_dir_ptr = range;
671 	return err;
672 }
673 
674 static int damon_sysfs_access_pattern_add_dirs(
675 		struct damon_sysfs_access_pattern *access_pattern)
676 {
677 	int err;
678 
679 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
680 			&access_pattern->sz, "sz");
681 	if (err)
682 		goto put_sz_out;
683 
684 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
685 			&access_pattern->nr_accesses, "nr_accesses");
686 	if (err)
687 		goto put_nr_accesses_sz_out;
688 
689 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
690 			&access_pattern->age, "age");
691 	if (err)
692 		goto put_age_nr_accesses_sz_out;
693 	return 0;
694 
695 put_age_nr_accesses_sz_out:
696 	kobject_put(&access_pattern->age->kobj);
697 	access_pattern->age = NULL;
698 put_nr_accesses_sz_out:
699 	kobject_put(&access_pattern->nr_accesses->kobj);
700 	access_pattern->nr_accesses = NULL;
701 put_sz_out:
702 	kobject_put(&access_pattern->sz->kobj);
703 	access_pattern->sz = NULL;
704 	return err;
705 }
706 
707 static void damon_sysfs_access_pattern_rm_dirs(
708 		struct damon_sysfs_access_pattern *access_pattern)
709 {
710 	kobject_put(&access_pattern->sz->kobj);
711 	kobject_put(&access_pattern->nr_accesses->kobj);
712 	kobject_put(&access_pattern->age->kobj);
713 }
714 
/* Free the access_pattern object once its kobject's refcount drops to zero. */
static void damon_sysfs_access_pattern_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
}

/* No files of its own; only the range sub-directories. */
static struct attribute *damon_sysfs_access_pattern_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);

static struct kobj_type damon_sysfs_access_pattern_ktype = {
	.release = damon_sysfs_access_pattern_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_access_pattern_groups,
};
730 
731 /*
732  * scheme directory
733  */
734 
735 struct damon_sysfs_scheme {
736 	struct kobject kobj;
737 	enum damos_action action;
738 	struct damon_sysfs_access_pattern *access_pattern;
739 	struct damon_sysfs_quotas *quotas;
740 	struct damon_sysfs_watermarks *watermarks;
741 	struct damon_sysfs_stats *stats;
742 };
743 
744 /* This should match with enum damos_action */
745 static const char * const damon_sysfs_damos_action_strs[] = {
746 	"willneed",
747 	"cold",
748 	"pageout",
749 	"hugepage",
750 	"nohugepage",
751 	"lru_prio",
752 	"lru_deprio",
753 	"stat",
754 };
755 
756 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
757 		enum damos_action action)
758 {
759 	struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
760 				GFP_KERNEL);
761 
762 	if (!scheme)
763 		return NULL;
764 	scheme->kobj = (struct kobject){};
765 	scheme->action = action;
766 	return scheme;
767 }
768 
/*
 * Create the 'access_pattern' sub-directory (and its range children) under
 * the scheme directory.  Returns 0 on success, negative error otherwise.
 * On failure the access_pattern kobject is put; if add_dirs failed, it has
 * already cleaned up its own children, so the put frees everything.
 */
static int damon_sysfs_scheme_set_access_pattern(
		struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_access_pattern *access_pattern;
	int err;

	access_pattern = damon_sysfs_access_pattern_alloc();
	if (!access_pattern)
		return -ENOMEM;
	err = kobject_init_and_add(&access_pattern->kobj,
			&damon_sysfs_access_pattern_ktype, &scheme->kobj,
			"access_pattern");
	if (err)
		goto out;
	err = damon_sysfs_access_pattern_add_dirs(access_pattern);
	if (err)
		goto out;
	scheme->access_pattern = access_pattern;
	return 0;

out:
	kobject_put(&access_pattern->kobj);
	return err;
}
793 
/*
 * Create the 'quotas' sub-directory (and its 'weights' child) under the
 * scheme directory.  Returns 0 on success, negative error otherwise.  On
 * failure the quotas kobject is put; damon_sysfs_quotas_add_dirs() cleans
 * up its own child on its failure, so the put frees everything.
 */
static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
	int err;

	if (!quotas)
		return -ENOMEM;
	err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
			&scheme->kobj, "quotas");
	if (err)
		goto out;
	err = damon_sysfs_quotas_add_dirs(quotas);
	if (err)
		goto out;
	scheme->quotas = quotas;
	return 0;

out:
	kobject_put(&quotas->kobj);
	return err;
}
815 
816 static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
817 {
818 	struct damon_sysfs_watermarks *watermarks =
819 		damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
820 	int err;
821 
822 	if (!watermarks)
823 		return -ENOMEM;
824 	err = kobject_init_and_add(&watermarks->kobj,
825 			&damon_sysfs_watermarks_ktype, &scheme->kobj,
826 			"watermarks");
827 	if (err)
828 		kobject_put(&watermarks->kobj);
829 	else
830 		scheme->watermarks = watermarks;
831 	return err;
832 }
833 
834 static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
835 {
836 	struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
837 	int err;
838 
839 	if (!stats)
840 		return -ENOMEM;
841 	err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
842 			&scheme->kobj, "stats");
843 	if (err)
844 		kobject_put(&stats->kobj);
845 	else
846 		scheme->stats = stats;
847 	return err;
848 }
849 
/*
 * Create all sub-directories of a scheme directory.  Each failure path
 * puts the directories that were already created, in reverse order.
 *
 * NOTE(review): the error labels put the immediate child kobjects without
 * first calling the matching *_rm_dirs() as damon_sysfs_scheme_rm_dirs()
 * does, so the grandchild directories (ranges/weights) still hold
 * references on these paths — confirm this cannot leak them.
 */
static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
{
	int err;

	err = damon_sysfs_scheme_set_access_pattern(scheme);
	if (err)
		return err;
	err = damon_sysfs_scheme_set_quotas(scheme);
	if (err)
		goto put_access_pattern_out;
	err = damon_sysfs_scheme_set_watermarks(scheme);
	if (err)
		goto put_quotas_access_pattern_out;
	err = damon_sysfs_scheme_set_stats(scheme);
	if (err)
		goto put_watermarks_quotas_access_pattern_out;
	return 0;

put_watermarks_quotas_access_pattern_out:
	kobject_put(&scheme->watermarks->kobj);
	scheme->watermarks = NULL;
put_quotas_access_pattern_out:
	kobject_put(&scheme->quotas->kobj);
	scheme->quotas = NULL;
put_access_pattern_out:
	kobject_put(&scheme->access_pattern->kobj);
	scheme->access_pattern = NULL;
	return err;
}

/*
 * Remove all sub-directories of a scheme directory: grandchildren first,
 * then the children themselves.
 */
static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
{
	damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
	kobject_put(&scheme->access_pattern->kobj);
	damon_sysfs_quotas_rm_dirs(scheme->quotas);
	kobject_put(&scheme->quotas->kobj);
	kobject_put(&scheme->watermarks->kobj);
	kobject_put(&scheme->stats->kobj);
}
889 
890 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
891 		char *buf)
892 {
893 	struct damon_sysfs_scheme *scheme = container_of(kobj,
894 			struct damon_sysfs_scheme, kobj);
895 
896 	return sysfs_emit(buf, "%s\n",
897 			damon_sysfs_damos_action_strs[scheme->action]);
898 }
899 
900 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
901 		const char *buf, size_t count)
902 {
903 	struct damon_sysfs_scheme *scheme = container_of(kobj,
904 			struct damon_sysfs_scheme, kobj);
905 	enum damos_action action;
906 
907 	for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
908 		if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
909 			scheme->action = action;
910 			return count;
911 		}
912 	}
913 	return -EINVAL;
914 }
915 
/* Free the scheme object once its kobject's refcount drops to zero. */
static void damon_sysfs_scheme_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
}

/* The only direct file of a scheme directory: 'action'. */
static struct kobj_attribute damon_sysfs_scheme_action_attr =
		__ATTR_RW_MODE(action, 0600);

static struct attribute *damon_sysfs_scheme_attrs[] = {
	&damon_sysfs_scheme_action_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme);

static struct kobj_type damon_sysfs_scheme_ktype = {
	.release = damon_sysfs_scheme_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_groups,
};
935 
936 /*
937  * schemes directory
938  */
939 
940 struct damon_sysfs_schemes {
941 	struct kobject kobj;
942 	struct damon_sysfs_scheme **schemes_arr;
943 	int nr;
944 };
945 
946 static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
947 {
948 	return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
949 }
950 
/*
 * Remove all scheme directories and free the array holding them.  Safe to
 * call when no directories exist yet: ->nr is 0 and kfree(NULL) is a
 * no-op.
 */
static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
{
	struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
	int i;

	for (i = 0; i < schemes->nr; i++) {
		damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
		kobject_put(&schemes_arr[i]->kobj);
	}
	schemes->nr = 0;
	kfree(schemes_arr);
	schemes->schemes_arr = NULL;
}

/*
 * Replace the existing scheme directories with @nr_schemes fresh ones
 * (action defaulting to DAMOS_STAT).  On partial failure, every
 * already-created directory plus the in-flight scheme is torn down;
 * ->nr is only incremented after a scheme is fully constructed, so
 * rm_dirs() in the error path never touches the failed one.
 */
static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
		int nr_schemes)
{
	struct damon_sysfs_scheme **schemes_arr, *scheme;
	int err, i;

	damon_sysfs_schemes_rm_dirs(schemes);
	if (!nr_schemes)
		return 0;

	schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!schemes_arr)
		return -ENOMEM;
	schemes->schemes_arr = schemes_arr;

	for (i = 0; i < nr_schemes; i++) {
		scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
		if (!scheme) {
			damon_sysfs_schemes_rm_dirs(schemes);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&scheme->kobj,
				&damon_sysfs_scheme_ktype, &schemes->kobj,
				"%d", i);
		if (err)
			goto out;
		err = damon_sysfs_scheme_add_dirs(scheme);
		if (err)
			goto out;

		schemes_arr[i] = scheme;
		schemes->nr++;
	}
	return 0;

out:
	damon_sysfs_schemes_rm_dirs(schemes);
	kobject_put(&scheme->kobj);
	return err;
}
1007 
/* Show the current number of scheme directories. */
static ssize_t nr_schemes_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_schemes *schemes = container_of(kobj,
			struct damon_sysfs_schemes, kobj);

	return sysfs_emit(buf, "%d\n", schemes->nr);
}

/*
 * Recreate the scheme directories so that exactly @buf of them exist.
 * Rejects negative counts with -EINVAL.  Uses mutex_trylock() and bails
 * out with -EBUSY instead of sleeping when damon_sysfs_lock is contended —
 * presumably to avoid blocking while another sysfs operation is in flight;
 * confirm against the lock's other users outside this chunk.
 */
static ssize_t nr_schemes_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_schemes *schemes;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	schemes = container_of(kobj, struct damon_sysfs_schemes, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_schemes_add_dirs(schemes, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;
	return count;
}
1038 
/* Free the schemes container once its kobject's refcount drops to zero. */
static void damon_sysfs_schemes_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
}

/* The only direct file of the schemes directory: 'nr_schemes'. */
static struct kobj_attribute damon_sysfs_schemes_nr_attr =
		__ATTR_RW_MODE(nr_schemes, 0600);

static struct attribute *damon_sysfs_schemes_attrs[] = {
	&damon_sysfs_schemes_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_schemes);

static struct kobj_type damon_sysfs_schemes_ktype = {
	.release = damon_sysfs_schemes_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_schemes_groups,
};
1058 
1059 /*
1060  * init region directory
1061  */
1062 
1063 struct damon_sysfs_region {
1064 	struct kobject kobj;
1065 	unsigned long start;
1066 	unsigned long end;
1067 };
1068 
1069 static struct damon_sysfs_region *damon_sysfs_region_alloc(
1070 		unsigned long start,
1071 		unsigned long end)
1072 {
1073 	struct damon_sysfs_region *region = kmalloc(sizeof(*region),
1074 			GFP_KERNEL);
1075 
1076 	if (!region)
1077 		return NULL;
1078 	region->kobj = (struct kobject){};
1079 	region->start = start;
1080 	region->end = end;
1081 	return region;
1082 }
1083 
1084 static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
1085 		char *buf)
1086 {
1087 	struct damon_sysfs_region *region = container_of(kobj,
1088 			struct damon_sysfs_region, kobj);
1089 
1090 	return sysfs_emit(buf, "%lu\n", region->start);
1091 }
1092 
1093 static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
1094 		const char *buf, size_t count)
1095 {
1096 	struct damon_sysfs_region *region = container_of(kobj,
1097 			struct damon_sysfs_region, kobj);
1098 	int err = kstrtoul(buf, 0, &region->start);
1099 
1100 	return err ? err : count;
1101 }
1102 
1103 static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
1104 		char *buf)
1105 {
1106 	struct damon_sysfs_region *region = container_of(kobj,
1107 			struct damon_sysfs_region, kobj);
1108 
1109 	return sysfs_emit(buf, "%lu\n", region->end);
1110 }
1111 
1112 static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
1113 		const char *buf, size_t count)
1114 {
1115 	struct damon_sysfs_region *region = container_of(kobj,
1116 			struct damon_sysfs_region, kobj);
1117 	int err = kstrtoul(buf, 0, &region->end);
1118 
1119 	return err ? err : count;
1120 }
1121 
/* Free the region directory wrapper when its kobject is released */
static void damon_sysfs_region_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_region, kobj));
}

static struct kobj_attribute damon_sysfs_region_start_attr =
		__ATTR_RW_MODE(start, 0600);

static struct kobj_attribute damon_sysfs_region_end_attr =
		__ATTR_RW_MODE(end, 0600);

static struct attribute *damon_sysfs_region_attrs[] = {
	&damon_sysfs_region_start_attr.attr,
	&damon_sysfs_region_end_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_region);

/* kobj_type for each numbered region directory */
static struct kobj_type damon_sysfs_region_ktype = {
	.release = damon_sysfs_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_region_groups,
};
1145 
1146 /*
1147  * init_regions directory
1148  */
1149 
/* The 'regions' directory: holds 'nr' numbered child region directories */
struct damon_sysfs_regions {
	struct kobject kobj;
	struct damon_sysfs_region **regions_arr;	/* the 'nr' children */
	int nr;
};

/* Allocate a zeroed 'regions' wrapper (no children yet) */
static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
}

/*
 * Drop every child region directory and free the pointer array.  Safe to
 * call when there are no children (regions_arr NULL, nr 0).
 */
static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
{
	struct damon_sysfs_region **regions_arr = regions->regions_arr;
	int i;

	for (i = 0; i < regions->nr; i++)
		kobject_put(&regions_arr[i]->kobj);
	regions->nr = 0;
	kfree(regions_arr);
	regions->regions_arr = NULL;
}
1172 
/*
 * (Re)create 'nr_regions' numbered child directories under 'regions'.
 * Existing children are always removed first, so writing 0 just cleans up.
 * On any mid-loop failure, all children created so far are removed again
 * before returning the error.
 */
static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
		int nr_regions)
{
	struct damon_sysfs_region **regions_arr, *region;
	int err, i;

	damon_sysfs_regions_rm_dirs(regions);
	if (!nr_regions)
		return 0;

	/* size is user-controlled; suppress allocation-failure warnings */
	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!regions_arr)
		return -ENOMEM;
	regions->regions_arr = regions_arr;

	for (i = 0; i < nr_regions; i++) {
		region = damon_sysfs_region_alloc(0, 0);
		if (!region) {
			damon_sysfs_regions_rm_dirs(regions);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&region->kobj,
				&damon_sysfs_region_ktype, &regions->kobj,
				"%d", i);
		if (err) {
			/* drop the half-constructed child, then the rest */
			kobject_put(&region->kobj);
			damon_sysfs_regions_rm_dirs(regions);
			return err;
		}

		regions_arr[i] = region;
		regions->nr++;
	}
	return 0;
}
1210 
1211 static ssize_t nr_regions_show(struct kobject *kobj,
1212 		struct kobj_attribute *attr, char *buf)
1213 {
1214 	struct damon_sysfs_regions *regions = container_of(kobj,
1215 			struct damon_sysfs_regions, kobj);
1216 
1217 	return sysfs_emit(buf, "%d\n", regions->nr);
1218 }
1219 
/*
 * Handle a write to 'nr_regions': (re)populate the child directories.
 * Uses mutex_trylock() so that a concurrent holder of damon_sysfs_lock
 * makes this return -EBUSY instead of blocking the writer.
 */
static ssize_t nr_regions_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_regions *regions;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	regions = container_of(kobj, struct damon_sysfs_regions, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_regions_add_dirs(regions, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
1242 
/* Free the 'regions' directory wrapper when its kobject is released */
static void damon_sysfs_regions_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
}

static struct kobj_attribute damon_sysfs_regions_nr_attr =
		__ATTR_RW_MODE(nr_regions, 0600);

static struct attribute *damon_sysfs_regions_attrs[] = {
	&damon_sysfs_regions_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_regions);

/* kobj_type for the 'regions' directory */
static struct kobj_type damon_sysfs_regions_ktype = {
	.release = damon_sysfs_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_regions_groups,
};
1262 
1263 /*
1264  * target directory
1265  */
1266 
/*
 * Directory for one monitoring target.  'pid' is the raw pid value the user
 * wrote; it is resolved to a struct pid only when the selected ops monitor
 * per-process address spaces (see damon_sysfs_add_target()).
 */
struct damon_sysfs_target {
	struct kobject kobj;
	struct damon_sysfs_regions *regions;
	int pid;
};

/* Allocate a zeroed target wrapper */
static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
}

/*
 * Create the 'regions' subdirectory of a target directory.  On failure the
 * half-constructed kobject is dropped and target->regions stays unset.
 */
static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
{
	struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
	int err;

	if (!regions)
		return -ENOMEM;

	err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
			&target->kobj, "regions");
	if (err)
		kobject_put(&regions->kobj);
	else
		target->regions = regions;
	return err;
}

/* Tear down the 'regions' subdirectory: children first, then the dir */
static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
{
	damon_sysfs_regions_rm_dirs(target->regions);
	kobject_put(&target->regions->kobj);
}
1300 
1301 static ssize_t pid_target_show(struct kobject *kobj,
1302 		struct kobj_attribute *attr, char *buf)
1303 {
1304 	struct damon_sysfs_target *target = container_of(kobj,
1305 			struct damon_sysfs_target, kobj);
1306 
1307 	return sysfs_emit(buf, "%d\n", target->pid);
1308 }
1309 
1310 static ssize_t pid_target_store(struct kobject *kobj,
1311 		struct kobj_attribute *attr, const char *buf, size_t count)
1312 {
1313 	struct damon_sysfs_target *target = container_of(kobj,
1314 			struct damon_sysfs_target, kobj);
1315 	int err = kstrtoint(buf, 0, &target->pid);
1316 
1317 	if (err)
1318 		return -EINVAL;
1319 	return count;
1320 }
1321 
/* Free the target directory wrapper when its kobject is released */
static void damon_sysfs_target_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_target, kobj));
}

static struct kobj_attribute damon_sysfs_target_pid_attr =
		__ATTR_RW_MODE(pid_target, 0600);

static struct attribute *damon_sysfs_target_attrs[] = {
	&damon_sysfs_target_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_target);

/* kobj_type for each numbered target directory */
static struct kobj_type damon_sysfs_target_ktype = {
	.release = damon_sysfs_target_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_target_groups,
};
1341 
1342 /*
1343  * targets directory
1344  */
1345 
/* The 'targets' directory: holds 'nr' numbered child target directories */
struct damon_sysfs_targets {
	struct kobject kobj;
	struct damon_sysfs_target **targets_arr;	/* the 'nr' children */
	int nr;
};

/* Allocate a zeroed 'targets' wrapper (no children yet) */
static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
}

/*
 * Tear down every child target (its 'regions' subtree first) and free the
 * pointer array.  Safe to call when there are no children.
 */
static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
{
	struct damon_sysfs_target **targets_arr = targets->targets_arr;
	int i;

	for (i = 0; i < targets->nr; i++) {
		damon_sysfs_target_rm_dirs(targets_arr[i]);
		kobject_put(&targets_arr[i]->kobj);
	}
	targets->nr = 0;
	kfree(targets_arr);
	targets->targets_arr = NULL;
}
1370 
/*
 * (Re)create 'nr_targets' numbered child directories under 'targets',
 * each with its own 'regions' subdirectory.  Existing children are removed
 * first.  The 'out' path removes every fully-added child and then drops the
 * current, not-yet-registered target separately (it was never stored in
 * targets_arr, so rm_dirs cannot see it).
 */
static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
		int nr_targets)
{
	struct damon_sysfs_target **targets_arr, *target;
	int err, i;

	damon_sysfs_targets_rm_dirs(targets);
	if (!nr_targets)
		return 0;

	/* size is user-controlled; suppress allocation-failure warnings */
	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!targets_arr)
		return -ENOMEM;
	targets->targets_arr = targets_arr;

	for (i = 0; i < nr_targets; i++) {
		target = damon_sysfs_target_alloc();
		if (!target) {
			damon_sysfs_targets_rm_dirs(targets);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&target->kobj,
				&damon_sysfs_target_ktype, &targets->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_target_add_dirs(target);
		if (err)
			goto out;

		targets_arr[i] = target;
		targets->nr++;
	}
	return 0;

out:
	damon_sysfs_targets_rm_dirs(targets);
	kobject_put(&target->kobj);
	return err;
}
1414 
1415 static ssize_t nr_targets_show(struct kobject *kobj,
1416 		struct kobj_attribute *attr, char *buf)
1417 {
1418 	struct damon_sysfs_targets *targets = container_of(kobj,
1419 			struct damon_sysfs_targets, kobj);
1420 
1421 	return sysfs_emit(buf, "%d\n", targets->nr);
1422 }
1423 
/*
 * Handle a write to 'nr_targets': (re)populate the child directories under
 * damon_sysfs_lock; -EBUSY if the lock is contended.
 */
static ssize_t nr_targets_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_targets *targets;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	targets = container_of(kobj, struct damon_sysfs_targets, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_targets_add_dirs(targets, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
1446 
/* Free the 'targets' directory wrapper when its kobject is released */
static void damon_sysfs_targets_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
}

static struct kobj_attribute damon_sysfs_targets_nr_attr =
		__ATTR_RW_MODE(nr_targets, 0600);

static struct attribute *damon_sysfs_targets_attrs[] = {
	&damon_sysfs_targets_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_targets);

/* kobj_type for the 'targets' directory */
static struct kobj_type damon_sysfs_targets_ktype = {
	.release = damon_sysfs_targets_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_targets_groups,
};
1466 
1467 /*
1468  * intervals directory
1469  */
1470 
/* Monitoring intervals directory; all three values are in microseconds */
struct damon_sysfs_intervals {
	struct kobject kobj;
	unsigned long sample_us;
	unsigned long aggr_us;
	unsigned long update_us;
};

/*
 * Allocate an intervals wrapper pre-set with the given values.  Returns
 * NULL on allocation failure.
 */
static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
		unsigned long sample_us, unsigned long aggr_us,
		unsigned long update_us)
{
	struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
			GFP_KERNEL);

	if (!intervals)
		return NULL;

	intervals->kobj = (struct kobject){};
	intervals->sample_us = sample_us;
	intervals->aggr_us = aggr_us;
	intervals->update_us = update_us;
	return intervals;
}
1494 
/* Print the sampling interval in microseconds */
static ssize_t sample_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->sample_us);
}

/* Parse and set the sampling interval; propagate parse errors */
static ssize_t sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return err;

	intervals->sample_us = us;
	return count;
}
1518 
/* Print the aggregation interval in microseconds */
static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
}

/* Parse and set the aggregation interval; propagate parse errors */
static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return err;

	intervals->aggr_us = us;
	return count;
}
1542 
/* Print the operations-update interval in microseconds */
static ssize_t update_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->update_us);
}

/* Parse and set the operations-update interval; propagate parse errors */
static ssize_t update_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return err;

	intervals->update_us = us;
	return count;
}
1566 
/* Free the intervals directory wrapper when its kobject is released */
static void damon_sysfs_intervals_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
}

static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
		__ATTR_RW_MODE(sample_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
		__ATTR_RW_MODE(aggr_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
		__ATTR_RW_MODE(update_us, 0600);

static struct attribute *damon_sysfs_intervals_attrs[] = {
	&damon_sysfs_intervals_sample_us_attr.attr,
	&damon_sysfs_intervals_aggr_us_attr.attr,
	&damon_sysfs_intervals_update_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals);

/* kobj_type for the 'intervals' directory */
static struct kobj_type damon_sysfs_intervals_ktype = {
	.release = damon_sysfs_intervals_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_groups,
};
1594 
1595 /*
1596  * monitoring_attrs directory
1597  */
1598 
/*
 * The 'monitoring_attrs' directory, grouping the 'intervals' and
 * 'nr_regions' subdirectories.
 */
struct damon_sysfs_attrs {
	struct kobject kobj;
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
};

/*
 * Allocate an attrs wrapper.  Only the kobject is initialized here; the
 * 'intervals' and 'nr_regions_range' pointers are set later by
 * damon_sysfs_attrs_add_dirs() and must not be read before that.
 */
static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
{
	struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);

	if (!attrs)
		return NULL;
	attrs->kobj = (struct kobject){};
	return attrs;
}
1614 
/*
 * Create the 'intervals' and 'nr_regions' subdirectories with default
 * values: 5 ms sampling, 100 ms aggregation, 60 s ops-update, and a
 * 10..1000 regions range.  Errors unwind via the goto labels, dropping the
 * already-created kobjects and clearing the pointers.
 */
static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
{
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
	int err;

	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
	if (!intervals)
		return -ENOMEM;

	err = kobject_init_and_add(&intervals->kobj,
			&damon_sysfs_intervals_ktype, &attrs->kobj,
			"intervals");
	if (err)
		goto put_intervals_out;
	attrs->intervals = intervals;

	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
	if (!nr_regions_range) {
		err = -ENOMEM;
		goto put_intervals_out;
	}

	err = kobject_init_and_add(&nr_regions_range->kobj,
			&damon_sysfs_ul_range_ktype, &attrs->kobj,
			"nr_regions");
	if (err)
		goto put_nr_regions_intervals_out;
	attrs->nr_regions_range = nr_regions_range;
	return 0;

put_nr_regions_intervals_out:
	kobject_put(&nr_regions_range->kobj);
	attrs->nr_regions_range = NULL;
put_intervals_out:
	kobject_put(&intervals->kobj);
	attrs->intervals = NULL;
	return err;
}
1654 
/* Drop both subdirectories created by damon_sysfs_attrs_add_dirs() */
static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
{
	kobject_put(&attrs->nr_regions_range->kobj);
	kobject_put(&attrs->intervals->kobj);
}

/* Free the attrs wrapper when its kobject is released */
static void damon_sysfs_attrs_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
}

/* 'monitoring_attrs' has no attribute files of its own, only subdirs */
static struct attribute *damon_sysfs_attrs_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_attrs);

static struct kobj_type damon_sysfs_attrs_ktype = {
	.release = damon_sysfs_attrs_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_attrs_groups,
};
1676 
1677 /*
1678  * context directory
1679  */
1680 
/* This should match with enum damon_ops_id */
static const char * const damon_sysfs_ops_strs[] = {
	"vaddr",
	"fvaddr",
	"paddr",
};

/*
 * Directory for one monitoring context: the operations set in use plus the
 * 'monitoring_attrs', 'targets' and 'schemes' subdirectories.
 */
struct damon_sysfs_context {
	struct kobject kobj;
	enum damon_ops_id ops_id;
	struct damon_sysfs_attrs *attrs;
	struct damon_sysfs_targets *targets;
	struct damon_sysfs_schemes *schemes;
};

/*
 * Allocate a context wrapper using the given ops.  The subdirectory
 * pointers are set later by damon_sysfs_context_add_dirs().
 */
static struct damon_sysfs_context *damon_sysfs_context_alloc(
		enum damon_ops_id ops_id)
{
	struct damon_sysfs_context *context = kmalloc(sizeof(*context),
				GFP_KERNEL);

	if (!context)
		return NULL;
	context->kobj = (struct kobject){};
	context->ops_id = ops_id;
	return context;
}
1708 
/*
 * Create the 'monitoring_attrs' subdirectory of a context and populate it.
 * On failure the kobject is dropped, which also frees the wrapper.
 */
static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
{
	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
	int err;

	if (!attrs)
		return -ENOMEM;
	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
			&context->kobj, "monitoring_attrs");
	if (err)
		goto out;
	err = damon_sysfs_attrs_add_dirs(attrs);
	if (err)
		goto out;
	context->attrs = attrs;
	return 0;

out:
	kobject_put(&attrs->kobj);
	return err;
}
1730 
/* Create the (initially empty) 'targets' subdirectory of a context */
static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
{
	struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
	int err;

	if (!targets)
		return -ENOMEM;
	err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
			&context->kobj, "targets");
	if (err) {
		kobject_put(&targets->kobj);
		return err;
	}
	context->targets = targets;
	return 0;
}
1747 
/* Create the (initially empty) 'schemes' subdirectory of a context */
static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
{
	struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
	int err;

	if (!schemes)
		return -ENOMEM;
	err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
			&context->kobj, "schemes");
	if (err) {
		kobject_put(&schemes->kobj);
		return err;
	}
	context->schemes = schemes;
	return 0;
}
1764 
/*
 * Create all three subdirectories of a context.  Failures unwind in
 * reverse creation order via the goto labels.
 */
static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
{
	int err;

	err = damon_sysfs_context_set_attrs(context);
	if (err)
		return err;

	err = damon_sysfs_context_set_targets(context);
	if (err)
		goto put_attrs_out;

	err = damon_sysfs_context_set_schemes(context);
	if (err)
		goto put_targets_attrs_out;
	return 0;

put_targets_attrs_out:
	kobject_put(&context->targets->kobj);
	context->targets = NULL;
put_attrs_out:
	kobject_put(&context->attrs->kobj);
	context->attrs = NULL;
	return err;
}

/* Tear down all subdirectories of a context, deepest children first */
static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
{
	damon_sysfs_attrs_rm_dirs(context->attrs);
	kobject_put(&context->attrs->kobj);
	damon_sysfs_targets_rm_dirs(context->targets);
	kobject_put(&context->targets->kobj);
	damon_sysfs_schemes_rm_dirs(context->schemes);
	kobject_put(&context->schemes->kobj);
}
1800 
/*
 * List the currently registered DAMON operations sets, one name per line,
 * skipping ids that have no registered implementation.
 */
static ssize_t avail_operations_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	enum damon_ops_id id;
	int len = 0;

	for (id = 0; id < NR_DAMON_OPS; id++) {
		if (!damon_is_registered_ops(id))
			continue;
		len += sysfs_emit_at(buf, len, "%s\n",
				damon_sysfs_ops_strs[id]);
	}
	return len;
}
1815 
1816 static ssize_t operations_show(struct kobject *kobj,
1817 		struct kobj_attribute *attr, char *buf)
1818 {
1819 	struct damon_sysfs_context *context = container_of(kobj,
1820 			struct damon_sysfs_context, kobj);
1821 
1822 	return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
1823 }
1824 
1825 static ssize_t operations_store(struct kobject *kobj,
1826 		struct kobj_attribute *attr, const char *buf, size_t count)
1827 {
1828 	struct damon_sysfs_context *context = container_of(kobj,
1829 			struct damon_sysfs_context, kobj);
1830 	enum damon_ops_id id;
1831 
1832 	for (id = 0; id < NR_DAMON_OPS; id++) {
1833 		if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
1834 			context->ops_id = id;
1835 			return count;
1836 		}
1837 	}
1838 	return -EINVAL;
1839 }
1840 
/* Free the context wrapper when its kobject is released */
static void damon_sysfs_context_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_context, kobj));
}

static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
		__ATTR_RO_MODE(avail_operations, 0400);

static struct kobj_attribute damon_sysfs_context_operations_attr =
		__ATTR_RW_MODE(operations, 0600);

static struct attribute *damon_sysfs_context_attrs[] = {
	&damon_sysfs_context_avail_operations_attr.attr,
	&damon_sysfs_context_operations_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_context);

/* kobj_type for each numbered context directory */
static struct kobj_type damon_sysfs_context_ktype = {
	.release = damon_sysfs_context_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_context_groups,
};
1864 
1865 /*
1866  * contexts directory
1867  */
1868 
/* The 'contexts' directory: holds 'nr' numbered child context directories */
struct damon_sysfs_contexts {
	struct kobject kobj;
	struct damon_sysfs_context **contexts_arr;	/* the 'nr' children */
	int nr;
};

/* Allocate a zeroed 'contexts' wrapper (no children yet) */
static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
}

/*
 * Tear down every child context (its subtree first) and free the pointer
 * array.  Safe to call when there are no children.
 */
static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
{
	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
	int i;

	for (i = 0; i < contexts->nr; i++) {
		damon_sysfs_context_rm_dirs(contexts_arr[i]);
		kobject_put(&contexts_arr[i]->kobj);
	}
	contexts->nr = 0;
	kfree(contexts_arr);
	contexts->contexts_arr = NULL;
}
1893 
/*
 * (Re)create 'nr_contexts' numbered child directories under 'contexts',
 * each defaulting to the vaddr operations set.  Existing children are
 * removed first.  The 'out' path removes every fully-added child and then
 * drops the current, not-yet-registered context separately.
 */
static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
		int nr_contexts)
{
	struct damon_sysfs_context **contexts_arr, *context;
	int err, i;

	damon_sysfs_contexts_rm_dirs(contexts);
	if (!nr_contexts)
		return 0;

	/* size is user-controlled; suppress allocation-failure warnings */
	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!contexts_arr)
		return -ENOMEM;
	contexts->contexts_arr = contexts_arr;

	for (i = 0; i < nr_contexts; i++) {
		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
		if (!context) {
			damon_sysfs_contexts_rm_dirs(contexts);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&context->kobj,
				&damon_sysfs_context_ktype, &contexts->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_context_add_dirs(context);
		if (err)
			goto out;

		contexts_arr[i] = context;
		contexts->nr++;
	}
	return 0;

out:
	damon_sysfs_contexts_rm_dirs(contexts);
	kobject_put(&context->kobj);
	return err;
}
1937 
1938 static ssize_t nr_contexts_show(struct kobject *kobj,
1939 		struct kobj_attribute *attr, char *buf)
1940 {
1941 	struct damon_sysfs_contexts *contexts = container_of(kobj,
1942 			struct damon_sysfs_contexts, kobj);
1943 
1944 	return sysfs_emit(buf, "%d\n", contexts->nr);
1945 }
1946 
/*
 * Handle a write to 'nr_contexts'.  Only 0 or 1 contexts per kdamond are
 * accepted for now; larger values are rejected with -EINVAL.
 */
static ssize_t nr_contexts_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_contexts *contexts;
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	/* TODO: support multiple contexts per kdamond */
	if (nr < 0 || 1 < nr)
		return -EINVAL;

	contexts = container_of(kobj, struct damon_sysfs_contexts, kobj);
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_contexts_add_dirs(contexts, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
1970 
/* Free the 'contexts' directory wrapper when its kobject is released */
static void damon_sysfs_contexts_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
}

static struct kobj_attribute damon_sysfs_contexts_nr_attr
		= __ATTR_RW_MODE(nr_contexts, 0600);

static struct attribute *damon_sysfs_contexts_attrs[] = {
	&damon_sysfs_contexts_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);

/* kobj_type for the 'contexts' directory */
static struct kobj_type damon_sysfs_contexts_ktype = {
	.release = damon_sysfs_contexts_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_contexts_groups,
};
1990 
1991 /*
1992  * kdamond directory
1993  */
1994 
/*
 * Directory for one kdamond.  'damon_ctx' points to the running DAMON
 * context; state_show() treats a NULL pointer as "not running".
 */
struct damon_sysfs_kdamond {
	struct kobject kobj;
	struct damon_sysfs_contexts *contexts;
	struct damon_ctx *damon_ctx;
};

/* Allocate a zeroed kdamond wrapper */
static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
}

/* Create the (initially empty) 'contexts' subdirectory of a kdamond */
static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_sysfs_contexts *contexts;
	int err;

	contexts = damon_sysfs_contexts_alloc();
	if (!contexts)
		return -ENOMEM;

	err = kobject_init_and_add(&contexts->kobj,
			&damon_sysfs_contexts_ktype, &kdamond->kobj,
			"contexts");
	if (err) {
		kobject_put(&contexts->kobj);
		return err;
	}
	kdamond->contexts = contexts;

	return err;
}

/* Tear down the 'contexts' subtree of a kdamond, children first */
static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
{
	damon_sysfs_contexts_rm_dirs(kdamond->contexts);
	kobject_put(&kdamond->contexts->kobj);
}
2032 
/*
 * Tell whether a DAMON context has a live kdamond thread, reading
 * ctx->kdamond under its dedicated lock.
 */
static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);
	return running;
}
2042 
/*
 * enum damon_sysfs_cmd - Commands for a specific kdamond.
 */
enum damon_sysfs_cmd {
	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
	DAMON_SYSFS_CMD_ON,
	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
	DAMON_SYSFS_CMD_OFF,
	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
	DAMON_SYSFS_CMD_COMMIT,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
	 * files.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
	/*
	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
	 */
	NR_DAMON_SYSFS_CMDS,
};

/* Should match with enum damon_sysfs_cmd */
static const char * const damon_sysfs_cmd_strs[] = {
	"on",
	"off",
	"commit",
	"update_schemes_stats",
};

/*
 * struct damon_sysfs_cmd_request - A request to the DAMON callback.
 * @cmd:	The command that needs to be handled by the callback.
 * @kdamond:	The kobject wrapper that associated to the kdamond thread.
 *
 * This structure represents a sysfs command request that need to access some
 * DAMON context-internal data.  Because DAMON context-internal data can be
 * safely accessed from DAMON callbacks without additional synchronization, the
 * request will be handled by the DAMON callback.  None-``NULL`` @kdamond means
 * the request is valid.
 */
struct damon_sysfs_cmd_request {
	enum damon_sysfs_cmd cmd;
	struct damon_sysfs_kdamond *kdamond;
};

/* Current DAMON callback request.  Protected by damon_sysfs_lock. */
/* A NULL ->kdamond means no request is currently pending. */
static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
2090 
/*
 * Show "on" if this kdamond is running, "off" otherwise.
 *
 * NOTE(review): kdamond->damon_ctx is read here without taking
 * damon_sysfs_lock; confirm this cannot race with concurrent context
 * teardown.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx = kdamond->damon_ctx;
	bool running;

	if (!ctx)
		running = false;
	else
		running = damon_sysfs_ctx_running(ctx);

	return sysfs_emit(buf, "%s\n", running ?
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
}
2108 
/*
 * Translate the sysfs 'monitoring_attrs' inputs into a struct damon_attrs
 * and apply them to the given DAMON context.
 */
static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
		struct damon_sysfs_attrs *sys_attrs)
{
	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
	struct damon_sysfs_ul_range *sys_nr_regions =
		sys_attrs->nr_regions_range;
	struct damon_attrs attrs = {
		.sample_interval = sys_intervals->sample_us,
		.aggr_interval = sys_intervals->aggr_us,
		.ops_update_interval = sys_intervals->update_us,
		.min_nr_regions = sys_nr_regions->min,
		.max_nr_regions = sys_nr_regions->max,
	};
	return damon_set_attrs(ctx, &attrs);
}
2124 
/*
 * Destroy every monitoring target of the context, dropping the struct pid
 * reference first when the ops set resolves targets by pid.
 */
static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;
	bool has_pid = damon_target_has_pid(ctx);

	damon_for_each_target_safe(t, next, ctx) {
		if (has_pid)
			put_pid(t->pid);
		damon_destroy_target(t);
	}
}
2136 
/*
 * Copy the user-written region ranges into the target.  The ranges must be
 * individually valid (start <= end) and sorted without overlap (previous
 * end <= next start); otherwise -EINVAL is returned and the target is left
 * unmodified.
 */
static int damon_sysfs_set_regions(struct damon_target *t,
		struct damon_sysfs_regions *sysfs_regions)
{
	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	int i, err = -EINVAL;

	if (!ranges)
		return -ENOMEM;
	for (i = 0; i < sysfs_regions->nr; i++) {
		struct damon_sysfs_region *sys_region =
			sysfs_regions->regions_arr[i];

		if (sys_region->start > sys_region->end)
			goto out;

		ranges[i].start = sys_region->start;
		ranges[i].end = sys_region->end;
		if (i == 0)
			continue;
		/* reject unsorted or overlapping ranges */
		if (ranges[i - 1].end > ranges[i].start)
			goto out;
	}
	err = damon_set_regions(t, ranges, sysfs_regions->nr);
out:
	kfree(ranges);
	return err;

}
2166 
/*
 * Create a new monitoring target in the context from a sysfs target input,
 * resolving the pid when the ops set needs it.  Note that on failure this
 * destroys ALL targets of the context via damon_sysfs_destroy_targets(),
 * not just the one being added.
 */
static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
		struct damon_ctx *ctx)
{
	struct damon_target *t = damon_new_target();
	int err = -EINVAL;

	if (!t)
		return -ENOMEM;
	damon_add_target(ctx, t);
	if (damon_target_has_pid(ctx)) {
		t->pid = find_get_pid(sys_target->pid);
		if (!t->pid)
			goto destroy_targets_out;
	}
	err = damon_sysfs_set_regions(t, sys_target->regions);
	if (err)
		goto destroy_targets_out;
	return 0;

destroy_targets_out:
	damon_sysfs_destroy_targets(ctx);
	return err;
}
2190 
2191 /*
2192  * Search a target in a context that corresponds to the sysfs target input.
2193  *
2194  * Return: pointer to the target if found, NULL if not found, or negative
2195  * error code if the search failed.
2196  */
2197 static struct damon_target *damon_sysfs_existing_target(
2198 		struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
2199 {
2200 	struct pid *pid;
2201 	struct damon_target *t;
2202 
2203 	if (!damon_target_has_pid(ctx)) {
2204 		/* Up to only one target for paddr could exist */
2205 		damon_for_each_target(t, ctx)
2206 			return t;
2207 		return NULL;
2208 	}
2209 
2210 	/* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
2211 	pid = find_get_pid(sys_target->pid);
2212 	if (!pid)
2213 		return ERR_PTR(-EINVAL);
2214 	damon_for_each_target(t, ctx) {
2215 		if (t->pid == pid) {
2216 			put_pid(pid);
2217 			return t;
2218 		}
2219 	}
2220 	put_pid(pid);
2221 	return NULL;
2222 }
2223 
2224 static int damon_sysfs_set_targets(struct damon_ctx *ctx,
2225 		struct damon_sysfs_targets *sysfs_targets)
2226 {
2227 	int i, err;
2228 
2229 	/* Multiple physical address space monitoring targets makes no sense */
2230 	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
2231 		return -EINVAL;
2232 
2233 	for (i = 0; i < sysfs_targets->nr; i++) {
2234 		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
2235 		struct damon_target *t = damon_sysfs_existing_target(st, ctx);
2236 
2237 		if (IS_ERR(t))
2238 			return PTR_ERR(t);
2239 		if (!t)
2240 			err = damon_sysfs_add_target(st, ctx);
2241 		else
2242 			err = damon_sysfs_set_regions(t, st->regions);
2243 		if (err)
2244 			return err;
2245 	}
2246 	return 0;
2247 }
2248 
/*
 * Construct a DAMON-internal scheme (struct damos) from the sysfs scheme
 * input.  Returns what damon_new_scheme() returns, which can be NULL.
 */
static struct damos *damon_sysfs_mk_scheme(
		struct damon_sysfs_scheme *sysfs_scheme)
{
	struct damon_sysfs_access_pattern *access_pattern =
		sysfs_scheme->access_pattern;
	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;

	/* target access pattern: region size, access frequency, and age */
	struct damos_access_pattern pattern = {
		.min_sz_region = access_pattern->sz->min,
		.max_sz_region = access_pattern->sz->max,
		.min_nr_accesses = access_pattern->nr_accesses->min,
		.max_nr_accesses = access_pattern->nr_accesses->max,
		.min_age_region = access_pattern->age->min,
		.max_age_region = access_pattern->age->max,
	};
	/* time/size quota and the prioritization weights */
	struct damos_quota quota = {
		.ms = sysfs_quotas->ms,
		.sz = sysfs_quotas->sz,
		.reset_interval = sysfs_quotas->reset_interval_ms,
		.weight_sz = sysfs_weights->sz,
		.weight_nr_accesses = sysfs_weights->nr_accesses,
		.weight_age = sysfs_weights->age,
	};
	struct damos_watermarks wmarks = {
		.metric = sysfs_wmarks->metric,
		.interval = sysfs_wmarks->interval_us,
		.high = sysfs_wmarks->high,
		.mid = sysfs_wmarks->mid,
		.low = sysfs_wmarks->low,
	};

	return damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
			&wmarks);
}
2285 
/*
 * Create DAMON-internal schemes from the sysfs schemes input and add them to
 * @ctx.  If an allocation fails midway, every scheme of @ctx is destroyed
 * and -ENOMEM is returned.
 *
 * NOTE(review): this appends to whatever schemes @ctx already has; callers
 * presumably pass a context without schemes — confirm for the commit path.
 */
static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
		struct damon_sysfs_schemes *sysfs_schemes)
{
	int i;

	for (i = 0; i < sysfs_schemes->nr; i++) {
		struct damos *scheme, *next;

		scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
		if (!scheme) {
			/* roll back the schemes added so far */
			damon_for_each_scheme_safe(scheme, next, ctx)
				damon_destroy_scheme(scheme);
			return -ENOMEM;
		}
		damon_add_scheme(ctx, scheme);
	}
	return 0;
}
2304 
2305 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
2306 {
2307 	struct damon_target *t, *next;
2308 
2309 	if (!damon_target_has_pid(ctx))
2310 		return;
2311 
2312 	mutex_lock(&ctx->kdamond_lock);
2313 	damon_for_each_target_safe(t, next, ctx) {
2314 		put_pid(t->pid);
2315 		damon_destroy_target(t);
2316 	}
2317 	mutex_unlock(&ctx->kdamond_lock);
2318 }
2319 
/*
 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
 * @kdamond:	The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes stats of specific kdamond and update the
 * related values for sysfs files.  This function should be called from DAMON
 * callbacks while holding ``damon_sysfs_lock``, to safely access the DAMON
 * contexts-internal data and DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx = kdamond->damon_ctx;
	struct damon_sysfs_schemes *sysfs_schemes;
	struct damos *scheme;
	int schemes_idx = 0;

	if (!ctx)
		return -EINVAL;
	/* only one context per kdamond is supported for now */
	sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
	/* copy each scheme's stat counters into its sysfs counterpart */
	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_stats *sysfs_stats;

		sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
		sysfs_stats->nr_tried = scheme->stat.nr_tried;
		sysfs_stats->sz_tried = scheme->stat.sz_tried;
		sysfs_stats->nr_applied = scheme->stat.nr_applied;
		sysfs_stats->sz_applied = scheme->stat.sz_applied;
		sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
	}
	return 0;
}
2351 
2352 static inline bool damon_sysfs_kdamond_running(
2353 		struct damon_sysfs_kdamond *kdamond)
2354 {
2355 	return kdamond->damon_ctx &&
2356 		damon_sysfs_ctx_running(kdamond->damon_ctx);
2357 }
2358 
2359 static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
2360 		struct damon_sysfs_context *sys_ctx)
2361 {
2362 	int err;
2363 
2364 	err = damon_select_ops(ctx, sys_ctx->ops_id);
2365 	if (err)
2366 		return err;
2367 	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2368 	if (err)
2369 		return err;
2370 	err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2371 	if (err)
2372 		return err;
2373 	return damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2374 }
2375 
/*
 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * If the sysfs input is wrong, the kdamond will be terminated.
 */
static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
{
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	/* TODO: Support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	/* re-apply the current sysfs input onto the running context */
	return damon_sysfs_apply_inputs(kdamond->damon_ctx,
			kdamond->contexts->contexts_arr[0]);
}
2393 
/*
 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
 * @c:	The DAMON context of the callback.
 *
 * This function is periodically called back from the kdamond thread for @c.
 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
 *
 * Return: 0 if no request was pending or it targeted another context;
 * otherwise, the result of handling the request.
 */
static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
{
	struct damon_sysfs_kdamond *kdamond;
	int err = 0;

	/* avoid deadlock due to concurrent state_store('off') */
	if (!mutex_trylock(&damon_sysfs_lock))
		return 0;
	kdamond = damon_sysfs_cmd_request.kdamond;
	/* handle only requests aimed at this context */
	if (!kdamond || kdamond->damon_ctx != c)
		goto out;
	switch (damon_sysfs_cmd_request.cmd) {
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
		err = damon_sysfs_upd_schemes_stats(kdamond);
		break;
	case DAMON_SYSFS_CMD_COMMIT:
		err = damon_sysfs_commit_input(kdamond);
		break;
	default:
		break;
	}
	/* Mark the request as invalid now. */
	damon_sysfs_cmd_request.kdamond = NULL;
out:
	mutex_unlock(&damon_sysfs_lock);
	return err;
}
2428 
2429 static struct damon_ctx *damon_sysfs_build_ctx(
2430 		struct damon_sysfs_context *sys_ctx)
2431 {
2432 	struct damon_ctx *ctx = damon_new_ctx();
2433 	int err;
2434 
2435 	if (!ctx)
2436 		return ERR_PTR(-ENOMEM);
2437 
2438 	err = damon_sysfs_apply_inputs(ctx, sys_ctx);
2439 	if (err) {
2440 		damon_destroy_ctx(ctx);
2441 		return ERR_PTR(err);
2442 	}
2443 
2444 	ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
2445 	ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
2446 	ctx->callback.before_terminate = damon_sysfs_before_terminate;
2447 	return ctx;
2448 }
2449 
/*
 * Start monitoring for @kdamond: build a new DAMON context from the sysfs
 * input and hand it to damon_start().  Called while holding
 * damon_sysfs_lock (from damon_sysfs_handle_cmd()).
 */
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx;
	int err;

	if (damon_sysfs_kdamond_running(kdamond))
		return -EBUSY;
	/* refuse while a sysfs command request for this kdamond is pending */
	if (damon_sysfs_cmd_request.kdamond == kdamond)
		return -EBUSY;
	/* TODO: support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	/* free the context kept from the previous, already stopped run */
	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	err = damon_start(&ctx, 1, false);
	if (err) {
		damon_destroy_ctx(ctx);
		return err;
	}
	/* keep the context so files like 'pid' can reach the thread */
	kdamond->damon_ctx = ctx;
	return err;
}
2478 
/* Stop the running kdamond of @kdamond.  The context is intentionally kept. */
static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	return damon_stop(&kdamond->damon_ctx, 1);
	/*
	 * To allow users show final monitoring results of already turned-off
	 * DAMON, we free kdamond->damon_ctx in next
	 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
	 */
}
2490 
/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd:	The command to handle.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  For commands
 * that need to access running DAMON context-internal data, it requests
 * handling of the command to the DAMON callback
 * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled,
 * or the context is completed.
 *
 * Context: Called with ``damon_sysfs_lock`` held; the lock is temporarily
 * released while waiting for the callback, and re-acquired before returning.
 *
 * Return: 0 on success, negative error code otherwise.  Note that for
 * callback-handled commands, this function returns 0 once the request is
 * consumed; the callback's own result is not propagated here.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	bool need_wait = true;

	/* Handle commands that doesn't access DAMON context-internal data */
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	default:
		break;
	}

	/* Pass the command to DAMON callback for safe DAMON context access */
	if (damon_sysfs_cmd_request.kdamond)
		return -EBUSY;
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	damon_sysfs_cmd_request.cmd = cmd;
	damon_sysfs_cmd_request.kdamond = kdamond;

	/*
	 * wait until damon_sysfs_cmd_request_callback() handles the request
	 * from kdamond context
	 */
	mutex_unlock(&damon_sysfs_lock);
	while (need_wait) {
		schedule_timeout_idle(msecs_to_jiffies(100));
		if (!mutex_trylock(&damon_sysfs_lock))
			continue;
		if (!damon_sysfs_cmd_request.kdamond) {
			/* damon_sysfs_cmd_request_callback() handled */
			need_wait = false;
		} else if (!damon_sysfs_kdamond_running(kdamond)) {
			/* kdamond has already finished */
			need_wait = false;
			damon_sysfs_cmd_request.kdamond = NULL;
		}
		mutex_unlock(&damon_sysfs_lock);
	}
	/* the caller (state_store()) expects the lock to be held on return */
	mutex_lock(&damon_sysfs_lock);
	return 0;
}
2549 
2550 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
2551 		const char *buf, size_t count)
2552 {
2553 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2554 			struct damon_sysfs_kdamond, kobj);
2555 	enum damon_sysfs_cmd cmd;
2556 	ssize_t ret = -EINVAL;
2557 
2558 	if (!mutex_trylock(&damon_sysfs_lock))
2559 		return -EBUSY;
2560 	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
2561 		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
2562 			ret = damon_sysfs_handle_cmd(cmd, kdamond);
2563 			break;
2564 		}
2565 	}
2566 	mutex_unlock(&damon_sysfs_lock);
2567 	if (!ret)
2568 		ret = count;
2569 	return ret;
2570 }
2571 
2572 static ssize_t pid_show(struct kobject *kobj,
2573 		struct kobj_attribute *attr, char *buf)
2574 {
2575 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2576 			struct damon_sysfs_kdamond, kobj);
2577 	struct damon_ctx *ctx;
2578 	int pid = -1;
2579 
2580 	if (!mutex_trylock(&damon_sysfs_lock))
2581 		return -EBUSY;
2582 	ctx = kdamond->damon_ctx;
2583 	if (!ctx)
2584 		goto out;
2585 
2586 	mutex_lock(&ctx->kdamond_lock);
2587 	if (ctx->kdamond)
2588 		pid = ctx->kdamond->pid;
2589 	mutex_unlock(&ctx->kdamond_lock);
2590 out:
2591 	mutex_unlock(&damon_sysfs_lock);
2592 	return sysfs_emit(buf, "%d\n", pid);
2593 }
2594 
2595 static void damon_sysfs_kdamond_release(struct kobject *kobj)
2596 {
2597 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2598 			struct damon_sysfs_kdamond, kobj);
2599 
2600 	if (kdamond->damon_ctx)
2601 		damon_destroy_ctx(kdamond->damon_ctx);
2602 	kfree(kdamond);
2603 }
2604 
/* 'state' file: root read/write (0600); writes are handled by state_store() */
static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

/* 'pid' file: root read-only (0400); shows the kdamond thread's pid */
static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

static struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};
2623 
/*
 * kdamonds directory
 */

/* Wrapper for the 'kdamonds' directory and its kdamond sub-directories. */
struct damon_sysfs_kdamonds {
	struct kobject kobj;
	/* array of 'nr' kdamond directory wrappers */
	struct damon_sysfs_kdamond **kdamonds_arr;
	int nr;
};
2633 
2634 static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
2635 {
2636 	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
2637 }
2638 
2639 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
2640 {
2641 	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
2642 	int i;
2643 
2644 	for (i = 0; i < kdamonds->nr; i++) {
2645 		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
2646 		kobject_put(&kdamonds_arr[i]->kobj);
2647 	}
2648 	kdamonds->nr = 0;
2649 	kfree(kdamonds_arr);
2650 	kdamonds->kdamonds_arr = NULL;
2651 }
2652 
2653 static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
2654 		int nr_kdamonds)
2655 {
2656 	int i;
2657 
2658 	for (i = 0; i < nr_kdamonds; i++) {
2659 		if (damon_sysfs_kdamond_running(kdamonds[i]) ||
2660 		    damon_sysfs_cmd_request.kdamond == kdamonds[i])
2661 			return true;
2662 	}
2663 
2664 	return false;
2665 }
2666 
/*
 * Resize the 'kdamonds' directory to have @nr_kdamonds sub-directories.
 *
 * Fails with -EBUSY when any existing kdamond is running or has a pending
 * sysfs command request.  All existing sub-directories are removed before
 * the new ones are created.
 */
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		/* publish only after the entry is fully set up */
		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	/*
	 * rm_dirs() covers only the kdamonds->nr fully-registered entries;
	 * the partially initialized @kdamond needs its own kobject_put().
	 */
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}
2713 
2714 static ssize_t nr_kdamonds_show(struct kobject *kobj,
2715 		struct kobj_attribute *attr, char *buf)
2716 {
2717 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2718 			struct damon_sysfs_kdamonds, kobj);
2719 
2720 	return sysfs_emit(buf, "%d\n", kdamonds->nr);
2721 }
2722 
2723 static ssize_t nr_kdamonds_store(struct kobject *kobj,
2724 		struct kobj_attribute *attr, const char *buf, size_t count)
2725 {
2726 	struct damon_sysfs_kdamonds *kdamonds;
2727 	int nr, err;
2728 
2729 	err = kstrtoint(buf, 0, &nr);
2730 	if (err)
2731 		return err;
2732 	if (nr < 0)
2733 		return -EINVAL;
2734 
2735 	kdamonds = container_of(kobj, struct damon_sysfs_kdamonds, kobj);
2736 
2737 	if (!mutex_trylock(&damon_sysfs_lock))
2738 		return -EBUSY;
2739 	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2740 	mutex_unlock(&damon_sysfs_lock);
2741 	if (err)
2742 		return err;
2743 
2744 	return count;
2745 }
2746 
2747 static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2748 {
2749 	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2750 }
2751 
/* 'nr_kdamonds' file: root read/write (0600) */
static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

static struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};
2766 
/*
 * damon user interface directory
 */

/* Wrapper for the top-level user interface ('admin') directory. */
struct damon_sysfs_ui_dir {
	struct kobject kobj;
	/* the single 'kdamonds' sub-directory */
	struct damon_sysfs_kdamonds *kdamonds;
};
2775 
2776 static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2777 {
2778 	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2779 }
2780 
2781 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2782 {
2783 	struct damon_sysfs_kdamonds *kdamonds;
2784 	int err;
2785 
2786 	kdamonds = damon_sysfs_kdamonds_alloc();
2787 	if (!kdamonds)
2788 		return -ENOMEM;
2789 
2790 	err = kobject_init_and_add(&kdamonds->kobj,
2791 			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2792 			"kdamonds");
2793 	if (err) {
2794 		kobject_put(&kdamonds->kobj);
2795 		return err;
2796 	}
2797 	ui_dir->kdamonds = kdamonds;
2798 	return err;
2799 }
2800 
2801 static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2802 {
2803 	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2804 }
2805 
/* the user interface directory itself exposes no attribute files */
static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

static struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};
2816 
/*
 * Build the DAMON sysfs hierarchy: a 'damon' directory under mm_kobj
 * (presumably /sys/kernel/mm — confirm), containing the 'admin' user
 * interface directory and its 'kdamonds' sub-directory.
 */
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	/* on success, the references are kept for the life of the system */
	return 0;

out:
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);
2847