xref: /openbmc/linux/mm/damon/sysfs.c (revision acf50233)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DAMON sysfs Interface
4  *
5  * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
6  */
7 
8 #include <linux/damon.h>
9 #include <linux/kobject.h>
10 #include <linux/pid.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 
/* Serializes rebuilds of the DAMON sysfs hierarchy (see nr_schemes_store()) */
static DEFINE_MUTEX(damon_sysfs_lock);
15 
16 /*
17  * unsigned long range directory
18  */
19 
/*
 * One "<name>/{min,max}" sysfs directory holding an unsigned long range.
 * Embedded as e.g. the sz/nr_accesses/age sub-directories of access_pattern.
 */
struct damon_sysfs_ul_range {
	struct kobject kobj;	/* sysfs directory object (embedded) */
	unsigned long min;	/* lower bound, user-writable via 'min' file */
	unsigned long max;	/* upper bound, user-writable via 'max' file */
};
25 
26 static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
27 		unsigned long min,
28 		unsigned long max)
29 {
30 	struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
31 			GFP_KERNEL);
32 
33 	if (!range)
34 		return NULL;
35 	range->kobj = (struct kobject){};
36 	range->min = min;
37 	range->max = max;
38 
39 	return range;
40 }
41 
42 static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
43 		char *buf)
44 {
45 	struct damon_sysfs_ul_range *range = container_of(kobj,
46 			struct damon_sysfs_ul_range, kobj);
47 
48 	return sysfs_emit(buf, "%lu\n", range->min);
49 }
50 
51 static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
52 		const char *buf, size_t count)
53 {
54 	struct damon_sysfs_ul_range *range = container_of(kobj,
55 			struct damon_sysfs_ul_range, kobj);
56 	unsigned long min;
57 	int err;
58 
59 	err = kstrtoul(buf, 0, &min);
60 	if (err)
61 		return -EINVAL;
62 
63 	range->min = min;
64 	return count;
65 }
66 
67 static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
68 		char *buf)
69 {
70 	struct damon_sysfs_ul_range *range = container_of(kobj,
71 			struct damon_sysfs_ul_range, kobj);
72 
73 	return sysfs_emit(buf, "%lu\n", range->max);
74 }
75 
76 static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
77 		const char *buf, size_t count)
78 {
79 	struct damon_sysfs_ul_range *range = container_of(kobj,
80 			struct damon_sysfs_ul_range, kobj);
81 	unsigned long max;
82 	int err;
83 
84 	err = kstrtoul(buf, 0, &max);
85 	if (err)
86 		return -EINVAL;
87 
88 	range->max = max;
89 	return count;
90 }
91 
/* kobject release callback: frees the containing ul_range object. */
static void damon_sysfs_ul_range_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
}

/* 'min' and 'max' files, read/write for root (0600). */
static struct kobj_attribute damon_sysfs_ul_range_min_attr =
		__ATTR_RW_MODE(min, 0600);

static struct kobj_attribute damon_sysfs_ul_range_max_attr =
		__ATTR_RW_MODE(max, 0600);

static struct attribute *damon_sysfs_ul_range_attrs[] = {
	&damon_sysfs_ul_range_min_attr.attr,
	&damon_sysfs_ul_range_max_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ul_range);

static struct kobj_type damon_sysfs_ul_range_ktype = {
	.release = damon_sysfs_ul_range_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ul_range_groups,
};
115 
116 /*
117  * schemes/stats directory
118  */
119 
/* Read-only statistics directory of a scheme ("schemes/N/stats"). */
struct damon_sysfs_stats {
	struct kobject kobj;		/* sysfs directory object (embedded) */
	unsigned long nr_tried;		/* regions the scheme tried to apply to */
	unsigned long sz_tried;		/* total bytes of the tried regions */
	unsigned long nr_applied;	/* regions the scheme was applied to */
	unsigned long sz_applied;	/* total bytes of the applied regions */
	unsigned long qt_exceeds;	/* times the quota limit was exceeded */
};
128 
129 static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
130 {
131 	return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
132 }
133 
134 static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
135 		char *buf)
136 {
137 	struct damon_sysfs_stats *stats = container_of(kobj,
138 			struct damon_sysfs_stats, kobj);
139 
140 	return sysfs_emit(buf, "%lu\n", stats->nr_tried);
141 }
142 
143 static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
144 		char *buf)
145 {
146 	struct damon_sysfs_stats *stats = container_of(kobj,
147 			struct damon_sysfs_stats, kobj);
148 
149 	return sysfs_emit(buf, "%lu\n", stats->sz_tried);
150 }
151 
152 static ssize_t nr_applied_show(struct kobject *kobj,
153 		struct kobj_attribute *attr, char *buf)
154 {
155 	struct damon_sysfs_stats *stats = container_of(kobj,
156 			struct damon_sysfs_stats, kobj);
157 
158 	return sysfs_emit(buf, "%lu\n", stats->nr_applied);
159 }
160 
161 static ssize_t sz_applied_show(struct kobject *kobj,
162 		struct kobj_attribute *attr, char *buf)
163 {
164 	struct damon_sysfs_stats *stats = container_of(kobj,
165 			struct damon_sysfs_stats, kobj);
166 
167 	return sysfs_emit(buf, "%lu\n", stats->sz_applied);
168 }
169 
170 static ssize_t qt_exceeds_show(struct kobject *kobj,
171 		struct kobj_attribute *attr, char *buf)
172 {
173 	struct damon_sysfs_stats *stats = container_of(kobj,
174 			struct damon_sysfs_stats, kobj);
175 
176 	return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
177 }
178 
/* kobject release callback: frees the containing stats object. */
static void damon_sysfs_stats_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
}

/* All stat files are read-only for root (0400). */
static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
		__ATTR_RO_MODE(nr_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
		__ATTR_RO_MODE(sz_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
		__ATTR_RO_MODE(nr_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
		__ATTR_RO_MODE(sz_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
		__ATTR_RO_MODE(qt_exceeds, 0400);

static struct attribute *damon_sysfs_stats_attrs[] = {
	&damon_sysfs_stats_nr_tried_attr.attr,
	&damon_sysfs_stats_sz_tried_attr.attr,
	&damon_sysfs_stats_nr_applied_attr.attr,
	&damon_sysfs_stats_sz_applied_attr.attr,
	&damon_sysfs_stats_qt_exceeds_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_stats);

static struct kobj_type damon_sysfs_stats_ktype = {
	.release = damon_sysfs_stats_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_stats_groups,
};
214 
215 /*
216  * watermarks directory
217  */
218 
/* Watermarks directory of a scheme ("schemes/N/watermarks"). */
struct damon_sysfs_watermarks {
	struct kobject kobj;		/* sysfs directory object (embedded) */
	enum damos_wmark_metric metric;	/* metric to check against the marks */
	unsigned long interval_us;	/* metric check interval */
	unsigned long high;		/* high watermark */
	unsigned long mid;		/* middle watermark */
	unsigned long low;		/* low watermark */
};
227 
228 static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
229 		enum damos_wmark_metric metric, unsigned long interval_us,
230 		unsigned long high, unsigned long mid, unsigned long low)
231 {
232 	struct damon_sysfs_watermarks *watermarks = kmalloc(
233 			sizeof(*watermarks), GFP_KERNEL);
234 
235 	if (!watermarks)
236 		return NULL;
237 	watermarks->kobj = (struct kobject){};
238 	watermarks->metric = metric;
239 	watermarks->interval_us = interval_us;
240 	watermarks->high = high;
241 	watermarks->mid = mid;
242 	watermarks->low = low;
243 	return watermarks;
244 }
245 
/*
 * Should match with enum damos_wmark_metric: the enum value is used directly
 * as an index into this table by metric_show()/metric_store().
 */
static const char * const damon_sysfs_wmark_metric_strs[] = {
	"none",
	"free_mem_rate",
};
251 
252 static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
253 		char *buf)
254 {
255 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
256 			struct damon_sysfs_watermarks, kobj);
257 
258 	return sysfs_emit(buf, "%s\n",
259 			damon_sysfs_wmark_metric_strs[watermarks->metric]);
260 }
261 
262 static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
263 		const char *buf, size_t count)
264 {
265 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
266 			struct damon_sysfs_watermarks, kobj);
267 	enum damos_wmark_metric metric;
268 
269 	for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
270 		if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
271 			watermarks->metric = metric;
272 			return count;
273 		}
274 	}
275 	return -EINVAL;
276 }
277 
278 static ssize_t interval_us_show(struct kobject *kobj,
279 		struct kobj_attribute *attr, char *buf)
280 {
281 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
282 			struct damon_sysfs_watermarks, kobj);
283 
284 	return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
285 }
286 
287 static ssize_t interval_us_store(struct kobject *kobj,
288 		struct kobj_attribute *attr, const char *buf, size_t count)
289 {
290 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
291 			struct damon_sysfs_watermarks, kobj);
292 	int err = kstrtoul(buf, 0, &watermarks->interval_us);
293 
294 	if (err)
295 		return -EINVAL;
296 	return count;
297 }
298 
299 static ssize_t high_show(struct kobject *kobj,
300 		struct kobj_attribute *attr, char *buf)
301 {
302 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
303 			struct damon_sysfs_watermarks, kobj);
304 
305 	return sysfs_emit(buf, "%lu\n", watermarks->high);
306 }
307 
308 static ssize_t high_store(struct kobject *kobj,
309 		struct kobj_attribute *attr, const char *buf, size_t count)
310 {
311 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
312 			struct damon_sysfs_watermarks, kobj);
313 	int err = kstrtoul(buf, 0, &watermarks->high);
314 
315 	if (err)
316 		return -EINVAL;
317 	return count;
318 }
319 
320 static ssize_t mid_show(struct kobject *kobj,
321 		struct kobj_attribute *attr, char *buf)
322 {
323 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
324 			struct damon_sysfs_watermarks, kobj);
325 
326 	return sysfs_emit(buf, "%lu\n", watermarks->mid);
327 }
328 
329 static ssize_t mid_store(struct kobject *kobj,
330 		struct kobj_attribute *attr, const char *buf, size_t count)
331 {
332 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
333 			struct damon_sysfs_watermarks, kobj);
334 	int err = kstrtoul(buf, 0, &watermarks->mid);
335 
336 	if (err)
337 		return -EINVAL;
338 	return count;
339 }
340 
341 static ssize_t low_show(struct kobject *kobj,
342 		struct kobj_attribute *attr, char *buf)
343 {
344 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
345 			struct damon_sysfs_watermarks, kobj);
346 
347 	return sysfs_emit(buf, "%lu\n", watermarks->low);
348 }
349 
350 static ssize_t low_store(struct kobject *kobj,
351 		struct kobj_attribute *attr, const char *buf, size_t count)
352 {
353 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
354 			struct damon_sysfs_watermarks, kobj);
355 	int err = kstrtoul(buf, 0, &watermarks->low);
356 
357 	if (err)
358 		return -EINVAL;
359 	return count;
360 }
361 
/* kobject release callback: frees the containing watermarks object. */
static void damon_sysfs_watermarks_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
}

/* All watermark files are read/write for root (0600). */
static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
		__ATTR_RW_MODE(metric, 0600);

static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
		__ATTR_RW_MODE(interval_us, 0600);

static struct kobj_attribute damon_sysfs_watermarks_high_attr =
		__ATTR_RW_MODE(high, 0600);

static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
		__ATTR_RW_MODE(mid, 0600);

static struct kobj_attribute damon_sysfs_watermarks_low_attr =
		__ATTR_RW_MODE(low, 0600);

static struct attribute *damon_sysfs_watermarks_attrs[] = {
	&damon_sysfs_watermarks_metric_attr.attr,
	&damon_sysfs_watermarks_interval_us_attr.attr,
	&damon_sysfs_watermarks_high_attr.attr,
	&damon_sysfs_watermarks_mid_attr.attr,
	&damon_sysfs_watermarks_low_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_watermarks);

static struct kobj_type damon_sysfs_watermarks_ktype = {
	.release = damon_sysfs_watermarks_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_watermarks_groups,
};
397 
398 /*
399  * scheme/weights directory
400  */
401 
/* Prioritization weights directory ("quotas/weights"), values in permil. */
struct damon_sysfs_weights {
	struct kobject kobj;		/* sysfs directory object (embedded) */
	unsigned int sz;		/* weight of region size */
	unsigned int nr_accesses;	/* weight of access frequency */
	unsigned int age;		/* weight of region age */
};
408 
409 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
410 		unsigned int nr_accesses, unsigned int age)
411 {
412 	struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
413 			GFP_KERNEL);
414 
415 	if (!weights)
416 		return NULL;
417 	weights->kobj = (struct kobject){};
418 	weights->sz = sz;
419 	weights->nr_accesses = nr_accesses;
420 	weights->age = age;
421 	return weights;
422 }
423 
424 static ssize_t sz_permil_show(struct kobject *kobj,
425 		struct kobj_attribute *attr, char *buf)
426 {
427 	struct damon_sysfs_weights *weights = container_of(kobj,
428 			struct damon_sysfs_weights, kobj);
429 
430 	return sysfs_emit(buf, "%u\n", weights->sz);
431 }
432 
433 static ssize_t sz_permil_store(struct kobject *kobj,
434 		struct kobj_attribute *attr, const char *buf, size_t count)
435 {
436 	struct damon_sysfs_weights *weights = container_of(kobj,
437 			struct damon_sysfs_weights, kobj);
438 	int err = kstrtouint(buf, 0, &weights->sz);
439 
440 	if (err)
441 		return -EINVAL;
442 	return count;
443 }
444 
445 static ssize_t nr_accesses_permil_show(struct kobject *kobj,
446 		struct kobj_attribute *attr, char *buf)
447 {
448 	struct damon_sysfs_weights *weights = container_of(kobj,
449 			struct damon_sysfs_weights, kobj);
450 
451 	return sysfs_emit(buf, "%u\n", weights->nr_accesses);
452 }
453 
454 static ssize_t nr_accesses_permil_store(struct kobject *kobj,
455 		struct kobj_attribute *attr, const char *buf, size_t count)
456 {
457 	struct damon_sysfs_weights *weights = container_of(kobj,
458 			struct damon_sysfs_weights, kobj);
459 	int err = kstrtouint(buf, 0, &weights->nr_accesses);
460 
461 	if (err)
462 		return -EINVAL;
463 	return count;
464 }
465 
466 static ssize_t age_permil_show(struct kobject *kobj,
467 		struct kobj_attribute *attr, char *buf)
468 {
469 	struct damon_sysfs_weights *weights = container_of(kobj,
470 			struct damon_sysfs_weights, kobj);
471 
472 	return sysfs_emit(buf, "%u\n", weights->age);
473 }
474 
475 static ssize_t age_permil_store(struct kobject *kobj,
476 		struct kobj_attribute *attr, const char *buf, size_t count)
477 {
478 	struct damon_sysfs_weights *weights = container_of(kobj,
479 			struct damon_sysfs_weights, kobj);
480 	int err = kstrtouint(buf, 0, &weights->age);
481 
482 	if (err)
483 		return -EINVAL;
484 	return count;
485 }
486 
/* kobject release callback: frees the containing weights object. */
static void damon_sysfs_weights_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
}

/* All weight files are read/write for root (0600). */
static struct kobj_attribute damon_sysfs_weights_sz_attr =
		__ATTR_RW_MODE(sz_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
		__ATTR_RW_MODE(nr_accesses_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_age_attr =
		__ATTR_RW_MODE(age_permil, 0600);

static struct attribute *damon_sysfs_weights_attrs[] = {
	&damon_sysfs_weights_sz_attr.attr,
	&damon_sysfs_weights_nr_accesses_attr.attr,
	&damon_sysfs_weights_age_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_weights);

static struct kobj_type damon_sysfs_weights_ktype = {
	.release = damon_sysfs_weights_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_weights_groups,
};
514 
515 /*
516  * quotas directory
517  */
518 
/* Quotas directory of a scheme ("schemes/N/quotas"). */
struct damon_sysfs_quotas {
	struct kobject kobj;			/* sysfs directory object */
	struct damon_sysfs_weights *weights;	/* "weights" sub-directory */
	unsigned long ms;			/* time quota in milliseconds */
	unsigned long sz;			/* size quota in bytes */
	unsigned long reset_interval_ms;	/* quota charge reset interval */
};
526 
527 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
528 {
529 	return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
530 }
531 
532 static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
533 {
534 	struct damon_sysfs_weights *weights;
535 	int err;
536 
537 	weights = damon_sysfs_weights_alloc(0, 0, 0);
538 	if (!weights)
539 		return -ENOMEM;
540 
541 	err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
542 			&quotas->kobj, "weights");
543 	if (err)
544 		kobject_put(&weights->kobj);
545 	else
546 		quotas->weights = weights;
547 	return err;
548 }
549 
/* Drop the "weights" sub-directory; its release callback frees the object. */
static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
{
	kobject_put(&quotas->weights->kobj);
}
554 
555 static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
556 		char *buf)
557 {
558 	struct damon_sysfs_quotas *quotas = container_of(kobj,
559 			struct damon_sysfs_quotas, kobj);
560 
561 	return sysfs_emit(buf, "%lu\n", quotas->ms);
562 }
563 
564 static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
565 		const char *buf, size_t count)
566 {
567 	struct damon_sysfs_quotas *quotas = container_of(kobj,
568 			struct damon_sysfs_quotas, kobj);
569 	int err = kstrtoul(buf, 0, &quotas->ms);
570 
571 	if (err)
572 		return -EINVAL;
573 	return count;
574 }
575 
576 static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
577 		char *buf)
578 {
579 	struct damon_sysfs_quotas *quotas = container_of(kobj,
580 			struct damon_sysfs_quotas, kobj);
581 
582 	return sysfs_emit(buf, "%lu\n", quotas->sz);
583 }
584 
585 static ssize_t bytes_store(struct kobject *kobj,
586 		struct kobj_attribute *attr, const char *buf, size_t count)
587 {
588 	struct damon_sysfs_quotas *quotas = container_of(kobj,
589 			struct damon_sysfs_quotas, kobj);
590 	int err = kstrtoul(buf, 0, &quotas->sz);
591 
592 	if (err)
593 		return -EINVAL;
594 	return count;
595 }
596 
597 static ssize_t reset_interval_ms_show(struct kobject *kobj,
598 		struct kobj_attribute *attr, char *buf)
599 {
600 	struct damon_sysfs_quotas *quotas = container_of(kobj,
601 			struct damon_sysfs_quotas, kobj);
602 
603 	return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
604 }
605 
606 static ssize_t reset_interval_ms_store(struct kobject *kobj,
607 		struct kobj_attribute *attr, const char *buf, size_t count)
608 {
609 	struct damon_sysfs_quotas *quotas = container_of(kobj,
610 			struct damon_sysfs_quotas, kobj);
611 	int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);
612 
613 	if (err)
614 		return -EINVAL;
615 	return count;
616 }
617 
/* kobject release callback: frees the containing quotas object. */
static void damon_sysfs_quotas_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
}

/* All quota files are read/write for root (0600). */
static struct kobj_attribute damon_sysfs_quotas_ms_attr =
		__ATTR_RW_MODE(ms, 0600);

static struct kobj_attribute damon_sysfs_quotas_sz_attr =
		__ATTR_RW_MODE(bytes, 0600);

static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
		__ATTR_RW_MODE(reset_interval_ms, 0600);

static struct attribute *damon_sysfs_quotas_attrs[] = {
	&damon_sysfs_quotas_ms_attr.attr,
	&damon_sysfs_quotas_sz_attr.attr,
	&damon_sysfs_quotas_reset_interval_ms_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_quotas);

static struct kobj_type damon_sysfs_quotas_ktype = {
	.release = damon_sysfs_quotas_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_quotas_groups,
};
645 
646 /*
647  * access_pattern directory
648  */
649 
/* Access pattern directory of a scheme ("schemes/N/access_pattern"). */
struct damon_sysfs_access_pattern {
	struct kobject kobj;			/* sysfs directory object */
	struct damon_sysfs_ul_range *sz;	/* "sz" range sub-directory */
	struct damon_sysfs_ul_range *nr_accesses; /* "nr_accesses" range dir */
	struct damon_sysfs_ul_range *age;	/* "age" range sub-directory */
};
656 
657 static
658 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
659 {
660 	struct damon_sysfs_access_pattern *access_pattern =
661 		kmalloc(sizeof(*access_pattern), GFP_KERNEL);
662 
663 	if (!access_pattern)
664 		return NULL;
665 	access_pattern->kobj = (struct kobject){};
666 	return access_pattern;
667 }
668 
669 static int damon_sysfs_access_pattern_add_range_dir(
670 		struct damon_sysfs_access_pattern *access_pattern,
671 		struct damon_sysfs_ul_range **range_dir_ptr,
672 		char *name)
673 {
674 	struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
675 	int err;
676 
677 	if (!range)
678 		return -ENOMEM;
679 	err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
680 			&access_pattern->kobj, name);
681 	if (err)
682 		kobject_put(&range->kobj);
683 	else
684 		*range_dir_ptr = range;
685 	return err;
686 }
687 
688 static int damon_sysfs_access_pattern_add_dirs(
689 		struct damon_sysfs_access_pattern *access_pattern)
690 {
691 	int err;
692 
693 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
694 			&access_pattern->sz, "sz");
695 	if (err)
696 		goto put_sz_out;
697 
698 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
699 			&access_pattern->nr_accesses, "nr_accesses");
700 	if (err)
701 		goto put_nr_accesses_sz_out;
702 
703 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
704 			&access_pattern->age, "age");
705 	if (err)
706 		goto put_age_nr_accesses_sz_out;
707 	return 0;
708 
709 put_age_nr_accesses_sz_out:
710 	kobject_put(&access_pattern->age->kobj);
711 	access_pattern->age = NULL;
712 put_nr_accesses_sz_out:
713 	kobject_put(&access_pattern->nr_accesses->kobj);
714 	access_pattern->nr_accesses = NULL;
715 put_sz_out:
716 	kobject_put(&access_pattern->sz->kobj);
717 	access_pattern->sz = NULL;
718 	return err;
719 }
720 
/*
 * Drop the three range sub-directories.  Assumes all were successfully
 * created (i.e. damon_sysfs_access_pattern_add_dirs() returned 0).
 */
static void damon_sysfs_access_pattern_rm_dirs(
		struct damon_sysfs_access_pattern *access_pattern)
{
	kobject_put(&access_pattern->sz->kobj);
	kobject_put(&access_pattern->nr_accesses->kobj);
	kobject_put(&access_pattern->age->kobj);
}
728 
/* kobject release callback: frees the containing access_pattern object. */
static void damon_sysfs_access_pattern_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
}

/* No files of its own; only the range sub-directories. */
static struct attribute *damon_sysfs_access_pattern_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);

static struct kobj_type damon_sysfs_access_pattern_ktype = {
	.release = damon_sysfs_access_pattern_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_access_pattern_groups,
};
744 
745 /*
746  * scheme directory
747  */
748 
/* One DAMON-based operation scheme directory ("schemes/N"). */
struct damon_sysfs_scheme {
	struct kobject kobj;		/* sysfs directory object (embedded) */
	enum damos_action action;	/* action to apply, set via 'action' */
	struct damon_sysfs_access_pattern *access_pattern; /* sub-directory */
	struct damon_sysfs_quotas *quotas;		/* sub-directory */
	struct damon_sysfs_watermarks *watermarks;	/* sub-directory */
	struct damon_sysfs_stats *stats;		/* sub-directory */
};
757 
/*
 * This should match with enum damos_action: the enum value is used directly
 * as an index into this table by action_show()/action_store().
 */
static const char * const damon_sysfs_damos_action_strs[] = {
	"willneed",
	"cold",
	"pageout",
	"hugepage",
	"nohugepage",
	"stat",
};
767 
768 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
769 		enum damos_action action)
770 {
771 	struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
772 				GFP_KERNEL);
773 
774 	if (!scheme)
775 		return NULL;
776 	scheme->kobj = (struct kobject){};
777 	scheme->action = action;
778 	return scheme;
779 }
780 
781 static int damon_sysfs_scheme_set_access_pattern(
782 		struct damon_sysfs_scheme *scheme)
783 {
784 	struct damon_sysfs_access_pattern *access_pattern;
785 	int err;
786 
787 	access_pattern = damon_sysfs_access_pattern_alloc();
788 	if (!access_pattern)
789 		return -ENOMEM;
790 	err = kobject_init_and_add(&access_pattern->kobj,
791 			&damon_sysfs_access_pattern_ktype, &scheme->kobj,
792 			"access_pattern");
793 	if (err)
794 		goto out;
795 	err = damon_sysfs_access_pattern_add_dirs(access_pattern);
796 	if (err)
797 		goto out;
798 	scheme->access_pattern = access_pattern;
799 	return 0;
800 
801 out:
802 	kobject_put(&access_pattern->kobj);
803 	return err;
804 }
805 
806 static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
807 {
808 	struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
809 	int err;
810 
811 	if (!quotas)
812 		return -ENOMEM;
813 	err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
814 			&scheme->kobj, "quotas");
815 	if (err)
816 		goto out;
817 	err = damon_sysfs_quotas_add_dirs(quotas);
818 	if (err)
819 		goto out;
820 	scheme->quotas = quotas;
821 	return 0;
822 
823 out:
824 	kobject_put(&quotas->kobj);
825 	return err;
826 }
827 
828 static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
829 {
830 	struct damon_sysfs_watermarks *watermarks =
831 		damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
832 	int err;
833 
834 	if (!watermarks)
835 		return -ENOMEM;
836 	err = kobject_init_and_add(&watermarks->kobj,
837 			&damon_sysfs_watermarks_ktype, &scheme->kobj,
838 			"watermarks");
839 	if (err)
840 		kobject_put(&watermarks->kobj);
841 	else
842 		scheme->watermarks = watermarks;
843 	return err;
844 }
845 
846 static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
847 {
848 	struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
849 	int err;
850 
851 	if (!stats)
852 		return -ENOMEM;
853 	err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
854 			&scheme->kobj, "stats");
855 	if (err)
856 		kobject_put(&stats->kobj);
857 	else
858 		scheme->stats = stats;
859 	return err;
860 }
861 
/*
 * Create all four sub-directories of a scheme.  On failure, sub-directories
 * created so far are dropped in reverse order via the fall-through labels
 * below, and their pointers are reset to NULL.  Note each goto only cleans
 * up directories created *before* the failing step; the set_* helpers
 * release their own object on failure.
 */
static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
{
	int err;

	err = damon_sysfs_scheme_set_access_pattern(scheme);
	if (err)
		return err;
	err = damon_sysfs_scheme_set_quotas(scheme);
	if (err)
		goto put_access_pattern_out;
	err = damon_sysfs_scheme_set_watermarks(scheme);
	if (err)
		goto put_quotas_access_pattern_out;
	err = damon_sysfs_scheme_set_stats(scheme);
	if (err)
		goto put_watermarks_quotas_access_pattern_out;
	return 0;

	/* cleanup labels intentionally fall through, newest first */
put_watermarks_quotas_access_pattern_out:
	kobject_put(&scheme->watermarks->kobj);
	scheme->watermarks = NULL;
put_quotas_access_pattern_out:
	kobject_put(&scheme->quotas->kobj);
	scheme->quotas = NULL;
put_access_pattern_out:
	kobject_put(&scheme->access_pattern->kobj);
	scheme->access_pattern = NULL;
	return err;
}
891 
/*
 * Drop all sub-directories of a scheme.  Nested children (range dirs,
 * weights) must be dropped before their parent kobjects are put.
 * Assumes damon_sysfs_scheme_add_dirs() previously succeeded.
 */
static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
{
	damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
	kobject_put(&scheme->access_pattern->kobj);
	damon_sysfs_quotas_rm_dirs(scheme->quotas);
	kobject_put(&scheme->quotas->kobj);
	kobject_put(&scheme->watermarks->kobj);
	kobject_put(&scheme->stats->kobj);
}
901 
902 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
903 		char *buf)
904 {
905 	struct damon_sysfs_scheme *scheme = container_of(kobj,
906 			struct damon_sysfs_scheme, kobj);
907 
908 	return sysfs_emit(buf, "%s\n",
909 			damon_sysfs_damos_action_strs[scheme->action]);
910 }
911 
912 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
913 		const char *buf, size_t count)
914 {
915 	struct damon_sysfs_scheme *scheme = container_of(kobj,
916 			struct damon_sysfs_scheme, kobj);
917 	enum damos_action action;
918 
919 	for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
920 		if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
921 			scheme->action = action;
922 			return count;
923 		}
924 	}
925 	return -EINVAL;
926 }
927 
/* kobject release callback: frees the containing scheme object. */
static void damon_sysfs_scheme_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
}

/* 'action' file, read/write for root (0600). */
static struct kobj_attribute damon_sysfs_scheme_action_attr =
		__ATTR_RW_MODE(action, 0600);

static struct attribute *damon_sysfs_scheme_attrs[] = {
	&damon_sysfs_scheme_action_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme);

static struct kobj_type damon_sysfs_scheme_ktype = {
	.release = damon_sysfs_scheme_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_groups,
};
947 
948 /*
949  * schemes directory
950  */
951 
/* The "schemes" directory: an array of numbered scheme sub-directories. */
struct damon_sysfs_schemes {
	struct kobject kobj;		/* sysfs directory object (embedded) */
	struct damon_sysfs_scheme **schemes_arr; /* nr entries, or NULL */
	int nr;				/* number of fully created schemes */
};
957 
958 static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
959 {
960 	return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
961 }
962 
963 static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
964 {
965 	struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
966 	int i;
967 
968 	for (i = 0; i < schemes->nr; i++) {
969 		damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
970 		kobject_put(&schemes_arr[i]->kobj);
971 	}
972 	schemes->nr = 0;
973 	kfree(schemes_arr);
974 	schemes->schemes_arr = NULL;
975 }
976 
/*
 * Replace the current scheme sub-directories with @nr_schemes freshly
 * created ones (named "0" .. nr_schemes-1, action defaulting to
 * DAMOS_STAT).  Returns 0 on success, negative error code otherwise; on
 * failure everything created so far is torn down and the directory is left
 * empty.  Caller must hold damon_sysfs_lock.
 */
static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
		int nr_schemes)
{
	struct damon_sysfs_scheme **schemes_arr, *scheme;
	int err, i;

	/* Drop the old contents first; leaves nr == 0, schemes_arr == NULL */
	damon_sysfs_schemes_rm_dirs(schemes);
	if (!nr_schemes)
		return 0;

	schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!schemes_arr)
		return -ENOMEM;
	schemes->schemes_arr = schemes_arr;

	for (i = 0; i < nr_schemes; i++) {
		scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
		if (!scheme) {
			damon_sysfs_schemes_rm_dirs(schemes);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&scheme->kobj,
				&damon_sysfs_scheme_ktype, &schemes->kobj,
				"%d", i);
		if (err)
			goto out;
		err = damon_sysfs_scheme_add_dirs(scheme);
		if (err)
			goto out;

		/* Only fully constructed schemes are counted in ->nr */
		schemes_arr[i] = scheme;
		schemes->nr++;
	}
	return 0;

out:
	/* Drops the i fully-created schemes; the failing one is put here */
	damon_sysfs_schemes_rm_dirs(schemes);
	kobject_put(&scheme->kobj);
	return err;
}
1019 
1020 static ssize_t nr_schemes_show(struct kobject *kobj,
1021 		struct kobj_attribute *attr, char *buf)
1022 {
1023 	struct damon_sysfs_schemes *schemes = container_of(kobj,
1024 			struct damon_sysfs_schemes, kobj);
1025 
1026 	return sysfs_emit(buf, "%d\n", schemes->nr);
1027 }
1028 
1029 static ssize_t nr_schemes_store(struct kobject *kobj,
1030 		struct kobj_attribute *attr, const char *buf, size_t count)
1031 {
1032 	struct damon_sysfs_schemes *schemes = container_of(kobj,
1033 			struct damon_sysfs_schemes, kobj);
1034 	int nr, err = kstrtoint(buf, 0, &nr);
1035 
1036 	if (err)
1037 		return err;
1038 	if (nr < 0)
1039 		return -EINVAL;
1040 
1041 	if (!mutex_trylock(&damon_sysfs_lock))
1042 		return -EBUSY;
1043 	err = damon_sysfs_schemes_add_dirs(schemes, nr);
1044 	mutex_unlock(&damon_sysfs_lock);
1045 	if (err)
1046 		return err;
1047 	return count;
1048 }
1049 
/* kobject release callback: frees the containing schemes object. */
static void damon_sysfs_schemes_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
}

/* 'nr_schemes' file, read/write for root (0600). */
static struct kobj_attribute damon_sysfs_schemes_nr_attr =
		__ATTR_RW_MODE(nr_schemes, 0600);

static struct attribute *damon_sysfs_schemes_attrs[] = {
	&damon_sysfs_schemes_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_schemes);

static struct kobj_type damon_sysfs_schemes_ktype = {
	.release = damon_sysfs_schemes_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_schemes_groups,
};
1069 
1070 /*
1071  * init region directory
1072  */
1073 
/* One initial monitoring target region directory ([start, end) addresses). */
struct damon_sysfs_region {
	struct kobject kobj;	/* sysfs directory object (embedded) */
	unsigned long start;	/* start address, user-set via 'start' file */
	unsigned long end;	/* end address, user-set via 'end' file */
};
1079 
1080 static struct damon_sysfs_region *damon_sysfs_region_alloc(
1081 		unsigned long start,
1082 		unsigned long end)
1083 {
1084 	struct damon_sysfs_region *region = kmalloc(sizeof(*region),
1085 			GFP_KERNEL);
1086 
1087 	if (!region)
1088 		return NULL;
1089 	region->kobj = (struct kobject){};
1090 	region->start = start;
1091 	region->end = end;
1092 	return region;
1093 }
1094 
/* Show the start address of the region. */
static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->start);
}

/* Set the start address of the region. */
static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);
	/* kstrtoul() leaves region->start untouched on failure */
	int err = kstrtoul(buf, 0, &region->start);

	if (err)
		return -EINVAL;
	return count;
}

/* Show the end address of the region. */
static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->end);
}

/* Set the end address of the region. */
static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);
	/* kstrtoul() leaves region->end untouched on failure */
	int err = kstrtoul(buf, 0, &region->end);

	if (err)
		return -EINVAL;
	return count;
}

/* kobject release callback: frees the wrapper once the refcount drops. */
static void damon_sysfs_region_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_region, kobj));
}

static struct kobj_attribute damon_sysfs_region_start_attr =
		__ATTR_RW_MODE(start, 0600);

static struct kobj_attribute damon_sysfs_region_end_attr =
		__ATTR_RW_MODE(end, 0600);

static struct attribute *damon_sysfs_region_attrs[] = {
	&damon_sysfs_region_start_attr.attr,
	&damon_sysfs_region_end_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_region);

static struct kobj_type damon_sysfs_region_ktype = {
	.release = damon_sysfs_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_region_groups,
};
1160 
1161 /*
1162  * init_regions directory
1163  */
1164 
/* Wrapper of the 'regions' directory, holding its child region dirs. */
struct damon_sysfs_regions {
	struct kobject kobj;
	struct damon_sysfs_region **regions_arr;	/* nr entries */
	int nr;				/* number of child directories */
};

static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
}

/* Drop every child region directory and the array that holds them. */
static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
{
	struct damon_sysfs_region **regions_arr = regions->regions_arr;
	int i;

	for (i = 0; i < regions->nr; i++)
		kobject_put(&regions_arr[i]->kobj);
	regions->nr = 0;
	kfree(regions_arr);
	regions->regions_arr = NULL;
}
1187 
/*
 * Replace the children of 'regions' with @nr_regions fresh directories
 * named '0' .. 'nr_regions - 1'.  On failure, all partially created
 * children are torn down and an error code is returned.
 */
static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
		int nr_regions)
{
	struct damon_sysfs_region **regions_arr, *region;
	int err, i;

	/* start from a clean state; @nr_regions == 0 just clears */
	damon_sysfs_regions_rm_dirs(regions);
	if (!nr_regions)
		return 0;

	/* __GFP_NOWARN: @nr_regions is user-provided and may be huge */
	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!regions_arr)
		return -ENOMEM;
	regions->regions_arr = regions_arr;

	for (i = 0; i < nr_regions; i++) {
		region = damon_sysfs_region_alloc(0, 0);
		if (!region) {
			damon_sysfs_regions_rm_dirs(regions);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&region->kobj,
				&damon_sysfs_region_ktype, &regions->kobj,
				"%d", i);
		if (err) {
			/* put the initialized kobject to free @region */
			kobject_put(&region->kobj);
			damon_sysfs_regions_rm_dirs(regions);
			return err;
		}

		regions_arr[i] = region;
		regions->nr++;
	}
	return 0;
}
1225 
/* Show the number of child region directories. */
static ssize_t nr_regions_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_regions *regions = container_of(kobj,
			struct damon_sysfs_regions, kobj);

	return sysfs_emit(buf, "%d\n", regions->nr);
}

/* Resize the number of child region directories. */
static ssize_t nr_regions_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_regions *regions = container_of(kobj,
			struct damon_sysfs_regions, kobj);
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	/* trylock: fail with -EBUSY rather than sleeping on contention */
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_regions_add_dirs(regions, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}

/* kobject release callback: frees the wrapper once the refcount drops. */
static void damon_sysfs_regions_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
}

static struct kobj_attribute damon_sysfs_regions_nr_attr =
		__ATTR_RW_MODE(nr_regions, 0600);

static struct attribute *damon_sysfs_regions_attrs[] = {
	&damon_sysfs_regions_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_regions);

static struct kobj_type damon_sysfs_regions_ktype = {
	.release = damon_sysfs_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_regions_groups,
};
1276 
1277 /*
1278  * target directory
1279  */
1280 
/* Wrapper of one 'targets/<N>' directory: a single monitoring target. */
struct damon_sysfs_target {
	struct kobject kobj;
	struct damon_sysfs_regions *regions;	/* 'regions' child dir */
	int pid;	/* target process id; used for vaddr/fvaddr ops only */
};

static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
}

/* Create the 'regions' child directory of a target directory. */
static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
{
	struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
	int err;

	if (!regions)
		return -ENOMEM;

	err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
			&target->kobj, "regions");
	if (err)
		/* put frees @regions; target->regions stays NULL */
		kobject_put(&regions->kobj);
	else
		target->regions = regions;
	return err;
}

/* Tear down the 'regions' child directory of a target directory. */
static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
{
	damon_sysfs_regions_rm_dirs(target->regions);
	kobject_put(&target->regions->kobj);
}
1314 
/* Show the pid of the target (the sysfs file is named 'pid_target'). */
static ssize_t pid_target_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_target *target = container_of(kobj,
			struct damon_sysfs_target, kobj);

	return sysfs_emit(buf, "%d\n", target->pid);
}

/* Set the pid of the target. */
static ssize_t pid_target_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_target *target = container_of(kobj,
			struct damon_sysfs_target, kobj);
	/* kstrtoint() leaves target->pid untouched on failure */
	int err = kstrtoint(buf, 0, &target->pid);

	if (err)
		return -EINVAL;
	return count;
}

/* kobject release callback: frees the wrapper once the refcount drops. */
static void damon_sysfs_target_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_target, kobj));
}

static struct kobj_attribute damon_sysfs_target_pid_attr =
		__ATTR_RW_MODE(pid_target, 0600);

static struct attribute *damon_sysfs_target_attrs[] = {
	&damon_sysfs_target_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_target);

static struct kobj_type damon_sysfs_target_ktype = {
	.release = damon_sysfs_target_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_target_groups,
};
1355 
1356 /*
1357  * targets directory
1358  */
1359 
/* Wrapper of the 'targets' directory, holding its child target dirs. */
struct damon_sysfs_targets {
	struct kobject kobj;
	struct damon_sysfs_target **targets_arr;	/* nr entries */
	int nr;				/* number of child directories */
};

static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
}

/* Drop every child target directory (and its subtree) and the array. */
static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
{
	struct damon_sysfs_target **targets_arr = targets->targets_arr;
	int i;

	for (i = 0; i < targets->nr; i++) {
		/* remove the target's 'regions' subtree before the target */
		damon_sysfs_target_rm_dirs(targets_arr[i]);
		kobject_put(&targets_arr[i]->kobj);
	}
	targets->nr = 0;
	kfree(targets_arr);
	targets->targets_arr = NULL;
}
1384 
/*
 * Replace the children of 'targets' with @nr_targets fresh directories.
 * On failure, already-registered children are removed via
 * damon_sysfs_targets_rm_dirs() and the half-constructed target is
 * released with a final kobject_put().
 */
static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
		int nr_targets)
{
	struct damon_sysfs_target **targets_arr, *target;
	int err, i;

	/* start from a clean state; @nr_targets == 0 just clears */
	damon_sysfs_targets_rm_dirs(targets);
	if (!nr_targets)
		return 0;

	/* __GFP_NOWARN: @nr_targets is user-provided and may be huge */
	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!targets_arr)
		return -ENOMEM;
	targets->targets_arr = targets_arr;

	for (i = 0; i < nr_targets; i++) {
		target = damon_sysfs_target_alloc();
		if (!target) {
			damon_sysfs_targets_rm_dirs(targets);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&target->kobj,
				&damon_sysfs_target_ktype, &targets->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_target_add_dirs(target);
		if (err)
			goto out;

		targets_arr[i] = target;
		targets->nr++;
	}
	return 0;

out:
	/*
	 * The failed @target is not yet counted in targets->nr, so
	 * rm_dirs() only tears down the successfully added children;
	 * the final put frees @target itself.
	 */
	damon_sysfs_targets_rm_dirs(targets);
	kobject_put(&target->kobj);
	return err;
}
1428 
/* Show the number of child target directories. */
static ssize_t nr_targets_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_targets *targets = container_of(kobj,
			struct damon_sysfs_targets, kobj);

	return sysfs_emit(buf, "%d\n", targets->nr);
}
1437 
1438 static ssize_t nr_targets_store(struct kobject *kobj,
1439 		struct kobj_attribute *attr, const char *buf, size_t count)
1440 {
1441 	struct damon_sysfs_targets *targets = container_of(kobj,
1442 			struct damon_sysfs_targets, kobj);
1443 	int nr, err = kstrtoint(buf, 0, &nr);
1444 
1445 	if (err)
1446 		return err;
1447 	if (nr < 0)
1448 		return -EINVAL;
1449 
1450 	if (!mutex_trylock(&damon_sysfs_lock))
1451 		return -EBUSY;
1452 	err = damon_sysfs_targets_add_dirs(targets, nr);
1453 	mutex_unlock(&damon_sysfs_lock);
1454 	if (err)
1455 		return err;
1456 
1457 	return count;
1458 }
1459 
/* kobject release callback: frees the wrapper once the refcount drops. */
static void damon_sysfs_targets_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
}

static struct kobj_attribute damon_sysfs_targets_nr_attr =
		__ATTR_RW_MODE(nr_targets, 0600);

static struct attribute *damon_sysfs_targets_attrs[] = {
	&damon_sysfs_targets_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_targets);

static struct kobj_type damon_sysfs_targets_ktype = {
	.release = damon_sysfs_targets_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_targets_groups,
};
1479 
1480 /*
1481  * intervals directory
1482  */
1483 
/*
 * Wrapper of the 'intervals' directory: DAMON's sampling, aggregation,
 * and operations update intervals, all in microseconds (fed to
 * damon_set_attrs() at commit time).
 */
struct damon_sysfs_intervals {
	struct kobject kobj;
	unsigned long sample_us;
	unsigned long aggr_us;
	unsigned long update_us;
};
1490 
1491 static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
1492 		unsigned long sample_us, unsigned long aggr_us,
1493 		unsigned long update_us)
1494 {
1495 	struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
1496 			GFP_KERNEL);
1497 
1498 	if (!intervals)
1499 		return NULL;
1500 
1501 	intervals->kobj = (struct kobject){};
1502 	intervals->sample_us = sample_us;
1503 	intervals->aggr_us = aggr_us;
1504 	intervals->update_us = update_us;
1505 	return intervals;
1506 }
1507 
/* Show the sampling interval in microseconds. */
static ssize_t sample_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->sample_us);
}

/* Set the sampling interval in microseconds. */
static ssize_t sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return -EINVAL;

	intervals->sample_us = us;
	return count;
}

/* Show the aggregation interval in microseconds. */
static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
}

/* Set the aggregation interval in microseconds. */
static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return -EINVAL;

	intervals->aggr_us = us;
	return count;
}

/* Show the operations update interval in microseconds. */
static ssize_t update_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->update_us);
}

/* Set the operations update interval in microseconds. */
static ssize_t update_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return -EINVAL;

	intervals->update_us = us;
	return count;
}

/* kobject release callback: frees the wrapper once the refcount drops. */
static void damon_sysfs_intervals_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
}

static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
		__ATTR_RW_MODE(sample_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
		__ATTR_RW_MODE(aggr_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
		__ATTR_RW_MODE(update_us, 0600);

static struct attribute *damon_sysfs_intervals_attrs[] = {
	&damon_sysfs_intervals_sample_us_attr.attr,
	&damon_sysfs_intervals_aggr_us_attr.attr,
	&damon_sysfs_intervals_update_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals);

static struct kobj_type damon_sysfs_intervals_ktype = {
	.release = damon_sysfs_intervals_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_groups,
};
1607 
1608 /*
1609  * monitoring_attrs directory
1610  */
1611 
/* Wrapper of the 'monitoring_attrs' directory. */
struct damon_sysfs_attrs {
	struct kobject kobj;
	struct damon_sysfs_intervals *intervals;	/* 'intervals' dir */
	struct damon_sysfs_ul_range *nr_regions_range;	/* 'nr_regions' dir */
};

static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
{
	struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);

	if (!attrs)
		return NULL;
	attrs->kobj = (struct kobject){};
	return attrs;
}
1627 
/*
 * Create the 'intervals' and 'nr_regions' child directories of
 * 'monitoring_attrs', initialized with default values.  On failure, any
 * already-created child is put before returning the error.
 */
static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
{
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
	int err;

	/* defaults: 5 ms sampling, 100 ms aggregation, 60 s ops update */
	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
	if (!intervals)
		return -ENOMEM;

	err = kobject_init_and_add(&intervals->kobj,
			&damon_sysfs_intervals_ktype, &attrs->kobj,
			"intervals");
	if (err)
		goto put_intervals_out;
	attrs->intervals = intervals;

	/* default [min, max] number of monitoring regions */
	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
	if (!nr_regions_range) {
		err = -ENOMEM;
		goto put_intervals_out;
	}

	err = kobject_init_and_add(&nr_regions_range->kobj,
			&damon_sysfs_ul_range_ktype, &attrs->kobj,
			"nr_regions");
	if (err)
		goto put_nr_regions_intervals_out;
	attrs->nr_regions_range = nr_regions_range;
	return 0;

put_nr_regions_intervals_out:
	kobject_put(&nr_regions_range->kobj);
	attrs->nr_regions_range = NULL;
put_intervals_out:
	kobject_put(&intervals->kobj);
	attrs->intervals = NULL;
	return err;
}
1667 
/* Drop the child directories of 'monitoring_attrs'. */
static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
{
	kobject_put(&attrs->nr_regions_range->kobj);
	kobject_put(&attrs->intervals->kobj);
}

/* kobject release callback: frees the wrapper once the refcount drops. */
static void damon_sysfs_attrs_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
}

/* no files of its own; only child directories */
static struct attribute *damon_sysfs_attrs_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_attrs);

static struct kobj_type damon_sysfs_attrs_ktype = {
	.release = damon_sysfs_attrs_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_attrs_groups,
};
1689 
1690 /*
1691  * context directory
1692  */
1693 
/* User-visible names of the operation sets; must match enum damon_ops_id */
static const char * const damon_sysfs_ops_strs[] = {
	"vaddr",
	"fvaddr",
	"paddr",
};

/* Wrapper of a 'contexts/<N>' directory: one DAMON monitoring context. */
struct damon_sysfs_context {
	struct kobject kobj;
	enum damon_ops_id ops_id;	/* selected operations set */
	struct damon_sysfs_attrs *attrs;
	struct damon_sysfs_targets *targets;
	struct damon_sysfs_schemes *schemes;
};

static struct damon_sysfs_context *damon_sysfs_context_alloc(
		enum damon_ops_id ops_id)
{
	struct damon_sysfs_context *context = kmalloc(sizeof(*context),
				GFP_KERNEL);

	if (!context)
		return NULL;
	context->kobj = (struct kobject){};
	context->ops_id = ops_id;
	return context;
}
1721 
/* Create the 'monitoring_attrs' child directory of a context. */
static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
{
	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
	int err;

	if (!attrs)
		return -ENOMEM;
	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
			&context->kobj, "monitoring_attrs");
	if (err)
		goto out;
	/* attrs_add_dirs() cleans its own partial state on failure */
	err = damon_sysfs_attrs_add_dirs(attrs);
	if (err)
		goto out;
	context->attrs = attrs;
	return 0;

out:
	kobject_put(&attrs->kobj);
	return err;
}

/* Create the 'targets' child directory of a context. */
static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
{
	struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
	int err;

	if (!targets)
		return -ENOMEM;
	err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
			&context->kobj, "targets");
	if (err) {
		kobject_put(&targets->kobj);
		return err;
	}
	context->targets = targets;
	return 0;
}

/* Create the 'schemes' child directory of a context. */
static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
{
	struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
	int err;

	if (!schemes)
		return -ENOMEM;
	err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
			&context->kobj, "schemes");
	if (err) {
		kobject_put(&schemes->kobj);
		return err;
	}
	context->schemes = schemes;
	return 0;
}
1777 
/*
 * Create all child directories of a context directory.  On failure,
 * already-created children are put in reverse order of creation.
 */
static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
{
	int err;

	err = damon_sysfs_context_set_attrs(context);
	if (err)
		return err;

	err = damon_sysfs_context_set_targets(context);
	if (err)
		goto put_attrs_out;

	err = damon_sysfs_context_set_schemes(context);
	if (err)
		goto put_targets_attrs_out;
	return 0;

put_targets_attrs_out:
	kobject_put(&context->targets->kobj);
	context->targets = NULL;
put_attrs_out:
	kobject_put(&context->attrs->kobj);
	context->attrs = NULL;
	return err;
}

/* Tear down all child directories of a context directory. */
static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
{
	damon_sysfs_attrs_rm_dirs(context->attrs);
	kobject_put(&context->attrs->kobj);
	damon_sysfs_targets_rm_dirs(context->targets);
	kobject_put(&context->targets->kobj);
	damon_sysfs_schemes_rm_dirs(context->schemes);
	kobject_put(&context->schemes->kobj);
}
1813 
1814 static ssize_t avail_operations_show(struct kobject *kobj,
1815 		struct kobj_attribute *attr, char *buf)
1816 {
1817 	enum damon_ops_id id;
1818 	int len = 0;
1819 
1820 	for (id = 0; id < NR_DAMON_OPS; id++) {
1821 		if (!damon_is_registered_ops(id))
1822 			continue;
1823 		len += sysfs_emit_at(buf, len, "%s\n",
1824 				damon_sysfs_ops_strs[id]);
1825 	}
1826 	return len;
1827 }
1828 
/* Show the name of the operation set selected for this context. */
static ssize_t operations_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_context *context = container_of(kobj,
			struct damon_sysfs_context, kobj);

	return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
}
1837 
1838 static ssize_t operations_store(struct kobject *kobj,
1839 		struct kobj_attribute *attr, const char *buf, size_t count)
1840 {
1841 	struct damon_sysfs_context *context = container_of(kobj,
1842 			struct damon_sysfs_context, kobj);
1843 	enum damon_ops_id id;
1844 
1845 	for (id = 0; id < NR_DAMON_OPS; id++) {
1846 		if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
1847 			context->ops_id = id;
1848 			return count;
1849 		}
1850 	}
1851 	return -EINVAL;
1852 }
1853 
/* kobject release callback: frees the wrapper once the refcount drops. */
static void damon_sysfs_context_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_context, kobj));
}

static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
		__ATTR_RO_MODE(avail_operations, 0400);

static struct kobj_attribute damon_sysfs_context_operations_attr =
		__ATTR_RW_MODE(operations, 0600);

static struct attribute *damon_sysfs_context_attrs[] = {
	&damon_sysfs_context_avail_operations_attr.attr,
	&damon_sysfs_context_operations_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_context);

static struct kobj_type damon_sysfs_context_ktype = {
	.release = damon_sysfs_context_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_context_groups,
};
1877 
1878 /*
1879  * contexts directory
1880  */
1881 
/* Wrapper of the 'contexts' directory, holding its child context dirs. */
struct damon_sysfs_contexts {
	struct kobject kobj;
	struct damon_sysfs_context **contexts_arr;	/* nr entries */
	int nr;				/* number of child directories */
};

static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
}

/* Drop every child context directory (and its subtree) and the array. */
static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
{
	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
	int i;

	for (i = 0; i < contexts->nr; i++) {
		/* remove the context's subtree before the context itself */
		damon_sysfs_context_rm_dirs(contexts_arr[i]);
		kobject_put(&contexts_arr[i]->kobj);
	}
	contexts->nr = 0;
	kfree(contexts_arr);
	contexts->contexts_arr = NULL;
}
1906 
/*
 * Replace the children of 'contexts' with @nr_contexts fresh
 * directories, each defaulting to the vaddr operation set.  On failure,
 * registered children are removed and the half-constructed context is
 * released with a final kobject_put().
 */
static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
		int nr_contexts)
{
	struct damon_sysfs_context **contexts_arr, *context;
	int err, i;

	/* start from a clean state; @nr_contexts == 0 just clears */
	damon_sysfs_contexts_rm_dirs(contexts);
	if (!nr_contexts)
		return 0;

	/* __GFP_NOWARN: @nr_contexts is user-provided */
	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!contexts_arr)
		return -ENOMEM;
	contexts->contexts_arr = contexts_arr;

	for (i = 0; i < nr_contexts; i++) {
		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
		if (!context) {
			damon_sysfs_contexts_rm_dirs(contexts);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&context->kobj,
				&damon_sysfs_context_ktype, &contexts->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_context_add_dirs(context);
		if (err)
			goto out;

		contexts_arr[i] = context;
		contexts->nr++;
	}
	return 0;

out:
	/* the failed @context is not yet in contexts->nr; put frees it */
	damon_sysfs_contexts_rm_dirs(contexts);
	kobject_put(&context->kobj);
	return err;
}
1950 
/* Show the number of child context directories. */
static ssize_t nr_contexts_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_contexts *contexts = container_of(kobj,
			struct damon_sysfs_contexts, kobj);

	return sysfs_emit(buf, "%d\n", contexts->nr);
}

/* Resize the number of child context directories (currently 0 or 1). */
static ssize_t nr_contexts_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_contexts *contexts = container_of(kobj,
			struct damon_sysfs_contexts, kobj);
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	/* TODO: support multiple contexts per kdamond */
	if (nr < 0 || 1 < nr)
		return -EINVAL;

	/* trylock: fail with -EBUSY rather than sleeping on contention */
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_contexts_add_dirs(contexts, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}

/* kobject release callback: frees the wrapper once the refcount drops. */
static void damon_sysfs_contexts_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
}

static struct kobj_attribute damon_sysfs_contexts_nr_attr
		= __ATTR_RW_MODE(nr_contexts, 0600);

static struct attribute *damon_sysfs_contexts_attrs[] = {
	&damon_sysfs_contexts_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);

static struct kobj_type damon_sysfs_contexts_ktype = {
	.release = damon_sysfs_contexts_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_contexts_groups,
};
2003 
2004 /*
2005  * kdamond directory
2006  */
2007 
/* Wrapper of a 'kdamonds/<N>' directory: one kdamond worker thread. */
struct damon_sysfs_kdamond {
	struct kobject kobj;
	struct damon_sysfs_contexts *contexts;	/* 'contexts' child dir */
	struct damon_ctx *damon_ctx;	/* NULL until the kdamond is started */
};

static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
}

/* Create the 'contexts' child directory of a kdamond directory. */
static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_sysfs_contexts *contexts;
	int err;

	contexts = damon_sysfs_contexts_alloc();
	if (!contexts)
		return -ENOMEM;

	err = kobject_init_and_add(&contexts->kobj,
			&damon_sysfs_contexts_ktype, &kdamond->kobj,
			"contexts");
	if (err) {
		kobject_put(&contexts->kobj);
		return err;
	}
	kdamond->contexts = contexts;

	return err;
}

/* Tear down the 'contexts' subtree of a kdamond directory. */
static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
{
	damon_sysfs_contexts_rm_dirs(kdamond->contexts);
	kobject_put(&kdamond->contexts->kobj);
}
2045 
2046 static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
2047 {
2048 	bool running;
2049 
2050 	mutex_lock(&ctx->kdamond_lock);
2051 	running = ctx->kdamond != NULL;
2052 	mutex_unlock(&ctx->kdamond_lock);
2053 	return running;
2054 }
2055 
2056 /*
2057  * enum damon_sysfs_cmd - Commands for a specific kdamond.
2058  */
2059 enum damon_sysfs_cmd {
2060 	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
2061 	DAMON_SYSFS_CMD_ON,
2062 	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
2063 	DAMON_SYSFS_CMD_OFF,
2064 	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
2065 	DAMON_SYSFS_CMD_COMMIT,
2066 	/*
2067 	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
2068 	 * files.
2069 	 */
2070 	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
2071 	/*
2072 	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
2073 	 */
2074 	NR_DAMON_SYSFS_CMDS,
2075 };
2076 
2077 /* Should match with enum damon_sysfs_cmd */
2078 static const char * const damon_sysfs_cmd_strs[] = {
2079 	"on",
2080 	"off",
2081 	"commit",
2082 	"update_schemes_stats",
2083 };
2084 
2085 /*
2086  * struct damon_sysfs_cmd_request - A request to the DAMON callback.
2087  * @cmd:	The command that needs to be handled by the callback.
2088  * @kdamond:	The kobject wrapper that associated to the kdamond thread.
2089  *
2090  * This structure represents a sysfs command request that need to access some
2091  * DAMON context-internal data.  Because DAMON context-internal data can be
2092  * safely accessed from DAMON callbacks without additional synchronization, the
2093  * request will be handled by the DAMON callback.  None-``NULL`` @kdamond means
2094  * the request is valid.
2095  */
2096 struct damon_sysfs_cmd_request {
2097 	enum damon_sysfs_cmd cmd;
2098 	struct damon_sysfs_kdamond *kdamond;
2099 };
2100 
2101 /* Current DAMON callback request.  Protected by damon_sysfs_lock. */
2102 static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
2103 
2104 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
2105 		char *buf)
2106 {
2107 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2108 			struct damon_sysfs_kdamond, kobj);
2109 	struct damon_ctx *ctx = kdamond->damon_ctx;
2110 	bool running;
2111 
2112 	if (!ctx)
2113 		running = false;
2114 	else
2115 		running = damon_sysfs_ctx_running(ctx);
2116 
2117 	return sysfs_emit(buf, "%s\n", running ?
2118 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
2119 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
2120 }
2121 
/* Apply the user-set monitoring attributes from sysfs to @ctx. */
static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
		struct damon_sysfs_attrs *sys_attrs)
{
	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
	struct damon_sysfs_ul_range *sys_nr_regions =
		sys_attrs->nr_regions_range;

	return damon_set_attrs(ctx, sys_intervals->sample_us,
			sys_intervals->aggr_us, sys_intervals->update_us,
			sys_nr_regions->min, sys_nr_regions->max);
}

/*
 * Destroy every monitoring target of @ctx, dropping the pid references
 * that were taken for vaddr/fvaddr operation sets.
 */
static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	damon_for_each_target_safe(t, next, ctx) {
		if (ctx->ops.id == DAMON_OPS_VADDR ||
				ctx->ops.id == DAMON_OPS_FVADDR)
			put_pid(t->pid);
		damon_destroy_target(t);
	}
}
2145 
/*
 * Apply the user-set initial regions from sysfs to monitoring target
 * @t.  The regions must each have start <= end and be sorted without
 * overlap; otherwise -EINVAL is returned.
 */
static int damon_sysfs_set_regions(struct damon_target *t,
		struct damon_sysfs_regions *sysfs_regions)
{
	/* __GFP_NOWARN: sysfs_regions->nr is user-controlled */
	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	int i, err = -EINVAL;

	if (!ranges)
		return -ENOMEM;
	for (i = 0; i < sysfs_regions->nr; i++) {
		struct damon_sysfs_region *sys_region =
			sysfs_regions->regions_arr[i];

		if (sys_region->start > sys_region->end)
			goto out;

		ranges[i].start = sys_region->start;
		ranges[i].end = sys_region->end;
		if (i == 0)
			continue;
		/* regions must be sorted and non-overlapping */
		if (ranges[i - 1].end > ranges[i].start)
			goto out;
	}
	err = damon_set_regions(t, ranges, sysfs_regions->nr);
out:
	kfree(ranges);
	return err;

}
2175 
2176 static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
2177 		struct damon_ctx *ctx)
2178 {
2179 	struct damon_target *t = damon_new_target();
2180 	int err = -EINVAL;
2181 
2182 	if (!t)
2183 		return -ENOMEM;
2184 	if (ctx->ops.id == DAMON_OPS_VADDR ||
2185 			ctx->ops.id == DAMON_OPS_FVADDR) {
2186 		t->pid = find_get_pid(sys_target->pid);
2187 		if (!t->pid)
2188 			goto destroy_targets_out;
2189 	}
2190 	damon_add_target(ctx, t);
2191 	err = damon_sysfs_set_regions(t, sys_target->regions);
2192 	if (err)
2193 		goto destroy_targets_out;
2194 	return 0;
2195 
2196 destroy_targets_out:
2197 	damon_sysfs_destroy_targets(ctx);
2198 	return err;
2199 }
2200 
2201 /*
2202  * Search a target in a context that corresponds to the sysfs target input.
2203  *
2204  * Return: pointer to the target if found, NULL if not found, or negative
2205  * error code if the search failed.
2206  */
2207 static struct damon_target *damon_sysfs_existing_target(
2208 		struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
2209 {
2210 	struct pid *pid;
2211 	struct damon_target *t;
2212 
2213 	if (ctx->ops.id == DAMON_OPS_PADDR) {
2214 		/* Up to only one target for paddr could exist */
2215 		damon_for_each_target(t, ctx)
2216 			return t;
2217 		return NULL;
2218 	}
2219 
2220 	/* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
2221 	pid = find_get_pid(sys_target->pid);
2222 	if (!pid)
2223 		return ERR_PTR(-EINVAL);
2224 	damon_for_each_target(t, ctx) {
2225 		if (t->pid == pid) {
2226 			put_pid(pid);
2227 			return t;
2228 		}
2229 	}
2230 	put_pid(pid);
2231 	return NULL;
2232 }
2233 
2234 static int damon_sysfs_set_targets(struct damon_ctx *ctx,
2235 		struct damon_sysfs_targets *sysfs_targets)
2236 {
2237 	int i, err;
2238 
2239 	/* Multiple physical address space monitoring targets makes no sense */
2240 	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
2241 		return -EINVAL;
2242 
2243 	for (i = 0; i < sysfs_targets->nr; i++) {
2244 		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
2245 		struct damon_target *t = damon_sysfs_existing_target(st, ctx);
2246 
2247 		if (IS_ERR(t))
2248 			return PTR_ERR(t);
2249 		if (!t)
2250 			err = damon_sysfs_add_target(st, ctx);
2251 		else
2252 			err = damon_sysfs_set_regions(t, st->regions);
2253 		if (err)
2254 			return err;
2255 	}
2256 	return 0;
2257 }
2258 
2259 static struct damos *damon_sysfs_mk_scheme(
2260 		struct damon_sysfs_scheme *sysfs_scheme)
2261 {
2262 	struct damon_sysfs_access_pattern *pattern =
2263 		sysfs_scheme->access_pattern;
2264 	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
2265 	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
2266 	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
2267 	struct damos_quota quota = {
2268 		.ms = sysfs_quotas->ms,
2269 		.sz = sysfs_quotas->sz,
2270 		.reset_interval = sysfs_quotas->reset_interval_ms,
2271 		.weight_sz = sysfs_weights->sz,
2272 		.weight_nr_accesses = sysfs_weights->nr_accesses,
2273 		.weight_age = sysfs_weights->age,
2274 	};
2275 	struct damos_watermarks wmarks = {
2276 		.metric = sysfs_wmarks->metric,
2277 		.interval = sysfs_wmarks->interval_us,
2278 		.high = sysfs_wmarks->high,
2279 		.mid = sysfs_wmarks->mid,
2280 		.low = sysfs_wmarks->low,
2281 	};
2282 
2283 	return damon_new_scheme(pattern->sz->min, pattern->sz->max,
2284 			pattern->nr_accesses->min, pattern->nr_accesses->max,
2285 			pattern->age->min, pattern->age->max,
2286 			sysfs_scheme->action, &quota, &wmarks);
2287 }
2288 
2289 static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
2290 		struct damon_sysfs_schemes *sysfs_schemes)
2291 {
2292 	int i;
2293 
2294 	for (i = 0; i < sysfs_schemes->nr; i++) {
2295 		struct damos *scheme, *next;
2296 
2297 		scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
2298 		if (!scheme) {
2299 			damon_for_each_scheme_safe(scheme, next, ctx)
2300 				damon_destroy_scheme(scheme);
2301 			return -ENOMEM;
2302 		}
2303 		damon_add_scheme(ctx, scheme);
2304 	}
2305 	return 0;
2306 }
2307 
2308 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
2309 {
2310 	struct damon_target *t, *next;
2311 
2312 	if (ctx->ops.id != DAMON_OPS_VADDR && ctx->ops.id != DAMON_OPS_FVADDR)
2313 		return;
2314 
2315 	mutex_lock(&ctx->kdamond_lock);
2316 	damon_for_each_target_safe(t, next, ctx) {
2317 		put_pid(t->pid);
2318 		damon_destroy_target(t);
2319 	}
2320 	mutex_unlock(&ctx->kdamond_lock);
2321 }
2322 
2323 /*
2324  * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
2325  * @kdamond:	The kobject wrapper that associated to the kdamond thread.
2326  *
2327  * This function reads the schemes stats of specific kdamond and update the
2328  * related values for sysfs files.  This function should be called from DAMON
2329  * callbacks while holding ``damon_syfs_lock``, to safely access the DAMON
2330  * contexts-internal data and DAMON sysfs variables.
2331  */
2332 static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
2333 {
2334 	struct damon_ctx *ctx = kdamond->damon_ctx;
2335 	struct damon_sysfs_schemes *sysfs_schemes;
2336 	struct damos *scheme;
2337 	int schemes_idx = 0;
2338 
2339 	if (!ctx)
2340 		return -EINVAL;
2341 	sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
2342 	damon_for_each_scheme(scheme, ctx) {
2343 		struct damon_sysfs_stats *sysfs_stats;
2344 
2345 		sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
2346 		sysfs_stats->nr_tried = scheme->stat.nr_tried;
2347 		sysfs_stats->sz_tried = scheme->stat.sz_tried;
2348 		sysfs_stats->nr_applied = scheme->stat.nr_applied;
2349 		sysfs_stats->sz_applied = scheme->stat.sz_applied;
2350 		sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
2351 	}
2352 	return 0;
2353 }
2354 
2355 static inline bool damon_sysfs_kdamond_running(
2356 		struct damon_sysfs_kdamond *kdamond)
2357 {
2358 	return kdamond->damon_ctx &&
2359 		damon_sysfs_ctx_running(kdamond->damon_ctx);
2360 }
2361 
2362 /*
2363  * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
2364  * @kdamond:	The kobject wrapper for the associated kdamond.
2365  *
2366  * If the sysfs input is wrong, the kdamond will be terminated.
2367  */
2368 static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
2369 {
2370 	struct damon_ctx *ctx = kdamond->damon_ctx;
2371 	struct damon_sysfs_context *sys_ctx;
2372 	int err = 0;
2373 
2374 	if (!damon_sysfs_kdamond_running(kdamond))
2375 		return -EINVAL;
2376 	/* TODO: Support multiple contexts per kdamond */
2377 	if (kdamond->contexts->nr != 1)
2378 		return -EINVAL;
2379 
2380 	sys_ctx = kdamond->contexts->contexts_arr[0];
2381 
2382 	err = damon_select_ops(ctx, sys_ctx->ops_id);
2383 	if (err)
2384 		return err;
2385 	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2386 	if (err)
2387 		return err;
2388 	err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2389 	if (err)
2390 		return err;
2391 	err = damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2392 	if (err)
2393 		return err;
2394 	return err;
2395 }
2396 
2397 /*
2398  * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
2399  * @c:	The DAMON context of the callback.
2400  *
2401  * This function is periodically called back from the kdamond thread for @c.
2402  * Then, it checks if there is a waiting DAMON sysfs request and handles it.
2403  */
2404 static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
2405 {
2406 	struct damon_sysfs_kdamond *kdamond;
2407 	int err = 0;
2408 
2409 	/* avoid deadlock due to concurrent state_store('off') */
2410 	if (!mutex_trylock(&damon_sysfs_lock))
2411 		return 0;
2412 	kdamond = damon_sysfs_cmd_request.kdamond;
2413 	if (!kdamond || kdamond->damon_ctx != c)
2414 		goto out;
2415 	switch (damon_sysfs_cmd_request.cmd) {
2416 	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
2417 		err = damon_sysfs_upd_schemes_stats(kdamond);
2418 		break;
2419 	case DAMON_SYSFS_CMD_COMMIT:
2420 		err = damon_sysfs_commit_input(kdamond);
2421 		break;
2422 	default:
2423 		break;
2424 	}
2425 	/* Mark the request as invalid now. */
2426 	damon_sysfs_cmd_request.kdamond = NULL;
2427 out:
2428 	mutex_unlock(&damon_sysfs_lock);
2429 	return err;
2430 }
2431 
2432 static struct damon_ctx *damon_sysfs_build_ctx(
2433 		struct damon_sysfs_context *sys_ctx)
2434 {
2435 	struct damon_ctx *ctx = damon_new_ctx();
2436 	int err;
2437 
2438 	if (!ctx)
2439 		return ERR_PTR(-ENOMEM);
2440 
2441 	err = damon_select_ops(ctx, sys_ctx->ops_id);
2442 	if (err)
2443 		goto out;
2444 	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2445 	if (err)
2446 		goto out;
2447 	err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2448 	if (err)
2449 		goto out;
2450 	err = damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2451 	if (err)
2452 		goto out;
2453 
2454 	ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
2455 	ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
2456 	ctx->callback.before_terminate = damon_sysfs_before_terminate;
2457 	return ctx;
2458 
2459 out:
2460 	damon_destroy_ctx(ctx);
2461 	return ERR_PTR(err);
2462 }
2463 
2464 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
2465 {
2466 	struct damon_ctx *ctx;
2467 	int err;
2468 
2469 	if (kdamond->damon_ctx &&
2470 			damon_sysfs_ctx_running(kdamond->damon_ctx))
2471 		return -EBUSY;
2472 	if (damon_sysfs_cmd_request.kdamond == kdamond)
2473 		return -EBUSY;
2474 	/* TODO: support multiple contexts per kdamond */
2475 	if (kdamond->contexts->nr != 1)
2476 		return -EINVAL;
2477 
2478 	if (kdamond->damon_ctx)
2479 		damon_destroy_ctx(kdamond->damon_ctx);
2480 	kdamond->damon_ctx = NULL;
2481 
2482 	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
2483 	if (IS_ERR(ctx))
2484 		return PTR_ERR(ctx);
2485 	err = damon_start(&ctx, 1, false);
2486 	if (err) {
2487 		damon_destroy_ctx(ctx);
2488 		return err;
2489 	}
2490 	kdamond->damon_ctx = ctx;
2491 	return err;
2492 }
2493 
/* Handle the 'off' command: stop the kdamond of the wrapper, if running. */
static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	/*
	 * To allow users show final monitoring results of already turned-off
	 * DAMON, we free kdamond->damon_ctx in next
	 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
	 */
	return damon_stop(&kdamond->damon_ctx, 1);
}
2505 
/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd:	The command to handle.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  For commands
 * that need to access running DAMON context-internal data, it requests
 * handling of the command to the DAMON callback
 * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled,
 * or the context is completed.
 *
 * The caller (state_store()) holds ``damon_sysfs_lock``.  This function
 * temporarily drops the lock while waiting for the callback, and re-acquires
 * it before returning, so the caller's lock/unlock pairing stays intact.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	bool need_wait = true;

	/* Handle commands that doesn't access DAMON context-internal data */
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	default:
		break;
	}

	/* Pass the command to DAMON callback for safe DAMON context access */
	if (damon_sysfs_cmd_request.kdamond)
		return -EBUSY;
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	damon_sysfs_cmd_request.cmd = cmd;
	damon_sysfs_cmd_request.kdamond = kdamond;

	/*
	 * wait until damon_sysfs_cmd_request_callback() handles the request
	 * from kdamond context
	 */
	mutex_unlock(&damon_sysfs_lock);
	while (need_wait) {
		schedule_timeout_idle(msecs_to_jiffies(100));
		/* trylock: the callback itself may be holding the lock */
		if (!mutex_trylock(&damon_sysfs_lock))
			continue;
		if (!damon_sysfs_cmd_request.kdamond) {
			/* damon_sysfs_cmd_request_callback() handled */
			need_wait = false;
		} else if (!damon_sysfs_kdamond_running(kdamond)) {
			/* kdamond has already finished */
			need_wait = false;
			damon_sysfs_cmd_request.kdamond = NULL;
		}
		mutex_unlock(&damon_sysfs_lock);
	}
	/* re-acquire for the caller, who will unlock it */
	mutex_lock(&damon_sysfs_lock);
	return 0;
}
2564 
2565 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
2566 		const char *buf, size_t count)
2567 {
2568 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2569 			struct damon_sysfs_kdamond, kobj);
2570 	enum damon_sysfs_cmd cmd;
2571 	ssize_t ret = -EINVAL;
2572 
2573 	if (!mutex_trylock(&damon_sysfs_lock))
2574 		return -EBUSY;
2575 	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
2576 		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
2577 			ret = damon_sysfs_handle_cmd(cmd, kdamond);
2578 			break;
2579 		}
2580 	}
2581 	mutex_unlock(&damon_sysfs_lock);
2582 	if (!ret)
2583 		ret = count;
2584 	return ret;
2585 }
2586 
2587 static ssize_t pid_show(struct kobject *kobj,
2588 		struct kobj_attribute *attr, char *buf)
2589 {
2590 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2591 			struct damon_sysfs_kdamond, kobj);
2592 	struct damon_ctx *ctx;
2593 	int pid;
2594 
2595 	if (!mutex_trylock(&damon_sysfs_lock))
2596 		return -EBUSY;
2597 	ctx = kdamond->damon_ctx;
2598 	if (!ctx) {
2599 		pid = -1;
2600 		goto out;
2601 	}
2602 	mutex_lock(&ctx->kdamond_lock);
2603 	if (!ctx->kdamond)
2604 		pid = -1;
2605 	else
2606 		pid = ctx->kdamond->pid;
2607 	mutex_unlock(&ctx->kdamond_lock);
2608 out:
2609 	mutex_unlock(&damon_sysfs_lock);
2610 	return sysfs_emit(buf, "%d\n", pid);
2611 }
2612 
2613 static void damon_sysfs_kdamond_release(struct kobject *kobj)
2614 {
2615 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2616 			struct damon_sysfs_kdamond, kobj);
2617 
2618 	if (kdamond->damon_ctx)
2619 		damon_destroy_ctx(kdamond->damon_ctx);
2620 	kfree(kdamond);
2621 }
2622 
/* Files of each kdamond directory: 'state' (rw) and 'pid' (ro) */
static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

/* NULL-terminated attribute list, as sysfs requires */
static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

/* kobject type of kdamond directories; freed by the release handler */
static struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};
2641 
2642 /*
2643  * kdamonds directory
2644  */
2645 
/* Wrapper of the 'kdamonds' directory and its kdamond sub-directories. */
struct damon_sysfs_kdamonds {
	struct kobject kobj;
	/* array of 'nr' kdamond directory wrappers */
	struct damon_sysfs_kdamond **kdamonds_arr;
	int nr;
};
2651 
2652 static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
2653 {
2654 	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
2655 }
2656 
2657 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
2658 {
2659 	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
2660 	int i;
2661 
2662 	for (i = 0; i < kdamonds->nr; i++) {
2663 		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
2664 		kobject_put(&kdamonds_arr[i]->kobj);
2665 	}
2666 	kdamonds->nr = 0;
2667 	kfree(kdamonds_arr);
2668 	kdamonds->kdamonds_arr = NULL;
2669 }
2670 
2671 static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
2672 		int nr_kdamonds)
2673 {
2674 	int nr_running_ctxs = 0;
2675 	int i;
2676 
2677 	for (i = 0; i < nr_kdamonds; i++) {
2678 		struct damon_ctx *ctx = kdamonds[i]->damon_ctx;
2679 
2680 		if (!ctx)
2681 			continue;
2682 		mutex_lock(&ctx->kdamond_lock);
2683 		if (ctx->kdamond)
2684 			nr_running_ctxs++;
2685 		mutex_unlock(&ctx->kdamond_lock);
2686 	}
2687 	return nr_running_ctxs;
2688 }
2689 
/*
 * Resize the kdamonds directory to have @nr_kdamonds kdamond
 * sub-directories.  All existing sub-directories are removed first.
 * Fails with -EBUSY while any kdamond runs or is the target of an ongoing
 * sysfs command request.  The caller (nr_kdamonds_store()) holds
 * damon_sysfs_lock.
 */
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	for (i = 0; i < kdamonds->nr; i++) {
		if (damon_sysfs_cmd_request.kdamond ==
				kdamonds->kdamonds_arr[i])
			return -EBUSY;
	}

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		/* count only fully constructed entries in ->nr */
		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	/* the failing @kdamond is not in ->nr yet; put it separately */
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}
2742 
2743 static ssize_t nr_kdamonds_show(struct kobject *kobj,
2744 		struct kobj_attribute *attr, char *buf)
2745 {
2746 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2747 			struct damon_sysfs_kdamonds, kobj);
2748 
2749 	return sysfs_emit(buf, "%d\n", kdamonds->nr);
2750 }
2751 
2752 static ssize_t nr_kdamonds_store(struct kobject *kobj,
2753 		struct kobj_attribute *attr, const char *buf, size_t count)
2754 {
2755 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2756 			struct damon_sysfs_kdamonds, kobj);
2757 	int nr, err;
2758 
2759 	err = kstrtoint(buf, 0, &nr);
2760 	if (err)
2761 		return err;
2762 	if (nr < 0)
2763 		return -EINVAL;
2764 
2765 	if (!mutex_trylock(&damon_sysfs_lock))
2766 		return -EBUSY;
2767 	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2768 	mutex_unlock(&damon_sysfs_lock);
2769 	if (err)
2770 		return err;
2771 
2772 	return count;
2773 }
2774 
2775 static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2776 {
2777 	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2778 }
2779 
/* The only file of the 'kdamonds' directory: 'nr_kdamonds' (rw) */
static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

/* NULL-terminated attribute list, as sysfs requires */
static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

/* kobject type of the 'kdamonds' directory */
static struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};
2794 
2795 /*
2796  * damon user interface directory
2797  */
2798 
/* Wrapper of the top-level 'admin' directory (created under mm's 'damon'). */
struct damon_sysfs_ui_dir {
	struct kobject kobj;
	/* the 'kdamonds' sub-directory */
	struct damon_sysfs_kdamonds *kdamonds;
};
2803 
2804 static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2805 {
2806 	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2807 }
2808 
2809 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2810 {
2811 	struct damon_sysfs_kdamonds *kdamonds;
2812 	int err;
2813 
2814 	kdamonds = damon_sysfs_kdamonds_alloc();
2815 	if (!kdamonds)
2816 		return -ENOMEM;
2817 
2818 	err = kobject_init_and_add(&kdamonds->kobj,
2819 			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2820 			"kdamonds");
2821 	if (err) {
2822 		kobject_put(&kdamonds->kobj);
2823 		return err;
2824 	}
2825 	ui_dir->kdamonds = kdamonds;
2826 	return err;
2827 }
2828 
2829 static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2830 {
2831 	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2832 }
2833 
/* The 'admin' directory has no files of its own; empty attribute list */
static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

/* kobject type of the 'admin' directory */
static struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};
2844 
2845 static int __init damon_sysfs_init(void)
2846 {
2847 	struct kobject *damon_sysfs_root;
2848 	struct damon_sysfs_ui_dir *admin;
2849 	int err;
2850 
2851 	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
2852 	if (!damon_sysfs_root)
2853 		return -ENOMEM;
2854 
2855 	admin = damon_sysfs_ui_dir_alloc();
2856 	if (!admin) {
2857 		kobject_put(damon_sysfs_root);
2858 		return -ENOMEM;
2859 	}
2860 	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
2861 			damon_sysfs_root, "admin");
2862 	if (err)
2863 		goto out;
2864 	err = damon_sysfs_ui_dir_add_dirs(admin);
2865 	if (err)
2866 		goto out;
2867 	return 0;
2868 
2869 out:
2870 	kobject_put(&admin->kobj);
2871 	kobject_put(damon_sysfs_root);
2872 	return err;
2873 }
2874 subsys_initcall(damon_sysfs_init);
2875