xref: /openbmc/linux/mm/damon/sysfs.c (revision d0c44de2d8ffd2e4780d360b34ee6614aa4af080)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * DAMON sysfs Interface
4   *
5   * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
6   */
7  
8  #include <linux/pid.h>
9  #include <linux/sched.h>
10  #include <linux/slab.h>
11  
12  #include "sysfs-common.h"
13  
14  /*
15   * init region directory
16   */
17  
/*
 * Wrapper of a single initial monitoring region, exposed as one numbered
 * directory under 'regions'.  @ar holds the [start, end) address range that
 * the 'start' and 'end' files read and write.
 */
struct damon_sysfs_region {
	struct kobject kobj;
	struct damon_addr_range ar;
};
22  
damon_sysfs_region_alloc(void)23  static struct damon_sysfs_region *damon_sysfs_region_alloc(void)
24  {
25  	return kzalloc(sizeof(struct damon_sysfs_region), GFP_KERNEL);
26  }
27  
/* 'start' file read handler: show the region's start address. */
static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->ar.start);
}

/* 'start' file write handler: parse and set the region's start address. */
static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);
	int err = kstrtoul(buf, 0, &region->ar.start);

	/* Propagate parse errors; otherwise report the whole input consumed. */
	return err ? err : count;
}

/* 'end' file read handler: show the region's end address. */
static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->ar.end);
}

/* 'end' file write handler: parse and set the region's end address. */
static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);
	int err = kstrtoul(buf, 0, &region->ar.end);

	return err ? err : count;
}

/* kobject release callback: frees the wrapping damon_sysfs_region. */
static void damon_sysfs_region_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_region, kobj));
}

static struct kobj_attribute damon_sysfs_region_start_attr =
		__ATTR_RW_MODE(start, 0600);

static struct kobj_attribute damon_sysfs_region_end_attr =
		__ATTR_RW_MODE(end, 0600);

/* Files of each region directory: 'start' and 'end'. */
static struct attribute *damon_sysfs_region_attrs[] = {
	&damon_sysfs_region_start_attr.attr,
	&damon_sysfs_region_end_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_region);

static const struct kobj_type damon_sysfs_region_ktype = {
	.release = damon_sysfs_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_region_groups,
};
89  
90  /*
91   * init_regions directory
92   */
93  
/*
 * Wrapper of the 'regions' directory.  @regions_arr is a kmalloc'd array of
 * @nr pointers to the child region directories.
 */
struct damon_sysfs_regions {
	struct kobject kobj;
	struct damon_sysfs_region **regions_arr;
	int nr;
};
99  
damon_sysfs_regions_alloc(void)100  static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
101  {
102  	return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
103  }
104  
damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions * regions)105  static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
106  {
107  	struct damon_sysfs_region **regions_arr = regions->regions_arr;
108  	int i;
109  
110  	for (i = 0; i < regions->nr; i++)
111  		kobject_put(&regions_arr[i]->kobj);
112  	regions->nr = 0;
113  	kfree(regions_arr);
114  	regions->regions_arr = NULL;
115  }
116  
/*
 * damon_sysfs_regions_add_dirs() - Recreate the child region directories.
 * @regions:	Wrapper of the 'regions' directory.
 * @nr_regions:	Number of numbered ('0', '1', ...) child directories to make.
 *
 * Removes all existing children first, then creates @nr_regions fresh ones.
 * Returns 0 on success, a negative error code otherwise; on failure no
 * partially-built child is left behind.
 */
static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
		int nr_regions)
{
	struct damon_sysfs_region **regions_arr, *region;
	int err, i;

	damon_sysfs_regions_rm_dirs(regions);
	if (!nr_regions)
		return 0;

	/* @nr_regions is user-supplied; don't warn on allocation failure. */
	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!regions_arr)
		return -ENOMEM;
	regions->regions_arr = regions_arr;

	for (i = 0; i < nr_regions; i++) {
		region = damon_sysfs_region_alloc();
		if (!region) {
			damon_sysfs_regions_rm_dirs(regions);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&region->kobj,
				&damon_sysfs_region_ktype, &regions->kobj,
				"%d", i);
		if (err) {
			/*
			 * Put the current, not-yet-registered kobject
			 * directly; rm_dirs() handles only the first @nr
			 * (already registered) children.
			 */
			kobject_put(&region->kobj);
			damon_sysfs_regions_rm_dirs(regions);
			return err;
		}

		regions_arr[i] = region;
		regions->nr++;
	}
	return 0;
}
154  
/* 'nr_regions' read handler: show the number of child region directories. */
static ssize_t nr_regions_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_regions *regions = container_of(kobj,
			struct damon_sysfs_regions, kobj);

	return sysfs_emit(buf, "%d\n", regions->nr);
}

/*
 * 'nr_regions' write handler: rebuild the child directories with the
 * requested count, under the DAMON sysfs global lock.
 */
static ssize_t nr_regions_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_regions *regions;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	regions = container_of(kobj, struct damon_sysfs_regions, kobj);

	/* Don't block; tell userspace to retry if the lock is contended. */
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_regions_add_dirs(regions, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}

/* kobject release callback: frees the wrapping damon_sysfs_regions. */
static void damon_sysfs_regions_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
}

static struct kobj_attribute damon_sysfs_regions_nr_attr =
		__ATTR_RW_MODE(nr_regions, 0600);

static struct attribute *damon_sysfs_regions_attrs[] = {
	&damon_sysfs_regions_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_regions);

static const struct kobj_type damon_sysfs_regions_ktype = {
	.release = damon_sysfs_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_regions_groups,
};
206  
207  /*
208   * target directory
209   */
210  
/*
 * Wrapper of one monitoring target directory.  @regions is the child
 * 'regions' directory wrapper; @pid is the value of the 'pid_target' file.
 */
struct damon_sysfs_target {
	struct kobject kobj;
	struct damon_sysfs_regions *regions;
	int pid;
};
216  
/* Allocate a zero-initialized target wrapper; returns NULL on failure. */
static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
}

/*
 * Create the 'regions' subdirectory of @target.  Returns 0 on success,
 * negative error code otherwise.  On failure the regions kobject is put
 * (and thereby freed) and target->regions is left unset.
 */
static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
{
	struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
	int err;

	if (!regions)
		return -ENOMEM;

	err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
			&target->kobj, "regions");
	if (err)
		kobject_put(&regions->kobj);
	else
		target->regions = regions;
	return err;
}

/* Tear down @target's 'regions' subdirectory and its children. */
static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
{
	damon_sysfs_regions_rm_dirs(target->regions);
	kobject_put(&target->regions->kobj);
}
244  
pid_target_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)245  static ssize_t pid_target_show(struct kobject *kobj,
246  		struct kobj_attribute *attr, char *buf)
247  {
248  	struct damon_sysfs_target *target = container_of(kobj,
249  			struct damon_sysfs_target, kobj);
250  
251  	return sysfs_emit(buf, "%d\n", target->pid);
252  }
253  
pid_target_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)254  static ssize_t pid_target_store(struct kobject *kobj,
255  		struct kobj_attribute *attr, const char *buf, size_t count)
256  {
257  	struct damon_sysfs_target *target = container_of(kobj,
258  			struct damon_sysfs_target, kobj);
259  	int err = kstrtoint(buf, 0, &target->pid);
260  
261  	if (err)
262  		return -EINVAL;
263  	return count;
264  }
265  
/* kobject release callback: frees the wrapping damon_sysfs_target. */
static void damon_sysfs_target_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_target, kobj));
}

static struct kobj_attribute damon_sysfs_target_pid_attr =
		__ATTR_RW_MODE(pid_target, 0600);

/* Files of each target directory: 'pid_target'. */
static struct attribute *damon_sysfs_target_attrs[] = {
	&damon_sysfs_target_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_target);

static const struct kobj_type damon_sysfs_target_ktype = {
	.release = damon_sysfs_target_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_target_groups,
};
285  
286  /*
287   * targets directory
288   */
289  
/*
 * Wrapper of the 'targets' directory.  @targets_arr is a kmalloc'd array of
 * @nr pointers to the child target directories.
 */
struct damon_sysfs_targets {
	struct kobject kobj;
	struct damon_sysfs_target **targets_arr;
	int nr;
};

/* Allocate a zero-initialized 'targets' directory wrapper, or NULL. */
static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
}

/*
 * Drop every child target directory (including each target's own
 * subdirectories) and free the pointer array, leaving @targets empty.
 */
static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
{
	struct damon_sysfs_target **targets_arr = targets->targets_arr;
	int i;

	for (i = 0; i < targets->nr; i++) {
		damon_sysfs_target_rm_dirs(targets_arr[i]);
		kobject_put(&targets_arr[i]->kobj);
	}
	targets->nr = 0;
	kfree(targets_arr);
	targets->targets_arr = NULL;
}
314  
/*
 * damon_sysfs_targets_add_dirs() - Recreate the child target directories.
 * @targets:	Wrapper of the 'targets' directory.
 * @nr_targets:	Number of numbered child directories to create.
 *
 * Removes all existing children first, then creates @nr_targets fresh ones,
 * each with its own 'regions' subdirectory.  Returns 0 on success, negative
 * error code otherwise; on failure nothing is left behind.
 */
static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
		int nr_targets)
{
	struct damon_sysfs_target **targets_arr, *target;
	int err, i;

	damon_sysfs_targets_rm_dirs(targets);
	if (!nr_targets)
		return 0;

	/* @nr_targets is user-supplied; don't warn on allocation failure. */
	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!targets_arr)
		return -ENOMEM;
	targets->targets_arr = targets_arr;

	for (i = 0; i < nr_targets; i++) {
		target = damon_sysfs_target_alloc();
		if (!target) {
			damon_sysfs_targets_rm_dirs(targets);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&target->kobj,
				&damon_sysfs_target_ktype, &targets->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_target_add_dirs(target);
		if (err)
			goto out;

		targets_arr[i] = target;
		targets->nr++;
	}
	return 0;

out:
	/*
	 * rm_dirs() covers only the fully-added first @nr children; the
	 * current @target is not in the array yet, so put it separately.
	 */
	damon_sysfs_targets_rm_dirs(targets);
	kobject_put(&target->kobj);
	return err;
}
358  
/* 'nr_targets' read handler: show the number of child target directories. */
static ssize_t nr_targets_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_targets *targets = container_of(kobj,
			struct damon_sysfs_targets, kobj);

	return sysfs_emit(buf, "%d\n", targets->nr);
}

/*
 * 'nr_targets' write handler: rebuild the child directories with the
 * requested count, under the DAMON sysfs global lock.
 */
static ssize_t nr_targets_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_targets *targets;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	targets = container_of(kobj, struct damon_sysfs_targets, kobj);

	/* Don't block; tell userspace to retry if the lock is contended. */
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_targets_add_dirs(targets, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}

/* kobject release callback: frees the wrapping damon_sysfs_targets. */
static void damon_sysfs_targets_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
}

static struct kobj_attribute damon_sysfs_targets_nr_attr =
		__ATTR_RW_MODE(nr_targets, 0600);

static struct attribute *damon_sysfs_targets_attrs[] = {
	&damon_sysfs_targets_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_targets);

static const struct kobj_type damon_sysfs_targets_ktype = {
	.release = damon_sysfs_targets_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_targets_groups,
};
410  
411  /*
412   * intervals directory
413   */
414  
/*
 * Wrapper of the 'intervals' directory.  The three fields back the
 * 'sample_us', 'aggr_us', and 'update_us' files (all in microseconds).
 */
struct damon_sysfs_intervals {
	struct kobject kobj;
	unsigned long sample_us;
	unsigned long aggr_us;
	unsigned long update_us;
};

/*
 * Allocate an intervals wrapper pre-filled with the given values; returns
 * NULL on allocation failure.
 */
static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
		unsigned long sample_us, unsigned long aggr_us,
		unsigned long update_us)
{
	struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
			GFP_KERNEL);

	if (!intervals)
		return NULL;

	/* kmalloc'd, so explicitly zero the embedded kobject. */
	intervals->kobj = (struct kobject){};
	intervals->sample_us = sample_us;
	intervals->aggr_us = aggr_us;
	intervals->update_us = update_us;
	return intervals;
}
438  
/* 'sample_us' read handler: show the sampling interval. */
static ssize_t sample_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->sample_us);
}

/* 'sample_us' write handler: parse and set the sampling interval. */
static ssize_t sample_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return err;

	intervals->sample_us = us;
	return count;
}

/* 'aggr_us' read handler: show the aggregation interval. */
static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
}

/* 'aggr_us' write handler: parse and set the aggregation interval. */
static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return err;

	intervals->aggr_us = us;
	return count;
}

/* 'update_us' read handler: show the ops-update interval. */
static ssize_t update_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->update_us);
}

/* 'update_us' write handler: parse and set the ops-update interval. */
static ssize_t update_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return err;

	intervals->update_us = us;
	return count;
}
510  
/* kobject release callback: frees the wrapping damon_sysfs_intervals. */
static void damon_sysfs_intervals_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
}

static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
		__ATTR_RW_MODE(sample_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
		__ATTR_RW_MODE(aggr_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
		__ATTR_RW_MODE(update_us, 0600);

/* Files of the intervals directory. */
static struct attribute *damon_sysfs_intervals_attrs[] = {
	&damon_sysfs_intervals_sample_us_attr.attr,
	&damon_sysfs_intervals_aggr_us_attr.attr,
	&damon_sysfs_intervals_update_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals);

static const struct kobj_type damon_sysfs_intervals_ktype = {
	.release = damon_sysfs_intervals_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_groups,
};
538  
539  /*
540   * monitoring_attrs directory
541   */
542  
/*
 * Wrapper of the 'monitoring_attrs' directory, holding its two child
 * directory wrappers: 'intervals' and the 'nr_regions' min/max range.
 */
struct damon_sysfs_attrs {
	struct kobject kobj;
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
};

/* Allocate a monitoring_attrs wrapper with a zeroed kobject, or NULL. */
static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
{
	struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);

	if (!attrs)
		return NULL;
	attrs->kobj = (struct kobject){};
	return attrs;
}
558  
/*
 * Create the 'intervals' and 'nr_regions' subdirectories of @attrs.
 * Returns 0 on success, negative error code otherwise; on failure both
 * children are torn down and the pointers stay NULL.
 */
static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
{
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
	int err;

	/* Defaults: 5ms sampling, 100ms aggregation, 60s ops update. */
	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
	if (!intervals)
		return -ENOMEM;

	err = kobject_init_and_add(&intervals->kobj,
			&damon_sysfs_intervals_ktype, &attrs->kobj,
			"intervals");
	if (err)
		goto put_intervals_out;
	attrs->intervals = intervals;

	/* Default nr_regions range: [10, 1000]. */
	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
	if (!nr_regions_range) {
		err = -ENOMEM;
		goto put_intervals_out;
	}

	err = kobject_init_and_add(&nr_regions_range->kobj,
			&damon_sysfs_ul_range_ktype, &attrs->kobj,
			"nr_regions");
	if (err)
		goto put_nr_regions_intervals_out;
	attrs->nr_regions_range = nr_regions_range;
	return 0;

	/* Unwind in reverse order of construction. */
put_nr_regions_intervals_out:
	kobject_put(&nr_regions_range->kobj);
	attrs->nr_regions_range = NULL;
put_intervals_out:
	kobject_put(&intervals->kobj);
	attrs->intervals = NULL;
	return err;
}

/* Tear down the 'nr_regions' and 'intervals' subdirectories of @attrs. */
static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
{
	kobject_put(&attrs->nr_regions_range->kobj);
	kobject_put(&attrs->intervals->kobj);
}
604  
/* kobject release callback: frees the wrapping damon_sysfs_attrs. */
static void damon_sysfs_attrs_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
}

/* The directory has no files of its own, only subdirectories. */
static struct attribute *damon_sysfs_attrs_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_attrs);

static const struct kobj_type damon_sysfs_attrs_ktype = {
	.release = damon_sysfs_attrs_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_attrs_groups,
};
620  
621  /*
622   * context directory
623   */
624  
/* This should match with enum damon_ops_id */
static const char * const damon_sysfs_ops_strs[] = {
	"vaddr",
	"fvaddr",
	"paddr",
};

/*
 * Wrapper of one monitoring context directory.  Child directory wrappers
 * (@attrs, @targets, @schemes) are filled by damon_sysfs_context_add_dirs().
 */
struct damon_sysfs_context {
	struct kobject kobj;
	enum damon_ops_id ops_id;
	struct damon_sysfs_attrs *attrs;
	struct damon_sysfs_targets *targets;
	struct damon_sysfs_schemes *schemes;
};

/*
 * Allocate a context wrapper for the given operations set; returns NULL on
 * allocation failure.  Only @kobj and @ops_id are initialized here.
 */
static struct damon_sysfs_context *damon_sysfs_context_alloc(
		enum damon_ops_id ops_id)
{
	struct damon_sysfs_context *context = kmalloc(sizeof(*context),
				GFP_KERNEL);

	if (!context)
		return NULL;
	context->kobj = (struct kobject){};
	context->ops_id = ops_id;
	return context;
}
652  
/*
 * Create the 'monitoring_attrs' subdirectory (and its children) of
 * @context.  Returns 0 on success, negative error code otherwise.
 */
static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
{
	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
	int err;

	if (!attrs)
		return -ENOMEM;
	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
			&context->kobj, "monitoring_attrs");
	if (err)
		goto out;
	err = damon_sysfs_attrs_add_dirs(attrs);
	if (err)
		goto out;
	context->attrs = attrs;
	return 0;

out:
	kobject_put(&attrs->kobj);
	return err;
}

/*
 * Create the 'targets' subdirectory of @context.  Returns 0 on success,
 * negative error code otherwise.
 */
static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
{
	struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
	int err;

	if (!targets)
		return -ENOMEM;
	err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
			&context->kobj, "targets");
	if (err) {
		kobject_put(&targets->kobj);
		return err;
	}
	context->targets = targets;
	return 0;
}

/*
 * Create the 'schemes' subdirectory of @context.  Returns 0 on success,
 * negative error code otherwise.
 */
static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
{
	struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
	int err;

	if (!schemes)
		return -ENOMEM;
	err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
			&context->kobj, "schemes");
	if (err) {
		kobject_put(&schemes->kobj);
		return err;
	}
	context->schemes = schemes;
	return 0;
}
708  
/*
 * Create all subdirectories of @context: 'monitoring_attrs', 'targets',
 * and 'schemes'.  Returns 0 on success, negative error code otherwise; on
 * failure, subdirectories created so far are unwound in reverse order.
 */
static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
{
	int err;

	err = damon_sysfs_context_set_attrs(context);
	if (err)
		return err;

	err = damon_sysfs_context_set_targets(context);
	if (err)
		goto put_attrs_out;

	err = damon_sysfs_context_set_schemes(context);
	if (err)
		goto put_targets_attrs_out;
	return 0;

put_targets_attrs_out:
	kobject_put(&context->targets->kobj);
	context->targets = NULL;
put_attrs_out:
	kobject_put(&context->attrs->kobj);
	context->attrs = NULL;
	return err;
}

/* Tear down all subdirectories of @context and their children. */
static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
{
	damon_sysfs_attrs_rm_dirs(context->attrs);
	kobject_put(&context->attrs->kobj);
	damon_sysfs_targets_rm_dirs(context->targets);
	kobject_put(&context->targets->kobj);
	damon_sysfs_schemes_rm_dirs(context->schemes);
	kobject_put(&context->schemes->kobj);
}
744  
avail_operations_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)745  static ssize_t avail_operations_show(struct kobject *kobj,
746  		struct kobj_attribute *attr, char *buf)
747  {
748  	enum damon_ops_id id;
749  	int len = 0;
750  
751  	for (id = 0; id < NR_DAMON_OPS; id++) {
752  		if (!damon_is_registered_ops(id))
753  			continue;
754  		len += sysfs_emit_at(buf, len, "%s\n",
755  				damon_sysfs_ops_strs[id]);
756  	}
757  	return len;
758  }
759  
/* 'operations' read handler: show the context's operations set name. */
static ssize_t operations_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_context *context = container_of(kobj,
			struct damon_sysfs_context, kobj);

	return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
}

/*
 * 'operations' write handler: set the context's operations set by name.
 * Returns -EINVAL when the written string matches no known name.
 */
static ssize_t operations_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_context *context = container_of(kobj,
			struct damon_sysfs_context, kobj);
	enum damon_ops_id id;

	for (id = 0; id < NR_DAMON_OPS; id++) {
		if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
			context->ops_id = id;
			return count;
		}
	}
	return -EINVAL;
}
784  
/* kobject release callback: frees the wrapping damon_sysfs_context. */
static void damon_sysfs_context_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_context, kobj));
}

/* 'avail_operations' is informational only, hence read-only 0400. */
static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
		__ATTR_RO_MODE(avail_operations, 0400);

static struct kobj_attribute damon_sysfs_context_operations_attr =
		__ATTR_RW_MODE(operations, 0600);

static struct attribute *damon_sysfs_context_attrs[] = {
	&damon_sysfs_context_avail_operations_attr.attr,
	&damon_sysfs_context_operations_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_context);

static const struct kobj_type damon_sysfs_context_ktype = {
	.release = damon_sysfs_context_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_context_groups,
};
808  
809  /*
810   * contexts directory
811   */
812  
/*
 * Wrapper of the 'contexts' directory.  @contexts_arr is a kmalloc'd array
 * of @nr pointers to the child context directories.
 */
struct damon_sysfs_contexts {
	struct kobject kobj;
	struct damon_sysfs_context **contexts_arr;
	int nr;
};

/* Allocate a zero-initialized 'contexts' directory wrapper, or NULL. */
static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
}

/*
 * Drop every child context directory (including each context's own
 * subdirectories) and free the pointer array, leaving @contexts empty.
 */
static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
{
	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
	int i;

	for (i = 0; i < contexts->nr; i++) {
		damon_sysfs_context_rm_dirs(contexts_arr[i]);
		kobject_put(&contexts_arr[i]->kobj);
	}
	contexts->nr = 0;
	kfree(contexts_arr);
	contexts->contexts_arr = NULL;
}
837  
/*
 * damon_sysfs_contexts_add_dirs() - Recreate the child context directories.
 * @contexts:	Wrapper of the 'contexts' directory.
 * @nr_contexts:	Number of numbered child directories to create.
 *
 * Removes all existing children first, then creates @nr_contexts fresh ones
 * (defaulting to the vaddr operations set), each with its full subtree.
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
		int nr_contexts)
{
	struct damon_sysfs_context **contexts_arr, *context;
	int err, i;

	damon_sysfs_contexts_rm_dirs(contexts);
	if (!nr_contexts)
		return 0;

	/* @nr_contexts is user-supplied; don't warn on allocation failure. */
	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!contexts_arr)
		return -ENOMEM;
	contexts->contexts_arr = contexts_arr;

	for (i = 0; i < nr_contexts; i++) {
		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
		if (!context) {
			damon_sysfs_contexts_rm_dirs(contexts);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&context->kobj,
				&damon_sysfs_context_ktype, &contexts->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_context_add_dirs(context);
		if (err)
			goto out;

		contexts_arr[i] = context;
		contexts->nr++;
	}
	return 0;

out:
	/*
	 * rm_dirs() covers only the fully-added first @nr children; the
	 * current @context is not in the array yet, so put it separately.
	 */
	damon_sysfs_contexts_rm_dirs(contexts);
	kobject_put(&context->kobj);
	return err;
}
881  
/* 'nr_contexts' read handler: show the number of child context dirs. */
static ssize_t nr_contexts_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_contexts *contexts = container_of(kobj,
			struct damon_sysfs_contexts, kobj);

	return sysfs_emit(buf, "%d\n", contexts->nr);
}

/*
 * 'nr_contexts' write handler: rebuild the child directories with the
 * requested count, under the DAMON sysfs global lock.
 */
static ssize_t nr_contexts_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_contexts *contexts;
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	/* TODO: support multiple contexts per kdamond */
	if (nr < 0 || 1 < nr)
		return -EINVAL;

	contexts = container_of(kobj, struct damon_sysfs_contexts, kobj);
	/* Don't block; tell userspace to retry if the lock is contended. */
	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_contexts_add_dirs(contexts, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}

/* kobject release callback: frees the wrapping damon_sysfs_contexts. */
static void damon_sysfs_contexts_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
}

static struct kobj_attribute damon_sysfs_contexts_nr_attr
		= __ATTR_RW_MODE(nr_contexts, 0600);

static struct attribute *damon_sysfs_contexts_attrs[] = {
	&damon_sysfs_contexts_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);

static const struct kobj_type damon_sysfs_contexts_ktype = {
	.release = damon_sysfs_contexts_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_contexts_groups,
};
934  
935  /*
936   * kdamond directory
937   */
938  
/*
 * Wrapper of one kdamond directory.  @contexts is the child 'contexts'
 * directory wrapper; @damon_ctx is the running DAMON context, if any.
 */
struct damon_sysfs_kdamond {
	struct kobject kobj;
	struct damon_sysfs_contexts *contexts;
	struct damon_ctx *damon_ctx;
};

/* Allocate a zero-initialized kdamond wrapper; returns NULL on failure. */
static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
}

/*
 * Create the 'contexts' subdirectory of @kdamond.  Returns 0 on success,
 * negative error code otherwise.
 */
static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_sysfs_contexts *contexts;
	int err;

	contexts = damon_sysfs_contexts_alloc();
	if (!contexts)
		return -ENOMEM;

	err = kobject_init_and_add(&contexts->kobj,
			&damon_sysfs_contexts_ktype, &kdamond->kobj,
			"contexts");
	if (err) {
		kobject_put(&contexts->kobj);
		return err;
	}
	kdamond->contexts = contexts;

	/* err is zero here. */
	return err;
}

/* Tear down @kdamond's 'contexts' subdirectory and its children. */
static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
{
	damon_sysfs_contexts_rm_dirs(kdamond->contexts);
	kobject_put(&kdamond->contexts->kobj);
}
976  
damon_sysfs_ctx_running(struct damon_ctx * ctx)977  static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
978  {
979  	bool running;
980  
981  	mutex_lock(&ctx->kdamond_lock);
982  	running = ctx->kdamond != NULL;
983  	mutex_unlock(&ctx->kdamond_lock);
984  	return running;
985  }
986  
/*
 * enum damon_sysfs_cmd - Commands for a specific kdamond.
 *
 * Should be kept in sync with damon_sysfs_cmd_strs[] below.
 */
enum damon_sysfs_cmd {
	/* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
	DAMON_SYSFS_CMD_ON,
	/* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
	DAMON_SYSFS_CMD_OFF,
	/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
	DAMON_SYSFS_CMD_COMMIT,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
	 * files.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: Update
	 * tried_regions/total_bytes sysfs files for each scheme.
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES,
	/*
	 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: Update schemes tried
	 * regions
	 */
	DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS,
	/*
	 * @DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: Clear schemes tried
	 * regions
	 */
	DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS,
	/*
	 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
	 */
	NR_DAMON_SYSFS_CMDS,
};
1022  
/*
 * User-visible command names, written to the 'state' file.  Order should
 * match with enum damon_sysfs_cmd.
 */
static const char * const damon_sysfs_cmd_strs[] = {
	"on",
	"off",
	"commit",
	"update_schemes_stats",
	"update_schemes_tried_bytes",
	"update_schemes_tried_regions",
	"clear_schemes_tried_regions",
};
1033  
/*
 * struct damon_sysfs_cmd_request - A request to the DAMON callback.
 * @cmd:	The command that needs to be handled by the callback.
 * @kdamond:	The kobject wrapper that associated to the kdamond thread.
 *
 * This structure represents a sysfs command request that needs to access some
 * DAMON context-internal data.  Because DAMON context-internal data can be
 * safely accessed from DAMON callbacks without additional synchronization, the
 * request will be handled by the DAMON callback.  Non-``NULL`` @kdamond means
 * the request is valid.
 */
struct damon_sysfs_cmd_request {
	enum damon_sysfs_cmd cmd;
	struct damon_sysfs_kdamond *kdamond;
};

/* Current DAMON callback request.  Protected by damon_sysfs_lock. */
static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
1052  
state_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)1053  static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
1054  		char *buf)
1055  {
1056  	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1057  			struct damon_sysfs_kdamond, kobj);
1058  	struct damon_ctx *ctx = kdamond->damon_ctx;
1059  	bool running;
1060  
1061  	if (!ctx)
1062  		running = false;
1063  	else
1064  		running = damon_sysfs_ctx_running(ctx);
1065  
1066  	return sysfs_emit(buf, "%s\n", running ?
1067  			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
1068  			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
1069  }
1070  
damon_sysfs_set_attrs(struct damon_ctx * ctx,struct damon_sysfs_attrs * sys_attrs)1071  static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
1072  		struct damon_sysfs_attrs *sys_attrs)
1073  {
1074  	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
1075  	struct damon_sysfs_ul_range *sys_nr_regions =
1076  		sys_attrs->nr_regions_range;
1077  	struct damon_attrs attrs = {
1078  		.sample_interval = sys_intervals->sample_us,
1079  		.aggr_interval = sys_intervals->aggr_us,
1080  		.ops_update_interval = sys_intervals->update_us,
1081  		.min_nr_regions = sys_nr_regions->min,
1082  		.max_nr_regions = sys_nr_regions->max,
1083  	};
1084  	return damon_set_attrs(ctx, &attrs);
1085  }
1086  
damon_sysfs_destroy_targets(struct damon_ctx * ctx)1087  static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
1088  {
1089  	struct damon_target *t, *next;
1090  	bool has_pid = damon_target_has_pid(ctx);
1091  
1092  	damon_for_each_target_safe(t, next, ctx) {
1093  		if (has_pid)
1094  			put_pid(t->pid);
1095  		damon_destroy_target(t);
1096  	}
1097  }
1098  
damon_sysfs_set_regions(struct damon_target * t,struct damon_sysfs_regions * sysfs_regions)1099  static int damon_sysfs_set_regions(struct damon_target *t,
1100  		struct damon_sysfs_regions *sysfs_regions)
1101  {
1102  	struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
1103  			sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
1104  	int i, err = -EINVAL;
1105  
1106  	if (!ranges)
1107  		return -ENOMEM;
1108  	for (i = 0; i < sysfs_regions->nr; i++) {
1109  		struct damon_sysfs_region *sys_region =
1110  			sysfs_regions->regions_arr[i];
1111  
1112  		if (sys_region->ar.start > sys_region->ar.end)
1113  			goto out;
1114  
1115  		ranges[i].start = sys_region->ar.start;
1116  		ranges[i].end = sys_region->ar.end;
1117  		if (i == 0)
1118  			continue;
1119  		if (ranges[i - 1].end > ranges[i].start)
1120  			goto out;
1121  	}
1122  	err = damon_set_regions(t, ranges, sysfs_regions->nr);
1123  out:
1124  	kfree(ranges);
1125  	return err;
1126  
1127  }
1128  
/*
 * Create a new monitoring target from @sys_target and add it to @ctx.
 *
 * Returns 0 on success.  Note that on failure, ALL monitoring targets of
 * @ctx (including previously added ones) are destroyed before returning the
 * negative error code.
 */
static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
		struct damon_ctx *ctx)
{
	struct damon_target *t = damon_new_target();
	int err = -EINVAL;

	if (!t)
		return -ENOMEM;
	damon_add_target(ctx, t);
	if (damon_target_has_pid(ctx)) {
		/* take a pid reference; dropped again on target destruction */
		t->pid = find_get_pid(sys_target->pid);
		if (!t->pid)
			goto destroy_targets_out;
	}
	err = damon_sysfs_set_regions(t, sys_target->regions);
	if (err)
		goto destroy_targets_out;
	return 0;

destroy_targets_out:
	damon_sysfs_destroy_targets(ctx);
	return err;
}
1152  
damon_sysfs_update_target_pid(struct damon_target * target,int pid)1153  static int damon_sysfs_update_target_pid(struct damon_target *target, int pid)
1154  {
1155  	struct pid *pid_new;
1156  
1157  	pid_new = find_get_pid(pid);
1158  	if (!pid_new)
1159  		return -EINVAL;
1160  
1161  	if (pid_new == target->pid) {
1162  		put_pid(pid_new);
1163  		return 0;
1164  	}
1165  
1166  	put_pid(target->pid);
1167  	target->pid = pid_new;
1168  	return 0;
1169  }
1170  
/* Update an existing monitoring @target of @ctx with the inputs in @sys_target. */
static int damon_sysfs_update_target(struct damon_target *target,
		struct damon_ctx *ctx,
		struct damon_sysfs_target *sys_target)
{
	int err = 0;

	if (damon_target_has_pid(ctx)) {
		err = damon_sysfs_update_target_pid(target, sys_target->pid);
		if (err)
			return err;
	}

	/*
	 * Do monitoring target region boundary update only if one or more
	 * regions are set by the user.  This is for keeping current monitoring
	 * target results and range easier, especially for dynamic monitoring
	 * target regions update ops like 'vaddr'.
	 */
	if (sys_target->regions->nr)
		err = damon_sysfs_set_regions(target, sys_target->regions);
	return err;
}
1193  
/*
 * Apply the user-specified targets of @sysfs_targets to @ctx: existing
 * targets are updated in order, surplus existing targets are destroyed, and
 * any remaining sysfs targets are newly added.
 */
static int damon_sysfs_set_targets(struct damon_ctx *ctx,
		struct damon_sysfs_targets *sysfs_targets)
{
	struct damon_target *t, *next;
	int i = 0, err;

	/* Multiple physical address space monitoring targets makes no sense */
	if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
		return -EINVAL;

	/* safe iteration, since surplus targets are destroyed in-loop */
	damon_for_each_target_safe(t, next, ctx) {
		if (i < sysfs_targets->nr) {
			err = damon_sysfs_update_target(t, ctx,
					sysfs_targets->targets_arr[i]);
			if (err)
				return err;
		} else {
			/* more existing targets than requested; trim */
			if (damon_target_has_pid(ctx))
				put_pid(t->pid);
			damon_destroy_target(t);
		}
		i++;
	}

	/* add targets the user requested beyond the existing ones */
	for (; i < sysfs_targets->nr; i++) {
		struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];

		err = damon_sysfs_add_target(st, ctx);
		if (err)
			return err;
	}
	return 0;
}
1227  
/* Whether an update_schemes_tried_{regions,bytes} command is in progress */
static bool damon_sysfs_schemes_regions_updating;
1229  
/*
 * DAMON callback that runs from the kdamond just before it terminates.
 *
 * If a tried-regions update command was in progress, finish it here and
 * release ``damon_sysfs_lock``, which damon_sysfs_cmd_request_callback() kept
 * holding for the duration of the update.  Then, if the ops has pid-based
 * targets, drop the pid references and destroy the targets.
 */
static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;
	struct damon_sysfs_kdamond *kdamond;
	enum damon_sysfs_cmd cmd;

	/* damon_sysfs_schemes_update_regions_stop() might not yet called */
	kdamond = damon_sysfs_cmd_request.kdamond;
	cmd = damon_sysfs_cmd_request.cmd;
	if (kdamond && ctx == kdamond->damon_ctx &&
			(cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS ||
			 cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) &&
			damon_sysfs_schemes_regions_updating) {
		damon_sysfs_schemes_update_regions_stop(ctx);
		damon_sysfs_schemes_regions_updating = false;
		/* release the lock the request callback kept for the update */
		mutex_unlock(&damon_sysfs_lock);
	}

	if (!damon_target_has_pid(ctx))
		return;

	/* drop the pid references the sysfs interface took for the targets */
	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}
1258  
/*
 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
 * @kdamond:	The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes stats of specific kdamond and update the
 * related values for sysfs files.  This function should be called from DAMON
 * callbacks while holding ``damon_sysfs_lock``, to safely access the DAMON
 * contexts-internal data and DAMON sysfs variables.
 */
static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx = kdamond->damon_ctx;

	if (!ctx)
		return -EINVAL;
	damon_sysfs_schemes_update_stats(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
	return 0;
}
1278  
/*
 * Start updating the schemes' tried regions sysfs files for @kdamond, or
 * only the total bytes thereof if @total_bytes_only.  Returns -EINVAL if
 * the kdamond has no DAMON context.
 */
static int damon_sysfs_upd_schemes_regions_start(
		struct damon_sysfs_kdamond *kdamond, bool total_bytes_only)
{
	struct damon_ctx *ctx = kdamond->damon_ctx;

	if (!ctx)
		return -EINVAL;
	return damon_sysfs_schemes_update_regions_start(
			kdamond->contexts->contexts_arr[0]->schemes, ctx,
			total_bytes_only);
}
1290  
damon_sysfs_upd_schemes_regions_stop(struct damon_sysfs_kdamond * kdamond)1291  static int damon_sysfs_upd_schemes_regions_stop(
1292  		struct damon_sysfs_kdamond *kdamond)
1293  {
1294  	struct damon_ctx *ctx = kdamond->damon_ctx;
1295  
1296  	if (!ctx)
1297  		return -EINVAL;
1298  	return damon_sysfs_schemes_update_regions_stop(ctx);
1299  }
1300  
/* Clear the schemes' tried regions sysfs content for @kdamond. */
static int damon_sysfs_clear_schemes_regions(
		struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx = kdamond->damon_ctx;

	if (!ctx)
		return -EINVAL;
	return damon_sysfs_schemes_clear_regions(
			kdamond->contexts->contexts_arr[0]->schemes, ctx);
}
1311  
/* Tell whether the kdamond of @kdamond is currently running. */
static inline bool damon_sysfs_kdamond_running(
		struct damon_sysfs_kdamond *kdamond)
{
	return kdamond->damon_ctx &&
		damon_sysfs_ctx_running(kdamond->damon_ctx);
}
1318  
/*
 * Apply all user inputs under @sys_ctx to @ctx: the operations set, the
 * monitoring attributes, the targets, and the schemes, in that order.
 * Returns 0 on success, or the first error code otherwise.
 */
static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
		struct damon_sysfs_context *sys_ctx)
{
	int err;

	err = damon_select_ops(ctx, sys_ctx->ops_id);
	if (err)
		return err;
	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
	if (err)
		return err;
	err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
	if (err)
		return err;
	return damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
}
1335  
/*
 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * If the sysfs input is wrong, the kdamond will be terminated.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
{
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	/* TODO: Support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	return damon_sysfs_apply_inputs(kdamond->damon_ctx,
			kdamond->contexts->contexts_arr[0]);
}
1353  
/*
 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
 * @c:	The DAMON context of the callback.
 *
 * This function is periodically called back from the kdamond thread for @c.
 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
 *
 * For the tried-regions update commands, ``damon_sysfs_lock`` is kept held
 * across callback invocations (the ``keep_lock_out`` path) until the update
 * finishes; damon_sysfs_before_terminate() releases it if the kdamond
 * terminates in between.
 */
static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
{
	struct damon_sysfs_kdamond *kdamond;
	bool total_bytes_only = false;
	int err = 0;

	/* avoid deadlock due to concurrent state_store('off') */
	if (!damon_sysfs_schemes_regions_updating &&
			!mutex_trylock(&damon_sysfs_lock))
		return 0;
	kdamond = damon_sysfs_cmd_request.kdamond;
	/* only handle requests targeting this context's kdamond */
	if (!kdamond || kdamond->damon_ctx != c)
		goto out;
	switch (damon_sysfs_cmd_request.cmd) {
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
		err = damon_sysfs_upd_schemes_stats(kdamond);
		break;
	case DAMON_SYSFS_CMD_COMMIT:
		err = damon_sysfs_commit_input(kdamond);
		break;
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES:
		total_bytes_only = true;
		fallthrough;
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS:
		if (!damon_sysfs_schemes_regions_updating) {
			/* first invocation: start the update, keep the lock */
			err = damon_sysfs_upd_schemes_regions_start(kdamond,
					total_bytes_only);
			if (!err) {
				damon_sysfs_schemes_regions_updating = true;
				goto keep_lock_out;
			}
		} else {
			/* second invocation: finish the update */
			err = damon_sysfs_upd_schemes_regions_stop(kdamond);
			damon_sysfs_schemes_regions_updating = false;
		}
		break;
	case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS:
		err = damon_sysfs_clear_schemes_regions(kdamond);
		break;
	default:
		break;
	}
	/* Mark the request as invalid now. */
	damon_sysfs_cmd_request.kdamond = NULL;
out:
	if (!damon_sysfs_schemes_regions_updating)
		mutex_unlock(&damon_sysfs_lock);
keep_lock_out:
	return err;
}
1411  
/*
 * Build a new DAMON context from the user inputs under @sys_ctx.  Returns
 * the context on success, or an ERR_PTR() on failure.
 */
static struct damon_ctx *damon_sysfs_build_ctx(
		struct damon_sysfs_context *sys_ctx)
{
	struct damon_ctx *ctx = damon_new_ctx();
	int err;

	if (!ctx)
		return ERR_PTR(-ENOMEM);

	err = damon_sysfs_apply_inputs(ctx, sys_ctx);
	if (err) {
		damon_destroy_ctx(ctx);
		return ERR_PTR(err);
	}

	/* hook sysfs command handling and cleanup into the kdamond */
	ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
	ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
	ctx->callback.before_terminate = damon_sysfs_before_terminate;
	return ctx;
}
1432  
/*
 * Turn DAMON on for @kdamond: build a fresh context from the sysfs inputs
 * (destroying any context left over from a previous run) and start it.
 */
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx;
	int err;

	if (damon_sysfs_kdamond_running(kdamond))
		return -EBUSY;
	/* a command for this kdamond is still pending in the callback */
	if (damon_sysfs_cmd_request.kdamond == kdamond)
		return -EBUSY;
	/* TODO: support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	/* free the context kept from the previous, already finished run */
	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	err = damon_start(&ctx, 1, false);
	if (err) {
		damon_destroy_ctx(ctx);
		return err;
	}
	kdamond->damon_ctx = ctx;
	return err;
}
1461  
/* Stop the running DAMON context of @kdamond. */
static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	return damon_stop(&kdamond->damon_ctx, 1);
	/*
	 * To allow users show final monitoring results of already turned-off
	 * DAMON, we free kdamond->damon_ctx in next
	 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
	 */
}
1473  
/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd:	The command to handle.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  For commands
 * that need to access running DAMON context-internal data, it requests
 * handling of the command to the DAMON callback
 * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled,
 * or the context is completed.
 *
 * Called with damon_sysfs_lock held (taken by state_store()); the lock is
 * temporarily dropped while waiting for the callback, and re-held on return.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	bool need_wait = true;

	/* Handle commands that doesn't access DAMON context-internal data */
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	default:
		break;
	}

	/* Pass the command to DAMON callback for safe DAMON context access */
	if (damon_sysfs_cmd_request.kdamond)
		return -EBUSY;
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	damon_sysfs_cmd_request.cmd = cmd;
	damon_sysfs_cmd_request.kdamond = kdamond;

	/*
	 * wait until damon_sysfs_cmd_request_callback() handles the request
	 * from kdamond context
	 */
	mutex_unlock(&damon_sysfs_lock);
	while (need_wait) {
		schedule_timeout_idle(msecs_to_jiffies(100));
		if (!mutex_trylock(&damon_sysfs_lock))
			continue;
		if (!damon_sysfs_cmd_request.kdamond) {
			/* damon_sysfs_cmd_request_callback() handled */
			need_wait = false;
		} else if (!damon_sysfs_kdamond_running(kdamond)) {
			/* kdamond has already finished */
			need_wait = false;
			damon_sysfs_cmd_request.kdamond = NULL;
		}
		mutex_unlock(&damon_sysfs_lock);
	}
	/* re-take the lock for the caller, who expects it held */
	mutex_lock(&damon_sysfs_lock);
	return 0;
}
1532  
state_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)1533  static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
1534  		const char *buf, size_t count)
1535  {
1536  	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1537  			struct damon_sysfs_kdamond, kobj);
1538  	enum damon_sysfs_cmd cmd;
1539  	ssize_t ret = -EINVAL;
1540  
1541  	if (!mutex_trylock(&damon_sysfs_lock))
1542  		return -EBUSY;
1543  	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
1544  		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
1545  			ret = damon_sysfs_handle_cmd(cmd, kdamond);
1546  			break;
1547  		}
1548  	}
1549  	mutex_unlock(&damon_sysfs_lock);
1550  	if (!ret)
1551  		ret = count;
1552  	return ret;
1553  }
1554  
/* Show the pid of the running kdamond thread, or -1 if it is not running. */
static ssize_t pid_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx;
	int pid = -1;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	ctx = kdamond->damon_ctx;
	if (!ctx)
		goto out;

	/* ctx->kdamond is protected by kdamond_lock */
	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		pid = ctx->kdamond->pid;
	mutex_unlock(&ctx->kdamond_lock);
out:
	mutex_unlock(&damon_sysfs_lock);
	return sysfs_emit(buf, "%d\n", pid);
}
1577  
/*
 * kobject release callback for a kdamond directory.  Destroys the DAMON
 * context that was kept for showing final monitoring results, then frees
 * the wrapper.
 */
static void damon_sysfs_kdamond_release(struct kobject *kobj)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);

	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kfree(kdamond);
}
1587  
/* 'state' file: read running state, write a command string */
static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

/* 'pid' file: read-only pid of the kdamond thread */
static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

static const struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};
1606  
1607  /*
1608   * kdamonds directory
1609   */
1610  
/*
 * struct damon_sysfs_kdamonds - Sysfs wrapper for the 'kdamonds' directory.
 * @kobj:		Embedded kobject backing the directory.
 * @kdamonds_arr:	Array of @nr kdamond directory wrappers.
 * @nr:			Number of entries in @kdamonds_arr.
 */
struct damon_sysfs_kdamonds {
	struct kobject kobj;
	struct damon_sysfs_kdamond **kdamonds_arr;
	int nr;
};
1616  
damon_sysfs_kdamonds_alloc(void)1617  static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
1618  {
1619  	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
1620  }
1621  
damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds * kdamonds)1622  static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
1623  {
1624  	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
1625  	int i;
1626  
1627  	for (i = 0; i < kdamonds->nr; i++) {
1628  		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
1629  		kobject_put(&kdamonds_arr[i]->kobj);
1630  	}
1631  	kdamonds->nr = 0;
1632  	kfree(kdamonds_arr);
1633  	kdamonds->kdamonds_arr = NULL;
1634  }
1635  
damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond ** kdamonds,int nr_kdamonds)1636  static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
1637  		int nr_kdamonds)
1638  {
1639  	int i;
1640  
1641  	for (i = 0; i < nr_kdamonds; i++) {
1642  		if (damon_sysfs_kdamond_running(kdamonds[i]) ||
1643  		    damon_sysfs_cmd_request.kdamond == kdamonds[i])
1644  			return true;
1645  	}
1646  
1647  	return false;
1648  }
1649  
/*
 * Replace the kdamond directories of @kdamonds with @nr_kdamonds fresh ones
 * (named "0" .. "nr_kdamonds - 1").  Fails with -EBUSY if any existing
 * kdamond is running or has a pending command.  On partial failure, all
 * directories created so far are removed.
 */
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		/* only fully-constructed entries are tracked in the array */
		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	/* remove the entries added so far, then drop the failed kobject */
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}
1696  
nr_kdamonds_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)1697  static ssize_t nr_kdamonds_show(struct kobject *kobj,
1698  		struct kobj_attribute *attr, char *buf)
1699  {
1700  	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
1701  			struct damon_sysfs_kdamonds, kobj);
1702  
1703  	return sysfs_emit(buf, "%d\n", kdamonds->nr);
1704  }
1705  
nr_kdamonds_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)1706  static ssize_t nr_kdamonds_store(struct kobject *kobj,
1707  		struct kobj_attribute *attr, const char *buf, size_t count)
1708  {
1709  	struct damon_sysfs_kdamonds *kdamonds;
1710  	int nr, err;
1711  
1712  	err = kstrtoint(buf, 0, &nr);
1713  	if (err)
1714  		return err;
1715  	if (nr < 0)
1716  		return -EINVAL;
1717  
1718  	kdamonds = container_of(kobj, struct damon_sysfs_kdamonds, kobj);
1719  
1720  	if (!mutex_trylock(&damon_sysfs_lock))
1721  		return -EBUSY;
1722  	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
1723  	mutex_unlock(&damon_sysfs_lock);
1724  	if (err)
1725  		return err;
1726  
1727  	return count;
1728  }
1729  
damon_sysfs_kdamonds_release(struct kobject * kobj)1730  static void damon_sysfs_kdamonds_release(struct kobject *kobj)
1731  {
1732  	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
1733  }
1734  
/* 'nr_kdamonds' file: read/write the number of kdamond directories */
static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

static const struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};
1749  
1750  /*
1751   * damon user interface directory
1752   */
1753  
/*
 * struct damon_sysfs_ui_dir - Sysfs wrapper for the top-level 'admin'
 * directory.
 * @kobj:	Embedded kobject backing the directory.
 * @kdamonds:	Wrapper for the 'kdamonds' subdirectory.
 */
struct damon_sysfs_ui_dir {
	struct kobject kobj;
	struct damon_sysfs_kdamonds *kdamonds;
};
1758  
damon_sysfs_ui_dir_alloc(void)1759  static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
1760  {
1761  	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
1762  }
1763  
damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir * ui_dir)1764  static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
1765  {
1766  	struct damon_sysfs_kdamonds *kdamonds;
1767  	int err;
1768  
1769  	kdamonds = damon_sysfs_kdamonds_alloc();
1770  	if (!kdamonds)
1771  		return -ENOMEM;
1772  
1773  	err = kobject_init_and_add(&kdamonds->kobj,
1774  			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
1775  			"kdamonds");
1776  	if (err) {
1777  		kobject_put(&kdamonds->kobj);
1778  		return err;
1779  	}
1780  	ui_dir->kdamonds = kdamonds;
1781  	return err;
1782  }
1783  
damon_sysfs_ui_dir_release(struct kobject * kobj)1784  static void damon_sysfs_ui_dir_release(struct kobject *kobj)
1785  {
1786  	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
1787  }
1788  
/* the 'admin' directory itself has no attribute files */
static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

static const struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};
1799  
/*
 * Build the DAMON sysfs hierarchy: a 'damon' directory under the mm kobject,
 * with 'admin' and 'admin/kdamonds' beneath it.
 */
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	return 0;

out:
	/* admin's release callback frees it; then drop the root */
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);
1830