/*
 * class.c - basic device class management
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2003-2004 Greg Kroah-Hartman
 * Copyright (c) 2003-2004 IBM Corp.
 *
 * This file is released under the GPLv2
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/mutex.h>
#include "base.h"

#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)

static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct class_attribute *class_attr = to_class_attr(attr);
	struct subsys_private *cp = to_subsys_private(kobj);
	ssize_t ret = -EIO;

	if (class_attr->show)
		ret = class_attr->show(cp->class, class_attr, buf);
	return ret;
}

static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	struct class_attribute *class_attr = to_class_attr(attr);
	struct subsys_private *cp = to_subsys_private(kobj);
	ssize_t ret = -EIO;

	if (class_attr->store)
		ret = class_attr->store(cp->class, class_attr, buf, count);
	return ret;
}

static void class_release(struct kobject *kobj)
{
	struct subsys_private *cp = to_subsys_private(kobj);
	struct class *class = cp->class;

	pr_debug("class '%s': release.\n", class->name);

	if (class->class_release)
		class->class_release(class);
	else
		pr_debug("class '%s' does not have a release() function, be careful\n",
			 class->name);

	kfree(cp);
}

static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
{
	struct subsys_private *cp = to_subsys_private(kobj);
	struct class *class = cp->class;

	return class->ns_type;
}

static const struct sysfs_ops class_sysfs_ops = {
	.show	   = class_attr_show,
	.store	   = class_attr_store,
};

static struct kobj_type class_ktype = {
	.sysfs_ops	= &class_sysfs_ops,
	.release	= class_release,
	.child_ns_type	= class_child_ns_type,
};

/* Hotplug events for classes go to the class subsys */
static struct kset *class_kset;

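/**
 * class_create_file_ns - create a sysfs file for a class, in a namespace
 * @cls: class to create the attribute file for
 * @attr: attribute descriptor
 * @ns: namespace tag to attach to the file, or NULL
 *
 * Returns 0 on success, or a negative errno (-EINVAL if @cls is NULL,
 * otherwise whatever sysfs_create_file_ns() returns).
 */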
int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
			 const void *ns)
{
	int error;

	if (cls)
		error = sysfs_create_file_ns(&cls->p->subsys.kobj,
					     &attr->attr, ns);
	else
		error = -EINVAL;
	return error;
}

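/**
 * class_remove_file_ns - remove a sysfs file previously created for a class
 * @cls: class the attribute file belongs to
 * @attr: attribute descriptor
 * @ns: namespace tag the file was created with, or NULL
 */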
void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
			  const void *ns)
{
	if (cls)
		sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
}
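/*
 * Illustrative sketch (not part of this file): most callers reach the two
 * functions above through the class_create_file()/class_remove_file()
 * wrappers in <linux/device.h>, which pass a NULL namespace tag.  The "foo"
 * class and "ready" attribute below are hypothetical.
 *
 *	static ssize_t ready_show(struct class *cls,
 *				  struct class_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "1\n");
 *	}
 *	static CLASS_ATTR_RO(ready);
 *
 *	// after registering the hypothetical foo_class:
 *	//	err = class_create_file(&foo_class, &class_attr_ready);
 *	// and on teardown:
 *	//	class_remove_file(&foo_class, &class_attr_ready);
 */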

static struct class *class_get(struct class *cls)
{
	if (cls)
		kset_get(&cls->p->subsys);
	return cls;
}

static void class_put(struct class *cls)
{
	if (cls)
		kset_put(&cls->p->subsys);
}

static int add_class_attrs(struct class *cls)
{
	int i;
	int error = 0;

	if (cls->class_attrs) {
		for (i = 0; cls->class_attrs[i].attr.name; i++) {
			error = class_create_file(cls, &cls->class_attrs[i]);
			if (error)
				goto error;
		}
	}
done:
	return error;
error:
	while (--i >= 0)
		class_remove_file(cls, &cls->class_attrs[i]);
	goto done;
}

static void remove_class_attrs(struct class *cls)
{
	int i;

	if (cls->class_attrs) {
		for (i = 0; cls->class_attrs[i].attr.name; i++)
			class_remove_file(cls, &cls->class_attrs[i]);
	}
}

static void klist_class_dev_get(struct klist_node *n)
{
	struct device *dev = container_of(n, struct device, knode_class);

	get_device(dev);
}

static void klist_class_dev_put(struct klist_node *n)
{
	struct device *dev = container_of(n, struct device, knode_class);

	put_device(dev);
}

static int class_add_groups(struct class *cls,
			    const struct attribute_group **groups)
{
	return sysfs_create_groups(&cls->p->subsys.kobj, groups);
}

static void class_remove_groups(struct class *cls,
				const struct attribute_group **groups)
{
	return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
}

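/**
 * __class_register - register a class with the driver core
 * @cls: class to register
 * @key: lock_class_key used by mutex lock debugging for the subsys mutex
 *
 * Callers normally use the class_register() wrapper, which supplies the
 * lock class key.  Returns 0 on success or a negative errno on failure.
 */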
int __class_register(struct class *cls, struct lock_class_key *key)
{
	struct subsys_private *cp;
	int error;

	pr_debug("device class '%s': registering\n", cls->name);

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;
	klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
	INIT_LIST_HEAD(&cp->interfaces);
	kset_init(&cp->glue_dirs);
	__mutex_init(&cp->mutex, "subsys mutex", key);
	error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
	if (error) {
		kfree(cp);
		return error;
	}

	/* set the default /sys/dev directory for devices of this class */
	if (!cls->dev_kobj)
		cls->dev_kobj = sysfs_dev_char_kobj;

#if defined(CONFIG_BLOCK)
	/* let the block class directory show up in the root of sysfs */
	if (!sysfs_deprecated || cls != &block_class)
		cp->subsys.kobj.kset = class_kset;
#else
	cp->subsys.kobj.kset = class_kset;
#endif
	cp->subsys.kobj.ktype = &class_ktype;
	cp->class = cls;
	cls->p = cp;

	error = kset_register(&cp->subsys);
	if (error) {
		kfree(cp);
		return error;
	}
	error = class_add_groups(class_get(cls), cls->class_groups);
	class_put(cls);
	if (error)
		return error;
	error = add_class_attrs(class_get(cls));
	class_put(cls);
	return error;
}
EXPORT_SYMBOL_GPL(__class_register);

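/**
 * class_unregister - remove a class from the driver core
 * @cls: class to unregister
 *
 * Removes the legacy class attributes and the class groups, then drops the
 * reference taken by __class_register().  The backing subsys_private is
 * freed, and the class's class_release() callback (if any) is invoked, once
 * the last reference to the embedded kset is gone.
 */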
void class_unregister(struct class *cls)
{
	pr_debug("device class '%s': unregistering\n", cls->name);
	remove_class_attrs(cls);
	class_remove_groups(cls, cls->class_groups);
	kset_unregister(&cls->p->subsys);
}

static void class_create_release(struct class *cls)
{
	pr_debug("%s called for %s\n", __func__, cls->name);
	kfree(cls);
}

/**
 * class_create - create a struct class structure
 * @owner: pointer to the module that is to "own" this struct class
 * @name: pointer to a string for the name of this class
 * @key: the lock_class_key for this class; used by mutex lock debugging
 *
 * This is used to create a struct class pointer that can then be used
 * in calls to device_create().
 *
 * Returns &struct class pointer on success, or ERR_PTR() on error.
 *
 * Note: the pointer created here must be destroyed with a call to
 * class_destroy() when it is no longer needed.
 */
struct class *__class_create(struct module *owner, const char *name,
			     struct lock_class_key *key)
{
	struct class *cls;
	int retval;

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		retval = -ENOMEM;
		goto error;
	}

	cls->name = name;
	cls->owner = owner;
	cls->class_release = class_create_release;

	retval = __class_register(cls, key);
	if (retval)
		goto error;

	return cls;

error:
	kfree(cls);
	return ERR_PTR(retval);
}
EXPORT_SYMBOL_GPL(__class_create);

/**
 * class_destroy - destroys a struct class structure
 * @cls: pointer to the struct class that is to be destroyed
 *
 * Note, the pointer to be destroyed must have been created with a call
 * to class_create().
 */
void class_destroy(struct class *cls)
{
	if (IS_ERR_OR_NULL(cls))
		return;

	class_unregister(cls);
}
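/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * the usual life cycle pairs class_create()/class_destroy() around
 * device_create()/device_destroy() calls.
 *
 *	static struct class *foo_class;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_class = class_create(THIS_MODULE, "foo");
 *		if (IS_ERR(foo_class))
 *			return PTR_ERR(foo_class);
 *		// devices are typically created later, e.g.:
 *		// device_create(foo_class, NULL, MKDEV(foo_major, 0), NULL, "foo0");
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// device_destroy(foo_class, MKDEV(foo_major, 0));
 *		class_destroy(foo_class);
 *	}
 */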

/**
 * class_dev_iter_init - initialize class device iterator
 * @iter: class iterator to initialize
 * @class: the class we want to iterate over
 * @start: the device to start iterating from, if any
 * @type: device_type of the devices to iterate over, NULL for all
 *
 * Initialize class iterator @iter such that it iterates over devices
 * of @class.  If @start is set, the list iteration will start there;
 * otherwise the iteration starts at the beginning of the list.
 */
void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
			 struct device *start, const struct device_type *type)
{
	struct klist_node *start_knode = NULL;

	if (start)
		start_knode = &start->knode_class;
	klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
	iter->type = type;
}
EXPORT_SYMBOL_GPL(class_dev_iter_init);

/**
 * class_dev_iter_next - iterate to the next device
 * @iter: class iterator to advance
 *
 * Advance @iter to the next device and return it.  Returns NULL if
 * iteration is complete.
 *
 * The returned device is referenced and won't be released until the
 * iterator is advanced to the next device or exited.  The caller is
 * free to do whatever it wants to do with the device, including
 * calling back into class code.
 */
struct device *class_dev_iter_next(struct class_dev_iter *iter)
{
	struct klist_node *knode;
	struct device *dev;

	while (1) {
		knode = klist_next(&iter->ki);
		if (!knode)
			return NULL;
		dev = container_of(knode, struct device, knode_class);
		if (!iter->type || iter->type == dev->type)
			return dev;
	}
}
EXPORT_SYMBOL_GPL(class_dev_iter_next);

/**
 * class_dev_iter_exit - finish iteration
 * @iter: class iterator to finish
 *
 * Finish an iteration.  Always call this function after iteration is
 * complete, whether or not it ran all the way to the end.
 */
void class_dev_iter_exit(struct class_dev_iter *iter)
{
	klist_iter_exit(&iter->ki);
}
EXPORT_SYMBOL_GPL(class_dev_iter_exit);
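/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * three iterator calls above are always used as a bracketing pair around a
 * class_dev_iter_next() loop.
 *
 *	struct class_dev_iter iter;
 *	struct device *dev;
 *
 *	class_dev_iter_init(&iter, &foo_class, NULL, NULL);
 *	while ((dev = class_dev_iter_next(&iter))) {
 *		// dev stays referenced until the next class_dev_iter_next()
 *		// or class_dev_iter_exit() call; safe to use here.
 *		dev_info(dev, "visited\n");
 *	}
 *	class_dev_iter_exit(&iter);
 */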

/**
 * class_for_each_device - device iterator
 * @class: the class we're iterating
 * @start: the device to start with in the list, if any
 * @data: data for the callback
 * @fn: function to be called for each device
 *
 * Iterate over @class's list of devices, and call @fn for each,
 * passing it @data.  If @start is set, the list iteration will start
 * there; otherwise the iteration starts at the beginning of the list.
 *
 * We check the return of @fn each time.  If it returns anything
 * other than 0, we break out and return that value.
 *
 * @fn is allowed to do anything including calling back into class
 * code.  There's no locking restriction.
 */
int class_for_each_device(struct class *class, struct device *start,
			  void *data, int (*fn)(struct device *, void *))
{
	struct class_dev_iter iter;
	struct device *dev;
	int error = 0;

	if (!class)
		return -EINVAL;
	if (!class->p) {
		WARN(1, "%s called for class '%s' before it was initialized",
		     __func__, class->name);
		return -EINVAL;
	}

	class_dev_iter_init(&iter, class, start, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		error = fn(dev, data);
		if (error)
			break;
	}
	class_dev_iter_exit(&iter);

	return error;
}
EXPORT_SYMBOL_GPL(class_for_each_device);
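/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * callback that counts the devices in a class.  Iteration stops early as
 * soon as the callback returns a non-zero value.
 *
 *	static int foo_count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	unsigned int count = 0;
 *
 *	class_for_each_device(&foo_class, NULL, &count, foo_count_one);
 */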

/**
 * class_find_device - device iterator for locating a particular device
 * @class: the class we're iterating
 * @start: Device to begin with
 * @data: data for the match function
 * @match: function to check device
 *
 * This is similar to the class_for_each_device() function above, but it
 * returns a reference to a device that is 'found' for later use, as
 * determined by the @match callback.
 *
 * The callback should return 0 if the device doesn't match and non-zero
 * if it does.  If the callback returns non-zero, this function will
 * return to the caller and not iterate over any more devices.
 *
 * Note, you will need to drop the reference with put_device() after use.
 *
 * @match is allowed to do anything including calling back into class
 * code.  There's no locking restriction.
 */
struct device *class_find_device(struct class *class, struct device *start,
				 const void *data,
				 int (*match)(struct device *, const void *))
{
	struct class_dev_iter iter;
	struct device *dev;

	if (!class)
		return NULL;
	if (!class->p) {
		WARN(1, "%s called for class '%s' before it was initialized",
		     __func__, class->name);
		return NULL;
	}

	class_dev_iter_init(&iter, class, start, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (match(dev, data)) {
			get_device(dev);
			break;
		}
	}
	class_dev_iter_exit(&iter);

	return dev;
}
EXPORT_SYMBOL_GPL(class_find_device);
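/*
 * Illustrative sketch (hypothetical caller, not part of this file): a match
 * callback that looks a device up by name.  The reference returned by
 * class_find_device() must be dropped with put_device() when done.
 *
 *	static int foo_match_name(struct device *dev, const void *data)
 *	{
 *		return sysfs_streq(dev_name(dev), data);
 *	}
 *
 *	struct device *dev;
 *
 *	dev = class_find_device(&foo_class, NULL, "foo0", foo_match_name);
 *	if (dev) {
 *		// ... use dev ...
 *		put_device(dev);
 *	}
 */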
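/**
 * class_interface_register - register a class interface
 * @class_intf: interface to register; class_intf->class must be set
 *
 * Adds @class_intf to its class's list of interfaces and, if an add_dev()
 * callback is provided, invokes it for every device currently in the class.
 * Returns 0 on success or a negative errno on failure.
 */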
int class_interface_register(struct class_interface *class_intf)
{
	struct class *parent;
	struct class_dev_iter iter;
	struct device *dev;

	if (!class_intf || !class_intf->class)
		return -ENODEV;

	parent = class_get(class_intf->class);
	if (!parent)
		return -EINVAL;

	mutex_lock(&parent->p->mutex);
	list_add_tail(&class_intf->node, &parent->p->interfaces);
	if (class_intf->add_dev) {
		class_dev_iter_init(&iter, parent, NULL, NULL);
		while ((dev = class_dev_iter_next(&iter)))
			class_intf->add_dev(dev, class_intf);
		class_dev_iter_exit(&iter);
	}
	mutex_unlock(&parent->p->mutex);

	return 0;
}

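/**
 * class_interface_unregister - unregister a class interface
 * @class_intf: interface to unregister
 *
 * Removes @class_intf from its class's list of interfaces and, if a
 * remove_dev() callback is provided, invokes it for every device currently
 * in the class, then drops the class reference taken at registration time.
 */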
void class_interface_unregister(struct class_interface *class_intf)
{
	struct class *parent = class_intf->class;
	struct class_dev_iter iter;
	struct device *dev;

	if (!parent)
		return;

	mutex_lock(&parent->p->mutex);
	list_del_init(&class_intf->node);
	if (class_intf->remove_dev) {
		class_dev_iter_init(&iter, parent, NULL, NULL);
		while ((dev = class_dev_iter_next(&iter)))
			class_intf->remove_dev(dev, class_intf);
		class_dev_iter_exit(&iter);
	}
	mutex_unlock(&parent->p->mutex);

	class_put(parent);
}
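/*
 * Illustrative sketch (hypothetical interface, not part of this file): a
 * class interface is a pair of callbacks that fire for every device that
 * is, or later becomes, a member of the class.
 *
 *	static int foo_add_dev(struct device *dev, struct class_interface *intf)
 *	{
 *		dev_info(dev, "joined the foo class\n");
 *		return 0;
 *	}
 *
 *	static struct class_interface foo_interface = {
 *		.class   = &foo_class,
 *		.add_dev = foo_add_dev,
 *	};
 *
 *	// class_interface_register(&foo_interface);
 *	// ...
 *	// class_interface_unregister(&foo_interface);
 */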
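/**
 * show_class_attr_string - show callback for string class attributes
 * @class: class the attribute belongs to
 * @attr: attribute embedded in a struct class_attribute_string
 * @buf: output buffer of at least PAGE_SIZE bytes
 *
 * Copies the fixed string of the containing struct class_attribute_string
 * into @buf.  Used by the CLASS_ATTR_STRING() helper.
 */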
ssize_t show_class_attr_string(struct class *class,
			       struct class_attribute *attr, char *buf)
{
	struct class_attribute_string *cs;

	cs = container_of(attr, struct class_attribute_string, attr);
	return snprintf(buf, PAGE_SIZE, "%s\n", cs->str);
}
EXPORT_SYMBOL_GPL(show_class_attr_string);
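/*
 * Illustrative sketch (hypothetical attribute, not part of this file):
 * show_class_attr_string() is normally reached through the
 * CLASS_ATTR_STRING() helper in <linux/device.h>, which exposes a constant
 * string as a read-only class attribute.
 *
 *	static CLASS_ATTR_STRING(version, 0444, "1.0");
 *
 *	// after registering the hypothetical foo_class:
 *	//	err = class_create_file(&foo_class, &class_attr_version.attr);
 */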

struct class_compat {
	struct kobject *kobj;
};

/**
 * class_compat_register - register a compatibility class
 * @name: the name of the class
 *
 * Compatibility classes are meant as a temporary user-space compatibility
 * workaround when converting a family of class devices to bus devices.
 */
struct class_compat *class_compat_register(const char *name)
{
	struct class_compat *cls;

	cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL);
	if (!cls)
		return NULL;
	cls->kobj = kobject_create_and_add(name, &class_kset->kobj);
	if (!cls->kobj) {
		kfree(cls);
		return NULL;
	}
	return cls;
}
EXPORT_SYMBOL_GPL(class_compat_register);

/**
 * class_compat_unregister - unregister a compatibility class
 * @cls: the class to unregister
 */
void class_compat_unregister(struct class_compat *cls)
{
	kobject_put(cls->kobj);
	kfree(cls);
}
EXPORT_SYMBOL_GPL(class_compat_unregister);

/**
 * class_compat_create_link - create a compatibility class device link to
 *			      a bus device
 * @cls: the compatibility class
 * @dev: the target bus device
 * @device_link: an optional device to which a "device" link should be created
 */
int class_compat_create_link(struct class_compat *cls, struct device *dev,
			     struct device *device_link)
{
	int error;

	error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
	if (error)
		return error;

	/*
	 * Optionally add a "device" link (typically to the parent), as a
	 * class device would have one and we want to provide as much
	 * backwards compatibility as possible.
	 */
	if (device_link) {
		error = sysfs_create_link(&dev->kobj, &device_link->kobj,
					  "device");
		if (error)
			sysfs_remove_link(cls->kobj, dev_name(dev));
	}

	return error;
}
EXPORT_SYMBOL_GPL(class_compat_create_link);

/**
 * class_compat_remove_link - remove a compatibility class device link to
 *			      a bus device
 * @cls: the compatibility class
 * @dev: the target bus device
 * @device_link: an optional device to which a "device" link was previously
 *		 created
 */
void class_compat_remove_link(struct class_compat *cls, struct device *dev,
			      struct device *device_link)
{
	if (device_link)
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
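/*
 * Illustrative sketch (hypothetical conversion, not part of this file): a
 * subsystem that moved its devices from a class to a bus can keep the old
 * /sys/class/<name> entries alive with a compatibility class.
 *
 *	static struct class_compat *foo_compat_class;
 *
 *	foo_compat_class = class_compat_register("foo");
 *	// for each device registered on the new bus:
 *	//	class_compat_create_link(foo_compat_class, dev, dev->parent);
 *	// and on device removal:
 *	//	class_compat_remove_link(foo_compat_class, dev, dev->parent);
 *	// finally:
 *	//	class_compat_unregister(foo_compat_class);
 */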

int __init classes_init(void)
{
	class_kset = kset_create_and_add("class", NULL, NULL);
	if (!class_kset)
		return -ENOMEM;
	return 0;
}

EXPORT_SYMBOL_GPL(class_create_file_ns);
EXPORT_SYMBOL_GPL(class_remove_file_ns);
EXPORT_SYMBOL_GPL(class_unregister);
EXPORT_SYMBOL_GPL(class_destroy);

EXPORT_SYMBOL_GPL(class_interface_register);
EXPORT_SYMBOL_GPL(class_interface_unregister);