/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>

#include "internal.h"

/*
 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
 * devices
 * - permits shared-mmap for read, write and/or exec
 * - does not permit private mmap in NOMMU mode (can't do COW)
 * - no readahead or I/O queue unplugging required
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
	.name = "char",
	.capabilities	= (
#ifdef CONFIG_MMU
		/* permit private copies of the data to be taken */
		BDI_CAP_MAP_COPY |
#endif
		/* permit direct mmap, for read, write or exec */
		BDI_CAP_MAP_DIRECT |
		BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
};

/* dev_t -> cdev mapping, populated by cdev_add() and probed in chrdev_open() */
static struct kobj_map *cdev_map;

/* protects the chrdevs[] hash table below */
static DEFINE_MUTEX(chrdevs_lock);

/*
 * One registered major/minor range.  Entries hang off chrdevs[], a hash
 * table indexed by major_to_index(major); each chain is kept sorted by
 * (major, baseminor).
 */
static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS

/*
 * Emit "major name" lines for every registration hashed to slot @offset;
 * used by the /proc/devices seq_file iterator (one hash slot per call).
 */
void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
		mutex_lock(&chrdevs_lock);
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
		mutex_unlock(&chrdevs_lock);
	}
}

#endif /* CONFIG_PROC_FS */

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			   int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	/* allocate before taking the lock so we never sleep while holding it */
	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	/* temporary */
	if (major == 0) {
		/*
		 * Dynamic majors: scan the hash table top-down for an empty
		 * slot and hand out its index as the major.  This only works
		 * because majors < CHRDEV_MAJOR_HASH_SIZE hash to themselves;
		 * slot 0 is never handed out (major 0 means "dynamic").
		 */
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}

		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	i = major_to_index(major);

	/*
	 * Find the insertion point keeping the chain sorted by
	 * (major, baseminor); stop at the first entry for this major whose
	 * range starts at/after baseminor or extends past it.
	 */
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major &&
		     (((*cp)->baseminor >= baseminor) ||
		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
			break;

	/*
	 * Check for overlapping minor ranges.  Note this only compares
	 * against the single entry found above, not every range on the
	 * chain for this major.
	 */
	if (*cp && (*cp)->major == major) {
		int old_min = (*cp)->baseminor;
		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
		int new_min = baseminor;
		int new_max = baseminor + minorct - 1;

		/* New driver overlaps from the left.  */
		if (new_max >= old_min && new_max <= old_max) {
			ret = -EBUSY;
			goto out;
		}

		/* New driver overlaps from the right.  */
		if (new_min <= old_max && new_min >= old_min) {
			ret = -EBUSY;
			goto out;
		}
	}

	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}

/*
 * Unlink and return the registration that exactly matches
 * (major, baseminor, minorct), or NULL if none does.  The caller owns the
 * returned entry and is responsible for freeing it.
 */
static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}

/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	/*
	 * The range may span several majors; register it one major at a
	 * time, clamping the last chunk to @to.
	 */
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	/* unwind every chunk registered before the failure */
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}

/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for first assigned number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}

/**
 * __register_chrdev() - create and register a cdev occupying a range of minors
 * @major: major device number or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations associated with this devices
 *
 * If @major == 0 this functions will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module name has only one type of devices it's ok to use e.g. the name
 * of the module here.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
		      unsigned int count, const char *name,
		      const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
	if (err)
		goto out;

	/* stash the cdev so __unregister_chrdev() can find and delete it */
	cd->cdev = cdev;

	/* for dynamic allocation the caller needs the chosen major back */
	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
	return err;
}

/**
 * unregister_chrdev_region() - return a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	/* mirror of register_chrdev_region(): walk the range major by major */
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}

/**
 * __unregister_chrdev - unregister and destroy a cdev
 * @major: major device number
 * @baseminor: first of the range of minor numbers
 * @count: the number of minor numbers this cdev is occupying
 * @name: name of this range of devices
 *
 * Unregister and destroy the cdev occupying the region described by
 * @major, @baseminor and @count.  This function undoes what
 * __register_chrdev() did.
 */
void __unregister_chrdev(unsigned int major, unsigned int baseminor,
			 unsigned int count, const char *name)
{
	struct char_device_struct *cd;

	cd = __unregister_chrdev_region(major, baseminor, count);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
}

/* protects inode->i_cdev and the cdev->list of inodes referencing it */
static DEFINE_SPINLOCK(cdev_lock);

/*
 * Take a reference on @p's kobject and pin its owning module; returns the
 * kobject on success, NULL if the module is going away.  Undone by
 * cdev_put().
 */
static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

/* Drop the kobject reference and module pin taken by cdev_get(). */
void cdev_put(struct cdev *p)
{
	if (p) {
		/* read owner before kobject_put() may free p */
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}

/*
 * Called every time a character special file is opened
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		/* kobj_lookup() may sleep/probe, so drop the spinlock first */
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Check i_cdev again in case somebody beat us to it while
		   we dropped the lock. */
		p = inode->i_cdev;
		if (!p) {
			/* cache the cdev on the inode for later opens */
			inode->i_cdev = p = new;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	/* drop the lookup reference if we lost the race above */
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op)
		goto out_cdev_put;

	/* hand off to the driver's own open(), if it has one */
	if (filp->f_op->open) {
		ret = filp->f_op->open(inode,filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

 out_cdev_put:
	cdev_put(p);
	return ret;
}

/*
 * Return the probe index recorded in the cdev map for this inode's device
 * number, or -1 if no cdev is registered there.
 */
int cdev_index(struct inode *inode)
{
	int idx;
	struct kobject *kobj;

	kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
	if (!kobj)
		return -1;
	kobject_put(kobj);
	return idx;
}

/* Detach @inode from the cdev it is cached on (inverse of chrdev_open()'s
 * caching); called when the inode is being evicted. */
void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}

/* Detach every inode still referencing @cdev; run when the cdev's kobject
 * is released so no stale i_cdev pointers survive it. */
static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};

/* kobj_map probe callback: the registered data *is* the cdev */
static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

/* kobj_map lock callback: pin the cdev (and its module) during lookup */
static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}

/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}

/* Remove the [dev, dev+count) range from the cdev map. */
static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	/* final put triggers the ktype release (purge, and kfree if dynamic) */
	kobject_put(&p->kobj);
}


/* release for caller-owned cdevs (cdev_init): purge inodes, don't free */
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}

/* release for cdev_alloc()'d cdevs: purge inodes and free the structure */
static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};

/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		INIT_LIST_HEAD(&p->list);
		/* dynamic ktype: the release callback kfree()s this cdev */
		kobject_init(&p->kobj, &ktype_cdev_dynamic);
	}
	return p;
}

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	/* default ktype: caller owns the storage, release only purges */
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}

/*
 * Fallback probe for device numbers with no registered cdev: ask udev/
 * modprobe to load a driver by alias, then retry happens via kobj_lookup.
 */
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
	bdi_init(&directly_mappable_cdev_bdi);
}


/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(cdev_index);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);