1 /* 2 * linux/fs/char_dev.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 */ 6 7 #include <linux/init.h> 8 #include <linux/fs.h> 9 #include <linux/kdev_t.h> 10 #include <linux/slab.h> 11 #include <linux/string.h> 12 13 #include <linux/major.h> 14 #include <linux/errno.h> 15 #include <linux/module.h> 16 #include <linux/seq_file.h> 17 18 #include <linux/kobject.h> 19 #include <linux/kobj_map.h> 20 #include <linux/cdev.h> 21 #include <linux/mutex.h> 22 #include <linux/backing-dev.h> 23 24 #include "internal.h" 25 26 /* 27 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character 28 * devices 29 * - permits shared-mmap for read, write and/or exec 30 * - does not permit private mmap in NOMMU mode (can't do COW) 31 * - no readahead or I/O queue unplugging required 32 */ 33 struct backing_dev_info directly_mappable_cdev_bdi = { 34 .name = "char", 35 .capabilities = ( 36 #ifdef CONFIG_MMU 37 /* permit private copies of the data to be taken */ 38 BDI_CAP_MAP_COPY | 39 #endif 40 /* permit direct mmap, for read, write or exec */ 41 BDI_CAP_MAP_DIRECT | 42 BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP), 43 }; 44 45 static struct kobj_map *cdev_map; 46 47 static DEFINE_MUTEX(chrdevs_lock); 48 49 static struct char_device_struct { 50 struct char_device_struct *next; 51 unsigned int major; 52 unsigned int baseminor; 53 int minorct; 54 char name[64]; 55 struct cdev *cdev; /* will die */ 56 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; 57 58 /* index in the above */ 59 static inline int major_to_index(int major) 60 { 61 return major % CHRDEV_MAJOR_HASH_SIZE; 62 } 63 64 #ifdef CONFIG_PROC_FS 65 66 void chrdev_show(struct seq_file *f, off_t offset) 67 { 68 struct char_device_struct *cd; 69 70 if (offset < CHRDEV_MAJOR_HASH_SIZE) { 71 mutex_lock(&chrdevs_lock); 72 for (cd = chrdevs[offset]; cd; cd = cd->next) 73 seq_printf(f, "%3d %s\n", cd->major, cd->name); 74 mutex_unlock(&chrdevs_lock); 75 } 76 } 77 78 #endif /* CONFIG_PROC_FS */ 79 80 /* 81 * Register a single major with a specified minor range. 82 * 83 * If major == 0 this functions will dynamically allocate a major and return 84 * its number. 85 * 86 * If major > 0 this function will attempt to reserve the passed range of 87 * minors and will return zero on success. 88 * 89 * Returns a -ve errno on failure. 90 */ 91 static struct char_device_struct * 92 __register_chrdev_region(unsigned int major, unsigned int baseminor, 93 int minorct, const char *name) 94 { 95 struct char_device_struct *cd, **cp; 96 int ret = 0; 97 int i; 98 99 cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL); 100 if (cd == NULL) 101 return ERR_PTR(-ENOMEM); 102 103 mutex_lock(&chrdevs_lock); 104 105 /* temporary */ 106 if (major == 0) { 107 for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) { 108 if (chrdevs[i] == NULL) 109 break; 110 } 111 112 if (i == 0) { 113 ret = -EBUSY; 114 goto out; 115 } 116 major = i; 117 ret = major; 118 } 119 120 cd->major = major; 121 cd->baseminor = baseminor; 122 cd->minorct = minorct; 123 strlcpy(cd->name, name, sizeof(cd->name)); 124 125 i = major_to_index(major); 126 127 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) 128 if ((*cp)->major > major || 129 ((*cp)->major == major && 130 (((*cp)->baseminor >= baseminor) || 131 ((*cp)->baseminor + (*cp)->minorct > baseminor)))) 132 break; 133 134 /* Check for overlapping minor ranges. */ 135 if (*cp && (*cp)->major == major) { 136 int old_min = (*cp)->baseminor; 137 int old_max = (*cp)->baseminor + (*cp)->minorct - 1; 138 int new_min = baseminor; 139 int new_max = baseminor + minorct - 1; 140 141 /* New driver overlaps from the left. */ 142 if (new_max >= old_min && new_max <= old_max) { 143 ret = -EBUSY; 144 goto out; 145 } 146 147 /* New driver overlaps from the right. */ 148 if (new_min <= old_max && new_min >= old_min) { 149 ret = -EBUSY; 150 goto out; 151 } 152 } 153 154 cd->next = *cp; 155 *cp = cd; 156 mutex_unlock(&chrdevs_lock); 157 return cd; 158 out: 159 mutex_unlock(&chrdevs_lock); 160 kfree(cd); 161 return ERR_PTR(ret); 162 } 163 164 static struct char_device_struct * 165 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct) 166 { 167 struct char_device_struct *cd = NULL, **cp; 168 int i = major_to_index(major); 169 170 mutex_lock(&chrdevs_lock); 171 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) 172 if ((*cp)->major == major && 173 (*cp)->baseminor == baseminor && 174 (*cp)->minorct == minorct) 175 break; 176 if (*cp) { 177 cd = *cp; 178 *cp = cd->next; 179 } 180 mutex_unlock(&chrdevs_lock); 181 return cd; 182 } 183 184 /** 185 * register_chrdev_region() - register a range of device numbers 186 * @from: the first in the desired range of device numbers; must include 187 * the major number. 188 * @count: the number of consecutive device numbers required 189 * @name: the name of the device or driver. 190 * 191 * Return value is zero on success, a negative error code on failure. 192 */ 193 int register_chrdev_region(dev_t from, unsigned count, const char *name) 194 { 195 struct char_device_struct *cd; 196 dev_t to = from + count; 197 dev_t n, next; 198 199 for (n = from; n < to; n = next) { 200 next = MKDEV(MAJOR(n)+1, 0); 201 if (next > to) 202 next = to; 203 cd = __register_chrdev_region(MAJOR(n), MINOR(n), 204 next - n, name); 205 if (IS_ERR(cd)) 206 goto fail; 207 } 208 return 0; 209 fail: 210 to = n; 211 for (n = from; n < to; n = next) { 212 next = MKDEV(MAJOR(n)+1, 0); 213 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); 214 } 215 return PTR_ERR(cd); 216 } 217 218 /** 219 * alloc_chrdev_region() - register a range of char device numbers 220 * @dev: output parameter for first assigned number 221 * @baseminor: first of the requested range of minor numbers 222 * @count: the number of minor numbers required 223 * @name: the name of the associated device or driver 224 * 225 * Allocates a range of char device numbers. The major number will be 226 * chosen dynamically, and returned (along with the first minor number) 227 * in @dev. Returns zero or a negative error code. 228 */ 229 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count, 230 const char *name) 231 { 232 struct char_device_struct *cd; 233 cd = __register_chrdev_region(0, baseminor, count, name); 234 if (IS_ERR(cd)) 235 return PTR_ERR(cd); 236 *dev = MKDEV(cd->major, cd->baseminor); 237 return 0; 238 } 239 240 /** 241 * __register_chrdev() - create and register a cdev occupying a range of minors 242 * @major: major device number or 0 for dynamic allocation 243 * @baseminor: first of the requested range of minor numbers 244 * @count: the number of minor numbers required 245 * @name: name of this range of devices 246 * @fops: file operations associated with this devices 247 * 248 * If @major == 0 this functions will dynamically allocate a major and return 249 * its number. 250 * 251 * If @major > 0 this function will attempt to reserve a device with the given 252 * major number and will return zero on success. 253 * 254 * Returns a -ve errno on failure. 255 * 256 * The name of this device has nothing to do with the name of the device in 257 * /dev. It only helps to keep track of the different owners of devices. If 258 * your module name has only one type of devices it's ok to use e.g. the name 259 * of the module here. 260 */ 261 int __register_chrdev(unsigned int major, unsigned int baseminor, 262 unsigned int count, const char *name, 263 const struct file_operations *fops) 264 { 265 struct char_device_struct *cd; 266 struct cdev *cdev; 267 char *s; 268 int err = -ENOMEM; 269 270 cd = __register_chrdev_region(major, baseminor, count, name); 271 if (IS_ERR(cd)) 272 return PTR_ERR(cd); 273 274 cdev = cdev_alloc(); 275 if (!cdev) 276 goto out2; 277 278 cdev->owner = fops->owner; 279 cdev->ops = fops; 280 kobject_set_name(&cdev->kobj, "%s", name); 281 for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/')) 282 *s = '!'; 283 284 err = cdev_add(cdev, MKDEV(cd->major, baseminor), count); 285 if (err) 286 goto out; 287 288 cd->cdev = cdev; 289 290 return major ? 0 : cd->major; 291 out: 292 kobject_put(&cdev->kobj); 293 out2: 294 kfree(__unregister_chrdev_region(cd->major, baseminor, count)); 295 return err; 296 } 297 298 /** 299 * unregister_chrdev_region() - return a range of device numbers 300 * @from: the first in the range of numbers to unregister 301 * @count: the number of device numbers to unregister 302 * 303 * This function will unregister a range of @count device numbers, 304 * starting with @from. The caller should normally be the one who 305 * allocated those numbers in the first place... 306 */ 307 void unregister_chrdev_region(dev_t from, unsigned count) 308 { 309 dev_t to = from + count; 310 dev_t n, next; 311 312 for (n = from; n < to; n = next) { 313 next = MKDEV(MAJOR(n)+1, 0); 314 if (next > to) 315 next = to; 316 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); 317 } 318 } 319 320 /** 321 * __unregister_chrdev - unregister and destroy a cdev 322 * @major: major device number 323 * @baseminor: first of the range of minor numbers 324 * @count: the number of minor numbers this cdev is occupying 325 * @name: name of this range of devices 326 * 327 * Unregister and destroy the cdev occupying the region described by 328 * @major, @baseminor and @count. This function undoes what 329 * __register_chrdev() did. 330 */ 331 void __unregister_chrdev(unsigned int major, unsigned int baseminor, 332 unsigned int count, const char *name) 333 { 334 struct char_device_struct *cd; 335 336 cd = __unregister_chrdev_region(major, baseminor, count); 337 if (cd && cd->cdev) 338 cdev_del(cd->cdev); 339 kfree(cd); 340 } 341 342 static DEFINE_SPINLOCK(cdev_lock); 343 344 static struct kobject *cdev_get(struct cdev *p) 345 { 346 struct module *owner = p->owner; 347 struct kobject *kobj; 348 349 if (owner && !try_module_get(owner)) 350 return NULL; 351 kobj = kobject_get(&p->kobj); 352 if (!kobj) 353 module_put(owner); 354 return kobj; 355 } 356 357 void cdev_put(struct cdev *p) 358 { 359 if (p) { 360 struct module *owner = p->owner; 361 kobject_put(&p->kobj); 362 module_put(owner); 363 } 364 } 365 366 /* 367 * Called every time a character special file is opened 368 */ 369 static int chrdev_open(struct inode *inode, struct file *filp) 370 { 371 struct cdev *p; 372 struct cdev *new = NULL; 373 int ret = 0; 374 375 spin_lock(&cdev_lock); 376 p = inode->i_cdev; 377 if (!p) { 378 struct kobject *kobj; 379 int idx; 380 spin_unlock(&cdev_lock); 381 kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); 382 if (!kobj) 383 return -ENXIO; 384 new = container_of(kobj, struct cdev, kobj); 385 spin_lock(&cdev_lock); 386 /* Check i_cdev again in case somebody beat us to it while 387 we dropped the lock. */ 388 p = inode->i_cdev; 389 if (!p) { 390 inode->i_cdev = p = new; 391 list_add(&inode->i_devices, &p->list); 392 new = NULL; 393 } else if (!cdev_get(p)) 394 ret = -ENXIO; 395 } else if (!cdev_get(p)) 396 ret = -ENXIO; 397 spin_unlock(&cdev_lock); 398 cdev_put(new); 399 if (ret) 400 return ret; 401 402 ret = -ENXIO; 403 filp->f_op = fops_get(p->ops); 404 if (!filp->f_op) 405 goto out_cdev_put; 406 407 if (filp->f_op->open) { 408 ret = filp->f_op->open(inode,filp); 409 if (ret) 410 goto out_cdev_put; 411 } 412 413 return 0; 414 415 out_cdev_put: 416 cdev_put(p); 417 return ret; 418 } 419 420 int cdev_index(struct inode *inode) 421 { 422 int idx; 423 struct kobject *kobj; 424 425 kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); 426 if (!kobj) 427 return -1; 428 kobject_put(kobj); 429 return idx; 430 } 431 432 void cd_forget(struct inode *inode) 433 { 434 spin_lock(&cdev_lock); 435 list_del_init(&inode->i_devices); 436 inode->i_cdev = NULL; 437 spin_unlock(&cdev_lock); 438 } 439 440 static void cdev_purge(struct cdev *cdev) 441 { 442 spin_lock(&cdev_lock); 443 while (!list_empty(&cdev->list)) { 444 struct inode *inode; 445 inode = container_of(cdev->list.next, struct inode, i_devices); 446 list_del_init(&inode->i_devices); 447 inode->i_cdev = NULL; 448 } 449 spin_unlock(&cdev_lock); 450 } 451 452 /* 453 * Dummy default file-operations: the only thing this does 454 * is contain the open that then fills in the correct operations 455 * depending on the special file... 456 */ 457 const struct file_operations def_chr_fops = { 458 .open = chrdev_open, 459 }; 460 461 static struct kobject *exact_match(dev_t dev, int *part, void *data) 462 { 463 struct cdev *p = data; 464 return &p->kobj; 465 } 466 467 static int exact_lock(dev_t dev, void *data) 468 { 469 struct cdev *p = data; 470 return cdev_get(p) ? 0 : -1; 471 } 472 473 /** 474 * cdev_add() - add a char device to the system 475 * @p: the cdev structure for the device 476 * @dev: the first device number for which this device is responsible 477 * @count: the number of consecutive minor numbers corresponding to this 478 * device 479 * 480 * cdev_add() adds the device represented by @p to the system, making it 481 * live immediately. A negative error code is returned on failure. 482 */ 483 int cdev_add(struct cdev *p, dev_t dev, unsigned count) 484 { 485 p->dev = dev; 486 p->count = count; 487 return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p); 488 } 489 490 static void cdev_unmap(dev_t dev, unsigned count) 491 { 492 kobj_unmap(cdev_map, dev, count); 493 } 494 495 /** 496 * cdev_del() - remove a cdev from the system 497 * @p: the cdev structure to be removed 498 * 499 * cdev_del() removes @p from the system, possibly freeing the structure 500 * itself. 501 */ 502 void cdev_del(struct cdev *p) 503 { 504 cdev_unmap(p->dev, p->count); 505 kobject_put(&p->kobj); 506 } 507 508 509 static void cdev_default_release(struct kobject *kobj) 510 { 511 struct cdev *p = container_of(kobj, struct cdev, kobj); 512 cdev_purge(p); 513 } 514 515 static void cdev_dynamic_release(struct kobject *kobj) 516 { 517 struct cdev *p = container_of(kobj, struct cdev, kobj); 518 cdev_purge(p); 519 kfree(p); 520 } 521 522 static struct kobj_type ktype_cdev_default = { 523 .release = cdev_default_release, 524 }; 525 526 static struct kobj_type ktype_cdev_dynamic = { 527 .release = cdev_dynamic_release, 528 }; 529 530 /** 531 * cdev_alloc() - allocate a cdev structure 532 * 533 * Allocates and returns a cdev structure, or NULL on failure. 534 */ 535 struct cdev *cdev_alloc(void) 536 { 537 struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL); 538 if (p) { 539 INIT_LIST_HEAD(&p->list); 540 kobject_init(&p->kobj, &ktype_cdev_dynamic); 541 } 542 return p; 543 } 544 545 /** 546 * cdev_init() - initialize a cdev structure 547 * @cdev: the structure to initialize 548 * @fops: the file_operations for this device 549 * 550 * Initializes @cdev, remembering @fops, making it ready to add to the 551 * system with cdev_add(). 552 */ 553 void cdev_init(struct cdev *cdev, const struct file_operations *fops) 554 { 555 memset(cdev, 0, sizeof *cdev); 556 INIT_LIST_HEAD(&cdev->list); 557 kobject_init(&cdev->kobj, &ktype_cdev_default); 558 cdev->ops = fops; 559 } 560 561 static struct kobject *base_probe(dev_t dev, int *part, void *data) 562 { 563 if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0) 564 /* Make old-style 2.4 aliases work */ 565 request_module("char-major-%d", MAJOR(dev)); 566 return NULL; 567 } 568 569 void __init chrdev_init(void) 570 { 571 cdev_map = kobj_map_init(base_probe, &chrdevs_lock); 572 bdi_init(&directly_mappable_cdev_bdi); 573 } 574 575 576 /* Let modules do char dev stuff */ 577 EXPORT_SYMBOL(register_chrdev_region); 578 EXPORT_SYMBOL(unregister_chrdev_region); 579 EXPORT_SYMBOL(alloc_chrdev_region); 580 EXPORT_SYMBOL(cdev_init); 581 EXPORT_SYMBOL(cdev_alloc); 582 EXPORT_SYMBOL(cdev_del); 583 EXPORT_SYMBOL(cdev_add); 584 EXPORT_SYMBOL(cdev_index); 585 EXPORT_SYMBOL(__register_chrdev); 586 EXPORT_SYMBOL(__unregister_chrdev); 587 EXPORT_SYMBOL(directly_mappable_cdev_bdi); 588