/*
 * spidev.c -- simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <asm/uaccess.h>


/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
#define SPIDEV_MAJOR			153	/* assigned */
#define N_SPI_MINORS			32	/* ... up to 256 */
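/*
 * Illustrative userspace sketch (not part of this driver): half-duplex
 * access through one of the /dev/spidevB.C nodes described above, using
 * nothing but open()/write()/read().  The node name "/dev/spidev1.0" and
 * the command byte are assumptions made only for this example; real node
 * names depend on the bus number and chip select that udev/mdev saw.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int spidev_halfduplex_example(void)
 *	{
 *		uint8_t cmd = 0xd7;		// hypothetical status command
 *		uint8_t status;
 *		int fd = open("/dev/spidev1.0", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		// Each write() or read() becomes one SPI message with the
 *		// current device setup; chipselect toggles only at the start
 *		// and end of that call.
 *		if (write(fd, &cmd, 1) != 1 || read(fd, &status, 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return status;
 *	}
 */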

static unsigned long	minors[N_SPI_MINORS / BITS_PER_LONG];


/* Bit masks for spi_device.mode management */
#define SPI_MODE_MASK		(SPI_CPHA | SPI_CPOL)


struct spidev_data {
	struct device		dev;
	struct spi_device	*spi;
	struct list_head	device_entry;

	struct mutex		buf_lock;
	unsigned		users;
	u8			*buffer;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct spidev_data	*spidev;
	struct spi_device	*spi;
	ssize_t			status = 0;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	spidev = filp->private_data;
	spi = spidev->spi;

	mutex_lock(&spidev->buf_lock);
	status = spi_read(spi, spidev->buffer, count);
	if (status == 0) {
		unsigned long	missing;

		missing = copy_to_user(buf, spidev->buffer, count);
		if (count && missing == count)
			status = -EFAULT;
		else
			status = count - missing;
	}
	mutex_unlock(&spidev->buf_lock);

	return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct spidev_data	*spidev;
	struct spi_device	*spi;
	ssize_t			status = 0;
	unsigned long		missing;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	spidev = filp->private_data;
	spi = spidev->spi;

	mutex_lock(&spidev->buf_lock);
	missing = copy_from_user(spidev->buffer, buf, count);
	if (missing == 0) {
		status = spi_write(spi, spidev->buffer, count);
		if (status == 0)
			status = count;
	} else
		status = -EFAULT;
	mutex_unlock(&spidev->buf_lock);

	return status;
}

static int spidev_message(struct spidev_data *spidev,
		struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
	struct spi_message	msg;
	struct spi_transfer	*k_xfers;
	struct spi_transfer	*k_tmp;
	struct spi_ioc_transfer	*u_tmp;
	struct spi_device	*spi = spidev->spi;
	unsigned		n, total;
	u8			*buf;
	int			status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
	if (k_xfers == NULL)
		return -ENOMEM;

	/* Construct spi_message, copying any tx data to bounce buffer.
	 * We walk the array of user-provided transfers, using each one
	 * to initialize a kernel version of the same transfer.
	 */
	mutex_lock(&spidev->buf_lock);
	buf = spidev->buffer;
	total = 0;
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		k_tmp->len = u_tmp->len;

		total += k_tmp->len;
		if (total > bufsiz) {
			status = -EMSGSIZE;
			goto done;
		}

		if (u_tmp->rx_buf) {
			k_tmp->rx_buf = buf;
			if (!access_ok(VERIFY_WRITE, u_tmp->rx_buf, u_tmp->len))
				goto done;
		}
		if (u_tmp->tx_buf) {
			k_tmp->tx_buf = buf;
			if (copy_from_user(buf, (const u8 __user *)
						(ptrdiff_t) u_tmp->tx_buf,
					u_tmp->len))
				goto done;
		}
		buf += k_tmp->len;

		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay_usecs = u_tmp->delay_usecs;
		k_tmp->speed_hz = u_tmp->speed_hz;
#ifdef VERBOSE
		dev_dbg(&spi->dev,
			"  xfer len %zd %s%s%s%dbits %u usec %uHz\n",
			u_tmp->len,
			u_tmp->rx_buf ? "rx " : "",
			u_tmp->tx_buf ? "tx " : "",
			u_tmp->cs_change ? "cs " : "",
			u_tmp->bits_per_word ? : spi->bits_per_word,
			u_tmp->delay_usecs,
			u_tmp->speed_hz ? : spi->max_speed_hz);
#endif
		spi_message_add_tail(k_tmp, &msg);
	}

	status = spi_sync(spi, &msg);
	if (status < 0)
		goto done;

	/* copy any rx data out of bounce buffer */
	buf = spidev->buffer;
	for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (__copy_to_user((u8 __user *)
					(ptrdiff_t) u_tmp->rx_buf, buf,
					u_tmp->len)) {
				status = -EFAULT;
				goto done;
			}
		}
		buf += u_tmp->len;
	}
	status = total;

done:
	mutex_unlock(&spidev->buf_lock);
	kfree(k_xfers);
	return status;
}

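/*
 * Illustrative userspace sketch (not part of this driver): one full-duplex
 * transfer submitted through SPI_IOC_MESSAGE(), which is the request that
 * spidev_message() above services.  The device node name and the byte
 * values are assumptions made only for this example.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/spi/spidev.h>
 *
 *	int spidev_fullduplex_example(void)
 *	{
 *		uint8_t			tx[4] = { 0x9f, 0, 0, 0 };
 *		uint8_t			rx[4] = { 0 };
 *		struct spi_ioc_transfer	xfer;
 *		int			fd, status;
 *
 *		fd = open("/dev/spidev1.0", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&xfer, 0, sizeof(xfer));
 *		xfer.tx_buf = (unsigned long) tx;	// clocked out ...
 *		xfer.rx_buf = (unsigned long) rx;	// ... while rx fills
 *		xfer.len = sizeof(tx);
 *
 *		// On success the ioctl returns the total number of bytes
 *		// transferred (the sum of the per-transfer lengths).
 *		status = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
 *		close(fd);
 *		return (status < 0) ? -1 : rx[1];
 *	}
 */
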
static int
spidev_ioctl(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int			err = 0;
	int			retval = 0;
	struct spidev_data	*spidev;
	struct spi_device	*spi;
	u32			tmp;
	unsigned		n_ioc;
	struct spi_ioc_transfer	*ioc;

	/* Check type and command number */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
		return -ENOTTY;

	/* Check access direction once here; don't repeat below.
	 * IOC_DIR is from the user perspective, while access_ok is
	 * from the kernel perspective; so they look reversed.
	 */
	if (_IOC_DIR(cmd) & _IOC_READ)
		err = !access_ok(VERIFY_WRITE,
				(void __user *)arg, _IOC_SIZE(cmd));
	if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
		err = !access_ok(VERIFY_READ,
				(void __user *)arg, _IOC_SIZE(cmd));
	if (err)
		return -EFAULT;

	spidev = filp->private_data;
	spi = spidev->spi;

	switch (cmd) {
	/* read requests */
	case SPI_IOC_RD_MODE:
		retval = __put_user(spi->mode & SPI_MODE_MASK,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_LSB_FIRST:
		retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_BITS_PER_WORD:
		retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MAX_SPEED_HZ:
		retval = __put_user(spi->max_speed_hz, (__u32 __user *)arg);
		break;

	/* write requests */
	case SPI_IOC_WR_MODE:
		retval = __get_user(tmp, (u8 __user *)arg);
		if (retval == 0) {
			u8	save = spi->mode;

			if (tmp & ~SPI_MODE_MASK) {
				retval = -EINVAL;
				break;
			}

			tmp |= spi->mode & ~SPI_MODE_MASK;
			spi->mode = (u8)tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;
			else
				dev_dbg(&spi->dev, "spi mode %02x\n", tmp);
		}
		break;
	case SPI_IOC_WR_LSB_FIRST:
		retval = __get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u8	save = spi->mode;

			if (tmp)
				spi->mode |= SPI_LSB_FIRST;
			else
				spi->mode &= ~SPI_LSB_FIRST;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;
			else
				dev_dbg(&spi->dev, "%csb first\n",
						tmp ? 'l' : 'm');
		}
		break;
	case SPI_IOC_WR_BITS_PER_WORD:
		retval = __get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u8	save = spi->bits_per_word;

			spi->bits_per_word = tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->bits_per_word = save;
			else
				dev_dbg(&spi->dev, "%d bits per word\n", tmp);
		}
		break;
	case SPI_IOC_WR_MAX_SPEED_HZ:
		retval = __get_user(tmp, (__u32 __user *)arg);
		if (retval == 0) {
			u32	save = spi->max_speed_hz;

			spi->max_speed_hz = tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->max_speed_hz = save;
			else
				dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
		}
		break;

	default:
		/* segmented and/or full-duplex I/O request */
		if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
				|| _IOC_DIR(cmd) != _IOC_WRITE)
			return -ENOTTY;

		tmp = _IOC_SIZE(cmd);
		if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
			retval = -EINVAL;
			break;
		}
		n_ioc = tmp / sizeof(struct spi_ioc_transfer);
		if (n_ioc == 0)
			break;

		/* copy into scratch area */
		ioc = kmalloc(tmp, GFP_KERNEL);
		if (!ioc) {
			retval = -ENOMEM;
			break;
		}
		if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
			kfree(ioc);
			retval = -EFAULT;
			break;
		}

		/* translate to spi_message, execute */
		retval = spidev_message(spidev, ioc, n_ioc);
		kfree(ioc);
		break;
	}
	return retval;
}

static int spidev_open(struct inode *inode, struct file *filp)
{
	struct spidev_data	*spidev;
	int			status = -ENXIO;

	mutex_lock(&device_list_lock);

	list_for_each_entry(spidev, &device_list, device_entry) {
		if (spidev->dev.devt == inode->i_rdev) {
			status = 0;
			break;
		}
	}
	if (status == 0) {
		if (!spidev->buffer) {
			spidev->buffer = kmalloc(bufsiz, GFP_KERNEL);
			if (!spidev->buffer) {
				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
				status = -ENOMEM;
			}
		}
		if (status == 0) {
			spidev->users++;
			filp->private_data = spidev;
			nonseekable_open(inode, filp);
		}
	} else
		pr_debug("spidev: nothing for minor %d\n", iminor(inode));

	mutex_unlock(&device_list_lock);
	return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
	struct spidev_data	*spidev;
	int			status = 0;

	mutex_lock(&device_list_lock);
	spidev = filp->private_data;
	filp->private_data = NULL;
	spidev->users--;
	if (!spidev->users) {
		kfree(spidev->buffer);
		spidev->buffer = NULL;
	}
	mutex_unlock(&device_list_lock);

	return status;
}

static struct file_operations spidev_fops = {
	.owner =	THIS_MODULE,
	/* REVISIT switch to aio primitives, so that userspace
	 * gets more complete API coverage.  It'll simplify things
	 * too, except for the locking.
	 */
	.write =	spidev_write,
	.read =		spidev_read,
	.ioctl =	spidev_ioctl,
	.open =		spidev_open,
	.release =	spidev_release,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static void spidev_classdev_release(struct device *dev)
{
	struct spidev_data	*spidev;

	spidev = container_of(dev, struct spidev_data, dev);
	kfree(spidev);
}

static struct class spidev_class = {
	.name =		"spidev",
	.owner =	THIS_MODULE,
	.dev_release =	spidev_classdev_release,
};

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
	struct spidev_data	*spidev;
	int			status;
	unsigned long		minor;

	/* Allocate driver data */
	spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
	if (!spidev)
		return -ENOMEM;

	/* Initialize the driver data */
	spidev->spi = spi;
	mutex_init(&spidev->buf_lock);

	INIT_LIST_HEAD(&spidev->device_entry);

	/* If we can allocate a minor number, hook up this device.
	 * Reusing minors is fine so long as udev or mdev is working.
	 */
	mutex_lock(&device_list_lock);
	minor = find_first_zero_bit(minors, N_SPI_MINORS);
	if (minor < N_SPI_MINORS) {
		spidev->dev.parent = &spi->dev;
		spidev->dev.class = &spidev_class;
		spidev->dev.devt = MKDEV(SPIDEV_MAJOR, minor);
		snprintf(spidev->dev.bus_id, sizeof spidev->dev.bus_id,
				"spidev%d.%d",
				spi->master->bus_num, spi->chip_select);
		status = device_register(&spidev->dev);
	} else {
		dev_dbg(&spi->dev, "no minor number available!\n");
		status = -ENODEV;
	}
	if (status == 0) {
		set_bit(minor, minors);
		dev_set_drvdata(&spi->dev, spidev);
		list_add(&spidev->device_entry, &device_list);
	}
	mutex_unlock(&device_list_lock);

	if (status != 0)
		kfree(spidev);

	return status;
}

static int spidev_remove(struct spi_device *spi)
{
	struct spidev_data	*spidev = dev_get_drvdata(&spi->dev);

	mutex_lock(&device_list_lock);

	list_del(&spidev->device_entry);
	dev_set_drvdata(&spi->dev, NULL);
	clear_bit(MINOR(spidev->dev.devt), minors);
	device_unregister(&spidev->dev);

	mutex_unlock(&device_list_lock);

	return 0;
}

static struct spi_driver spidev_spi = {
	.driver = {
		.name =		"spidev",
		.owner =	THIS_MODULE,
	},
	.probe =	spidev_probe,
	.remove =	__devexit_p(spidev_remove),

	/* NOTE:  suspend/resume methods are not necessary here.
	 * We don't do anything except pass the requests to/from
	 * the underlying controller.  The refrigerator handles
	 * most issues; the controller driver handles the rest.
	 */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
	int status;

	/* Claim our 256 reserved device numbers.  Then register a class
	 * that will key udev/mdev to add/remove /dev nodes.  Last, register
	 * the driver which manages those device numbers.
	 */
	BUILD_BUG_ON(N_SPI_MINORS > 256);
	status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
	if (status < 0)
		return status;

	status = class_register(&spidev_class);
	if (status < 0) {
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
		return status;
	}

	status = spi_register_driver(&spidev_spi);
	if (status < 0) {
		class_unregister(&spidev_class);
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
	}
	return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
	spi_unregister_driver(&spidev_spi);
	class_unregister(&spidev_class);
	unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");