/*
 * Simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/acpi.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <linux/uaccess.h>


/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
#define SPIDEV_MAJOR			153	/* assigned */
#define N_SPI_MINORS			32	/* ... up to 256 */

static DECLARE_BITMAP(minors, N_SPI_MINORS);


/* Bit masks for spi_device.mode management.  Note that incorrect
 * settings for some of these flags can cause *lots* of trouble for other
 * devices on a shared bus:
 *
 *  - CS_HIGH ... this device will be active when it shouldn't be
 *  - 3WIRE ... when active, it won't behave as it should
 *  - NO_CS ... there will be no explicit message boundaries; this
 *	is completely incompatible with the shared bus model
 *  - READY ... transfers may proceed when they shouldn't.
 *
 * REVISIT should changing those flags be privileged?
 */
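/* A minimal userspace sketch of the mode ioctls discussed above, kept as a
 * comment for illustration only (not compiled as part of this driver).  It
 * assumes a /dev/spidev0.0 node created by udev/mdev; the ioctl names and
 * SPI_* flags come from <linux/spi/spidev.h>, error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/spi/spidev.h>
 *
 *	int fd = open("/dev/spidev0.0", O_RDWR);
 *	__u8 mode;
 *
 *	ioctl(fd, SPI_IOC_RD_MODE, &mode);	// read current mode bits
 *	mode |= SPI_CPHA;			// e.g. select SPI mode 1
 *	ioctl(fd, SPI_IOC_WR_MODE, &mode);	// driver then calls spi_setup()
 */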
#define SPI_MODE_MASK		(SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
				| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
				| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
				| SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)

struct spidev_data {
	dev_t			devt;
	spinlock_t		spi_lock;
	struct spi_device	*spi;
	struct list_head	device_entry;

	/* TX/RX buffers are NULL unless this device is open (users > 0) */
	struct mutex		buf_lock;
	unsigned		users;
	u8			*tx_buffer;
	u8			*rx_buffer;
	u32			speed_hz;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");

/*-------------------------------------------------------------------------*/

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
	int status;
	struct spi_device *spi;

	spin_lock_irq(&spidev->spi_lock);
	spi = spidev->spi;
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL)
		status = -ESHUTDOWN;
	else
		status = spi_sync(spi, message);

	if (status == 0)
		status = message->actual_length;

	return status;
}

static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
	struct spi_transfer	t = {
			.tx_buf		= spidev->tx_buffer,
			.len		= len,
			.speed_hz	= spidev->speed_hz,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}

static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
	struct spi_transfer	t = {
			.rx_buf		= spidev->rx_buffer,
			.len		= len,
			.speed_hz	= spidev->speed_hz,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct spidev_data	*spidev;
	ssize_t			status = 0;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	spidev = filp->private_data;

	mutex_lock(&spidev->buf_lock);
	status = spidev_sync_read(spidev, count);
	if (status > 0) {
		unsigned long	missing;

		missing = copy_to_user(buf, spidev->rx_buffer, status);
		if (missing == status)
			status = -EFAULT;
		else
			status = status - missing;
	}
	mutex_unlock(&spidev->buf_lock);

	return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct spidev_data	*spidev;
	ssize_t			status = 0;
	unsigned long		missing;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	spidev = filp->private_data;

	mutex_lock(&spidev->buf_lock);
	missing = copy_from_user(spidev->tx_buffer, buf, count);
	if (missing == 0)
		status = spidev_sync_write(spidev, count);
	else
		status = -EFAULT;
	mutex_unlock(&spidev->buf_lock);

	return status;
}
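/* A minimal userspace sketch of the half-duplex read()/write() paths above,
 * for illustration only (not compiled as part of this driver).  It assumes
 * "fd" is an open spidev node as in the earlier sketch.  Each read() or
 * write() becomes one SPI message, with chipselect toggling only at its
 * boundaries, and anything larger than the "bufsiz" module parameter
 * (default 4096 bytes) is rejected with -EMSGSIZE.
 *
 *	__u8 out[2] = { 0x01, 0x02 };
 *	__u8 in[4];
 *
 *	write(fd, out, sizeof(out));	// TX-only message, RX data discarded
 *	read(fd, in, sizeof(in));	// RX-only message, dummy bytes shifted out
 */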
static int spidev_message(struct spidev_data *spidev,
		struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
	struct spi_message	msg;
	struct spi_transfer	*k_xfers;
	struct spi_transfer	*k_tmp;
	struct spi_ioc_transfer *u_tmp;
	unsigned		n, total, tx_total, rx_total;
	u8			*tx_buf, *rx_buf;
	int			status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
	if (k_xfers == NULL)
		return -ENOMEM;

	/* Construct spi_message, copying any tx data to bounce buffer.
	 * We walk the array of user-provided transfers, using each one
	 * to initialize a kernel version of the same transfer.
	 */
	tx_buf = spidev->tx_buffer;
	rx_buf = spidev->rx_buffer;
	total = 0;
	tx_total = 0;
	rx_total = 0;
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		k_tmp->len = u_tmp->len;

		total += k_tmp->len;
		/* Since the function returns the total length of transfers
		 * on success, restrict the total to positive int values to
		 * avoid the return value looking like an error.  Also check
		 * each transfer length to avoid arithmetic overflow.
		 */
		if (total > INT_MAX || k_tmp->len > INT_MAX) {
			status = -EMSGSIZE;
			goto done;
		}

		if (u_tmp->rx_buf) {
			/* this transfer needs space in RX bounce buffer */
			rx_total += k_tmp->len;
			if (rx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->rx_buf = rx_buf;
			if (!access_ok(VERIFY_WRITE, (u8 __user *)
						(uintptr_t) u_tmp->rx_buf,
						u_tmp->len))
				goto done;
			rx_buf += k_tmp->len;
		}
		if (u_tmp->tx_buf) {
			/* this transfer needs space in TX bounce buffer */
			tx_total += k_tmp->len;
			if (tx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->tx_buf = tx_buf;
			if (copy_from_user(tx_buf, (const u8 __user *)
						(uintptr_t) u_tmp->tx_buf,
						u_tmp->len))
				goto done;
			tx_buf += k_tmp->len;
		}

		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->tx_nbits = u_tmp->tx_nbits;
		k_tmp->rx_nbits = u_tmp->rx_nbits;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay_usecs = u_tmp->delay_usecs;
		k_tmp->speed_hz = u_tmp->speed_hz;
		if (!k_tmp->speed_hz)
			k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
		dev_dbg(&spidev->spi->dev,
			"  xfer len %u %s%s%s%dbits %u usec %uHz\n",
			u_tmp->len,
			u_tmp->rx_buf ? "rx " : "",
			u_tmp->tx_buf ? "tx " : "",
			u_tmp->cs_change ? "cs " : "",
			u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
			u_tmp->delay_usecs,
			u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
		spi_message_add_tail(k_tmp, &msg);
	}

	status = spidev_sync(spidev, &msg);
	if (status < 0)
		goto done;

	/* copy any rx data out of bounce buffer */
	rx_buf = spidev->rx_buffer;
	for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (__copy_to_user((u8 __user *)
					(uintptr_t) u_tmp->rx_buf, rx_buf,
					u_tmp->len)) {
				status = -EFAULT;
				goto done;
			}
			rx_buf += u_tmp->len;
		}
	}
	status = total;

done:
	kfree(k_xfers);
	return status;
}
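/* A minimal userspace sketch of the segmented/full-duplex path handled by
 * spidev_message(), for illustration only (not compiled as part of this
 * driver); it assumes "fd" is an open spidev node as in the earlier sketch.
 * Buffer pointers are carried as integers in struct spi_ioc_transfer, hence
 * the casts; a zero .speed_hz or .bits_per_word falls back to the current
 * device setup, and chipselect stays asserted across the transfers unless
 * .cs_change is set.
 *
 *	struct spi_ioc_transfer xfer[2];
 *	__u8 cmd[1] = { 0x9f };
 *	__u8 rsp[3];
 *
 *	memset(xfer, 0, sizeof(xfer));
 *	xfer[0].tx_buf = (unsigned long)cmd;	// first transfer: command out
 *	xfer[0].len = sizeof(cmd);
 *	xfer[1].rx_buf = (unsigned long)rsp;	// second transfer: response in
 *	xfer[1].len = sizeof(rsp);
 *	if (ioctl(fd, SPI_IOC_MESSAGE(2), xfer) < 0)
 *		perror("SPI_IOC_MESSAGE");
 */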
static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
		unsigned *n_ioc)
{
	struct spi_ioc_transfer	*ioc;
	u32	tmp;

	/* Check type, command number and direction */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
			|| _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
			|| _IOC_DIR(cmd) != _IOC_WRITE)
		return ERR_PTR(-ENOTTY);

	tmp = _IOC_SIZE(cmd);
	if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
		return ERR_PTR(-EINVAL);
	*n_ioc = tmp / sizeof(struct spi_ioc_transfer);
	if (*n_ioc == 0)
		return NULL;

	/* copy into scratch area */
	ioc = kmalloc(tmp, GFP_KERNEL);
	if (!ioc)
		return ERR_PTR(-ENOMEM);
	if (__copy_from_user(ioc, u_ioc, tmp)) {
		kfree(ioc);
		return ERR_PTR(-EFAULT);
	}
	return ioc;
}
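/* Note (illustrative): SPI_IOC_MESSAGE(N) encodes the transfer count in the
 * ioctl size field, so _IOC_SIZE(cmd) == N * sizeof(struct spi_ioc_transfer),
 * which is what spidev_get_ioc_message() decodes above.  In
 * <linux/spi/spidev.h> the request is built roughly as
 *
 *	SPI_IOC_MESSAGE(N) == _IOW(SPI_IOC_MAGIC, 0,
 *				   char[N * sizeof(struct spi_ioc_transfer)])
 *
 * so N is bounded by the width of the size field (_IOC_SIZEBITS); oversized
 * requests collapse to a zero size, which is treated as an empty message.
 */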
static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int			err = 0;
	int			retval = 0;
	struct spidev_data	*spidev;
	struct spi_device	*spi;
	u32			tmp;
	unsigned		n_ioc;
	struct spi_ioc_transfer	*ioc;

	/* Check type and command number */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
		return -ENOTTY;

	/* Check access direction once here; don't repeat below.
	 * IOC_DIR is from the user perspective, while access_ok is
	 * from the kernel perspective; so they look reversed.
	 */
	if (_IOC_DIR(cmd) & _IOC_READ)
		err = !access_ok(VERIFY_WRITE,
				(void __user *)arg, _IOC_SIZE(cmd));
	if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
		err = !access_ok(VERIFY_READ,
				(void __user *)arg, _IOC_SIZE(cmd));
	if (err)
		return -EFAULT;

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL)
		return -ESHUTDOWN;

	/* use the buffer lock here for triple duty:
	 *  - prevent I/O (from us) so calling spi_setup() is safe;
	 *  - prevent concurrent SPI_IOC_WR_* from morphing
	 *    data fields while SPI_IOC_RD_* reads them;
	 *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
	 */
	mutex_lock(&spidev->buf_lock);

	switch (cmd) {
	/* read requests */
	case SPI_IOC_RD_MODE:
		retval = __put_user(spi->mode & SPI_MODE_MASK,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MODE32:
		retval = __put_user(spi->mode & SPI_MODE_MASK,
					(__u32 __user *)arg);
		break;
	case SPI_IOC_RD_LSB_FIRST:
		retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_BITS_PER_WORD:
		retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MAX_SPEED_HZ:
		retval = __put_user(spidev->speed_hz, (__u32 __user *)arg);
		break;

	/* write requests */
	case SPI_IOC_WR_MODE:
	case SPI_IOC_WR_MODE32:
		if (cmd == SPI_IOC_WR_MODE)
			retval = __get_user(tmp, (u8 __user *)arg);
		else
			retval = __get_user(tmp, (u32 __user *)arg);
		if (retval == 0) {
			u32	save = spi->mode;

			if (tmp & ~SPI_MODE_MASK) {
				retval = -EINVAL;
				break;
			}

			tmp |= spi->mode & ~SPI_MODE_MASK;
			spi->mode = (u16)tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;
			else
				dev_dbg(&spi->dev, "spi mode %x\n", tmp);
		}
		break;
	case SPI_IOC_WR_LSB_FIRST:
		retval = __get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u32	save = spi->mode;

			if (tmp)
				spi->mode |= SPI_LSB_FIRST;
			else
				spi->mode &= ~SPI_LSB_FIRST;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;
			else
				dev_dbg(&spi->dev, "%csb first\n",
						tmp ? 'l' : 'm');
		}
		break;
	case SPI_IOC_WR_BITS_PER_WORD:
		retval = __get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u8	save = spi->bits_per_word;

			spi->bits_per_word = tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->bits_per_word = save;
			else
				dev_dbg(&spi->dev, "%d bits per word\n", tmp);
		}
		break;
	case SPI_IOC_WR_MAX_SPEED_HZ:
		retval = __get_user(tmp, (__u32 __user *)arg);
		if (retval == 0) {
			u32	save = spi->max_speed_hz;

			spi->max_speed_hz = tmp;
			retval = spi_setup(spi);
			if (retval >= 0)
				spidev->speed_hz = tmp;
			else
				dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
			spi->max_speed_hz = save;
		}
		break;

	default:
		/* segmented and/or full-duplex I/O request */
		/* Check message and copy into scratch area */
		ioc = spidev_get_ioc_message(cmd,
				(struct spi_ioc_transfer __user *)arg, &n_ioc);
		if (IS_ERR(ioc)) {
			retval = PTR_ERR(ioc);
			break;
		}
		if (!ioc)
			break;	/* n_ioc is also 0 */

		/* translate to spi_message, execute */
		retval = spidev_message(spidev, ioc, n_ioc);
		kfree(ioc);
		break;
	}

	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}
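/* A minimal userspace sketch of the configuration ioctls handled above, for
 * illustration only (not compiled as part of this driver); it assumes "fd"
 * is an open spidev node as in the earlier sketch, and the values shown are
 * examples.  Each write request goes through spi_setup() and the previous
 * setting is restored if the controller rejects it.
 *
 *	__u8 bits = 8;
 *	__u32 speed = 1000000;				// 1 MHz, example value
 *
 *	ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits);
 *	ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);
 *	ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed);	// read back current value
 */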
#ifdef CONFIG_COMPAT
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct spi_ioc_transfer __user	*u_ioc;
	int				retval = 0;
	struct spidev_data		*spidev;
	struct spi_device		*spi;
	unsigned			n_ioc, n;
	struct spi_ioc_transfer		*ioc;

	u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
	if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
		return -EFAULT;

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL)
		return -ESHUTDOWN;

	/* SPI_IOC_MESSAGE needs the buffer locked "normally" */
	mutex_lock(&spidev->buf_lock);

	/* Check message and copy into scratch area */
	ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
	if (IS_ERR(ioc)) {
		retval = PTR_ERR(ioc);
		goto done;
	}
	if (!ioc)
		goto done;	/* n_ioc is also 0 */

	/* Convert buffer pointers */
	for (n = 0; n < n_ioc; n++) {
		ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
		ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
	}

	/* translate to spi_message, execute */
	retval = spidev_message(spidev, ioc, n_ioc);
	kfree(ioc);

done:
	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}

static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
			&& _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
			&& _IOC_DIR(cmd) == _IOC_WRITE)
		return spidev_compat_ioc_message(filp, cmd, arg);

	return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */

static int spidev_open(struct inode *inode, struct file *filp)
{
	struct spidev_data	*spidev;
	int			status = -ENXIO;

	mutex_lock(&device_list_lock);

	list_for_each_entry(spidev, &device_list, device_entry) {
		if (spidev->devt == inode->i_rdev) {
			status = 0;
			break;
		}
	}

	if (status) {
		pr_debug("spidev: nothing for minor %d\n", iminor(inode));
		goto err_find_dev;
	}

	if (!spidev->tx_buffer) {
		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->tx_buffer) {
			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
			status = -ENOMEM;
			goto err_find_dev;
		}
	}

	if (!spidev->rx_buffer) {
		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->rx_buffer) {
			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
			status = -ENOMEM;
			goto err_alloc_rx_buf;
		}
	}

	spidev->users++;
	filp->private_data = spidev;
	nonseekable_open(inode, filp);

	mutex_unlock(&device_list_lock);
	return 0;

err_alloc_rx_buf:
	kfree(spidev->tx_buffer);
	spidev->tx_buffer = NULL;
err_find_dev:
	mutex_unlock(&device_list_lock);
	return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
	struct spidev_data	*spidev;

	mutex_lock(&device_list_lock);
	spidev = filp->private_data;
	filp->private_data = NULL;

	/* last close? */
	spidev->users--;
	if (!spidev->users) {
		int		dofree;

		kfree(spidev->tx_buffer);
		spidev->tx_buffer = NULL;

		kfree(spidev->rx_buffer);
		spidev->rx_buffer = NULL;

		spin_lock_irq(&spidev->spi_lock);
		if (spidev->spi)
			spidev->speed_hz = spidev->spi->max_speed_hz;

		/* ... after we unbound from the underlying device? */
		dofree = (spidev->spi == NULL);
		spin_unlock_irq(&spidev->spi_lock);

		if (dofree)
			kfree(spidev);
	}
	mutex_unlock(&device_list_lock);

	return 0;
}
static const struct file_operations spidev_fops = {
	.owner =	THIS_MODULE,
	/* REVISIT switch to aio primitives, so that userspace
	 * gets more complete API coverage.  It'll simplify things
	 * too, except for the locking.
	 */
	.write =	spidev_write,
	.read =		spidev_read,
	.unlocked_ioctl = spidev_ioctl,
	.compat_ioctl = spidev_compat_ioctl,
	.open =		spidev_open,
	.release =	spidev_release,
	.llseek =	no_llseek,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static struct class *spidev_class;

#ifdef CONFIG_OF
static const struct of_device_id spidev_dt_ids[] = {
	{ .compatible = "rohm,dh2228fv" },
	{ .compatible = "lineartechnology,ltc2488" },
	{ .compatible = "ge,achc" },
	{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
#endif

#ifdef CONFIG_ACPI

/* Dummy SPI devices not to be used in production systems */
#define SPIDEV_ACPI_DUMMY	1

static const struct acpi_device_id spidev_acpi_ids[] = {
	/*
	 * The ACPI SPT000* devices are only meant for development and
	 * testing. Systems used in production should have a proper ACPI
	 * description of the connected peripheral and they should also use
	 * a proper driver instead of poking directly to the SPI bus.
	 */
	{ "SPT0001", SPIDEV_ACPI_DUMMY },
	{ "SPT0002", SPIDEV_ACPI_DUMMY },
	{ "SPT0003", SPIDEV_ACPI_DUMMY },
	{},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);

static void spidev_probe_acpi(struct spi_device *spi)
{
	const struct acpi_device_id *id;

	if (!has_acpi_companion(&spi->dev))
		return;

	id = acpi_match_device(spidev_acpi_ids, &spi->dev);
	if (WARN_ON(!id))
		return;

	if (id->driver_data == SPIDEV_ACPI_DUMMY)
		dev_warn(&spi->dev, "do not use this driver in production systems!\n");
}
#else
static inline void spidev_probe_acpi(struct spi_device *spi) {}
#endif

/*-------------------------------------------------------------------------*/
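/* For illustration only: a device tree fragment binding this driver should
 * use one of the compatible strings from spidev_dt_ids above rather than
 * "spidev" itself (see the warning in spidev_probe() below).  The node and
 * controller labels here are examples.
 *
 *	&spi0 {
 *		adc@0 {
 *			compatible = "lineartechnology,ltc2488";
 *			reg = <0>;
 *			spi-max-frequency = <1000000>;
 *		};
 *	};
 */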
static int spidev_probe(struct spi_device *spi)
{
	struct spidev_data	*spidev;
	int			status;
	unsigned long		minor;

	/*
	 * spidev should never be referenced in DT without a specific
	 * compatible string, it is a Linux implementation thing
	 * rather than a description of the hardware.
	 */
	if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
		dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
		WARN_ON(spi->dev.of_node &&
			!of_match_device(spidev_dt_ids, &spi->dev));
	}

	spidev_probe_acpi(spi);

	/* Allocate driver data */
	spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
	if (!spidev)
		return -ENOMEM;

	/* Initialize the driver data */
	spidev->spi = spi;
	spin_lock_init(&spidev->spi_lock);
	mutex_init(&spidev->buf_lock);

	INIT_LIST_HEAD(&spidev->device_entry);

	/* If we can allocate a minor number, hook up this device.
	 * Reusing minors is fine so long as udev or mdev is working.
	 */
	mutex_lock(&device_list_lock);
	minor = find_first_zero_bit(minors, N_SPI_MINORS);
	if (minor < N_SPI_MINORS) {
		struct device *dev;

		spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
		dev = device_create(spidev_class, &spi->dev, spidev->devt,
				    spidev, "spidev%d.%d",
				    spi->master->bus_num, spi->chip_select);
		status = PTR_ERR_OR_ZERO(dev);
	} else {
		dev_dbg(&spi->dev, "no minor number available!\n");
		status = -ENODEV;
	}
	if (status == 0) {
		set_bit(minor, minors);
		list_add(&spidev->device_entry, &device_list);
	}
	mutex_unlock(&device_list_lock);

	spidev->speed_hz = spi->max_speed_hz;

	if (status == 0)
		spi_set_drvdata(spi, spidev);
	else
		kfree(spidev);

	return status;
}

static int spidev_remove(struct spi_device *spi)
{
	struct spidev_data	*spidev = spi_get_drvdata(spi);

	/* make sure ops on existing fds can abort cleanly */
	spin_lock_irq(&spidev->spi_lock);
	spidev->spi = NULL;
	spin_unlock_irq(&spidev->spi_lock);

	/* prevent new opens */
	mutex_lock(&device_list_lock);
	list_del(&spidev->device_entry);
	device_destroy(spidev_class, spidev->devt);
	clear_bit(MINOR(spidev->devt), minors);
	if (spidev->users == 0)
		kfree(spidev);
	mutex_unlock(&device_list_lock);

	return 0;
}

static struct spi_driver spidev_spi_driver = {
	.driver = {
		.name =		"spidev",
		.of_match_table = of_match_ptr(spidev_dt_ids),
		.acpi_match_table = ACPI_PTR(spidev_acpi_ids),
	},
	.probe =	spidev_probe,
	.remove =	spidev_remove,

	/* NOTE:  suspend/resume methods are not necessary here.
	 * We don't do anything except pass the requests to/from
	 * the underlying controller.  The refrigerator handles
	 * most issues; the controller driver handles the rest.
	 */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
	int status;

	/* Claim our 256 reserved device numbers.  Then register a class
	 * that will key udev/mdev to add/remove /dev nodes.  Last, register
	 * the driver which manages those device numbers.
	 */
	BUILD_BUG_ON(N_SPI_MINORS > 256);
	status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
	if (status < 0)
		return status;

	spidev_class = class_create(THIS_MODULE, "spidev");
	if (IS_ERR(spidev_class)) {
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
		return PTR_ERR(spidev_class);
	}

	status = spi_register_driver(&spidev_spi_driver);
	if (status < 0) {
		class_destroy(spidev_class);
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
	}
	return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
	spi_unregister_driver(&spidev_spi_driver);
	class_destroy(spidev_class);
	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");