/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#define pr_fmt(fmt) "iio-core: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static struct dentry *iio_debugfs_dentry;

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY] = "gravity",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
	[IIO_MOD_RUNNING] = "running",
"running", 119 [IIO_MOD_JOGGING] = "jogging", 120 [IIO_MOD_WALKING] = "walking", 121 [IIO_MOD_STILL] = "still", 122 [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)", 123 [IIO_MOD_I] = "i", 124 [IIO_MOD_Q] = "q", 125 [IIO_MOD_CO2] = "co2", 126 [IIO_MOD_VOC] = "voc", 127 }; 128 129 /* relies on pairs of these shared then separate */ 130 static const char * const iio_chan_info_postfix[] = { 131 [IIO_CHAN_INFO_RAW] = "raw", 132 [IIO_CHAN_INFO_PROCESSED] = "input", 133 [IIO_CHAN_INFO_SCALE] = "scale", 134 [IIO_CHAN_INFO_OFFSET] = "offset", 135 [IIO_CHAN_INFO_CALIBSCALE] = "calibscale", 136 [IIO_CHAN_INFO_CALIBBIAS] = "calibbias", 137 [IIO_CHAN_INFO_PEAK] = "peak_raw", 138 [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale", 139 [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw", 140 [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw", 141 [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY] 142 = "filter_low_pass_3db_frequency", 143 [IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY] 144 = "filter_high_pass_3db_frequency", 145 [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency", 146 [IIO_CHAN_INFO_FREQUENCY] = "frequency", 147 [IIO_CHAN_INFO_PHASE] = "phase", 148 [IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain", 149 [IIO_CHAN_INFO_HYSTERESIS] = "hysteresis", 150 [IIO_CHAN_INFO_INT_TIME] = "integration_time", 151 [IIO_CHAN_INFO_ENABLE] = "en", 152 [IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight", 153 [IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight", 154 [IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count", 155 [IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time", 156 [IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity", 157 [IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio", 158 }; 159 160 /** 161 * iio_find_channel_from_si() - get channel from its scan index 162 * @indio_dev: device 163 * @si: scan index to match 164 */ 165 const struct iio_chan_spec 166 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si) 167 { 168 int i; 169 170 for (i = 0; i < indio_dev->num_channels; i++) 171 if (indio_dev->channels[i].scan_index == si) 172 return &indio_dev->channels[i]; 173 return NULL; 174 } 175 176 /* This turns up an awful lot */ 177 ssize_t iio_read_const_attr(struct device *dev, 178 struct device_attribute *attr, 179 char *buf) 180 { 181 return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string); 182 } 183 EXPORT_SYMBOL(iio_read_const_attr); 184 185 static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) 186 { 187 int ret; 188 const struct iio_event_interface *ev_int = indio_dev->event_interface; 189 190 ret = mutex_lock_interruptible(&indio_dev->mlock); 191 if (ret) 192 return ret; 193 if ((ev_int && iio_event_enabled(ev_int)) || 194 iio_buffer_enabled(indio_dev)) { 195 mutex_unlock(&indio_dev->mlock); 196 return -EBUSY; 197 } 198 indio_dev->clock_id = clock_id; 199 mutex_unlock(&indio_dev->mlock); 200 201 return 0; 202 } 203 204 /** 205 * iio_get_time_ns() - utility function to get a time stamp for events etc 206 * @indio_dev: device 207 */ 208 s64 iio_get_time_ns(const struct iio_dev *indio_dev) 209 { 210 struct timespec tp; 211 212 switch (iio_device_get_clock(indio_dev)) { 213 case CLOCK_REALTIME: 214 ktime_get_real_ts(&tp); 215 break; 216 case CLOCK_MONOTONIC: 217 ktime_get_ts(&tp); 218 break; 219 case CLOCK_MONOTONIC_RAW: 220 getrawmonotonic(&tp); 221 break; 222 case CLOCK_REALTIME_COARSE: 223 tp = current_kernel_time(); 224 break; 225 case CLOCK_MONOTONIC_COARSE: 226 tp = get_monotonic_coarse(); 227 break; 228 case CLOCK_BOOTTIME: 229 get_monotonic_boottime(&tp); 230 break; 231 case 
		timekeeping_clocktai(&tp);
		break;
	default:
		BUG();
	}

	return timespec_to_ns(&tp);
}
EXPORT_SYMBOL(iio_get_time_ns);

/**
 * iio_get_time_res() - utility function to get time stamp clock resolution in
 *                      nano seconds.
 * @indio_dev: device
 */
unsigned int iio_get_time_res(const struct iio_dev *indio_dev)
{
	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		return hrtimer_resolution;
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
		return LOW_RES_NSEC;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_res);

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		pr_err("could not register bus type\n");
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		goto error_unregister_bus_type;
	}

	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}

#if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
				    size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	char buf[20];
	unsigned val = 0;
	ssize_t len;
	int ret;

	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  indio_dev->cached_reg_addr,
						  0, &val);
	if (ret)
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);

	len = snprintf(buf, sizeof(buf), "0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}

static ssize_t iio_debugfs_write_reg(struct file *file,
				     const char __user *userbuf,
				     size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	unsigned reg, val;
	char buf[80];
	int ret;

	count = min_t(size_t, count, (sizeof(buf)-1));
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		indio_dev->cached_reg_addr = reg;
		break;
	case 2:
		indio_dev->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	debugfs_remove_recursive(indio_dev->debugfs_dentry);
}

static int iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct dentry *d;

	if (indio_dev->info->debugfs_reg_access == NULL)
		return 0;

	if (!iio_debugfs_dentry)
		return 0;

	indio_dev->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);
	if (indio_dev->debugfs_dentry == NULL) {
		dev_warn(indio_dev->dev.parent,
			 "Failed to create debugfs directory\n");
		return -EFAULT;
	}

	d = debugfs_create_file("direct_reg_access", 0644,
				indio_dev->debugfs_dentry,
				indio_dev, &iio_debugfs_reg_fops);
	if (!d) {
		iio_device_unregister_debugfs(indio_dev);
		return -ENOMEM;
	}

	return 0;
}
#else
static int iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	return 0;
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */

static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}

static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->write(indio_dev, ext_info->private,
			       this_attr->c, buf, len);
}

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	size_t len = 0;

	if (!e->num_items)
		return 0;

	for (i = 0; i < e->num_items; ++i)
		len += scnprintf(buf + len, PAGE_SIZE - len, "%s ", e->items[i]);

	/* replace last space with a newline */
	buf[len - 1] = '\n';

	return len;
}
EXPORT_SYMBOL_GPL(iio_enum_available_read);

ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int i;

	if (!e->get)
		return -EINVAL;

	i = e->get(indio_dev, chan);
	if (i < 0)
		return i;
	else if (i >= e->num_items)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s\n", e->items[i]);
}
EXPORT_SYMBOL_GPL(iio_enum_read);

ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	int ret;

	if (!e->set)
		return -EINVAL;

	for (i = 0; i < e->num_items; i++) {
		if (sysfs_streq(buf, e->items[i]))
			break;
	}

	if (i == e->num_items)
		return -EINVAL;

	ret = e->set(indio_dev, chan, i);
	return ret ? ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);

static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};

static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
{
	*matrix = iio_mount_idmatrix;
	dev_info(dev, "mounting matrix not found: using identity...\n");
	return 0;
}

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
					      priv)(indio_dev, chan);

	if (IS_ERR(mtx))
		return PTR_ERR(mtx);

	if (!mtx)
		mtx = &iio_mount_idmatrix;

	return snprintf(buf, PAGE_SIZE, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
			mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
			mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
			mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
}
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);

/**
 * of_iio_read_mount_matrix() - retrieve iio device mounting matrix from
 *                              device-tree "mount-matrix" property
 * @dev:	device the mounting matrix property is assigned to
 * @propname:	device specific mounting matrix property name
 * @matrix:	where to store retrieved matrix
 *
 * If device is assigned no mounting matrix property, a default 3x3 identity
 * matrix will be filled in.
 *
 * Return: 0 if success, or a negative error code on failure.
 */
#ifdef CONFIG_OF
int of_iio_read_mount_matrix(const struct device *dev,
			     const char *propname,
			     struct iio_mount_matrix *matrix)
{
	if (dev->of_node) {
		int err = of_property_read_string_array(dev->of_node,
				propname, matrix->rotation,
				ARRAY_SIZE(iio_mount_idmatrix.rotation));

		if (err == ARRAY_SIZE(iio_mount_idmatrix.rotation))
			return 0;

		if (err >= 0)
			/* Invalid number of matrix entries. */
			return -EINVAL;

		if (err != -EINVAL)
			/* Invalid matrix declaration format. */
			return err;
	}

	/* Matrix was not declared at all: fallback to identity. */
	return iio_setup_mount_idmatrix(dev, matrix);
}
#else
int of_iio_read_mount_matrix(const struct device *dev,
			     const char *propname,
			     struct iio_mount_matrix *matrix)
{
	return iio_setup_mount_idmatrix(dev, matrix);
}
#endif
EXPORT_SYMBOL(of_iio_read_mount_matrix);

static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
				  int size, const int *vals)
{
	unsigned long long tmp;
	int tmp0, tmp1;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return snprintf(buf, len, "%d", vals[0]);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		/* fall through - shares the formatting below, with " dB" appended */
	case IIO_VAL_INT_PLUS_MICRO:
		if (vals[1] < 0)
			return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
					-vals[1], scale_db ? " dB" : "");
		else
			return snprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
					scale_db ? " dB" : "");
" dB" : ""); 601 case IIO_VAL_INT_PLUS_NANO: 602 if (vals[1] < 0) 603 return snprintf(buf, len, "-%d.%09u", abs(vals[0]), 604 -vals[1]); 605 else 606 return snprintf(buf, len, "%d.%09u", vals[0], vals[1]); 607 case IIO_VAL_FRACTIONAL: 608 tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]); 609 tmp1 = vals[1]; 610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1); 611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); 612 case IIO_VAL_FRACTIONAL_LOG2: 613 tmp = (s64)vals[0] * 1000000000LL >> vals[1]; 614 tmp1 = do_div(tmp, 1000000000LL); 615 tmp0 = tmp; 616 return snprintf(buf, len, "%d.%09u", tmp0, tmp1); 617 case IIO_VAL_INT_MULTIPLE: 618 { 619 int i; 620 int l = 0; 621 622 for (i = 0; i < size; ++i) { 623 l += snprintf(&buf[l], len - l, "%d ", vals[i]); 624 if (l >= len) 625 break; 626 } 627 return l; 628 } 629 default: 630 return 0; 631 } 632 } 633 634 /** 635 * iio_format_value() - Formats a IIO value into its string representation 636 * @buf: The buffer to which the formatted value gets written 637 * which is assumed to be big enough (i.e. PAGE_SIZE). 638 * @type: One of the IIO_VAL_... constants. This decides how the val 639 * and val2 parameters are formatted. 640 * @size: Number of IIO value entries contained in vals 641 * @vals: Pointer to the values, exact meaning depends on the 642 * type parameter. 643 * 644 * Return: 0 by default, a negative number on failure or the 645 * total number of characters written for a type that belongs 646 * to the IIO_VAL_... constant. 647 */ 648 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) 649 { 650 ssize_t len; 651 652 len = __iio_format_value(buf, PAGE_SIZE, type, size, vals); 653 if (len >= PAGE_SIZE - 1) 654 return -EFBIG; 655 656 return len + sprintf(buf + len, "\n"); 657 } 658 EXPORT_SYMBOL_GPL(iio_format_value); 659 660 static ssize_t iio_read_channel_info(struct device *dev, 661 struct device_attribute *attr, 662 char *buf) 663 { 664 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 665 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 666 int vals[INDIO_MAX_RAW_ELEMENTS]; 667 int ret; 668 int val_len = 2; 669 670 if (indio_dev->info->read_raw_multi) 671 ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c, 672 INDIO_MAX_RAW_ELEMENTS, 673 vals, &val_len, 674 this_attr->address); 675 else 676 ret = indio_dev->info->read_raw(indio_dev, this_attr->c, 677 &vals[0], &vals[1], this_attr->address); 678 679 if (ret < 0) 680 return ret; 681 682 return iio_format_value(buf, ret, val_len, vals); 683 } 684 685 static ssize_t iio_format_avail_list(char *buf, const int *vals, 686 int type, int length) 687 { 688 int i; 689 ssize_t len = 0; 690 691 switch (type) { 692 case IIO_VAL_INT: 693 for (i = 0; i < length; i++) { 694 len += __iio_format_value(buf + len, PAGE_SIZE - len, 695 type, 1, &vals[i]); 696 if (len >= PAGE_SIZE) 697 return -EFBIG; 698 if (i < length - 1) 699 len += snprintf(buf + len, PAGE_SIZE - len, 700 " "); 701 else 702 len += snprintf(buf + len, PAGE_SIZE - len, 703 "\n"); 704 if (len >= PAGE_SIZE) 705 return -EFBIG; 706 } 707 break; 708 default: 709 for (i = 0; i < length / 2; i++) { 710 len += __iio_format_value(buf + len, PAGE_SIZE - len, 711 type, 2, &vals[i * 2]); 712 if (len >= PAGE_SIZE) 713 return -EFBIG; 714 if (i < length / 2 - 1) 715 len += snprintf(buf + len, PAGE_SIZE - len, 716 " "); 717 else 718 len += snprintf(buf + len, PAGE_SIZE - len, 719 "\n"); 720 if (len >= PAGE_SIZE) 721 return -EFBIG; 722 } 723 } 724 725 return len; 726 } 727 728 static ssize_t 
static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
{
	int i;
	ssize_t len;

	len = snprintf(buf, PAGE_SIZE, "[");
	switch (type) {
	case IIO_VAL_INT:
		for (i = 0; i < 3; i++) {
			len += __iio_format_value(buf + len, PAGE_SIZE - len,
						  type, 1, &vals[i]);
			if (len >= PAGE_SIZE)
				return -EFBIG;
			if (i < 2)
				len += snprintf(buf + len, PAGE_SIZE - len,
						" ");
			else
				len += snprintf(buf + len, PAGE_SIZE - len,
						"]\n");
			if (len >= PAGE_SIZE)
				return -EFBIG;
		}
		break;
	default:
		for (i = 0; i < 3; i++) {
			len += __iio_format_value(buf + len, PAGE_SIZE - len,
						  type, 2, &vals[i * 2]);
			if (len >= PAGE_SIZE)
				return -EFBIG;
			if (i < 2)
				len += snprintf(buf + len, PAGE_SIZE - len,
						" ");
			else
				len += snprintf(buf + len, PAGE_SIZE - len,
						"]\n");
			if (len >= PAGE_SIZE)
				return -EFBIG;
		}
	}

	return len;
}

static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const int *vals;
	int ret;
	int length;
	int type;

	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
					  &vals, &type, &length,
					  this_attr->address);

	if (ret < 0)
		return ret;
	switch (ret) {
	case IIO_AVAIL_LIST:
		return iio_format_avail_list(buf, vals, type, length);
	case IIO_AVAIL_RANGE:
		return iio_format_avail_range(buf, vals, type);
	default:
		return -EINVAL;
	}
}

/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			if (*(str + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (*str == '.' && integer_part) {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);

static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		default:
			return -EINVAL;
		}

	ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
	if (ret)
		return ret;

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}

static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;
	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			name = kasprintf(GFP_KERNEL,
					 "%s_%s%d-%s%d_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 chan->channel,
					 iio_chan_type_name_spec[chan->type],
					 chan->channel2,
					 full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;

		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 chan->channel,
						 full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}

error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
	return ret;
}

static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
{
	int i, ret, attrcount = 0;

	for_each_set_bit(i, infomask, sizeof(infomask)*8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     &indio_dev->channel_attr_list);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
{
	int i, ret, attrcount = 0;
	char *avail_postfix;

	for_each_set_bit(i, infomask, sizeof(infomask) * 8) {
		avail_postfix = kasprintf(GFP_KERNEL,
					  "%s_available",
					  iio_chan_info_postfix[i]);
		if (!avail_postfix)
			return -ENOMEM;

		ret = __iio_add_chan_devattr(avail_postfix,
					     chan,
					     &iio_read_channel_info_avail,
					     NULL,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     &indio_dev->channel_attr_list);
		kfree(avail_postfix);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SEPARATE,
						  &chan->info_mask_separate_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_TYPE,
						  &chan->info_mask_shared_by_type_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_DIR,
						  &chan->info_mask_shared_by_dir_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_ALL,
						  &chan->info_mask_shared_by_all_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	if (chan->ext_info) {
		unsigned int i = 0;
		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
					chan,
					ext_info->read ?
					    &iio_read_channel_ext_info : NULL,
					ext_info->write ?
					    &iio_write_channel_ext_info : NULL,
					i,
					ext_info->shared,
					&indio_dev->dev,
					&indio_dev->channel_attr_list);
			i++;
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}

/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list.
 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, attr_list, l) {
		kfree(p->dev_attr.attr.name);
		list_del(&p->l);
		kfree(p);
	}
}

static ssize_t iio_show_dev_name(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", indio_dev->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);

static ssize_t iio_show_timestamp_clock(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	const clockid_t clk = iio_device_get_clock(indio_dev);
	const char *name;
	ssize_t sz;

	switch (clk) {
	case CLOCK_REALTIME:
		name = "realtime\n";
		sz = sizeof("realtime\n");
		break;
	case CLOCK_MONOTONIC:
		name = "monotonic\n";
		sz = sizeof("monotonic\n");
		break;
	case CLOCK_MONOTONIC_RAW:
		name = "monotonic_raw\n";
		sz = sizeof("monotonic_raw\n");
		break;
	case CLOCK_REALTIME_COARSE:
		name = "realtime_coarse\n";
		sz = sizeof("realtime_coarse\n");
		break;
	case CLOCK_MONOTONIC_COARSE:
		name = "monotonic_coarse\n";
		sz = sizeof("monotonic_coarse\n");
		break;
	case CLOCK_BOOTTIME:
		name = "boottime\n";
		sz = sizeof("boottime\n");
		break;
	case CLOCK_TAI:
		name = "tai\n";
		sz = sizeof("tai\n");
		break;
	default:
		BUG();
	}

	memcpy(buf, name, sz);
	return sz;
}

static ssize_t iio_store_timestamp_clock(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	clockid_t clk;
	int ret;

	if (sysfs_streq(buf, "realtime"))
		clk = CLOCK_REALTIME;
	else if (sysfs_streq(buf, "monotonic"))
		clk = CLOCK_MONOTONIC;
	else if (sysfs_streq(buf, "monotonic_raw"))
		clk = CLOCK_MONOTONIC_RAW;
	else if (sysfs_streq(buf, "realtime_coarse"))
		clk = CLOCK_REALTIME_COARSE;
	else if (sysfs_streq(buf, "monotonic_coarse"))
		clk = CLOCK_MONOTONIC_COARSE;
	else if (sysfs_streq(buf, "boottime"))
		clk = CLOCK_BOOTTIME;
	else if (sysfs_streq(buf, "tai"))
		clk = CLOCK_TAI;
	else
		return -EINVAL;

	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
	if (ret)
		return ret;

	return len;
}

static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR,
		   iio_show_timestamp_clock, iio_store_timestamp_clock);

static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (indio_dev->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (clk)
		attrcount++;

	indio_dev->chan_attr_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(indio_dev->chan_attr_group.attrs[0]),
			GFP_KERNEL);
	if (indio_dev->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (indio_dev->info->attrs)
		memcpy(indio_dev->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(indio_dev->chan_attr_group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &indio_dev->channel_attr_list, l)
		indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
	if (clk)
		indio_dev->chan_attr_group.attrs[attrn++] = clk;

	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->chan_attr_group;

	return 0;

error_clear_attrs:
	iio_free_chan_devattr_list(&indio_dev->channel_attr_list);

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
	iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
	kfree(indio_dev->chan_attr_group.attrs);
	indio_dev->chan_attr_group.attrs = NULL;
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);

	if (indio_dev->modes & (INDIO_BUFFER_TRIGGERED | INDIO_EVENT_TRIGGERED))
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);

	iio_buffer_put(indio_dev->buffer);

	ida_simple_remove(&iio_ida, indio_dev->id);
	kfree(indio_dev);
}

struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

/**
 * iio_device_alloc() - allocate an iio_dev from a driver
 * @sizeof_priv:	Space to allocate for private structure.
 **/
struct iio_dev *iio_device_alloc(int sizeof_priv)
{
	struct iio_dev *dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct ? */
	alloc_size += IIO_ALIGN - 1;

	dev = kzalloc(alloc_size, GFP_KERNEL);

	if (dev) {
		dev->dev.groups = dev->groups;
		dev->dev.type = &iio_device_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);
		mutex_init(&dev->info_exist_lock);
		INIT_LIST_HEAD(&dev->channel_attr_list);

		dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
		if (dev->id < 0) {
			/* cannot use a dev_err as the name isn't available */
			pr_err("failed to get device id\n");
			kfree(dev);
			return NULL;
		}
		dev_set_name(&dev->dev, "iio:device%d", dev->id);
		INIT_LIST_HEAD(&dev->buffer_list);
	}

	return dev;
}
EXPORT_SYMBOL(iio_device_alloc);

/**
 * iio_device_free() - free an iio_dev from a driver
 * @dev:	the iio_dev associated with the device
 **/
void iio_device_free(struct iio_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);

static void devm_iio_device_release(struct device *dev, void *res)
{
	iio_device_free(*(struct iio_dev **)res);
}

int devm_iio_device_match(struct device *dev, void *res, void *data)
{
	struct iio_dev **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}
	return *r == data;
}
EXPORT_SYMBOL_GPL(devm_iio_device_match);

/**
 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
 * @dev:		Device to allocate iio_dev for
 * @sizeof_priv:	Space to allocate for private structure.
 *
 * Managed iio_device_alloc. iio_dev allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_dev allocated with this function needs to be freed separately,
 * devm_iio_device_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
{
	struct iio_dev **ptr, *iio_dev;

	ptr = devres_alloc(devm_iio_device_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	iio_dev = iio_device_alloc(sizeof_priv);
	if (iio_dev) {
		*ptr = iio_dev;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return iio_dev;
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);

/**
 * devm_iio_device_free - Resource-managed iio_device_free()
 * @dev:	Device this iio_dev belongs to
 * @iio_dev:	the iio_dev associated with the device
 *
 * Free iio_dev allocated with devm_iio_device_alloc().
 */
void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
{
	int rc;

	rc = devres_release(dev, devm_iio_device_release,
			    devm_iio_device_match, iio_dev);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_device_free);

/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 * @inode:	Inode structure for identifying the device in the file system
 * @filp:	File structure for iio device used to keep and later access
 *		private data
 *
 * Return: 0 on success or -EBUSY if the device is already opened
 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						 struct iio_dev, chrdev);

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
		return -EBUSY;

	iio_device_get(indio_dev);

	filp->private_data = indio_dev;

	return 0;
}

/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 * @inode:	Inode structure pointer for the char device
 * @filp:	File structure pointer for the char device
 *
 * Return: 0 for successful release
 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						 struct iio_dev, chrdev);
	clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
	iio_device_put(indio_dev);

	return 0;
}

/* Somewhat of a cross file organization violation - ioctls here are actually
 * event related */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev *indio_dev = filp->private_data;
	int __user *ip = (int __user *)arg;
	int fd;

	if (!indio_dev->info)
		return -ENODEV;

	if (cmd == IIO_GET_EVENT_FD_IOCTL) {
		fd = iio_event_getfd(indio_dev);
		if (fd < 0)
			return fd;
		if (copy_to_user(ip, &fd, sizeof(fd)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static const struct file_operations iio_buffer_fileops = {
	.read = iio_buffer_read_first_n_outer_addr,
	.release = iio_chrdev_release,
	.open = iio_chrdev_open,
	.poll = iio_buffer_poll_addr,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = iio_ioctl,
};

static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
	int i, j;
	const struct iio_chan_spec *channels = indio_dev->channels;

	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
		return 0;

	for (i = 0; i < indio_dev->num_channels - 1; i++) {
		if (channels[i].scan_index < 0)
			continue;
		for (j = i + 1; j < indio_dev->num_channels; j++)
			if (channels[i].scan_index == channels[j].scan_index) {
				dev_err(&indio_dev->dev,
					"Duplicate scan index %d\n",
					channels[i].scan_index);
				return -EINVAL;
			}
	}

	return 0;
}

static const struct iio_buffer_setup_ops noop_ring_setup_ops;

/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:	Device structure filled by the device driver
 **/
int iio_device_register(struct iio_dev *indio_dev)
{
	int ret;

	/* If the calling driver did not initialize of_node, do it here */
	if (!indio_dev->dev.of_node && indio_dev->dev.parent)
		indio_dev->dev.of_node = indio_dev->dev.parent->of_node;

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	/* configure elements for the chrdev */
	indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);

	ret = iio_device_register_debugfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register debugfs interfaces\n");
		return ret;
	}

	ret = iio_buffer_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & (INDIO_BUFFER_TRIGGERED | INDIO_EVENT_TRIGGERED))
		iio_device_register_trigger_consumer(indio_dev);

	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
	    indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
	indio_dev->chrdev.owner = indio_dev->info->driver_module;
	indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
	ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
	if (ret < 0)
		goto error_unreg_eventset;

	ret = device_add(&indio_dev->dev);
	if (ret < 0)
		goto error_cdev_del;

	return 0;
error_cdev_del:
	cdev_del(&indio_dev->chrdev);
error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffer_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(iio_device_register);

/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev:	Device structure representing the device.
 **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->info_exist_lock);

	device_del(&indio_dev->dev);

	if (indio_dev->chrdev.dev)
		cdev_del(&indio_dev->chrdev);
	iio_device_unregister_debugfs(indio_dev);

	iio_disable_all_buffers(indio_dev);

	indio_dev->info = NULL;

	iio_device_wakeup_eventset(indio_dev);
	iio_buffer_wakeup_poll(indio_dev);

	mutex_unlock(&indio_dev->info_exist_lock);

	iio_buffer_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);

static void devm_iio_device_unreg(struct device *dev, void *res)
{
	iio_device_unregister(*(struct iio_dev **)res);
}

/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev:	Device to allocate iio_dev for
 * @indio_dev:	Device structure filled by the device driver
 *
 * Managed iio_device_register. The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information.
 *
 * If an iio_dev registered with this function needs to be unregistered
 * separately, devm_iio_device_unregister() must be used.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_dev **ptr;
	int ret;

	ptr = devres_alloc(devm_iio_device_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = indio_dev;
	ret = iio_device_register(indio_dev);
	if (!ret)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_iio_device_register);

/**
 * devm_iio_device_unregister - Resource-managed iio_device_unregister()
 * @dev:	Device this iio_dev belongs to
 * @indio_dev:	the iio_dev associated with the device
 *
 * Unregister iio_dev registered with devm_iio_device_register().
 */
void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
{
	int rc;

	rc = devres_release(dev, devm_iio_device_unreg,
			    devm_iio_device_match, indio_dev);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_device_unregister);

/**
 * iio_device_claim_direct_mode - Keep device in direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * If the device is in direct mode it is guaranteed to stay
 * that way until iio_device_release_direct_mode() is called.
 *
 * Use with iio_device_release_direct_mode()
 *
 * Returns: 0 on success, -EBUSY on failure
 */
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->mlock);

	if (iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);

/**
 * iio_device_release_direct_mode - releases claim on direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Use with iio_device_claim_direct_mode()
 */
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&indio_dev->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");
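
/*
 * Usage sketch: a minimal outline of how a client driver typically consumes
 * the allocation and registration API above. foo_probe(), foo_info,
 * foo_channels and struct foo_state are hypothetical placeholders, not part
 * of this file or of the IIO core.
 *
 *	static int foo_probe(struct device *parent)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		// devm_iio_device_alloc() reserves sizeof(*st) of private
 *		// space after the iio_dev and ties its lifetime to parent.
 *		indio_dev = devm_iio_device_alloc(parent, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		indio_dev->dev.parent = parent;
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *
 *		// Registration creates the sysfs attributes and chrdev built
 *		// by iio_device_register() above; it is undone automatically
 *		// on driver detach.
 *		return devm_iio_device_register(parent, indio_dev);
 *	}
 */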