/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#define pr_fmt(fmt) "iio-core: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static struct dentry *iio_debugfs_dentry;

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY] = "gravity",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
"running", 119 [IIO_MOD_JOGGING] = "jogging", 120 [IIO_MOD_WALKING] = "walking", 121 [IIO_MOD_STILL] = "still", 122 [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)", 123 [IIO_MOD_I] = "i", 124 [IIO_MOD_Q] = "q", 125 [IIO_MOD_CO2] = "co2", 126 [IIO_MOD_VOC] = "voc", 127 }; 128 129 /* relies on pairs of these shared then separate */ 130 static const char * const iio_chan_info_postfix[] = { 131 [IIO_CHAN_INFO_RAW] = "raw", 132 [IIO_CHAN_INFO_PROCESSED] = "input", 133 [IIO_CHAN_INFO_SCALE] = "scale", 134 [IIO_CHAN_INFO_OFFSET] = "offset", 135 [IIO_CHAN_INFO_CALIBSCALE] = "calibscale", 136 [IIO_CHAN_INFO_CALIBBIAS] = "calibbias", 137 [IIO_CHAN_INFO_PEAK] = "peak_raw", 138 [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale", 139 [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw", 140 [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw", 141 [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY] 142 = "filter_low_pass_3db_frequency", 143 [IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY] 144 = "filter_high_pass_3db_frequency", 145 [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency", 146 [IIO_CHAN_INFO_FREQUENCY] = "frequency", 147 [IIO_CHAN_INFO_PHASE] = "phase", 148 [IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain", 149 [IIO_CHAN_INFO_HYSTERESIS] = "hysteresis", 150 [IIO_CHAN_INFO_INT_TIME] = "integration_time", 151 [IIO_CHAN_INFO_ENABLE] = "en", 152 [IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight", 153 [IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight", 154 [IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count", 155 [IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time", 156 [IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity", 157 [IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio", 158 }; 159 160 /** 161 * iio_find_channel_from_si() - get channel from its scan index 162 * @indio_dev: device 163 * @si: scan index to match 164 */ 165 const struct iio_chan_spec 166 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si) 167 { 168 int i; 169 170 for (i = 0; i < indio_dev->num_channels; i++) 171 if (indio_dev->channels[i].scan_index == si) 172 return &indio_dev->channels[i]; 173 return NULL; 174 } 175 176 /* This turns up an awful lot */ 177 ssize_t iio_read_const_attr(struct device *dev, 178 struct device_attribute *attr, 179 char *buf) 180 { 181 return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string); 182 } 183 EXPORT_SYMBOL(iio_read_const_attr); 184 185 static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) 186 { 187 int ret; 188 const struct iio_event_interface *ev_int = indio_dev->event_interface; 189 190 ret = mutex_lock_interruptible(&indio_dev->mlock); 191 if (ret) 192 return ret; 193 if ((ev_int && iio_event_enabled(ev_int)) || 194 iio_buffer_enabled(indio_dev)) { 195 mutex_unlock(&indio_dev->mlock); 196 return -EBUSY; 197 } 198 indio_dev->clock_id = clock_id; 199 mutex_unlock(&indio_dev->mlock); 200 201 return 0; 202 } 203 204 /** 205 * iio_get_time_ns() - utility function to get a time stamp for events etc 206 * @indio_dev: device 207 */ 208 s64 iio_get_time_ns(const struct iio_dev *indio_dev) 209 { 210 struct timespec tp; 211 212 switch (iio_device_get_clock(indio_dev)) { 213 case CLOCK_REALTIME: 214 ktime_get_real_ts(&tp); 215 break; 216 case CLOCK_MONOTONIC: 217 ktime_get_ts(&tp); 218 break; 219 case CLOCK_MONOTONIC_RAW: 220 getrawmonotonic(&tp); 221 break; 222 case CLOCK_REALTIME_COARSE: 223 tp = current_kernel_time(); 224 break; 225 case CLOCK_MONOTONIC_COARSE: 226 tp = get_monotonic_coarse(); 227 break; 228 case CLOCK_BOOTTIME: 229 get_monotonic_boottime(&tp); 230 break; 231 case 
/**
 * iio_get_time_res() - utility function to get time stamp clock resolution in
 *			nano seconds.
 * @indio_dev: device
 */
unsigned int iio_get_time_res(const struct iio_dev *indio_dev)
{
	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		return hrtimer_resolution;
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
		return LOW_RES_NSEC;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_res);

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		pr_err("could not register bus type\n");
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		goto error_unregister_bus_type;
	}

	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}

#if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
				    size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	char buf[20];
	unsigned val = 0;
	ssize_t len;
	int ret;

	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  indio_dev->cached_reg_addr,
						  0, &val);
	if (ret)
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);

	len = snprintf(buf, sizeof(buf), "0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}

static ssize_t iio_debugfs_write_reg(struct file *file,
		const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	unsigned reg, val;
	char buf[80];
	int ret;

	count = min_t(size_t, count, (sizeof(buf)-1));
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		indio_dev->cached_reg_addr = reg;
		break;
	case 2:
		indio_dev->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};
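/*
 * Userspace usage sketch for the hooks above (illustrative, assuming debugfs
 * is mounted at /sys/kernel/debug and the device is iio:device0):
 *
 *	# select register 0x10 and read it back
 *	echo 0x10 > /sys/kernel/debug/iio/iio:device0/direct_reg_access
 *	cat /sys/kernel/debug/iio/iio:device0/direct_reg_access
 *
 *	# write 0xff to register 0x10
 *	echo "0x10 0xff" > /sys/kernel/debug/iio/iio:device0/direct_reg_access
 *
 * A single value caches the register address for later reads; two values
 * perform a write through the driver's debugfs_reg_access() callback.
 */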
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	debugfs_remove_recursive(indio_dev->debugfs_dentry);
}

static int iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct dentry *d;

	if (indio_dev->info->debugfs_reg_access == NULL)
		return 0;

	if (!iio_debugfs_dentry)
		return 0;

	indio_dev->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);
	if (indio_dev->debugfs_dentry == NULL) {
		dev_warn(indio_dev->dev.parent,
			 "Failed to create debugfs directory\n");
		return -EFAULT;
	}

	d = debugfs_create_file("direct_reg_access", 0644,
				indio_dev->debugfs_dentry,
				indio_dev, &iio_debugfs_reg_fops);
	if (!d) {
		iio_device_unregister_debugfs(indio_dev);
		return -ENOMEM;
	}

	return 0;
}
#else
static int iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	return 0;
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */

static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}

static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->write(indio_dev, ext_info->private,
			       this_attr->c, buf, len);
}

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	size_t len = 0;

	if (!e->num_items)
		return 0;

	for (i = 0; i < e->num_items; ++i)
		len += scnprintf(buf + len, PAGE_SIZE - len, "%s ", e->items[i]);

	/* replace last space with a newline */
	buf[len - 1] = '\n';

	return len;
}
EXPORT_SYMBOL_GPL(iio_enum_available_read);

ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int i;

	if (!e->get)
		return -EINVAL;

	i = e->get(indio_dev, chan);
	if (i < 0)
		return i;
	else if (i >= e->num_items)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s\n", e->items[i]);
}
EXPORT_SYMBOL_GPL(iio_enum_read);

ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	int ret;

	if (!e->set)
		return -EINVAL;

	for (i = 0; i < e->num_items; i++) {
		if (sysfs_streq(buf, e->items[i]))
			break;
	}

	if (i == e->num_items)
		return -EINVAL;

	ret = e->set(indio_dev, chan, i);
	return ret ? ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);
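/*
 * Driver-side sketch of how the iio_enum helpers above are wired up through
 * ext_info (illustrative only; my_modes, my_get_mode() and my_set_mode() are
 * hypothetical driver symbols):
 *
 *	static const char * const my_modes[] = { "low_power", "normal" };
 *
 *	static const struct iio_enum my_mode_enum = {
 *		.items = my_modes,
 *		.num_items = ARRAY_SIZE(my_modes),
 *		.get = my_get_mode,
 *		.set = my_set_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info my_ext_info[] = {
 *		{
 *			.name = "mode",
 *			.shared = IIO_SEPARATE,
 *			.read = iio_enum_read,
 *			.write = iio_enum_write,
 *			.private = (uintptr_t)&my_mode_enum,
 *		},
 *		{
 *			.name = "mode_available",
 *			.shared = IIO_SHARED_BY_TYPE,
 *			.read = iio_enum_available_read,
 *			.private = (uintptr_t)&my_mode_enum,
 *		},
 *		{ },
 *	};
 *
 * Drivers usually use the IIO_ENUM()/IIO_ENUM_AVAILABLE() macros from
 * <linux/iio/sysfs.h>, which expand to entries like the above.
 */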
static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};

static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
{
	*matrix = iio_mount_idmatrix;
	dev_info(dev, "mounting matrix not found: using identity...\n");
	return 0;
}

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
					      priv)(indio_dev, chan);

	if (IS_ERR(mtx))
		return PTR_ERR(mtx);

	if (!mtx)
		mtx = &iio_mount_idmatrix;

	return snprintf(buf, PAGE_SIZE, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
			mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
			mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
			mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
}
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);

/**
 * of_iio_read_mount_matrix() - retrieve iio device mounting matrix from
 *				device-tree "mount-matrix" property
 * @dev:	device the mounting matrix property is assigned to
 * @propname:	device specific mounting matrix property name
 * @matrix:	where to store retrieved matrix
 *
 * If device is assigned no mounting matrix property, a default 3x3 identity
 * matrix will be filled in.
 *
 * Return: 0 if success, or a negative error code on failure.
 */
#ifdef CONFIG_OF
int of_iio_read_mount_matrix(const struct device *dev,
			     const char *propname,
			     struct iio_mount_matrix *matrix)
{
	if (dev->of_node) {
		int err = of_property_read_string_array(dev->of_node,
				propname, matrix->rotation,
				ARRAY_SIZE(iio_mount_idmatrix.rotation));

		if (err == ARRAY_SIZE(iio_mount_idmatrix.rotation))
			return 0;

		if (err >= 0)
			/* Invalid number of matrix entries. */
			return -EINVAL;

		if (err != -EINVAL)
			/* Invalid matrix declaration format. */
			return err;
	}

	/* Matrix was not declared at all: fallback to identity. */
	return iio_setup_mount_idmatrix(dev, matrix);
}
#else
int of_iio_read_mount_matrix(const struct device *dev,
			     const char *propname,
			     struct iio_mount_matrix *matrix)
{
	return iio_setup_mount_idmatrix(dev, matrix);
}
#endif
EXPORT_SYMBOL(of_iio_read_mount_matrix);
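/*
 * Typical driver usage sketch (illustrative; st->orientation is a
 * hypothetical driver field of type struct iio_mount_matrix): the matrix is
 * read once at probe time and later exposed through an ext_info entry backed
 * by iio_show_mount_matrix().
 *
 *	ret = of_iio_read_mount_matrix(dev, "mount-matrix", &st->orientation);
 *	if (ret)
 *		return ret;
 */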
" dB" : ""); 601 case IIO_VAL_INT_PLUS_NANO: 602 if (vals[1] < 0) 603 return snprintf(buf, len, "-%d.%09u", abs(vals[0]), 604 -vals[1]); 605 else 606 return snprintf(buf, len, "%d.%09u", vals[0], vals[1]); 607 case IIO_VAL_FRACTIONAL: 608 tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]); 609 tmp1 = vals[1]; 610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1); 611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); 612 case IIO_VAL_FRACTIONAL_LOG2: 613 tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]); 614 tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1); 615 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); 616 case IIO_VAL_INT_MULTIPLE: 617 { 618 int i; 619 int l = 0; 620 621 for (i = 0; i < size; ++i) { 622 l += snprintf(&buf[l], len - l, "%d ", vals[i]); 623 if (l >= len) 624 break; 625 } 626 return l; 627 } 628 default: 629 return 0; 630 } 631 } 632 633 /** 634 * iio_format_value() - Formats a IIO value into its string representation 635 * @buf: The buffer to which the formatted value gets written 636 * which is assumed to be big enough (i.e. PAGE_SIZE). 637 * @type: One of the IIO_VAL_... constants. This decides how the val 638 * and val2 parameters are formatted. 639 * @size: Number of IIO value entries contained in vals 640 * @vals: Pointer to the values, exact meaning depends on the 641 * type parameter. 642 * 643 * Return: 0 by default, a negative number on failure or the 644 * total number of characters written for a type that belongs 645 * to the IIO_VAL_... constant. 646 */ 647 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) 648 { 649 ssize_t len; 650 651 len = __iio_format_value(buf, PAGE_SIZE, type, size, vals); 652 if (len >= PAGE_SIZE - 1) 653 return -EFBIG; 654 655 return len + sprintf(buf + len, "\n"); 656 } 657 EXPORT_SYMBOL_GPL(iio_format_value); 658 659 static ssize_t iio_read_channel_info(struct device *dev, 660 struct device_attribute *attr, 661 char *buf) 662 { 663 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 664 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 665 int vals[INDIO_MAX_RAW_ELEMENTS]; 666 int ret; 667 int val_len = 2; 668 669 if (indio_dev->info->read_raw_multi) 670 ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c, 671 INDIO_MAX_RAW_ELEMENTS, 672 vals, &val_len, 673 this_attr->address); 674 else 675 ret = indio_dev->info->read_raw(indio_dev, this_attr->c, 676 &vals[0], &vals[1], this_attr->address); 677 678 if (ret < 0) 679 return ret; 680 681 return iio_format_value(buf, ret, val_len, vals); 682 } 683 684 static ssize_t iio_format_avail_list(char *buf, const int *vals, 685 int type, int length) 686 { 687 int i; 688 ssize_t len = 0; 689 690 switch (type) { 691 case IIO_VAL_INT: 692 for (i = 0; i < length; i++) { 693 len += __iio_format_value(buf + len, PAGE_SIZE - len, 694 type, 1, &vals[i]); 695 if (len >= PAGE_SIZE) 696 return -EFBIG; 697 if (i < length - 1) 698 len += snprintf(buf + len, PAGE_SIZE - len, 699 " "); 700 else 701 len += snprintf(buf + len, PAGE_SIZE - len, 702 "\n"); 703 if (len >= PAGE_SIZE) 704 return -EFBIG; 705 } 706 break; 707 default: 708 for (i = 0; i < length / 2; i++) { 709 len += __iio_format_value(buf + len, PAGE_SIZE - len, 710 type, 2, &vals[i * 2]); 711 if (len >= PAGE_SIZE) 712 return -EFBIG; 713 if (i < length / 2 - 1) 714 len += snprintf(buf + len, PAGE_SIZE - len, 715 " "); 716 else 717 len += snprintf(buf + len, PAGE_SIZE - len, 718 "\n"); 719 if (len >= PAGE_SIZE) 720 return -EFBIG; 721 } 722 } 723 724 return len; 725 } 726 727 static ssize_t 
static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
{
	int i;
	ssize_t len;

	len = snprintf(buf, PAGE_SIZE, "[");
	switch (type) {
	case IIO_VAL_INT:
		for (i = 0; i < 3; i++) {
			len += __iio_format_value(buf + len, PAGE_SIZE - len,
						  type, 1, &vals[i]);
			if (len >= PAGE_SIZE)
				return -EFBIG;
			if (i < 2)
				len += snprintf(buf + len, PAGE_SIZE - len,
						" ");
			else
				len += snprintf(buf + len, PAGE_SIZE - len,
						"]\n");
			if (len >= PAGE_SIZE)
				return -EFBIG;
		}
		break;
	default:
		for (i = 0; i < 3; i++) {
			len += __iio_format_value(buf + len, PAGE_SIZE - len,
						  type, 2, &vals[i * 2]);
			if (len >= PAGE_SIZE)
				return -EFBIG;
			if (i < 2)
				len += snprintf(buf + len, PAGE_SIZE - len,
						" ");
			else
				len += snprintf(buf + len, PAGE_SIZE - len,
						"]\n");
			if (len >= PAGE_SIZE)
				return -EFBIG;
		}
	}

	return len;
}

static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const int *vals;
	int ret;
	int length;
	int type;

	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
					  &vals, &type, &length,
					  this_attr->address);

	if (ret < 0)
		return ret;
	switch (ret) {
	case IIO_AVAIL_LIST:
		return iio_format_avail_list(buf, vals, type, length);
	case IIO_AVAIL_RANGE:
		return iio_format_avail_range(buf, vals, type);
	default:
		return -EINVAL;
	}
}

/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			if (*(str + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (*str == '.' && integer_part) {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);

static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		default:
			return -EINVAL;
		}

	ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
	if (ret)
		return ret;

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}

static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;
	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			name = kasprintf(GFP_KERNEL,
					 "%s_%s%d-%s%d_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 chan->channel,
					 iio_chan_type_name_spec[chan->type],
					 chan->channel2,
					 full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;

		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 chan->channel,
						 full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}

error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
	return ret;
}

static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
{
	int i, ret, attrcount = 0;

	for_each_set_bit(i, infomask, sizeof(infomask)*8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     &indio_dev->channel_attr_list);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}
static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
{
	int i, ret, attrcount = 0;
	char *avail_postfix;

	for_each_set_bit(i, infomask, sizeof(infomask) * 8) {
		avail_postfix = kasprintf(GFP_KERNEL,
					  "%s_available",
					  iio_chan_info_postfix[i]);
		if (!avail_postfix)
			return -ENOMEM;

		ret = __iio_add_chan_devattr(avail_postfix,
					     chan,
					     &iio_read_channel_info_avail,
					     NULL,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     &indio_dev->channel_attr_list);
		kfree(avail_postfix);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SEPARATE,
						  &chan->info_mask_separate_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_TYPE,
						  &chan->info_mask_shared_by_type_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_DIR,
						  &chan->info_mask_shared_by_dir_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_ALL,
						  &chan->info_mask_shared_by_all_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	if (chan->ext_info) {
		unsigned int i = 0;
		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
						     chan,
						     ext_info->read ?
							&iio_read_channel_ext_info : NULL,
						     ext_info->write ?
							&iio_write_channel_ext_info : NULL,
						     i,
						     ext_info->shared,
						     &indio_dev->dev,
						     &indio_dev->channel_attr_list);
			i++;
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}
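/*
 * Example of the sysfs names generated by the code above (illustrative
 * channel spec, not taken from a real driver):
 *
 *	static const struct iio_chan_spec my_chan = {
 *		.type = IIO_VOLTAGE,
 *		.indexed = 1,
 *		.channel = 0,
 *		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 *		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 *		.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
 *	};
 *
 * results in the attributes "in_voltage0_raw", "in_voltage_scale" and
 * "in_voltage_scale_available", the last one served by the driver's
 * read_avail() callback.
 */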
/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list.
 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, attr_list, l) {
		kfree(p->dev_attr.attr.name);
		list_del(&p->l);
		kfree(p);
	}
}

static ssize_t iio_show_dev_name(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%s\n", indio_dev->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);

static ssize_t iio_show_timestamp_clock(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	const clockid_t clk = iio_device_get_clock(indio_dev);
	const char *name;
	ssize_t sz;

	switch (clk) {
	case CLOCK_REALTIME:
		name = "realtime\n";
		sz = sizeof("realtime\n");
		break;
	case CLOCK_MONOTONIC:
		name = "monotonic\n";
		sz = sizeof("monotonic\n");
		break;
	case CLOCK_MONOTONIC_RAW:
		name = "monotonic_raw\n";
		sz = sizeof("monotonic_raw\n");
		break;
	case CLOCK_REALTIME_COARSE:
		name = "realtime_coarse\n";
		sz = sizeof("realtime_coarse\n");
		break;
	case CLOCK_MONOTONIC_COARSE:
		name = "monotonic_coarse\n";
		sz = sizeof("monotonic_coarse\n");
		break;
	case CLOCK_BOOTTIME:
		name = "boottime\n";
		sz = sizeof("boottime\n");
		break;
	case CLOCK_TAI:
		name = "tai\n";
		sz = sizeof("tai\n");
		break;
	default:
		BUG();
	}

	memcpy(buf, name, sz);
	return sz;
}

static ssize_t iio_store_timestamp_clock(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	clockid_t clk;
	int ret;

	if (sysfs_streq(buf, "realtime"))
		clk = CLOCK_REALTIME;
	else if (sysfs_streq(buf, "monotonic"))
		clk = CLOCK_MONOTONIC;
	else if (sysfs_streq(buf, "monotonic_raw"))
		clk = CLOCK_MONOTONIC_RAW;
	else if (sysfs_streq(buf, "realtime_coarse"))
		clk = CLOCK_REALTIME_COARSE;
	else if (sysfs_streq(buf, "monotonic_coarse"))
		clk = CLOCK_MONOTONIC_COARSE;
	else if (sysfs_streq(buf, "boottime"))
		clk = CLOCK_BOOTTIME;
	else if (sysfs_streq(buf, "tai"))
		clk = CLOCK_TAI;
	else
		return -EINVAL;

	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
	if (ret)
		return ret;

	return len;
}

static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR,
		   iio_show_timestamp_clock, iio_store_timestamp_clock);

static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (indio_dev->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (clk)
		attrcount++;

	indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
						   sizeof(indio_dev->chan_attr_group.attrs[0]),
						   GFP_KERNEL);
	if (indio_dev->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (indio_dev->info->attrs)
		memcpy(indio_dev->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(indio_dev->chan_attr_group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &indio_dev->channel_attr_list, l)
		indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
	if (clk)
		indio_dev->chan_attr_group.attrs[attrn++] = clk;

	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->chan_attr_group;

	return 0;

error_clear_attrs:
	iio_free_chan_devattr_list(&indio_dev->channel_attr_list);

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
	iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
	kfree(indio_dev->chan_attr_group.attrs);
	indio_dev->chan_attr_group.attrs = NULL;
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);

	if (indio_dev->modes & (INDIO_BUFFER_TRIGGERED | INDIO_EVENT_TRIGGERED))
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);

	iio_buffer_put(indio_dev->buffer);

	ida_simple_remove(&iio_ida, indio_dev->id);
	kfree(indio_dev);
}

struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

/**
 * iio_device_alloc() - allocate an iio_dev from a driver
 * @sizeof_priv:	Space to allocate for private structure.
 **/
struct iio_dev *iio_device_alloc(int sizeof_priv)
{
	struct iio_dev *dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct ? */
	alloc_size += IIO_ALIGN - 1;

	dev = kzalloc(alloc_size, GFP_KERNEL);

	if (dev) {
		dev->dev.groups = dev->groups;
		dev->dev.type = &iio_device_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);
		mutex_init(&dev->info_exist_lock);
		INIT_LIST_HEAD(&dev->channel_attr_list);

		dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
		if (dev->id < 0) {
			/* cannot use a dev_err as the name isn't available */
			pr_err("failed to get device id\n");
			kfree(dev);
			return NULL;
		}
		dev_set_name(&dev->dev, "iio:device%d", dev->id);
		INIT_LIST_HEAD(&dev->buffer_list);
	}

	return dev;
}
EXPORT_SYMBOL(iio_device_alloc);

/**
 * iio_device_free() - free an iio_dev from a driver
 * @dev:	the iio_dev associated with the device
 **/
void iio_device_free(struct iio_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);

static void devm_iio_device_release(struct device *dev, void *res)
{
	iio_device_free(*(struct iio_dev **)res);
}

int devm_iio_device_match(struct device *dev, void *res, void *data)
{
	struct iio_dev **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}
	return *r == data;
}
EXPORT_SYMBOL_GPL(devm_iio_device_match);

/**
 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
 * @dev:		Device to allocate iio_dev for
 * @sizeof_priv:	Space to allocate for private structure.
 *
 * Managed iio_device_alloc. iio_dev allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_dev allocated with this function needs to be freed separately,
 * devm_iio_device_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
{
	struct iio_dev **ptr, *iio_dev;

	ptr = devres_alloc(devm_iio_device_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	iio_dev = iio_device_alloc(sizeof_priv);
	if (iio_dev) {
		*ptr = iio_dev;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return iio_dev;
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
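/*
 * Probe-time usage sketch for the managed allocator above (illustrative;
 * struct my_state and the other my_* symbols are hypothetical):
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct my_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		indio_dev->name = "my_adc";
 *		indio_dev->info = &my_info;
 *		indio_dev->channels = my_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(my_channels);
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */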
/**
 * devm_iio_device_free - Resource-managed iio_device_free()
 * @dev:	Device this iio_dev belongs to
 * @iio_dev:	the iio_dev associated with the device
 *
 * Free iio_dev allocated with devm_iio_device_alloc().
 */
void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
{
	int rc;

	rc = devres_release(dev, devm_iio_device_release,
			    devm_iio_device_match, iio_dev);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_device_free);

/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 * @inode:	Inode structure for identifying the device in the file system
 * @filp:	File structure for iio device used to keep and later access
 *		private data
 *
 * Return: 0 on success or -EBUSY if the device is already opened
 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						 struct iio_dev, chrdev);

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
		return -EBUSY;

	iio_device_get(indio_dev);

	filp->private_data = indio_dev;

	return 0;
}

/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 * @inode:	Inode structure pointer for the char device
 * @filp:	File structure pointer for the char device
 *
 * Return: 0 for successful release
 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev *indio_dev = container_of(inode->i_cdev,
						 struct iio_dev, chrdev);
	clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
	iio_device_put(indio_dev);

	return 0;
}

/* Somewhat of a cross file organization violation - ioctls here are actually
 * event related */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev *indio_dev = filp->private_data;
	int __user *ip = (int __user *)arg;
	int fd;

	if (!indio_dev->info)
		return -ENODEV;

	if (cmd == IIO_GET_EVENT_FD_IOCTL) {
		fd = iio_event_getfd(indio_dev);
		if (fd < 0)
			return fd;
		if (copy_to_user(ip, &fd, sizeof(fd)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static const struct file_operations iio_buffer_fileops = {
	.read = iio_buffer_read_first_n_outer_addr,
	.release = iio_chrdev_release,
	.open = iio_chrdev_open,
	.poll = iio_buffer_poll_addr,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = iio_ioctl,
};

static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
	int i, j;
	const struct iio_chan_spec *channels = indio_dev->channels;

	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
		return 0;

	for (i = 0; i < indio_dev->num_channels - 1; i++) {
		if (channels[i].scan_index < 0)
			continue;
		for (j = i + 1; j < indio_dev->num_channels; j++)
			if (channels[i].scan_index == channels[j].scan_index) {
				dev_err(&indio_dev->dev,
					"Duplicate scan index %d\n",
					channels[i].scan_index);
				return -EINVAL;
			}
	}

	return 0;
}

static const struct iio_buffer_setup_ops noop_ring_setup_ops;

/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:	Device structure filled by the device driver
 **/
int iio_device_register(struct iio_dev *indio_dev)
{
	int ret;

	/* If the calling driver did not initialize of_node, do it here */
	if (!indio_dev->dev.of_node && indio_dev->dev.parent)
		indio_dev->dev.of_node = indio_dev->dev.parent->of_node;

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	/* configure elements for the chrdev */
	indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);

	ret = iio_device_register_debugfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register debugfs interfaces\n");
		return ret;
	}

	ret = iio_buffer_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & (INDIO_BUFFER_TRIGGERED | INDIO_EVENT_TRIGGERED))
		iio_device_register_trigger_consumer(indio_dev);

	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
	    indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
	indio_dev->chrdev.owner = indio_dev->info->driver_module;

	ret = cdev_device_add(&indio_dev->chrdev, &indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;

	return 0;

error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffer_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(iio_device_register);

/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev:	Device structure representing the device.
 **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->info_exist_lock);

	cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);

	iio_device_unregister_debugfs(indio_dev);

	iio_disable_all_buffers(indio_dev);

	indio_dev->info = NULL;

	iio_device_wakeup_eventset(indio_dev);
	iio_buffer_wakeup_poll(indio_dev);

	mutex_unlock(&indio_dev->info_exist_lock);

	iio_buffer_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);

static void devm_iio_device_unreg(struct device *dev, void *res)
{
	iio_device_unregister(*(struct iio_dev **)res);
}

/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev:	Device to allocate iio_dev for
 * @indio_dev:	Device structure filled by the device driver
 *
 * Managed iio_device_register. The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information.
 *
 * If an iio_dev registered with this function needs to be unregistered
 * separately, devm_iio_device_unregister() must be used.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_dev **ptr;
	int ret;

	ptr = devres_alloc(devm_iio_device_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = indio_dev;
	ret = iio_device_register(indio_dev);
	if (!ret)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_iio_device_register);

/**
 * devm_iio_device_unregister - Resource-managed iio_device_unregister()
 * @dev:	Device this iio_dev belongs to
 * @indio_dev:	the iio_dev associated with the device
 *
 * Unregister iio_dev registered with devm_iio_device_register().
 */
void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
{
	int rc;

	rc = devres_release(dev, devm_iio_device_unreg,
			    devm_iio_device_match, indio_dev);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_device_unregister);

/**
 * iio_device_claim_direct_mode - Keep device in direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * If the device is in direct mode it is guaranteed to stay
 * that way until iio_device_release_direct_mode() is called.
 *
 * Use with iio_device_release_direct_mode()
 *
 * Returns: 0 on success, -EBUSY on failure
 */
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->mlock);

	if (iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);

/**
 * iio_device_release_direct_mode - releases claim on direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Use with iio_device_claim_direct_mode()
 */
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&indio_dev->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
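/*
 * Usage sketch for the direct-mode claim/release pair above (illustrative;
 * my_read_single() is a hypothetical driver helper):
 *
 *	static int my_read_raw(struct iio_dev *indio_dev,
 *			       struct iio_chan_spec const *chan,
 *			       int *val, int *val2, long mask)
 *	{
 *		int ret;
 *
 *		ret = iio_device_claim_direct_mode(indio_dev);
 *		if (ret)
 *			return ret;
 *
 *		ret = my_read_single(indio_dev, chan, val);
 *		iio_device_release_direct_mode(indio_dev);
 *		if (ret)
 *			return ret;
 *
 *		return IIO_VAL_INT;
 *	}
 */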
subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");