// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#define pr_fmt(fmt) "iio-core: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/property.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static struct dentry *iio_debugfs_dentry;

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY] = "gravity",
	[IIO_POSITIONRELATIVE] = "positionrelative",
	[IIO_PHASE] = "phase",
	[IIO_MASSCONCENTRATION] = "massconcentration",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_LIGHT_DUV] = "duv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
	[IIO_MOD_RUNNING] = "running",
	[IIO_MOD_JOGGING] = "jogging",
	[IIO_MOD_WALKING] = "walking",
	[IIO_MOD_STILL] = "still",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
	[IIO_MOD_I] = "i",
	[IIO_MOD_Q] = "q",
	[IIO_MOD_CO2] = "co2",
	[IIO_MOD_VOC] = "voc",
	[IIO_MOD_PM1] = "pm1",
	[IIO_MOD_PM2P5] = "pm2p5",
	[IIO_MOD_PM4] = "pm4",
	[IIO_MOD_PM10] = "pm10",
	[IIO_MOD_ETHANOL] = "ethanol",
	[IIO_MOD_H2] = "h2",
	[IIO_MOD_O2] = "o2",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
	= "filter_high_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
	[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
	[IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
	[IIO_CHAN_INFO_INT_TIME] = "integration_time",
	[IIO_CHAN_INFO_ENABLE] = "en",
	[IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
	[IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
	[IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
	[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
	[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
	[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
	[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
	[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
};

/**
 * iio_device_id() - query the unique ID for the device
 * @indio_dev: Device structure whose ID is being queried
 *
 * The IIO device ID is a unique index used for example for the naming
 * of the character device /dev/iio\:device[ID]
 */
int iio_device_id(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->id;
}
EXPORT_SYMBOL_GPL(iio_device_id);
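
/*
 * Illustrative sketch (not part of the core): a driver could log the index it
 * was assigned, e.g. to correlate kernel messages with /dev/iio:deviceX. The
 * "foo" driver below is hypothetical.
 *
 *	dev_dbg(indio_dev->dev.parent, "foo registered as iio:device%d\n",
 *		iio_device_id(indio_dev));
 */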

/**
 * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
 * @array: array of strings
 * @n: number of strings in the array
 * @str: string to match with
 *
 * Returns index of @str in the @array or -EINVAL, similar to match_string().
 * Uses sysfs_streq instead of strcmp for matching.
 *
 * This routine will look for a string in an array of strings.
 * The search will continue until the element is found or the n-th element
 * is reached, regardless of any NULL elements in the array.
 */
static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
					    const char *str)
{
	const char *item;
	int index;

	for (index = 0; index < n; index++) {
		item = array[index];
		if (!item)
			continue;
		if (sysfs_streq(item, str))
			return index;
	}

	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)
/*
 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
 */
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
#endif

/**
 * iio_find_channel_from_si() - get channel from its scan index
 * @indio_dev: device
 * @si: scan index to match
 */
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
	int i;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].scan_index == si)
			return &indio_dev->channels[i];
	return NULL;
}

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);

/**
 * iio_device_set_clock() - Set current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 * @clock_id: timestamping clock posix identifier to set.
 */
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
	int ret;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	ret = mutex_lock_interruptible(&indio_dev->mlock);
	if (ret)
		return ret;
	if ((ev_int && iio_event_enabled(ev_int)) ||
	    iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	iio_dev_opaque->clock_id = clock_id;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_device_set_clock);

/**
 * iio_device_get_clock() - Retrieve current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 */
clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->clock_id;
}
EXPORT_SYMBOL(iio_device_get_clock);

/**
 * iio_get_time_ns() - utility function to get a time stamp for events etc
 * @indio_dev: device
 */
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
	struct timespec64 tp;

	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
		return ktime_get_real_ns();
	case CLOCK_MONOTONIC:
		return ktime_get_ns();
	case CLOCK_MONOTONIC_RAW:
		return ktime_get_raw_ns();
	case CLOCK_REALTIME_COARSE:
		return ktime_to_ns(ktime_get_coarse_real());
	case CLOCK_MONOTONIC_COARSE:
		ktime_get_coarse_ts64(&tp);
		return timespec64_to_ns(&tp);
	case CLOCK_BOOTTIME:
		return ktime_get_boottime_ns();
	case CLOCK_TAI:
		return ktime_get_clocktai_ns();
	default:
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_ns);
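
/*
 * Illustrative sketch (not part of the core): iio_get_time_ns() honours the
 * clock selected via current_timestamp_clock, so drivers are expected to use
 * it rather than calling ktime_get_*() directly when timestamping samples or
 * events. The trigger handler below assumes a hypothetical "foo" driver whose
 * scan buffer has room for an aligned s64 timestamp.
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		foo_read_scan(st);	// hypothetical hardware read
 *		iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
 *						   iio_get_time_ns(indio_dev));
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */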

/**
 * iio_get_time_res() - utility function to get time stamp clock resolution in
 *                      nano seconds.
 * @indio_dev: device
 */
unsigned int iio_get_time_res(const struct iio_dev *indio_dev)
{
	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		return hrtimer_resolution;
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
		return LOW_RES_NSEC;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_res);

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		pr_err("could not register bus type\n");
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		goto error_unregister_bus_type;
	}

	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}

#if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
				    size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned val = 0;
	int ret;

	if (*ppos > 0)
		return simple_read_from_buffer(userbuf, count, ppos,
					       iio_dev_opaque->read_buf,
					       iio_dev_opaque->read_buf_len);

	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  iio_dev_opaque->cached_reg_addr,
						  0, &val);
	if (ret) {
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
		return ret;
	}

	iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
						sizeof(iio_dev_opaque->read_buf),
						"0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos,
				       iio_dev_opaque->read_buf,
				       iio_dev_opaque->read_buf_len);
}

static ssize_t iio_debugfs_write_reg(struct file *file,
		     const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned reg, val;
	char buf[80];
	int ret;

	count = min_t(size_t, count, (sizeof(buf)-1));
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		iio_dev_opaque->cached_reg_addr = reg;
		break;
	case 2:
		iio_dev_opaque->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};
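
/*
 * Illustrative sketch (not part of the core): direct_reg_access is only
 * created when a driver provides the debugfs_reg_access() callback. A
 * regmap-based driver (hypothetical "foo") might wire it up as below; the
 * core passes a NULL @readval to request a write.
 *
 *	static int foo_reg_access(struct iio_dev *indio_dev, unsigned int reg,
 *				  unsigned int writeval, unsigned int *readval)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		if (readval)
 *			return regmap_read(st->regmap, reg, readval);
 *		return regmap_write(st->regmap, reg, writeval);
 *	}
 */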

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}

static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (indio_dev->info->debugfs_reg_access == NULL)
		return;

	if (!iio_debugfs_dentry)
		return;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_dev_opaque->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);

	debugfs_create_file("direct_reg_access", 0644,
			    iio_dev_opaque->debugfs_dentry, indio_dev,
			    &iio_debugfs_reg_fops);
}
#else
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */

static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}

static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->write(indio_dev, ext_info->private,
			       this_attr->c, buf, len);
}

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	size_t len = 0;

	if (!e->num_items)
		return 0;

	for (i = 0; i < e->num_items; ++i) {
		if (!e->items[i])
			continue;
		len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
	}

	/* replace last space with a newline */
	buf[len - 1] = '\n';

	return len;
}
EXPORT_SYMBOL_GPL(iio_enum_available_read);

ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int i;

	if (!e->get)
		return -EINVAL;

	i = e->get(indio_dev, chan);
	if (i < 0)
		return i;
	else if (i >= e->num_items || !e->items[i])
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", e->items[i]);
}
EXPORT_SYMBOL_GPL(iio_enum_read);

ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int ret;

	if (!e->set)
		return -EINVAL;

	ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf);
	if (ret < 0)
		return ret;

	ret = e->set(indio_dev, chan, ret);
	return ret ? ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);
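
/*
 * Illustrative sketch (not part of the core): the iio_enum_*() helpers are
 * normally wired up through a driver's ext_info table. Everything below is
 * hypothetical; note that a NULL entry in @items simply leaves a gap in the
 * accepted values (see iio_sysfs_match_string_with_gaps() above). Macro
 * arguments follow the IIO_ENUM()/IIO_ENUM_AVAILABLE() definitions of this
 * kernel version.
 *
 *	static const char * const foo_power_modes[] = {
 *		"low_noise", NULL, "low_power",
 *	};
 *
 *	static const struct iio_enum foo_power_mode_enum = {
 *		.items = foo_power_modes,
 *		.num_items = ARRAY_SIZE(foo_power_modes),
 *		.get = foo_get_power_mode,	// int (*)(indio_dev, chan)
 *		.set = foo_set_power_mode,	// int (*)(indio_dev, chan, index)
 *	};
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_ENUM("power_mode", IIO_SHARED_BY_TYPE, &foo_power_mode_enum),
 *		IIO_ENUM_AVAILABLE("power_mode", &foo_power_mode_enum),
 *		{ }
 *	};
 */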

static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};

static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
{
	*matrix = iio_mount_idmatrix;
	dev_info(dev, "mounting matrix not found: using identity...\n");
	return 0;
}

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
					      priv)(indio_dev, chan);

	if (IS_ERR(mtx))
		return PTR_ERR(mtx);

	if (!mtx)
		mtx = &iio_mount_idmatrix;

	return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
			  mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
			  mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
			  mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
}
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);

/**
 * iio_read_mount_matrix() - retrieve iio device mounting matrix from
 *                           device "mount-matrix" property
 * @dev: device the mounting matrix property is assigned to
 * @matrix: where to store retrieved matrix
 *
 * If device is assigned no mounting matrix property, a default 3x3 identity
 * matrix will be filled in.
 *
 * Return: 0 if success, or a negative error code on failure.
 */
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
{
	size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
	int err;

	err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
	if (err == len)
		return 0;

	if (err >= 0)
		/* Invalid number of matrix entries. */
		return -EINVAL;

	if (err != -EINVAL)
		/* Invalid matrix declaration format. */
		return err;

	/* Matrix was not declared at all: fallback to identity. */
	return iio_setup_mount_idmatrix(dev, matrix);
}
EXPORT_SYMBOL(iio_read_mount_matrix);

static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
				  int size, const int *vals)
{
	int tmp0, tmp1;
	s64 tmp2;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return sysfs_emit_at(buf, offset, "%d", vals[0]);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		fallthrough;
	case IIO_VAL_INT_PLUS_MICRO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%06u%s",
					     abs(vals[0]), -vals[1],
					     scale_db ? " dB" : "");
		else
			return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
					     vals[1], scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%09u",
					     abs(vals[0]), -vals[1]);
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
					     vals[1]);
	case IIO_VAL_FRACTIONAL:
		tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
		tmp1 = vals[1];
		tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
		if ((tmp2 < 0) && (tmp0 == 0))
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_FRACTIONAL_LOG2:
		tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
		tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
		if (tmp0 == 0 && tmp2 < 0)
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_INT_MULTIPLE:
	{
		int i;
		int l = 0;

		for (i = 0; i < size; ++i)
			l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
		return l;
	}
	case IIO_VAL_CHAR:
		return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
	default:
		return 0;
	}
}

/**
 * iio_format_value() - Formats a IIO value into its string representation
 * @buf: The buffer to which the formatted value gets written
 *	 which is assumed to be big enough (i.e. PAGE_SIZE).
 * @type: One of the IIO_VAL_* constants. This decides how the val
 *	  and val2 parameters are formatted.
 * @size: Number of IIO value entries contained in vals
 * @vals: Pointer to the values, exact meaning depends on the
 *	  type parameter.
 *
 * Return: 0 by default, a negative number on failure or the
 *	   total number of characters written for a type that belongs
 *	   to the IIO_VAL_* constant.
 */
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
	ssize_t len;

	len = __iio_format_value(buf, 0, type, size, vals);
	if (len >= PAGE_SIZE - 1)
		return -EFBIG;

	return len + sysfs_emit_at(buf, len, "\n");
}
EXPORT_SYMBOL_GPL(iio_format_value);

static ssize_t iio_read_channel_label(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	if (!indio_dev->info->read_label)
		return -EINVAL;

	return indio_dev->info->read_label(indio_dev, this_attr->c, buf);
}

static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (indio_dev->info->read_raw_multi)
		ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
						      INDIO_MAX_RAW_ELEMENTS,
						      vals, &val_len,
						      this_attr->address);
	else
		ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
						&vals[0], &vals[1], this_attr->address);

	if (ret < 0)
		return ret;

	return iio_format_value(buf, ret, val_len, vals);
}

static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
			       const char *prefix, const char *suffix)
{
	ssize_t len;
	int stride;
	int i;

	switch (type) {
	case IIO_VAL_INT:
		stride = 1;
		break;
	default:
		stride = 2;
		break;
	}

	len = sysfs_emit(buf, prefix);

	for (i = 0; i <= length - stride; i += stride) {
		if (i != 0) {
			len += sysfs_emit_at(buf, len, " ");
			if (len >= PAGE_SIZE)
				return -EFBIG;
		}

		len += __iio_format_value(buf, len, type, stride, &vals[i]);
		if (len >= PAGE_SIZE)
			return -EFBIG;
	}

	len += sysfs_emit_at(buf, len, "%s\n", suffix);

	return len;
}

static ssize_t iio_format_avail_list(char *buf, const int *vals,
				     int type, int length)
{

	return iio_format_list(buf, vals, type, length, "", "");
}

static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
{
	return iio_format_list(buf, vals, type, 3, "[", "]");
}

static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const int *vals;
	int ret;
	int length;
	int type;

	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
					  &vals, &type, &length,
					  this_attr->address);

	if (ret < 0)
		return ret;
	switch (ret) {
	case IIO_AVAIL_LIST:
		return iio_format_avail_list(buf, vals, type, length);
	case IIO_AVAIL_RANGE:
		return iio_format_avail_range(buf, vals, type);
	default:
		return -EINVAL;
	}
}
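
/*
 * Illustrative sketch (not part of the core): the value a driver returns from
 * read_raw()/read_avail() selects the formatting path above. A hypothetical
 * "foo" sensor reporting a scale of 0.000598 units per LSB would do:
 *
 *	case IIO_CHAN_INFO_SCALE:
 *		*val = 0;
 *		*val2 = 598;
 *		return IIO_VAL_INT_PLUS_MICRO;
 *
 * iio_read_channel_info() then feeds the result to iio_format_value(), so a
 * read of the scale attribute yields the string "0.000598\n".
 */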

/**
 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 * @scale_db: True if this should parse as dB
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
				 int *integer, int *fract, bool scale_db)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			if (*(str + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof(" dB") - 1;
			continue;
		} else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof("dB") - 1;
			continue;
		} else if (*str == '.' && integer_part) {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}

/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
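
/*
 * Illustrative sketch (not part of the core): @fract_mult is the weight of
 * the first digit after the decimal point, so 100000 corresponds to a
 * six-digit (micro) fraction as used for IIO_VAL_INT_PLUS_MICRO.
 *
 *	int integer, fract;
 *
 *	iio_str_to_fixpoint("1.5", 100000, &integer, &fract);
 *	// -> integer == 1, fract == 500000
 *
 *	iio_str_to_fixpoint("-0.25", 100000, &integer, &fract);
 *	// -> integer == 0, fract == -250000 (sign carried by the fraction)
 */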

static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract = 0;
	bool is_char = false;
	bool scale_db = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO_DB:
			scale_db = true;
			fallthrough;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		case IIO_VAL_CHAR:
			is_char = true;
			break;
		default:
			return -EINVAL;
		}

	if (is_char) {
		char ch;

		if (sscanf(buf, "%c", &ch) != 1)
			return -EINVAL;
		integer = ch;
	} else {
		ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
					    scale_db);
		if (ret)
			return ret;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}

static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;

	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			name = kasprintf(GFP_KERNEL,
					 "%s_%s%d-%s%d_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 chan->channel,
					 iio_chan_type_name_spec[chan->type],
					 chan->channel2,
					 full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;

		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 chan->channel,
						 full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}

error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct iio_buffer *buffer,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	iio_attr->buffer = buffer;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
	return ret;
}

static int iio_device_add_channel_label(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;

	if (!indio_dev->info->read_label)
		return 0;

	ret = __iio_add_chan_devattr("label",
				     chan,
				     &iio_read_channel_label,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     NULL,
				     &iio_dev_opaque->channel_attr_list);
	if (ret < 0)
		return ret;

	return 1;
}

static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;

	for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;
	char *avail_postfix;

	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		avail_postfix = kasprintf(GFP_KERNEL,
					  "%s_available",
					  iio_chan_info_postfix[i]);
		if (!avail_postfix)
			return -ENOMEM;

		ret = __iio_add_chan_devattr(avail_postfix,
					     chan,
					     &iio_read_channel_info_avail,
					     NULL,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		kfree(avail_postfix);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SEPARATE,
						  &chan->info_mask_separate_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_TYPE,
						  &chan->info_mask_shared_by_type_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_DIR,
						  &chan->info_mask_shared_by_dir_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_ALL,
						  &chan->info_mask_shared_by_all_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_channel_label(indio_dev, chan);
	if (ret < 0)
		return ret;
	attrcount += ret;

	if (chan->ext_info) {
		unsigned int i = 0;

		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
						     chan,
						     ext_info->read ?
							&iio_read_channel_ext_info : NULL,
						     ext_info->write ?
							&iio_write_channel_ext_info : NULL,
						     i,
						     ext_info->shared,
						     &indio_dev->dev,
						     NULL,
						     &iio_dev_opaque->channel_attr_list);
			i++;
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}

/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list.
 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, attr_list, l) {
		kfree_const(p->dev_attr.attr.name);
		list_del(&p->l);
		kfree(p);
	}
}

static ssize_t iio_show_dev_name(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);

static ssize_t iio_show_dev_label(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->label);
}

static DEVICE_ATTR(label, S_IRUGO, iio_show_dev_label, NULL);

static ssize_t iio_show_timestamp_clock(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	const clockid_t clk = iio_device_get_clock(indio_dev);
	const char *name;
	ssize_t sz;

	switch (clk) {
	case CLOCK_REALTIME:
		name = "realtime\n";
		sz = sizeof("realtime\n");
		break;
	case CLOCK_MONOTONIC:
		name = "monotonic\n";
		sz = sizeof("monotonic\n");
		break;
	case CLOCK_MONOTONIC_RAW:
		name = "monotonic_raw\n";
		sz = sizeof("monotonic_raw\n");
		break;
	case CLOCK_REALTIME_COARSE:
		name = "realtime_coarse\n";
		sz = sizeof("realtime_coarse\n");
		break;
	case CLOCK_MONOTONIC_COARSE:
		name = "monotonic_coarse\n";
		sz = sizeof("monotonic_coarse\n");
		break;
	case CLOCK_BOOTTIME:
		name = "boottime\n";
		sz = sizeof("boottime\n");
		break;
	case CLOCK_TAI:
		name = "tai\n";
		sz = sizeof("tai\n");
		break;
	default:
		BUG();
	}

	memcpy(buf, name, sz);
	return sz;
}

static ssize_t iio_store_timestamp_clock(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	clockid_t clk;
	int ret;

	if (sysfs_streq(buf, "realtime"))
		clk = CLOCK_REALTIME;
	else if (sysfs_streq(buf, "monotonic"))
		clk = CLOCK_MONOTONIC;
	else if (sysfs_streq(buf, "monotonic_raw"))
		clk = CLOCK_MONOTONIC_RAW;
	else if (sysfs_streq(buf, "realtime_coarse"))
		clk = CLOCK_REALTIME_COARSE;
	else if (sysfs_streq(buf, "monotonic_coarse"))
		clk = CLOCK_MONOTONIC_COARSE;
	else if (sysfs_streq(buf, "boottime"))
		clk = CLOCK_BOOTTIME;
	else if (sysfs_streq(buf, "tai"))
		clk = CLOCK_TAI;
	else
		return -EINVAL;

	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
	if (ret)
		return ret;

	return len;
}

int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
				    const struct attribute_group *group)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct attribute_group **new, **old = iio_dev_opaque->groups;
	unsigned int cnt = iio_dev_opaque->groupcounter;

	new = krealloc(old, sizeof(*new) * (cnt + 2), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new[iio_dev_opaque->groupcounter++] = group;
	new[iio_dev_opaque->groupcounter] = NULL;

	iio_dev_opaque->groups = new;

	return 0;
}

static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR,
		   iio_show_timestamp_clock, iio_store_timestamp_clock);

static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (iio_dev_opaque->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (indio_dev->label)
		attrcount++;
	if (clk)
		attrcount++;

	iio_dev_opaque->chan_attr_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
			GFP_KERNEL);
	if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (indio_dev->info->attrs) {
		memcpy(iio_dev_opaque->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
		       *attrcount_orig);
		iio_dev_opaque->chan_attr_group.is_visible =
			indio_dev->info->attrs->is_visible;
	}
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
	if (indio_dev->label)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
	if (clk)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;

	ret = iio_device_register_sysfs_group(indio_dev,
					      &iio_dev_opaque->chan_attr_group);
	if (ret)
		goto error_clear_attrs;

	return 0;

error_clear_attrs:
	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
	kfree(iio_dev_opaque->chan_attr_group.attrs);
	iio_dev_opaque->chan_attr_group.attrs = NULL;
	kfree(iio_dev_opaque->groups);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);

	iio_device_detach_buffers(indio_dev);

	ida_simple_remove(&iio_ida, iio_dev_opaque->id);
	kfree(iio_dev_opaque);
}

struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

/**
 * iio_device_alloc() - allocate an iio_dev from a driver
 * @parent: Parent device.
 * @sizeof_priv: Space to allocate for private structure.
 **/
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev_opaque *iio_dev_opaque;
	struct iio_dev *indio_dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev_opaque);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}

	iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
	if (!iio_dev_opaque)
		return NULL;

	indio_dev = &iio_dev_opaque->indio_dev;
	indio_dev->priv = (char *)iio_dev_opaque +
		ALIGN(sizeof(struct iio_dev_opaque), IIO_ALIGN);

	indio_dev->dev.parent = parent;
	indio_dev->dev.type = &iio_device_type;
	indio_dev->dev.bus = &iio_bus_type;
	device_initialize(&indio_dev->dev);
	iio_device_set_drvdata(indio_dev, (void *)indio_dev);
	mutex_init(&indio_dev->mlock);
	mutex_init(&iio_dev_opaque->info_exist_lock);
	INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);

	iio_dev_opaque->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
	if (iio_dev_opaque->id < 0) {
		/* cannot use a dev_err as the name isn't available */
		pr_err("failed to get device id\n");
		kfree(iio_dev_opaque);
		return NULL;
	}
	dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id);
	INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
	INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);

	return indio_dev;
}
EXPORT_SYMBOL(iio_device_alloc);

/**
 * iio_device_free() - free an iio_dev from a driver
 * @dev: the iio_dev associated with the device
 **/
void iio_device_free(struct iio_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);

static void devm_iio_device_release(void *iio_dev)
{
	iio_device_free(iio_dev);
}

/**
 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
 * @parent: Device to allocate iio_dev for, and parent for this IIO device
 * @sizeof_priv: Space to allocate for private structure.
 *
 * Managed iio_device_alloc. iio_dev allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev *iio_dev;
	int ret;

	iio_dev = iio_device_alloc(parent, sizeof_priv);
	if (!iio_dev)
		return NULL;

	ret = devm_add_action_or_reset(parent, devm_iio_device_release,
				       iio_dev);
	if (ret)
		return NULL;

	return iio_dev;
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
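
/*
 * Illustrative sketch (not part of the core): the usual probe pattern pairs
 * devm_iio_device_alloc() with devm_iio_device_register() so that both the
 * allocation and the registration are undone automatically on driver detach.
 * The platform driver and "foo" state below are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		// ... initialise st and the hardware ...
 *
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */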

/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 * @inode: Inode structure for identifying the device in the file system
 * @filp: File structure for iio device used to keep and later access
 *	  private data
 *
 * Return: 0 on success or -EBUSY if the device is already opened
 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
	struct iio_dev_buffer_pair *ib;

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
		return -EBUSY;

	iio_device_get(indio_dev);

	ib = kmalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		iio_device_put(indio_dev);
		clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
		return -ENOMEM;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = indio_dev->buffer;

	filp->private_data = ib;

	return 0;
}

/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 * @inode: Inode structure pointer for the char device
 * @filp: File structure pointer for the char device
 *
 * Return: 0 for successful release
 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;

	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
	iio_device_put(indio_dev);

	return 0;
}

void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
				       struct iio_ioctl_handler *h)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
}

void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
{
	list_del(&h->entry);
}

static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_ioctl_handler *h;
	int ret = -ENODEV;

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	/*
	 * The NULL check here is required to prevent crashing when a device
	 * is being removed while userspace would still have open file handles
	 * to try to access this device.
	 */
	if (!indio_dev->info)
		goto out_unlock;

	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
		ret = h->ioctl(indio_dev, filp, cmd, arg);
		if (ret != IIO_IOCTL_UNHANDLED)
			break;
	}

	if (ret == IIO_IOCTL_UNHANDLED)
		ret = -ENODEV;

out_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}

static const struct file_operations iio_buffer_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read_outer_addr,
	.poll = iio_buffer_poll_addr,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static const struct file_operations iio_event_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
	int i, j;
	const struct iio_chan_spec *channels = indio_dev->channels;

	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
		return 0;

	for (i = 0; i < indio_dev->num_channels - 1; i++) {
		if (channels[i].scan_index < 0)
			continue;
		for (j = i + 1; j < indio_dev->num_channels; j++)
			if (channels[i].scan_index == channels[j].scan_index) {
				dev_err(&indio_dev->dev,
					"Duplicate scan index %d\n",
					channels[i].scan_index);
				return -EINVAL;
			}
	}

	return 0;
}

static const struct iio_buffer_setup_ops noop_ring_setup_ops;

int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const char *label;
	int ret;

	if (!indio_dev->info)
		return -EINVAL;

	iio_dev_opaque->driver_module = this_mod;
	/* If the calling driver did not initialize of_node, do it here */
	if (!indio_dev->dev.of_node && indio_dev->dev.parent)
		indio_dev->dev.of_node = indio_dev->dev.parent->of_node;

	label = of_get_property(indio_dev->dev.of_node, "label", NULL);
	if (label)
		indio_dev->label = label;

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	iio_device_register_debugfs(indio_dev);

	ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_register_trigger_consumer(indio_dev);

	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
	    indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	if (iio_dev_opaque->attached_buffers_cnt)
		cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
	else if (iio_dev_opaque->event_interface)
		cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);

	if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
		indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
		iio_dev_opaque->chrdev.owner = this_mod;
	}

	/* assign device groups now; they should be all registered now */
	indio_dev->dev.groups = iio_dev_opaque->groups;

	ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;

	return 0;

error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(__iio_device_register);

/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev: Device structure representing the device.
 **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	iio_device_unregister_debugfs(indio_dev);

	iio_disable_all_buffers(indio_dev);

	indio_dev->info = NULL;

	iio_device_wakeup_eventset(indio_dev);
	iio_buffer_wakeup_poll(indio_dev);

	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);

static void devm_iio_device_unreg(void *indio_dev)
{
	iio_device_unregister(indio_dev);
}

int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod)
{
	int ret;

	ret = __iio_device_register(indio_dev, this_mod);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
}
EXPORT_SYMBOL_GPL(__devm_iio_device_register);

/**
 * iio_device_claim_direct_mode - Keep device in direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * If the device is in direct mode it is guaranteed to stay
 * that way until iio_device_release_direct_mode() is called.
 *
 * Use with iio_device_release_direct_mode()
 *
 * Returns: 0 on success, -EBUSY on failure
 */
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->mlock);

	if (iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);

/**
 * iio_device_release_direct_mode - releases claim on direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Use with iio_device_claim_direct_mode()
 */
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&indio_dev->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");