// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#define pr_fmt(fmt) "iio-core: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/property.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static struct dentry *iio_debugfs_dentry;

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY] = "gravity",
	[IIO_POSITIONRELATIVE] = "positionrelative",
	[IIO_PHASE] = "phase",
	[IIO_MASSCONCENTRATION] = "massconcentration",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_LIGHT_DUV] = "duv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
	[IIO_MOD_RUNNING] = "running",
	[IIO_MOD_JOGGING] = "jogging",
	[IIO_MOD_WALKING] = "walking",
	[IIO_MOD_STILL] = "still",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
	[IIO_MOD_I] = "i",
	[IIO_MOD_Q] = "q",
	[IIO_MOD_CO2] = "co2",
	[IIO_MOD_VOC] = "voc",
	[IIO_MOD_PM1] = "pm1",
	[IIO_MOD_PM2P5] = "pm2p5",
	[IIO_MOD_PM4] = "pm4",
	[IIO_MOD_PM10] = "pm10",
	[IIO_MOD_ETHANOL] = "ethanol",
	[IIO_MOD_H2] = "h2",
	[IIO_MOD_O2] = "o2",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
	= "filter_high_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
	[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
	[IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
	[IIO_CHAN_INFO_INT_TIME] = "integration_time",
	[IIO_CHAN_INFO_ENABLE] = "en",
	[IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
	[IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
	[IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
	[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
	[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
	[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
	[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
	[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
};

/**
 * iio_device_id() - query the unique ID for the device
 * @indio_dev: Device structure whose ID is being queried
 *
 * The IIO device ID is a unique index used for example for the naming
 * of the character device /dev/iio\:device[ID].
 */
int iio_device_id(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->id;
}
EXPORT_SYMBOL_GPL(iio_device_id);

/**
 * iio_buffer_enabled() - helper function to test if the buffer is enabled
 * @indio_dev: IIO device structure for device
 */
bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->currentmode
		& (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
		   INDIO_BUFFER_SOFTWARE);
}
EXPORT_SYMBOL_GPL(iio_buffer_enabled);

/**
 * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
 * @array: array of strings
 * @n: number of strings in the array
 * @str: string to match with
 *
 * Returns index of @str in the @array or -EINVAL, similar to match_string().
 * Uses sysfs_streq instead of strcmp for matching.
 *
 * This routine will look for a string in an array of strings.
 * The search will continue until the element is found or the n-th element
 * is reached, regardless of any NULL elements in the array.
 */
static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
					    const char *str)
{
	const char *item;
	int index;

	for (index = 0; index < n; index++) {
		item = array[index];
		if (!item)
			continue;
		if (sysfs_streq(item, str))
			return index;
	}

	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)
/*
 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
 */
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
#endif

/**
 * iio_find_channel_from_si() - get channel from its scan index
 * @indio_dev: device
 * @si: scan index to match
 */
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
	int i;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].scan_index == si)
			return &indio_dev->channels[i];
	return NULL;
}

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);

/**
 * iio_device_set_clock() - Set current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 * @clock_id: timestamping clock posix identifier to set.
 */
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
	int ret;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	ret = mutex_lock_interruptible(&indio_dev->mlock);
	if (ret)
		return ret;
	if ((ev_int && iio_event_enabled(ev_int)) ||
	    iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	iio_dev_opaque->clock_id = clock_id;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_device_set_clock);

/**
 * iio_device_get_clock() - Retrieve current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 */
clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->clock_id;
}
EXPORT_SYMBOL(iio_device_get_clock);

/**
 * iio_get_time_ns() - utility function to get a time stamp for events etc
 * @indio_dev: device
 */
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
	struct timespec64 tp;

	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
		return ktime_get_real_ns();
	case CLOCK_MONOTONIC:
		return ktime_get_ns();
	case CLOCK_MONOTONIC_RAW:
		return ktime_get_raw_ns();
	case CLOCK_REALTIME_COARSE:
		return ktime_to_ns(ktime_get_coarse_real());
	case CLOCK_MONOTONIC_COARSE:
		ktime_get_coarse_ts64(&tp);
		return timespec64_to_ns(&tp);
	case CLOCK_BOOTTIME:
		return ktime_get_boottime_ns();
	case CLOCK_TAI:
		return ktime_get_clocktai_ns();
	default:
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_ns);

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		pr_err("could not register bus type\n");
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		goto error_unregister_bus_type;
	}

	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}

#if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
				    size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int val = 0;
	int ret;

	if (*ppos > 0)
		return simple_read_from_buffer(userbuf, count, ppos,
					       iio_dev_opaque->read_buf,
					       iio_dev_opaque->read_buf_len);

	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  iio_dev_opaque->cached_reg_addr,
						  0, &val);
	if (ret) {
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
		return ret;
	}

	iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
						sizeof(iio_dev_opaque->read_buf),
						"0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos,
				       iio_dev_opaque->read_buf,
				       iio_dev_opaque->read_buf_len);
}

static ssize_t iio_debugfs_write_reg(struct file *file,
				     const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int reg, val;
	char buf[80];
	int ret;

	count = min_t(size_t, count, (sizeof(buf) - 1));
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		iio_dev_opaque->cached_reg_addr = reg;
		break;
	case 2:
		iio_dev_opaque->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}

static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (indio_dev->info->debugfs_reg_access == NULL)
		return;

	if (!iio_debugfs_dentry)
		return;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_dev_opaque->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);

	debugfs_create_file("direct_reg_access", 0644,
			    iio_dev_opaque->debugfs_dentry, indio_dev,
			    &iio_debugfs_reg_fops);
}
#else
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */

static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}

static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->write(indio_dev, ext_info->private,
			       this_attr->c, buf, len);
}

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	size_t len = 0;

	if (!e->num_items)
		return 0;

	for (i = 0; i < e->num_items; ++i) {
		if (!e->items[i])
			continue;
		len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
	}

	/* replace last space with a newline */
	buf[len - 1] = '\n';

	return len;
}
EXPORT_SYMBOL_GPL(iio_enum_available_read);
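
/*
 * Illustrative driver-side sketch (hypothetical names, not part of this
 * file): the iio_enum_* helpers in this block are usually wired to an
 * extended channel attribute through a struct iio_enum, e.g.
 *
 *	static const char * const my_modes[] = { "normal", "burst" };
 *
 *	static const struct iio_enum my_mode_enum = {
 *		.items = my_modes,
 *		.num_items = ARRAY_SIZE(my_modes),
 *		.get = my_get_mode,
 *		.set = my_set_mode,
 *	};
 *
 * which the driver then references from its ext_info table via the
 * IIO_ENUM()/IIO_ENUM_AVAILABLE() macros in <linux/iio/sysfs.h>.
 */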

ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int i;

	if (!e->get)
		return -EINVAL;

	i = e->get(indio_dev, chan);
	if (i < 0)
		return i;
	else if (i >= e->num_items || !e->items[i])
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", e->items[i]);
}
EXPORT_SYMBOL_GPL(iio_enum_read);

ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int ret;

	if (!e->set)
		return -EINVAL;

	ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf);
	if (ret < 0)
		return ret;

	ret = e->set(indio_dev, chan, ret);
	return ret ? ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);

static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};

static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
{
	*matrix = iio_mount_idmatrix;
	dev_info(dev, "mounting matrix not found: using identity...\n");
	return 0;
}

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
					      priv)(indio_dev, chan);

	if (IS_ERR(mtx))
		return PTR_ERR(mtx);

	if (!mtx)
		mtx = &iio_mount_idmatrix;

	return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
			  mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
			  mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
			  mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
}
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);

/**
 * iio_read_mount_matrix() - retrieve iio device mounting matrix from
 *                           device "mount-matrix" property
 * @dev: device the mounting matrix property is assigned to
 * @matrix: where to store retrieved matrix
 *
 * If device is assigned no mounting matrix property, a default 3x3 identity
 * matrix will be filled in.
 *
 * Return: 0 if success, or a negative error code on failure.
 */
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
{
	size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
	int err;

	err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
	if (err == len)
		return 0;

	if (err >= 0)
		/* Invalid number of matrix entries. */
		return -EINVAL;

	if (err != -EINVAL)
		/* Invalid matrix declaration format. */
		return err;

	/* Matrix was not declared at all: fallback to identity. */
	return iio_setup_mount_idmatrix(dev, matrix);
}
EXPORT_SYMBOL(iio_read_mount_matrix);

static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
				  int size, const int *vals)
{
	int tmp0, tmp1;
	s64 tmp2;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return sysfs_emit_at(buf, offset, "%d", vals[0]);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		fallthrough;
	case IIO_VAL_INT_PLUS_MICRO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%06u%s",
					     abs(vals[0]), -vals[1],
					     scale_db ? " dB" : "");
		else
			return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
					     vals[1], scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%09u",
					     abs(vals[0]), -vals[1]);
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
					     vals[1]);
	case IIO_VAL_FRACTIONAL:
		tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
		tmp1 = vals[1];
		tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
		if ((tmp2 < 0) && (tmp0 == 0))
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_FRACTIONAL_LOG2:
		tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
		tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
		if (tmp0 == 0 && tmp2 < 0)
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_INT_MULTIPLE:
	{
		int i;
		int l = 0;

		for (i = 0; i < size; ++i)
			l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
		return l;
	}
	case IIO_VAL_CHAR:
		return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
	case IIO_VAL_INT_64:
		tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
		return sysfs_emit_at(buf, offset, "%lld", tmp2);
	default:
		return 0;
	}
}

/**
 * iio_format_value() - Formats a IIO value into its string representation
 * @buf:	The buffer to which the formatted value gets written
 *		which is assumed to be big enough (i.e. PAGE_SIZE).
 * @type:	One of the IIO_VAL_* constants. This decides how the val
 *		and val2 parameters are formatted.
 * @size:	Number of IIO value entries contained in vals
 * @vals:	Pointer to the values, exact meaning depends on the
 *		type parameter.
 *
 * Return: 0 by default, a negative number on failure or the
 *	   total number of characters written for a type that belongs
 *	   to the IIO_VAL_* constant.
 */
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
	ssize_t len;

	len = __iio_format_value(buf, 0, type, size, vals);
	if (len >= PAGE_SIZE - 1)
		return -EFBIG;

	return len + sysfs_emit_at(buf, len, "\n");
}
EXPORT_SYMBOL_GPL(iio_format_value);
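
/*
 * Illustrative examples of the formatting performed above (assuming the
 * usual vals[] encoding for each IIO_VAL_* type):
 *
 *	IIO_VAL_INT		vals = { 42 }		-> "42\n"
 *	IIO_VAL_INT_PLUS_MICRO	vals = { 1, 500000 }	-> "1.500000\n"
 *	IIO_VAL_FRACTIONAL	vals = { 1, 4 }		-> "0.250000000\n"
 *	IIO_VAL_FRACTIONAL_LOG2	vals = { 3, 1 }		-> "1.500000000\n"
 */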

static ssize_t iio_read_channel_label(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	if (indio_dev->info->read_label)
		return indio_dev->info->read_label(indio_dev, this_attr->c, buf);

	if (this_attr->c->extend_name)
		return sysfs_emit(buf, "%s\n", this_attr->c->extend_name);

	return -EINVAL;
}

static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (indio_dev->info->read_raw_multi)
		ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
						      INDIO_MAX_RAW_ELEMENTS,
						      vals, &val_len,
						      this_attr->address);
	else
		ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
						&vals[0], &vals[1], this_attr->address);

	if (ret < 0)
		return ret;

	return iio_format_value(buf, ret, val_len, vals);
}

static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
			       const char *prefix, const char *suffix)
{
	ssize_t len;
	int stride;
	int i;

	switch (type) {
	case IIO_VAL_INT:
		stride = 1;
		break;
	default:
		stride = 2;
		break;
	}

	len = sysfs_emit(buf, prefix);

	for (i = 0; i <= length - stride; i += stride) {
		if (i != 0) {
			len += sysfs_emit_at(buf, len, " ");
			if (len >= PAGE_SIZE)
				return -EFBIG;
		}

		len += __iio_format_value(buf, len, type, stride, &vals[i]);
		if (len >= PAGE_SIZE)
			return -EFBIG;
	}

	len += sysfs_emit_at(buf, len, "%s\n", suffix);

	return len;
}

static ssize_t iio_format_avail_list(char *buf, const int *vals,
				     int type, int length)
{
	return iio_format_list(buf, vals, type, length, "", "");
}

static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
{
	return iio_format_list(buf, vals, type, 3, "[", "]");
}

static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const int *vals;
	int ret;
	int length;
	int type;

	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
					  &vals, &type, &length,
					  this_attr->address);

	if (ret < 0)
		return ret;
	switch (ret) {
	case IIO_AVAIL_LIST:
		return iio_format_avail_list(buf, vals, type, length);
	case IIO_AVAIL_RANGE:
		return iio_format_avail_range(buf, vals, type);
	default:
		return -EINVAL;
	}
}

/**
 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 * @scale_db: True if this should parse as dB
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
				 int *integer, int *fract, bool scale_db)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			if (*(str + 1) == '\0')
				break;
			return -EINVAL;
		} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof(" dB") - 1;
			continue;
		} else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof("dB") - 1;
			continue;
		} else if (*str == '.' && integer_part) {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}

/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);

static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract = 0;
	bool is_char = false;
	bool scale_db = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
							   this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO_DB:
			scale_db = true;
			fallthrough;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		case IIO_VAL_CHAR:
			is_char = true;
			break;
		default:
			return -EINVAL;
		}

	if (is_char) {
		char ch;

		if (sscanf(buf, "%c", &ch) != 1)
			return -EINVAL;
		integer = ch;
	} else {
		ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
					    scale_db);
		if (ret)
			return ret;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}
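
/*
 * The helper below composes sysfs attribute names from the direction,
 * channel type, optional index/modifier and the info postfix.
 * Illustrative results:
 *
 *	in_voltage0_raw			(separate, indexed input channel)
 *	in_accel_x_calibbias		(separate, modified channel)
 *	in_voltage1-voltage2_raw	(differential pair)
 *	out_altvoltage0_frequency	(output channel)
 *	sampling_frequency		(shared by all)
 */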

static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;

	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			name = kasprintf(GFP_KERNEL,
					 "%s_%s%d-%s%d_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 chan->channel,
					 iio_chan_type_name_spec[chan->type],
					 chan->channel2,
					 full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;

		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 chan->channel,
						 full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	if (readfunc) {
		dev_attr->attr.mode |= 0444;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= 0200;
		dev_attr->store = writefunc;
	}

error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct iio_buffer *buffer,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	iio_attr->buffer = buffer;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
	return ret;
}

static int iio_device_add_channel_label(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;

	if (!indio_dev->info->read_label && !chan->extend_name)
		return 0;

	ret = __iio_add_chan_devattr("label",
				     chan,
				     &iio_read_channel_label,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     NULL,
				     &iio_dev_opaque->channel_attr_list);
	if (ret < 0)
		return ret;

	return 1;
}

static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;

	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;
	char *avail_postfix;

	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		avail_postfix = kasprintf(GFP_KERNEL,
					  "%s_available",
					  iio_chan_info_postfix[i]);
		if (!avail_postfix)
			return -ENOMEM;

		ret = __iio_add_chan_devattr(avail_postfix,
					     chan,
					     &iio_read_channel_info_avail,
					     NULL,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		kfree(avail_postfix);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SEPARATE,
						  &chan->info_mask_separate_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_TYPE,
						  &chan->info_mask_shared_by_type_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_DIR,
						  &chan->info_mask_shared_by_dir_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_ALL,
						  &chan->info_mask_shared_by_all_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_channel_label(indio_dev, chan);
	if (ret < 0)
		return ret;
	attrcount += ret;

	if (chan->ext_info) {
		unsigned int i = 0;

		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
						     chan,
						     ext_info->read ?
							&iio_read_channel_ext_info : NULL,
						     ext_info->write ?
							&iio_write_channel_ext_info : NULL,
						     i,
						     ext_info->shared,
						     &indio_dev->dev,
						     NULL,
						     &iio_dev_opaque->channel_attr_list);
			i++;
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}

/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list.
 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, attr_list, l) {
		kfree_const(p->dev_attr.attr.name);
		list_del(&p->l);
		kfree(p);
	}
}

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->name);
}

static DEVICE_ATTR_RO(name);

static ssize_t label_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->label);
}

static DEVICE_ATTR_RO(label);

static ssize_t current_timestamp_clock_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	const clockid_t clk = iio_device_get_clock(indio_dev);
	const char *name;
	ssize_t sz;

	switch (clk) {
	case CLOCK_REALTIME:
		name = "realtime\n";
		sz = sizeof("realtime\n");
		break;
	case CLOCK_MONOTONIC:
		name = "monotonic\n";
		sz = sizeof("monotonic\n");
		break;
	case CLOCK_MONOTONIC_RAW:
		name = "monotonic_raw\n";
		sz = sizeof("monotonic_raw\n");
		break;
	case CLOCK_REALTIME_COARSE:
		name = "realtime_coarse\n";
		sz = sizeof("realtime_coarse\n");
		break;
	case CLOCK_MONOTONIC_COARSE:
		name = "monotonic_coarse\n";
		sz = sizeof("monotonic_coarse\n");
		break;
	case CLOCK_BOOTTIME:
		name = "boottime\n";
		sz = sizeof("boottime\n");
		break;
	case CLOCK_TAI:
		name = "tai\n";
		sz = sizeof("tai\n");
		break;
	default:
		BUG();
	}

	memcpy(buf, name, sz);
	return sz;
}

static ssize_t current_timestamp_clock_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t len)
{
	clockid_t clk;
	int ret;

	if (sysfs_streq(buf, "realtime"))
		clk = CLOCK_REALTIME;
	else if (sysfs_streq(buf, "monotonic"))
		clk = CLOCK_MONOTONIC;
	else if (sysfs_streq(buf, "monotonic_raw"))
		clk = CLOCK_MONOTONIC_RAW;
	else if (sysfs_streq(buf, "realtime_coarse"))
		clk = CLOCK_REALTIME_COARSE;
	else if (sysfs_streq(buf, "monotonic_coarse"))
		clk = CLOCK_MONOTONIC_COARSE;
	else if (sysfs_streq(buf, "boottime"))
		clk = CLOCK_BOOTTIME;
	else if (sysfs_streq(buf, "tai"))
		clk = CLOCK_TAI;
	else
		return -EINVAL;

	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
	if (ret)
		return ret;

	return len;
}

int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
				    const struct attribute_group *group)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct attribute_group **new, **old = iio_dev_opaque->groups;
	unsigned int cnt = iio_dev_opaque->groupcounter;

	new = krealloc(old, sizeof(*new) * (cnt + 2), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new[iio_dev_opaque->groupcounter++] = group;
	new[iio_dev_opaque->groupcounter] = NULL;

	iio_dev_opaque->groups = new;

	return 0;
}

static DEVICE_ATTR_RW(current_timestamp_clock);
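
/*
 * Userspace selects the timestamping clock through the attribute declared
 * above, e.g. (illustrative):
 *
 *	$ cat /sys/bus/iio/devices/iio:device0/current_timestamp_clock
 *	realtime
 *	$ echo monotonic > /sys/bus/iio/devices/iio:device0/current_timestamp_clock
 *
 * The write fails with -EBUSY while events or buffers are enabled, see
 * iio_device_set_clock().
 */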

static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (iio_dev_opaque->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (indio_dev->label)
		attrcount++;
	if (clk)
		attrcount++;

	iio_dev_opaque->chan_attr_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
			GFP_KERNEL);
	if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (indio_dev->info->attrs) {
		memcpy(iio_dev_opaque->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
		       * attrcount_orig);
		iio_dev_opaque->chan_attr_group.is_visible =
			indio_dev->info->attrs->is_visible;
	}
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
	if (indio_dev->label)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
	if (clk)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;

	ret = iio_device_register_sysfs_group(indio_dev,
					      &iio_dev_opaque->chan_attr_group);
	if (ret)
		goto error_clear_attrs;

	return 0;

error_clear_attrs:
	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
	kfree(iio_dev_opaque->chan_attr_group.attrs);
	iio_dev_opaque->chan_attr_group.attrs = NULL;
	kfree(iio_dev_opaque->groups);
	iio_dev_opaque->groups = NULL;
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);

	iio_device_detach_buffers(indio_dev);

	ida_free(&iio_ida, iio_dev_opaque->id);
	kfree(iio_dev_opaque);
}

const struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

/**
 * iio_device_alloc() - allocate an iio_dev from a driver
 * @parent:		Parent device.
 * @sizeof_priv:	Space to allocate for private structure.
 **/
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev_opaque *iio_dev_opaque;
	struct iio_dev *indio_dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev_opaque);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN);
		alloc_size += sizeof_priv;
	}

	iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
	if (!iio_dev_opaque)
		return NULL;

	indio_dev = &iio_dev_opaque->indio_dev;
	indio_dev->priv = (char *)iio_dev_opaque +
		ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);

	indio_dev->dev.parent = parent;
	indio_dev->dev.type = &iio_device_type;
	indio_dev->dev.bus = &iio_bus_type;
	device_initialize(&indio_dev->dev);
	mutex_init(&indio_dev->mlock);
	mutex_init(&iio_dev_opaque->info_exist_lock);
	INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);

	iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL);
	if (iio_dev_opaque->id < 0) {
		/* cannot use a dev_err as the name isn't available */
		pr_err("failed to get device id\n");
		kfree(iio_dev_opaque);
		return NULL;
	}

	if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
		ida_free(&iio_ida, iio_dev_opaque->id);
		kfree(iio_dev_opaque);
		return NULL;
	}

	INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
	INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);

	return indio_dev;
}
EXPORT_SYMBOL(iio_device_alloc);

/**
 * iio_device_free() - free an iio_dev from a driver
 * @dev:		the iio_dev associated with the device
 **/
void iio_device_free(struct iio_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);

static void devm_iio_device_release(void *iio_dev)
{
	iio_device_free(iio_dev);
}

/**
 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
 * @parent:		Device to allocate iio_dev for, and parent for this IIO device
 * @sizeof_priv:	Space to allocate for private structure.
 *
 * Managed iio_device_alloc. iio_dev allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev *iio_dev;
	int ret;

	iio_dev = iio_device_alloc(parent, sizeof_priv);
	if (!iio_dev)
		return NULL;

	ret = devm_add_action_or_reset(parent, devm_iio_device_release,
				       iio_dev);
	if (ret)
		return NULL;

	return iio_dev;
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);

/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 * @inode:	Inode structure for identifying the device in the file system
 * @filp:	File structure for iio device used to keep and later access
 *		private data
 *
 * Return: 0 on success or -EBUSY if the device is already opened
 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
	struct iio_dev_buffer_pair *ib;

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
		return -EBUSY;

	iio_device_get(indio_dev);

	ib = kmalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		iio_device_put(indio_dev);
		clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
		return -ENOMEM;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = indio_dev->buffer;

	filp->private_data = ib;

	return 0;
}

/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 * @inode:	Inode structure pointer for the char device
 * @filp:	File structure pointer for the char device
 *
 * Return: 0 for successful release
 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;

	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
	iio_device_put(indio_dev);

	return 0;
}

void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
				       struct iio_ioctl_handler *h)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
}

void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
{
	list_del(&h->entry);
}

static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_ioctl_handler *h;
	int ret = -ENODEV;

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	/*
	 * The NULL check here is required to prevent crashing when a device
	 * is being removed while userspace would still have open file handles
	 * to try to access this device.
	 */
	if (!indio_dev->info)
		goto out_unlock;

	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
		ret = h->ioctl(indio_dev, filp, cmd, arg);
		if (ret != IIO_IOCTL_UNHANDLED)
			break;
	}

	if (ret == IIO_IOCTL_UNHANDLED)
		ret = -ENODEV;

out_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}

static const struct file_operations iio_buffer_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read_outer_addr,
	.write = iio_buffer_write_outer_addr,
	.poll = iio_buffer_poll_addr,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static const struct file_operations iio_event_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
	int i, j;
	const struct iio_chan_spec *channels = indio_dev->channels;

	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
		return 0;

	for (i = 0; i < indio_dev->num_channels - 1; i++) {
		if (channels[i].scan_index < 0)
			continue;
		for (j = i + 1; j < indio_dev->num_channels; j++)
			if (channels[i].scan_index == channels[j].scan_index) {
				dev_err(&indio_dev->dev,
					"Duplicate scan index %d\n",
					channels[i].scan_index);
				return -EINVAL;
			}
	}

	return 0;
}

static int iio_check_extended_name(const struct iio_dev *indio_dev)
{
	unsigned int i;

	if (!indio_dev->info->read_label)
		return 0;

	for (i = 0; i < indio_dev->num_channels; i++) {
		if (indio_dev->channels[i].extend_name) {
			dev_err(&indio_dev->dev,
				"Cannot use labels and extend_name at the same time\n");
			return -EINVAL;
		}
	}

	return 0;
}

static const struct iio_buffer_setup_ops noop_ring_setup_ops;

int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct fwnode_handle *fwnode;
	int ret;

	if (!indio_dev->info)
		return -EINVAL;

	iio_dev_opaque->driver_module = this_mod;

	/* If the calling driver did not initialize firmware node, do it here */
	if (dev_fwnode(&indio_dev->dev))
		fwnode = dev_fwnode(&indio_dev->dev);
	else
		fwnode = dev_fwnode(indio_dev->dev.parent);
	device_set_node(&indio_dev->dev, fwnode);

	fwnode_property_read_string(fwnode, "label", &indio_dev->label);

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	ret = iio_check_extended_name(indio_dev);
	if (ret < 0)
		return ret;

	iio_device_register_debugfs(indio_dev);

	ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_register_trigger_consumer(indio_dev);

	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
		indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	if (iio_dev_opaque->attached_buffers_cnt)
		cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
	else if (iio_dev_opaque->event_interface)
		cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);

	if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
		indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
		iio_dev_opaque->chrdev.owner = this_mod;
	}

	/* assign device groups now; they should be all registered now */
	indio_dev->dev.groups = iio_dev_opaque->groups;

	ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;

	return 0;

error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(__iio_device_register);

/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev:		Device structure representing the device.
 **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	iio_device_unregister_debugfs(indio_dev);

	iio_disable_all_buffers(indio_dev);

	indio_dev->info = NULL;

	iio_device_wakeup_eventset(indio_dev);
	iio_buffer_wakeup_poll(indio_dev);

	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);

static void devm_iio_device_unreg(void *indio_dev)
{
	iio_device_unregister(indio_dev);
}

int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod)
{
	int ret;

	ret = __iio_device_register(indio_dev, this_mod);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
}
EXPORT_SYMBOL_GPL(__devm_iio_device_register);
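
/*
 * Illustrative sketch of how drivers pair the claim/release helpers below
 * around a single-shot conversion (hypothetical read_raw() fragment):
 *
 *	ret = iio_device_claim_direct_mode(indio_dev);
 *	if (ret)
 *		return ret;
 *	ret = my_single_conversion(st, chan, val);
 *	iio_device_release_direct_mode(indio_dev);
 *	return ret;
 *
 * my_single_conversion() stands in for the driver's own helper.
 */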

/**
 * iio_device_claim_direct_mode - Keep device in direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * If the device is in direct mode it is guaranteed to stay
 * that way until iio_device_release_direct_mode() is called.
 *
 * Use with iio_device_release_direct_mode()
 *
 * Returns: 0 on success, -EBUSY on failure
 */
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->mlock);

	if (iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);

/**
 * iio_device_release_direct_mode - releases claim on direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Use with iio_device_claim_direct_mode()
 */
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&indio_dev->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);

/**
 * iio_device_get_current_mode() - helper function providing read-only access to
 *				   the opaque @currentmode variable
 * @indio_dev:			   IIO device structure for device
 */
int iio_device_get_current_mode(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->currentmode;
}
EXPORT_SYMBOL_GPL(iio_device_get_current_mode);

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");