// SPDX-License-Identifier: GPL-2.0-only
/*
 * The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#define pr_fmt(fmt) "iio-core: " fmt

#include <linux/anon_inodes.h>
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/property.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/events.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>

#include "iio_core.h"
#include "iio_core_trigger.h"

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static struct dentry *iio_debugfs_dentry;

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY] = "gravity",
	[IIO_POSITIONRELATIVE] = "positionrelative",
	[IIO_PHASE] = "phase",
	[IIO_MASSCONCENTRATION] = "massconcentration",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_LIGHT_DUV] = "duv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
	[IIO_MOD_RUNNING] = "running",
	[IIO_MOD_JOGGING] = "jogging",
	[IIO_MOD_WALKING] = "walking",
	[IIO_MOD_STILL] = "still",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
	[IIO_MOD_I] = "i",
	[IIO_MOD_Q] = "q",
	[IIO_MOD_CO2] = "co2",
	[IIO_MOD_VOC] = "voc",
	[IIO_MOD_PM1] = "pm1",
	[IIO_MOD_PM2P5] = "pm2p5",
	[IIO_MOD_PM4] = "pm4",
	[IIO_MOD_PM10] = "pm10",
	[IIO_MOD_ETHANOL] = "ethanol",
	[IIO_MOD_H2] = "h2",
	[IIO_MOD_O2] = "o2",
	[IIO_MOD_LINEAR_X] = "linear_x",
	[IIO_MOD_LINEAR_Y] = "linear_y",
	[IIO_MOD_LINEAR_Z] = "linear_z",
	[IIO_MOD_PITCH] = "pitch",
	[IIO_MOD_YAW] = "yaw",
	[IIO_MOD_ROLL] = "roll",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
	= "filter_high_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
	[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
	[IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
	[IIO_CHAN_INFO_INT_TIME] = "integration_time",
	[IIO_CHAN_INFO_ENABLE] = "en",
	[IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
	[IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
	[IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
	[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
	[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
	[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
	[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
	[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
	[IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
};
/**
 * iio_device_id() - query the unique ID for the device
 * @indio_dev: Device structure whose ID is being queried
 *
 * The IIO device ID is a unique index used for example for the naming
 * of the character device /dev/iio\:device[ID].
 *
 * Returns: Unique ID for the device.
 */
int iio_device_id(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->id;
}
EXPORT_SYMBOL_GPL(iio_device_id);

/**
 * iio_buffer_enabled() - helper function to test if the buffer is enabled
 * @indio_dev: IIO device structure for device
 *
 * Returns: True, if the buffer is enabled.
 */
bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->currentmode &
	       (INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE |
		INDIO_BUFFER_TRIGGERED);
}
EXPORT_SYMBOL_GPL(iio_buffer_enabled);

#if defined(CONFIG_DEBUG_FS)
/*
 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
 */
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
#endif

/**
 * iio_find_channel_from_si() - get channel from its scan index
 * @indio_dev: device
 * @si: scan index to match
 *
 * Returns:
 * Constant pointer to iio_chan_spec, if scan index matches, NULL on failure.
 */
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
	int i;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].scan_index == si)
			return &indio_dev->channels[i];
	return NULL;
}

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);

/**
 * iio_device_set_clock() - Set current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 * @clock_id: timestamping clock POSIX identifier to set.
 *
 * Returns: 0 on success, or a negative error code.
 */
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
	int ret;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	ret = mutex_lock_interruptible(&iio_dev_opaque->mlock);
	if (ret)
		return ret;
	if ((ev_int && iio_event_enabled(ev_int)) ||
	    iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&iio_dev_opaque->mlock);
		return -EBUSY;
	}
	iio_dev_opaque->clock_id = clock_id;
	mutex_unlock(&iio_dev_opaque->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_device_set_clock);

/**
 * iio_device_get_clock() - Retrieve current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 *
 * Returns: Clock ID of the current timestamping clock for the device.
 */
clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->clock_id;
}
EXPORT_SYMBOL(iio_device_get_clock);

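/*
 * Usage sketch (illustrative only, not part of this file's logic): a driver
 * or in-kernel consumer can pick the timestamping clock once at setup time
 * and then stamp samples with iio_get_time_ns(), which honours that
 * selection. The foo_*() names below are placeholders for a hypothetical
 * driver.
 *
 *	static int foo_setup(struct iio_dev *indio_dev)
 *	{
 *		// Fails with -EBUSY if events or the buffer are already enabled.
 *		return iio_device_set_clock(indio_dev, CLOCK_MONOTONIC_RAW);
 *	}
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		s64 ts = iio_get_time_ns(indio_dev);
 *
 *		// ... push data tagged with 'ts', e.g. via
 *		// iio_push_to_buffers_with_timestamp()
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 *
 * Userspace can select the same clocks through the current_timestamp_clock
 * sysfs attribute defined later in this file.
 */
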
/**
 * iio_get_time_ns() - utility function to get a time stamp for events etc
 * @indio_dev: device
 *
 * Returns: Timestamp of the event in nanoseconds.
 */
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
	struct timespec64 tp;

	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
		return ktime_get_real_ns();
	case CLOCK_MONOTONIC:
		return ktime_get_ns();
	case CLOCK_MONOTONIC_RAW:
		return ktime_get_raw_ns();
	case CLOCK_REALTIME_COARSE:
		return ktime_to_ns(ktime_get_coarse_real());
	case CLOCK_MONOTONIC_COARSE:
		ktime_get_coarse_ts64(&tp);
		return timespec64_to_ns(&tp);
	case CLOCK_BOOTTIME:
		return ktime_get_boottime_ns();
	case CLOCK_TAI:
		return ktime_get_clocktai_ns();
	default:
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_ns);

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		pr_err("could not register bus type\n");
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		goto error_unregister_bus_type;
	}

	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}

#if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
				    size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int val = 0;
	int ret;

	if (*ppos > 0)
		return simple_read_from_buffer(userbuf, count, ppos,
					       iio_dev_opaque->read_buf,
					       iio_dev_opaque->read_buf_len);

	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  iio_dev_opaque->cached_reg_addr,
						  0, &val);
	if (ret) {
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
		return ret;
	}

	iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
						sizeof(iio_dev_opaque->read_buf),
						"0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos,
				       iio_dev_opaque->read_buf,
				       iio_dev_opaque->read_buf_len);
}

static ssize_t iio_debugfs_write_reg(struct file *file,
		     const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int reg, val;
	char buf[80];
	int ret;

	count = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		iio_dev_opaque->cached_reg_addr = reg;
		break;
	case 2:
		iio_dev_opaque->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};

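/*
 * Illustrative sketch (not part of this file): a driver opts into the
 * direct_reg_access debugfs file by providing a debugfs_reg_access()
 * callback in its struct iio_info. When 'readval' is NULL the call is a
 * write of 'writeval'; otherwise the current value is returned through
 * 'readval'. The foo_*() names and the regmap usage are placeholders for a
 * hypothetical regmap-based driver.
 *
 *	static int foo_reg_access(struct iio_dev *indio_dev, unsigned int reg,
 *				  unsigned int writeval, unsigned int *readval)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		if (readval)
 *			return regmap_read(st->regmap, reg, readval);
 *
 *		return regmap_write(st->regmap, reg, writeval);
 *	}
 *
 * From userspace:
 *	echo 0x10     > /sys/kernel/debug/iio/iio:device0/direct_reg_access
 *	cat             /sys/kernel/debug/iio/iio:device0/direct_reg_access
 *	echo 0x10 0x5 > /sys/kernel/debug/iio/iio:device0/direct_reg_access
 */
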
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}

static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (indio_dev->info->debugfs_reg_access == NULL)
		return;

	if (!iio_debugfs_dentry)
		return;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_dev_opaque->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);

	debugfs_create_file("direct_reg_access", 0644,
			    iio_dev_opaque->debugfs_dentry, indio_dev,
			    &iio_debugfs_reg_fops);
}
#else
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */

static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}

static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->write(indio_dev, ext_info->private,
			       this_attr->c, buf, len);
}

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	size_t len = 0;

	if (!e->num_items)
		return 0;

	for (i = 0; i < e->num_items; ++i) {
		if (!e->items[i])
			continue;
		len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
	}

	/* replace last space with a newline */
	buf[len - 1] = '\n';

	return len;
}
EXPORT_SYMBOL_GPL(iio_enum_available_read);

ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int i;

	if (!e->get)
		return -EINVAL;

	i = e->get(indio_dev, chan);
	if (i < 0)
		return i;
	if (i >= e->num_items || !e->items[i])
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", e->items[i]);
}
EXPORT_SYMBOL_GPL(iio_enum_read);

ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int ret;

	if (!e->set)
		return -EINVAL;

	ret = __sysfs_match_string(e->items, e->num_items, buf);
	if (ret < 0)
		return ret;

	ret = e->set(indio_dev, chan, ret);
	return ret ? ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);

static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};

static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
{
	*matrix = iio_mount_idmatrix;
	dev_info(dev, "mounting matrix not found: using identity...\n");
	return 0;
}

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_mount_matrix *mtx;

	mtx = ((iio_get_mount_matrix_t *)priv)(indio_dev, chan);
	if (IS_ERR(mtx))
		return PTR_ERR(mtx);

	if (!mtx)
		mtx = &iio_mount_idmatrix;

	return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
			  mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
			  mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
			  mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
}
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);

/**
 * iio_read_mount_matrix() - retrieve iio device mounting matrix from
 *                           device "mount-matrix" property
 * @dev: device the mounting matrix property is assigned to
 * @matrix: where to store retrieved matrix
 *
 * If device is assigned no mounting matrix property, a default 3x3 identity
 * matrix will be filled in.
 *
 * Returns: 0 if success, or a negative error code on failure.
 */
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
{
	size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
	int err;

	err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
	if (err == len)
		return 0;

	if (err >= 0)
		/* Invalid number of matrix entries. */
		return -EINVAL;

	if (err != -EINVAL)
		/* Invalid matrix declaration format. */
		return err;

	/* Matrix was not declared at all: fallback to identity. */
	return iio_setup_mount_idmatrix(dev, matrix);
}
EXPORT_SYMBOL(iio_read_mount_matrix);

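/*
 * Usage sketch (illustrative, not part of this file): the iio_enum_*() and
 * iio_show_mount_matrix() helpers above are normally wired up through a
 * channel's ext_info table via the IIO_ENUM(), IIO_ENUM_AVAILABLE() and
 * IIO_MOUNT_MATRIX() macros. The foo_*() names are placeholders for a
 * hypothetical driver.
 *
 *	static const char * const foo_modes[] = { "normal", "low_power" };
 *
 *	static const struct iio_enum foo_mode_enum = {
 *		.items = foo_modes,
 *		.num_items = ARRAY_SIZE(foo_modes),
 *		.get = foo_get_mode,
 *		.set = foo_set_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_ENUM("mode", IIO_SEPARATE, &foo_mode_enum),
 *		IIO_ENUM_AVAILABLE("mode", IIO_SHARED_BY_TYPE, &foo_mode_enum),
 *		IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, foo_get_mount_matrix),
 *		{ }
 *	};
 *
 * The matrix itself is typically read once at probe time, e.g.
 * iio_read_mount_matrix(&client->dev, &st->orientation).
 */
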
static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
				  int size, const int *vals)
{
	int tmp0, tmp1;
	s64 tmp2;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return sysfs_emit_at(buf, offset, "%d", vals[0]);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		fallthrough;
	case IIO_VAL_INT_PLUS_MICRO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%06u%s",
					     abs(vals[0]), -vals[1],
					     scale_db ? " dB" : "");
		else
			return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
					     vals[1], scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%09u",
					     abs(vals[0]), -vals[1]);
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
					     vals[1]);
	case IIO_VAL_FRACTIONAL:
		tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
		tmp1 = vals[1];
		tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
		if ((tmp2 < 0) && (tmp0 == 0))
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_FRACTIONAL_LOG2:
		tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
		tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
		if (tmp0 == 0 && tmp2 < 0)
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_INT_MULTIPLE:
	{
		int i;
		int l = 0;

		for (i = 0; i < size; ++i)
			l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
		return l;
	}
	case IIO_VAL_CHAR:
		return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
	case IIO_VAL_INT_64:
		tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
		return sysfs_emit_at(buf, offset, "%lld", tmp2);
	default:
		return 0;
	}
}

/**
 * iio_format_value() - Formats a IIO value into its string representation
 * @buf:	The buffer to which the formatted value gets written
 *		which is assumed to be big enough (i.e. PAGE_SIZE).
 * @type:	One of the IIO_VAL_* constants. This decides how the val
 *		and val2 parameters are formatted.
 * @size:	Number of IIO value entries contained in vals
 * @vals:	Pointer to the values, exact meaning depends on the
 *		type parameter.
 *
 * Returns:
 * 0 by default, a negative number on failure or the total number of characters
 * written for a type that belongs to the IIO_VAL_* constant.
 */
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
	ssize_t len;

	len = __iio_format_value(buf, 0, type, size, vals);
	if (len >= PAGE_SIZE - 1)
		return -EFBIG;

	return len + sysfs_emit_at(buf, len, "\n");
}
EXPORT_SYMBOL_GPL(iio_format_value);

static ssize_t iio_read_channel_label(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	if (indio_dev->info->read_label)
		return indio_dev->info->read_label(indio_dev, this_attr->c, buf);

	if (this_attr->c->extend_name)
		return sysfs_emit(buf, "%s\n", this_attr->c->extend_name);

	return -EINVAL;
}

static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (indio_dev->info->read_raw_multi)
		ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
						      INDIO_MAX_RAW_ELEMENTS,
						      vals, &val_len,
						      this_attr->address);
	else
		ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
						&vals[0], &vals[1], this_attr->address);

	if (ret < 0)
		return ret;

	return iio_format_value(buf, ret, val_len, vals);
}

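/*
 * Illustrative sketch (not part of this file): a driver's read_raw()
 * callback returns one of the IIO_VAL_* constants to tell
 * iio_read_channel_info()/iio_format_value() how val/val2 should be
 * rendered. The foo_*() helpers are placeholders for a hypothetical driver.
 *
 *	static int foo_read_raw(struct iio_dev *indio_dev,
 *				struct iio_chan_spec const *chan,
 *				int *val, int *val2, long mask)
 *	{
 *		switch (mask) {
 *		case IIO_CHAN_INFO_RAW:
 *			*val = foo_read_adc(indio_dev, chan->channel);
 *			return IIO_VAL_INT;             // e.g. "1234"
 *		case IIO_CHAN_INFO_SCALE:
 *			*val = 2500;                    // 2500 mV reference
 *			*val2 = 12;                     // 12-bit ADC
 *			return IIO_VAL_FRACTIONAL_LOG2; // "0.610351562"
 *		case IIO_CHAN_INFO_OFFSET:
 *			*val = 0;
 *			*val2 = 500000;
 *			return IIO_VAL_INT_PLUS_MICRO;  // "0.500000"
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */
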
static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
			       const char *prefix, const char *suffix)
{
	ssize_t len;
	int stride;
	int i;

	switch (type) {
	case IIO_VAL_INT:
		stride = 1;
		break;
	default:
		stride = 2;
		break;
	}

	len = sysfs_emit(buf, prefix);

	for (i = 0; i <= length - stride; i += stride) {
		if (i != 0) {
			len += sysfs_emit_at(buf, len, " ");
			if (len >= PAGE_SIZE)
				return -EFBIG;
		}

		len += __iio_format_value(buf, len, type, stride, &vals[i]);
		if (len >= PAGE_SIZE)
			return -EFBIG;
	}

	len += sysfs_emit_at(buf, len, "%s\n", suffix);

	return len;
}

static ssize_t iio_format_avail_list(char *buf, const int *vals,
				     int type, int length)
{

	return iio_format_list(buf, vals, type, length, "", "");
}

static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
{
	int length;

	/*
	 * length refers to the array size, not the number of elements.
	 * The purpose is to print the range [min, step, max] so length should
	 * be 3 in case of int, and 6 for other types.
	 */
	switch (type) {
	case IIO_VAL_INT:
		length = 3;
		break;
	default:
		length = 6;
		break;
	}

	return iio_format_list(buf, vals, type, length, "[", "]");
}

static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const int *vals;
	int ret;
	int length;
	int type;

	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
					  &vals, &type, &length,
					  this_attr->address);

	if (ret < 0)
		return ret;
	switch (ret) {
	case IIO_AVAIL_LIST:
		return iio_format_avail_list(buf, vals, type, length);
	case IIO_AVAIL_RANGE:
		return iio_format_avail_range(buf, vals, type);
	default:
		return -EINVAL;
	}
}

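/*
 * Illustrative sketch (not part of this file): a driver's read_avail()
 * callback feeds iio_read_channel_info_avail() either a flat list
 * (IIO_AVAIL_LIST) or a [min step max] triplet (IIO_AVAIL_RANGE). The
 * foo_* arrays and names are placeholders for a hypothetical driver.
 *
 *	static const int foo_scale_avail[] = {
 *		0, 38147, 0, 76294, 0, 152588,	// val/val2 pairs, INT_PLUS_MICRO
 *	};
 *	static const int foo_samp_freq_range[] = { 10, 10, 1000 };
 *
 *	static int foo_read_avail(struct iio_dev *indio_dev,
 *				  struct iio_chan_spec const *chan,
 *				  const int **vals, int *type, int *length,
 *				  long mask)
 *	{
 *		switch (mask) {
 *		case IIO_CHAN_INFO_SCALE:
 *			*vals = foo_scale_avail;
 *			*type = IIO_VAL_INT_PLUS_MICRO;
 *			*length = ARRAY_SIZE(foo_scale_avail);
 *			return IIO_AVAIL_LIST;	// "0.038147 0.076294 0.152588"
 *		case IIO_CHAN_INFO_SAMP_FREQ:
 *			*vals = foo_samp_freq_range;
 *			*type = IIO_VAL_INT;
 *			*length = ARRAY_SIZE(foo_samp_freq_range);
 *			return IIO_AVAIL_RANGE;	// "[10 10 1000]"
 *		}
 *		return -EINVAL;
 *	}
 */
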
/**
 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 * @scale_db: True if this should parse as dB
 *
 * Returns:
 * 0 on success, or a negative error code if the string could not be parsed.
 */
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
				 int *integer, int *fract, bool scale_db)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			if (*(str + 1) == '\0')
				break;
			return -EINVAL;
		} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof(" dB") - 1;
			continue;
		} else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof("dB") - 1;
			continue;
		} else if (*str == '.' && integer_part) {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}

/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * Returns:
 * 0 on success, or a negative error code if the string could not be parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);

static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract = 0;
	bool is_char = false;
	bool scale_db = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
							   this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO_DB:
			scale_db = true;
			fallthrough;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		case IIO_VAL_CHAR:
			is_char = true;
			break;
		default:
			return -EINVAL;
		}

	if (is_char) {
		char ch;

		if (sscanf(buf, "%c", &ch) != 1)
			return -EINVAL;
		integer = ch;
	} else {
		ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
					    scale_db);
		if (ret)
			return ret;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}

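/*
 * Illustrative sketch (not part of this file): write_raw_get_fmt() tells
 * iio_write_channel_info() how to split the user's string into integer and
 * fractional parts before write_raw() is called. The foo_*() names are
 * placeholders for a hypothetical driver.
 *
 *	static int foo_write_raw_get_fmt(struct iio_dev *indio_dev,
 *					 struct iio_chan_spec const *chan,
 *					 long mask)
 *	{
 *		// With IIO_VAL_INT_PLUS_NANO, a write of "0.000244" to a
 *		// *_scale attribute arrives as val = 0, val2 = 244000.
 *		return mask == IIO_CHAN_INFO_SCALE ?
 *			IIO_VAL_INT_PLUS_NANO : IIO_VAL_INT_PLUS_MICRO;
 *	}
 *
 *	static int foo_write_raw(struct iio_dev *indio_dev,
 *				 struct iio_chan_spec const *chan,
 *				 int val, int val2, long mask)
 *	{
 *		switch (mask) {
 *		case IIO_CHAN_INFO_SCALE:
 *			return foo_set_scale(indio_dev, chan, val, val2);
 *		case IIO_CHAN_INFO_SAMP_FREQ:
 *			return foo_set_samp_freq(indio_dev, val);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */
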
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;

	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			name = kasprintf(GFP_KERNEL,
					 "%s_%s%d-%s%d_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 chan->channel,
					 iio_chan_type_name_spec[chan->type],
					 chan->channel2,
					 full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;

		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 chan->channel,
						 full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	if (readfunc) {
		dev_attr->attr.mode |= 0444;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= 0200;
		dev_attr->store = writefunc;
	}

error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct iio_buffer *buffer,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	iio_attr->buffer = buffer;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
	return ret;
}

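/*
 * Naming sketch (illustrative only): given a channel spec such as the one
 * below, the helpers above generate sysfs attribute names like
 *
 *	in_voltage0_raw			(IIO_SEPARATE, indexed)
 *	in_voltage_scale		(IIO_SHARED_BY_TYPE)
 *	sampling_frequency		(IIO_SHARED_BY_ALL)
 *	in_voltage0-voltage1_raw	(a differential channel, not shown here)
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 *			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 *			.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 *		},
 *	};
 *
 * foo_channels is a placeholder name for a hypothetical driver.
 */
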
static int iio_device_add_channel_label(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;

	if (!indio_dev->info->read_label && !chan->extend_name)
		return 0;

	ret = __iio_add_chan_devattr("label",
				     chan,
				     &iio_read_channel_label,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     NULL,
				     &iio_dev_opaque->channel_attr_list);
	if (ret < 0)
		return ret;

	return 1;
}

static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;

	for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;
	char *avail_postfix;

	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		avail_postfix = kasprintf(GFP_KERNEL,
					  "%s_available",
					  iio_chan_info_postfix[i]);
		if (!avail_postfix)
			return -ENOMEM;

		ret = __iio_add_chan_devattr(avail_postfix,
					     chan,
					     &iio_read_channel_info_avail,
					     NULL,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		kfree(avail_postfix);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SEPARATE,
						  &chan->info_mask_separate_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_TYPE,
						  &chan->info_mask_shared_by_type_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_DIR,
						  &chan->info_mask_shared_by_dir_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_ALL,
						  &chan->info_mask_shared_by_all_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_channel_label(indio_dev, chan);
	if (ret < 0)
		return ret;
	attrcount += ret;

	if (chan->ext_info) {
		unsigned int i = 0;

		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
						     chan,
						     ext_info->read ?
							&iio_read_channel_ext_info : NULL,
						     ext_info->write ?
							&iio_write_channel_ext_info : NULL,
						     i,
						     ext_info->shared,
						     &indio_dev->dev,
						     NULL,
						     &iio_dev_opaque->channel_attr_list);
			i++;
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}

/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list.
 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, attr_list, l) {
		kfree_const(p->dev_attr.attr.name);
		list_del(&p->l);
		kfree(p);
	}
}

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->name);
}

static DEVICE_ATTR_RO(name);

static ssize_t label_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sysfs_emit(buf, "%s\n", indio_dev->label);
}

static DEVICE_ATTR_RO(label);

static const char * const clock_names[] = {
	[CLOCK_REALTIME] = "realtime",
	[CLOCK_MONOTONIC] = "monotonic",
	[CLOCK_PROCESS_CPUTIME_ID] = "process_cputime_id",
	[CLOCK_THREAD_CPUTIME_ID] = "thread_cputime_id",
	[CLOCK_MONOTONIC_RAW] = "monotonic_raw",
	[CLOCK_REALTIME_COARSE] = "realtime_coarse",
	[CLOCK_MONOTONIC_COARSE] = "monotonic_coarse",
	[CLOCK_BOOTTIME] = "boottime",
	[CLOCK_REALTIME_ALARM] = "realtime_alarm",
	[CLOCK_BOOTTIME_ALARM] = "boottime_alarm",
	[CLOCK_SGI_CYCLE] = "sgi_cycle",
	[CLOCK_TAI] = "tai",
};

static ssize_t current_timestamp_clock_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	const clockid_t clk = iio_device_get_clock(indio_dev);

	switch (clk) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		break;
	default:
		BUG();
	}

	return sysfs_emit(buf, "%s\n", clock_names[clk]);
}

static ssize_t current_timestamp_clock_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t len)
{
	clockid_t clk;
	int ret;

	ret = sysfs_match_string(clock_names, buf);
	if (ret < 0)
		return ret;
	clk = ret;

	switch (clk) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		break;
	default:
		return -EINVAL;
	}

	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
	if (ret)
		return ret;

	return len;
}

int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
				    const struct attribute_group *group)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct attribute_group **new, **old = iio_dev_opaque->groups;
	unsigned int cnt = iio_dev_opaque->groupcounter;

	new = krealloc_array(old, cnt + 2, sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new[iio_dev_opaque->groupcounter++] = group;
	new[iio_dev_opaque->groupcounter] = NULL;

	iio_dev_opaque->groups = new;

	return 0;
}

static DEVICE_ATTR_RW(current_timestamp_clock);

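/*
 * Illustrative sketch (not part of this file): a driver can hand the core a
 * static attribute group through iio_info.attrs; iio_device_register_sysfs()
 * below copies those attributes into the same group as the generated
 * per-channel ones. The foo_* names are placeholders for a hypothetical
 * driver.
 *
 *	static IIO_DEVICE_ATTR(self_test, 0200, NULL, foo_self_test_store, 0);
 *
 *	static struct attribute *foo_attributes[] = {
 *		&iio_dev_attr_self_test.dev_attr.attr,
 *		NULL
 *	};
 *
 *	static const struct attribute_group foo_attribute_group = {
 *		.attrs = foo_attributes,
 *	};
 *
 *	static const struct iio_info foo_info = {
 *		.attrs = &foo_attribute_group,
 *		.read_raw = foo_read_raw,
 *	};
 *
 * iio_device_register_sysfs_group() above serves the core itself (buffers,
 * events, triggers) when additional groups need to be attached to the device.
 */
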
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (iio_dev_opaque->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (indio_dev->label)
		attrcount++;
	if (clk)
		attrcount++;

	iio_dev_opaque->chan_attr_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
			GFP_KERNEL);
	if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes, and point to original binary attributes */
	if (indio_dev->info->attrs) {
		memcpy(iio_dev_opaque->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
		       *attrcount_orig);
		iio_dev_opaque->chan_attr_group.is_visible =
			indio_dev->info->attrs->is_visible;
		iio_dev_opaque->chan_attr_group.bin_attrs =
			indio_dev->info->attrs->bin_attrs;
	}
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
	if (indio_dev->label)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
	if (clk)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;

	ret = iio_device_register_sysfs_group(indio_dev,
					      &iio_dev_opaque->chan_attr_group);
	if (ret)
		goto error_free_chan_attrs;

	return 0;

error_free_chan_attrs:
	kfree(iio_dev_opaque->chan_attr_group.attrs);
	iio_dev_opaque->chan_attr_group.attrs = NULL;
error_clear_attrs:
	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
	kfree(iio_dev_opaque->chan_attr_group.attrs);
	iio_dev_opaque->chan_attr_group.attrs = NULL;
	kfree(iio_dev_opaque->groups);
	iio_dev_opaque->groups = NULL;
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);

	iio_device_detach_buffers(indio_dev);

	lockdep_unregister_key(&iio_dev_opaque->mlock_key);

	ida_free(&iio_ida, iio_dev_opaque->id);
	kfree(iio_dev_opaque);
}

const struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

/**
 * iio_device_alloc() - allocate an iio_dev from a driver
 * @parent: Parent device.
 * @sizeof_priv: Space to allocate for private structure.
 *
 * Returns:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev_opaque *iio_dev_opaque;
	struct iio_dev *indio_dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev_opaque);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN);
		alloc_size += sizeof_priv;
	}

	iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
	if (!iio_dev_opaque)
		return NULL;

	indio_dev = &iio_dev_opaque->indio_dev;
	indio_dev->priv = (char *)iio_dev_opaque +
		ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);

	indio_dev->dev.parent = parent;
	indio_dev->dev.type = &iio_device_type;
	indio_dev->dev.bus = &iio_bus_type;
	device_initialize(&indio_dev->dev);
	mutex_init(&iio_dev_opaque->mlock);
	mutex_init(&iio_dev_opaque->info_exist_lock);
	INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);

	iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL);
	if (iio_dev_opaque->id < 0) {
		/* cannot use a dev_err as the name isn't available */
		pr_err("failed to get device id\n");
		kfree(iio_dev_opaque);
		return NULL;
	}

	if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
		ida_free(&iio_ida, iio_dev_opaque->id);
		kfree(iio_dev_opaque);
		return NULL;
	}

	INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
	INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);

	lockdep_register_key(&iio_dev_opaque->mlock_key);
	lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);

	return indio_dev;
}
EXPORT_SYMBOL(iio_device_alloc);

/**
 * iio_device_free() - free an iio_dev from a driver
 * @dev: the iio_dev associated with the device
 */
void iio_device_free(struct iio_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);

static void devm_iio_device_release(void *iio_dev)
{
	iio_device_free(iio_dev);
}

/**
 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
 * @parent: Device to allocate iio_dev for, and parent for this IIO device
 * @sizeof_priv: Space to allocate for private structure.
 *
 * Managed iio_device_alloc. iio_dev allocated with this function is
 * automatically freed on driver detach.
 *
 * Returns:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev *iio_dev;
	int ret;

	iio_dev = iio_device_alloc(parent, sizeof_priv);
	if (!iio_dev)
		return NULL;

	ret = devm_add_action_or_reset(parent, devm_iio_device_release,
				       iio_dev);
	if (ret)
		return NULL;

	return iio_dev;
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);

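/*
 * Probe sketch (illustrative only, not part of this file): the usual driver
 * pattern pairs devm_iio_device_alloc() with devm_iio_device_register() and
 * keeps driver state in the iio_priv() area sized by sizeof_priv. The foo_*
 * names are placeholders for a hypothetical I2C driver.
 *
 *	static int foo_probe(struct i2c_client *client)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		st->client = client;
 *
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *
 *		return devm_iio_device_register(&client->dev, indio_dev);
 *	}
 */
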
/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 * @inode: Inode structure for identifying the device in the file system
 * @filp: File structure for iio device used to keep and later access
 *	  private data
 *
 * Returns: 0 on success or -EBUSY if the device is already opened
 */
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
	struct iio_dev_buffer_pair *ib;

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
		return -EBUSY;

	iio_device_get(indio_dev);

	ib = kmalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		iio_device_put(indio_dev);
		clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
		return -ENOMEM;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = indio_dev->buffer;

	filp->private_data = ib;

	return 0;
}

/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 * @inode: Inode structure pointer for the char device
 * @filp: File structure pointer for the char device
 *
 * Returns: 0 for successful release.
 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;

	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
	iio_device_put(indio_dev);

	return 0;
}

void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
				       struct iio_ioctl_handler *h)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
}

void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
{
	list_del(&h->entry);
}

static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_ioctl_handler *h;
	int ret = -ENODEV;

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	/*
	 * The NULL check here is required to prevent crashing when a device
	 * is being removed while userspace would still have open file handles
	 * to try to access this device.
	 */
	if (!indio_dev->info)
		goto out_unlock;

	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
		ret = h->ioctl(indio_dev, filp, cmd, arg);
		if (ret != IIO_IOCTL_UNHANDLED)
			break;
	}

	if (ret == IIO_IOCTL_UNHANDLED)
		ret = -ENODEV;

out_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}

static const struct file_operations iio_buffer_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read_outer_addr,
	.write = iio_buffer_write_outer_addr,
	.poll = iio_buffer_poll_addr,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static const struct file_operations iio_event_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
	int i, j;
	const struct iio_chan_spec *channels = indio_dev->channels;

	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
		return 0;

	for (i = 0; i < indio_dev->num_channels - 1; i++) {
		if (channels[i].scan_index < 0)
			continue;
		for (j = i + 1; j < indio_dev->num_channels; j++)
			if (channels[i].scan_index == channels[j].scan_index) {
				dev_err(&indio_dev->dev,
					"Duplicate scan index %d\n",
					channels[i].scan_index);
				return -EINVAL;
			}
	}

	return 0;
}

static int iio_check_extended_name(const struct iio_dev *indio_dev)
{
	unsigned int i;

	if (!indio_dev->info->read_label)
		return 0;

	for (i = 0; i < indio_dev->num_channels; i++) {
		if (indio_dev->channels[i].extend_name) {
			dev_err(&indio_dev->dev,
				"Cannot use labels and extend_name at the same time\n");
			return -EINVAL;
		}
	}

	return 0;
}

static const struct iio_buffer_setup_ops noop_ring_setup_ops;

int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct fwnode_handle *fwnode = NULL;
	int ret;

	if (!indio_dev->info)
		return -EINVAL;

	iio_dev_opaque->driver_module = this_mod;

	/* If the calling driver did not initialize firmware node, do it here */
	if (dev_fwnode(&indio_dev->dev))
		fwnode = dev_fwnode(&indio_dev->dev);
	/* The default dummy IIO device has no parent */
	else if (indio_dev->dev.parent)
		fwnode = dev_fwnode(indio_dev->dev.parent);
	device_set_node(&indio_dev->dev, fwnode);

	fwnode_property_read_string(fwnode, "label", &indio_dev->label);

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	ret = iio_check_extended_name(indio_dev);
	if (ret < 0)
		return ret;

	iio_device_register_debugfs(indio_dev);

	ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_register_trigger_consumer(indio_dev);

	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
	    indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	if (iio_dev_opaque->attached_buffers_cnt)
		cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
	else if (iio_dev_opaque->event_interface)
		cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);

	if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
		indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
		iio_dev_opaque->chrdev.owner = this_mod;
	}

	/* assign device groups now; they should be all registered now */
	indio_dev->dev.groups = iio_dev_opaque->groups;

	ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;

	return 0;

error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(__iio_device_register);

/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev: Device structure representing the device.
 */
void iio_device_unregister(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	iio_device_unregister_debugfs(indio_dev);

	iio_disable_all_buffers(indio_dev);

	indio_dev->info = NULL;

	iio_device_wakeup_eventset(indio_dev);
	iio_buffer_wakeup_poll(indio_dev);

	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);

static void devm_iio_device_unreg(void *indio_dev)
{
	iio_device_unregister(indio_dev);
}

int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod)
{
	int ret;

	ret = __iio_device_register(indio_dev, this_mod);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
}
EXPORT_SYMBOL_GPL(__devm_iio_device_register);

/**
 * iio_device_claim_direct_mode - Keep device in direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * If the device is in direct mode it is guaranteed to stay
 * that way until iio_device_release_direct_mode() is called.
 *
 * Use with iio_device_release_direct_mode()
 *
 * Returns: 0 on success, -EBUSY on failure.
 */
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	mutex_lock(&iio_dev_opaque->mlock);

	if (iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&iio_dev_opaque->mlock);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);

/**
 * iio_device_release_direct_mode - releases claim on direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Use with iio_device_claim_direct_mode()
 */
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);

/**
 * iio_device_claim_buffer_mode - Keep device in buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * If the device is in buffer mode it is guaranteed to stay
 * that way until iio_device_release_buffer_mode() is called.
 *
 * Use with iio_device_release_buffer_mode().
 *
 * Returns: 0 on success, -EBUSY on failure.
 */
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	mutex_lock(&iio_dev_opaque->mlock);

	if (iio_buffer_enabled(indio_dev))
		return 0;

	mutex_unlock(&iio_dev_opaque->mlock);
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);

/**
 * iio_device_release_buffer_mode - releases claim on buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in buffer mode.
 *
 * Use with iio_device_claim_buffer_mode().
 */
void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);

/**
 * iio_device_get_current_mode() - helper function providing read-only access to
 *				   the opaque @currentmode variable
 * @indio_dev: IIO device structure for device
 */
int iio_device_get_current_mode(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->currentmode;
}
EXPORT_SYMBOL_GPL(iio_device_get_current_mode);

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");
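/*
 * Usage sketch (illustrative, not part of this file): the claim/release
 * direct mode helpers above are typically used to keep buffered capture from
 * racing with a one-shot conversion in a driver's read_raw(). The foo_*()
 * names are placeholders for a hypothetical driver.
 *
 *	static int foo_read_raw(struct iio_dev *indio_dev,
 *				struct iio_chan_spec const *chan,
 *				int *val, int *val2, long mask)
 *	{
 *		int ret;
 *
 *		switch (mask) {
 *		case IIO_CHAN_INFO_RAW:
 *			ret = iio_device_claim_direct_mode(indio_dev);
 *			if (ret)
 *				return ret;
 *			ret = foo_read_single(indio_dev, chan->channel, val);
 *			iio_device_release_direct_mode(indio_dev);
 *			return ret ? ret : IIO_VAL_INT;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */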