1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * The industrial I/O core 4 * 5 * Copyright (c) 2008 Jonathan Cameron 6 * 7 * Based on elements of hwmon and input subsystems. 8 */ 9 10 #define pr_fmt(fmt) "iio-core: " fmt 11 12 #include <linux/anon_inodes.h> 13 #include <linux/cdev.h> 14 #include <linux/debugfs.h> 15 #include <linux/device.h> 16 #include <linux/err.h> 17 #include <linux/fs.h> 18 #include <linux/idr.h> 19 #include <linux/kdev_t.h> 20 #include <linux/kernel.h> 21 #include <linux/module.h> 22 #include <linux/mutex.h> 23 #include <linux/poll.h> 24 #include <linux/property.h> 25 #include <linux/sched.h> 26 #include <linux/slab.h> 27 #include <linux/wait.h> 28 29 #include <linux/iio/buffer.h> 30 #include <linux/iio/buffer_impl.h> 31 #include <linux/iio/events.h> 32 #include <linux/iio/iio-opaque.h> 33 #include <linux/iio/iio.h> 34 #include <linux/iio/sysfs.h> 35 36 #include "iio_core.h" 37 #include "iio_core_trigger.h" 38 39 /* IDA to assign each registered device a unique id */ 40 static DEFINE_IDA(iio_ida); 41 42 static dev_t iio_devt; 43 44 #define IIO_DEV_MAX 256 45 struct bus_type iio_bus_type = { 46 .name = "iio", 47 }; 48 EXPORT_SYMBOL(iio_bus_type); 49 50 static struct dentry *iio_debugfs_dentry; 51 52 static const char * const iio_direction[] = { 53 [0] = "in", 54 [1] = "out", 55 }; 56 57 static const char * const iio_chan_type_name_spec[] = { 58 [IIO_VOLTAGE] = "voltage", 59 [IIO_CURRENT] = "current", 60 [IIO_POWER] = "power", 61 [IIO_ACCEL] = "accel", 62 [IIO_ANGL_VEL] = "anglvel", 63 [IIO_MAGN] = "magn", 64 [IIO_LIGHT] = "illuminance", 65 [IIO_INTENSITY] = "intensity", 66 [IIO_PROXIMITY] = "proximity", 67 [IIO_TEMP] = "temp", 68 [IIO_INCLI] = "incli", 69 [IIO_ROT] = "rot", 70 [IIO_ANGL] = "angl", 71 [IIO_TIMESTAMP] = "timestamp", 72 [IIO_CAPACITANCE] = "capacitance", 73 [IIO_ALTVOLTAGE] = "altvoltage", 74 [IIO_CCT] = "cct", 75 [IIO_PRESSURE] = "pressure", 76 [IIO_HUMIDITYRELATIVE] = "humidityrelative", 77 [IIO_ACTIVITY] = "activity", 78 [IIO_STEPS] = "steps", 79 [IIO_ENERGY] = "energy", 80 [IIO_DISTANCE] = "distance", 81 [IIO_VELOCITY] = "velocity", 82 [IIO_CONCENTRATION] = "concentration", 83 [IIO_RESISTANCE] = "resistance", 84 [IIO_PH] = "ph", 85 [IIO_UVINDEX] = "uvindex", 86 [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity", 87 [IIO_COUNT] = "count", 88 [IIO_INDEX] = "index", 89 [IIO_GRAVITY] = "gravity", 90 [IIO_POSITIONRELATIVE] = "positionrelative", 91 [IIO_PHASE] = "phase", 92 [IIO_MASSCONCENTRATION] = "massconcentration", 93 }; 94 95 static const char * const iio_modifier_names[] = { 96 [IIO_MOD_X] = "x", 97 [IIO_MOD_Y] = "y", 98 [IIO_MOD_Z] = "z", 99 [IIO_MOD_X_AND_Y] = "x&y", 100 [IIO_MOD_X_AND_Z] = "x&z", 101 [IIO_MOD_Y_AND_Z] = "y&z", 102 [IIO_MOD_X_AND_Y_AND_Z] = "x&y&z", 103 [IIO_MOD_X_OR_Y] = "x|y", 104 [IIO_MOD_X_OR_Z] = "x|z", 105 [IIO_MOD_Y_OR_Z] = "y|z", 106 [IIO_MOD_X_OR_Y_OR_Z] = "x|y|z", 107 [IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)", 108 [IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2", 109 [IIO_MOD_LIGHT_BOTH] = "both", 110 [IIO_MOD_LIGHT_IR] = "ir", 111 [IIO_MOD_LIGHT_CLEAR] = "clear", 112 [IIO_MOD_LIGHT_RED] = "red", 113 [IIO_MOD_LIGHT_GREEN] = "green", 114 [IIO_MOD_LIGHT_BLUE] = "blue", 115 [IIO_MOD_LIGHT_UV] = "uv", 116 [IIO_MOD_LIGHT_DUV] = "duv", 117 [IIO_MOD_QUATERNION] = "quaternion", 118 [IIO_MOD_TEMP_AMBIENT] = "ambient", 119 [IIO_MOD_TEMP_OBJECT] = "object", 120 [IIO_MOD_NORTH_MAGN] = "from_north_magnetic", 121 [IIO_MOD_NORTH_TRUE] = "from_north_true", 122 [IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp", 123 [IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp", 124 [IIO_MOD_RUNNING] = "running", 125 [IIO_MOD_JOGGING] = "jogging", 126 [IIO_MOD_WALKING] = "walking", 127 [IIO_MOD_STILL] = "still", 128 [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)", 129 [IIO_MOD_I] = "i", 130 [IIO_MOD_Q] = "q", 131 [IIO_MOD_CO2] = "co2", 132 [IIO_MOD_VOC] = "voc", 133 [IIO_MOD_PM1] = "pm1", 134 [IIO_MOD_PM2P5] = "pm2p5", 135 [IIO_MOD_PM4] = "pm4", 136 [IIO_MOD_PM10] = "pm10", 137 [IIO_MOD_ETHANOL] = "ethanol", 138 [IIO_MOD_H2] = "h2", 139 [IIO_MOD_O2] = "o2", 140 [IIO_MOD_LINEAR_X] = "linear_x", 141 [IIO_MOD_LINEAR_Y] = "linear_y", 142 [IIO_MOD_LINEAR_Z] = "linear_z", 143 [IIO_MOD_PITCH] = "pitch", 144 [IIO_MOD_YAW] = "yaw", 145 [IIO_MOD_ROLL] = "roll", 146 }; 147 148 /* relies on pairs of these shared then separate */ 149 static const char * const iio_chan_info_postfix[] = { 150 [IIO_CHAN_INFO_RAW] = "raw", 151 [IIO_CHAN_INFO_PROCESSED] = "input", 152 [IIO_CHAN_INFO_SCALE] = "scale", 153 [IIO_CHAN_INFO_OFFSET] = "offset", 154 [IIO_CHAN_INFO_CALIBSCALE] = "calibscale", 155 [IIO_CHAN_INFO_CALIBBIAS] = "calibbias", 156 [IIO_CHAN_INFO_PEAK] = "peak_raw", 157 [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale", 158 [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw", 159 [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw", 160 [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY] 161 = "filter_low_pass_3db_frequency", 162 [IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY] 163 = "filter_high_pass_3db_frequency", 164 [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency", 165 [IIO_CHAN_INFO_FREQUENCY] = "frequency", 166 [IIO_CHAN_INFO_PHASE] = "phase", 167 [IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain", 168 [IIO_CHAN_INFO_HYSTERESIS] = "hysteresis", 169 [IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative", 170 [IIO_CHAN_INFO_INT_TIME] = "integration_time", 171 [IIO_CHAN_INFO_ENABLE] = "en", 172 [IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight", 173 [IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight", 174 [IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count", 175 [IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time", 176 [IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity", 177 [IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio", 178 [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type", 179 [IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient", 180 [IIO_CHAN_INFO_ZEROPOINT] = "zeropoint", 181 }; 182 /** 183 * iio_device_id() - query the unique ID for the device 184 * @indio_dev: Device structure whose ID is being queried 185 * 186 * The IIO device ID is a unique index used for example for the naming 187 * of the character device /dev/iio\:device[ID]. 188 * 189 * Returns: Unique ID for the device. 190 */ 191 int iio_device_id(struct iio_dev *indio_dev) 192 { 193 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 194 195 return iio_dev_opaque->id; 196 } 197 EXPORT_SYMBOL_GPL(iio_device_id); 198 199 /** 200 * iio_buffer_enabled() - helper function to test if the buffer is enabled 201 * @indio_dev: IIO device structure for device 202 * 203 * Returns: True, if the buffer is enabled. 204 */ 205 bool iio_buffer_enabled(struct iio_dev *indio_dev) 206 { 207 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 208 209 return iio_dev_opaque->currentmode & 210 (INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE | 211 INDIO_BUFFER_TRIGGERED); 212 } 213 EXPORT_SYMBOL_GPL(iio_buffer_enabled); 214 215 #if defined(CONFIG_DEBUG_FS) 216 /* 217 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for 218 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined 219 */ 220 struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) 221 { 222 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 223 224 return iio_dev_opaque->debugfs_dentry; 225 } 226 EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry); 227 #endif 228 229 /** 230 * iio_find_channel_from_si() - get channel from its scan index 231 * @indio_dev: device 232 * @si: scan index to match 233 * 234 * Returns: 235 * Constant pointer to iio_chan_spec, if scan index matches, NULL on failure. 236 */ 237 const struct iio_chan_spec 238 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si) 239 { 240 int i; 241 242 for (i = 0; i < indio_dev->num_channels; i++) 243 if (indio_dev->channels[i].scan_index == si) 244 return &indio_dev->channels[i]; 245 return NULL; 246 } 247 248 /* This turns up an awful lot */ 249 ssize_t iio_read_const_attr(struct device *dev, 250 struct device_attribute *attr, 251 char *buf) 252 { 253 return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string); 254 } 255 EXPORT_SYMBOL(iio_read_const_attr); 256 257 /** 258 * iio_device_set_clock() - Set current timestamping clock for the device 259 * @indio_dev: IIO device structure containing the device 260 * @clock_id: timestamping clock POSIX identifier to set. 261 * 262 * Returns: 0 on success, or a negative error code. 263 */ 264 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) 265 { 266 int ret; 267 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 268 const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; 269 270 ret = mutex_lock_interruptible(&iio_dev_opaque->mlock); 271 if (ret) 272 return ret; 273 if ((ev_int && iio_event_enabled(ev_int)) || 274 iio_buffer_enabled(indio_dev)) { 275 mutex_unlock(&iio_dev_opaque->mlock); 276 return -EBUSY; 277 } 278 iio_dev_opaque->clock_id = clock_id; 279 mutex_unlock(&iio_dev_opaque->mlock); 280 281 return 0; 282 } 283 EXPORT_SYMBOL(iio_device_set_clock); 284 285 /** 286 * iio_device_get_clock() - Retrieve current timestamping clock for the device 287 * @indio_dev: IIO device structure containing the device 288 * 289 * Returns: Clock ID of the current timestamping clock for the device. 290 */ 291 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) 292 { 293 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 294 295 return iio_dev_opaque->clock_id; 296 } 297 EXPORT_SYMBOL(iio_device_get_clock); 298 299 /** 300 * iio_get_time_ns() - utility function to get a time stamp for events etc 301 * @indio_dev: device 302 * 303 * Returns: Timestamp of the event in nanoseconds. 304 */ 305 s64 iio_get_time_ns(const struct iio_dev *indio_dev) 306 { 307 struct timespec64 tp; 308 309 switch (iio_device_get_clock(indio_dev)) { 310 case CLOCK_REALTIME: 311 return ktime_get_real_ns(); 312 case CLOCK_MONOTONIC: 313 return ktime_get_ns(); 314 case CLOCK_MONOTONIC_RAW: 315 return ktime_get_raw_ns(); 316 case CLOCK_REALTIME_COARSE: 317 return ktime_to_ns(ktime_get_coarse_real()); 318 case CLOCK_MONOTONIC_COARSE: 319 ktime_get_coarse_ts64(&tp); 320 return timespec64_to_ns(&tp); 321 case CLOCK_BOOTTIME: 322 return ktime_get_boottime_ns(); 323 case CLOCK_TAI: 324 return ktime_get_clocktai_ns(); 325 default: 326 BUG(); 327 } 328 } 329 EXPORT_SYMBOL(iio_get_time_ns); 330 331 static int __init iio_init(void) 332 { 333 int ret; 334 335 /* Register sysfs bus */ 336 ret = bus_register(&iio_bus_type); 337 if (ret < 0) { 338 pr_err("could not register bus type\n"); 339 goto error_nothing; 340 } 341 342 ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio"); 343 if (ret < 0) { 344 pr_err("failed to allocate char dev region\n"); 345 goto error_unregister_bus_type; 346 } 347 348 iio_debugfs_dentry = debugfs_create_dir("iio", NULL); 349 350 return 0; 351 352 error_unregister_bus_type: 353 bus_unregister(&iio_bus_type); 354 error_nothing: 355 return ret; 356 } 357 358 static void __exit iio_exit(void) 359 { 360 if (iio_devt) 361 unregister_chrdev_region(iio_devt, IIO_DEV_MAX); 362 bus_unregister(&iio_bus_type); 363 debugfs_remove(iio_debugfs_dentry); 364 } 365 366 #if defined(CONFIG_DEBUG_FS) 367 static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, 368 size_t count, loff_t *ppos) 369 { 370 struct iio_dev *indio_dev = file->private_data; 371 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 372 unsigned int val = 0; 373 int ret; 374 375 if (*ppos > 0) 376 return simple_read_from_buffer(userbuf, count, ppos, 377 iio_dev_opaque->read_buf, 378 iio_dev_opaque->read_buf_len); 379 380 ret = indio_dev->info->debugfs_reg_access(indio_dev, 381 iio_dev_opaque->cached_reg_addr, 382 0, &val); 383 if (ret) { 384 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); 385 return ret; 386 } 387 388 iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf, 389 sizeof(iio_dev_opaque->read_buf), 390 "0x%X\n", val); 391 392 return simple_read_from_buffer(userbuf, count, ppos, 393 iio_dev_opaque->read_buf, 394 iio_dev_opaque->read_buf_len); 395 } 396 397 static ssize_t iio_debugfs_write_reg(struct file *file, 398 const char __user *userbuf, size_t count, loff_t *ppos) 399 { 400 struct iio_dev *indio_dev = file->private_data; 401 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 402 unsigned int reg, val; 403 char buf[80]; 404 int ret; 405 406 count = min(count, sizeof(buf) - 1); 407 if (copy_from_user(buf, userbuf, count)) 408 return -EFAULT; 409 410 buf[count] = 0; 411 412 ret = sscanf(buf, "%i %i", ®, &val); 413 414 switch (ret) { 415 case 1: 416 iio_dev_opaque->cached_reg_addr = reg; 417 break; 418 case 2: 419 iio_dev_opaque->cached_reg_addr = reg; 420 ret = indio_dev->info->debugfs_reg_access(indio_dev, reg, 421 val, NULL); 422 if (ret) { 423 dev_err(indio_dev->dev.parent, "%s: write failed\n", 424 __func__); 425 return ret; 426 } 427 break; 428 default: 429 return -EINVAL; 430 } 431 432 return count; 433 } 434 435 static const struct file_operations iio_debugfs_reg_fops = { 436 .open = simple_open, 437 .read = iio_debugfs_read_reg, 438 .write = iio_debugfs_write_reg, 439 }; 440 441 static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) 442 { 443 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 444 445 debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry); 446 } 447 448 static void iio_device_register_debugfs(struct iio_dev *indio_dev) 449 { 450 struct iio_dev_opaque *iio_dev_opaque; 451 452 if (indio_dev->info->debugfs_reg_access == NULL) 453 return; 454 455 if (!iio_debugfs_dentry) 456 return; 457 458 iio_dev_opaque = to_iio_dev_opaque(indio_dev); 459 460 iio_dev_opaque->debugfs_dentry = 461 debugfs_create_dir(dev_name(&indio_dev->dev), 462 iio_debugfs_dentry); 463 464 debugfs_create_file("direct_reg_access", 0644, 465 iio_dev_opaque->debugfs_dentry, indio_dev, 466 &iio_debugfs_reg_fops); 467 } 468 #else 469 static void iio_device_register_debugfs(struct iio_dev *indio_dev) 470 { 471 } 472 473 static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) 474 { 475 } 476 #endif /* CONFIG_DEBUG_FS */ 477 478 static ssize_t iio_read_channel_ext_info(struct device *dev, 479 struct device_attribute *attr, 480 char *buf) 481 { 482 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 483 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 484 const struct iio_chan_spec_ext_info *ext_info; 485 486 ext_info = &this_attr->c->ext_info[this_attr->address]; 487 488 return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf); 489 } 490 491 static ssize_t iio_write_channel_ext_info(struct device *dev, 492 struct device_attribute *attr, 493 const char *buf, size_t len) 494 { 495 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 496 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 497 const struct iio_chan_spec_ext_info *ext_info; 498 499 ext_info = &this_attr->c->ext_info[this_attr->address]; 500 501 return ext_info->write(indio_dev, ext_info->private, 502 this_attr->c, buf, len); 503 } 504 505 ssize_t iio_enum_available_read(struct iio_dev *indio_dev, 506 uintptr_t priv, const struct iio_chan_spec *chan, char *buf) 507 { 508 const struct iio_enum *e = (const struct iio_enum *)priv; 509 unsigned int i; 510 size_t len = 0; 511 512 if (!e->num_items) 513 return 0; 514 515 for (i = 0; i < e->num_items; ++i) { 516 if (!e->items[i]) 517 continue; 518 len += sysfs_emit_at(buf, len, "%s ", e->items[i]); 519 } 520 521 /* replace last space with a newline */ 522 buf[len - 1] = '\n'; 523 524 return len; 525 } 526 EXPORT_SYMBOL_GPL(iio_enum_available_read); 527 528 ssize_t iio_enum_read(struct iio_dev *indio_dev, 529 uintptr_t priv, const struct iio_chan_spec *chan, char *buf) 530 { 531 const struct iio_enum *e = (const struct iio_enum *)priv; 532 int i; 533 534 if (!e->get) 535 return -EINVAL; 536 537 i = e->get(indio_dev, chan); 538 if (i < 0) 539 return i; 540 if (i >= e->num_items || !e->items[i]) 541 return -EINVAL; 542 543 return sysfs_emit(buf, "%s\n", e->items[i]); 544 } 545 EXPORT_SYMBOL_GPL(iio_enum_read); 546 547 ssize_t iio_enum_write(struct iio_dev *indio_dev, 548 uintptr_t priv, const struct iio_chan_spec *chan, const char *buf, 549 size_t len) 550 { 551 const struct iio_enum *e = (const struct iio_enum *)priv; 552 int ret; 553 554 if (!e->set) 555 return -EINVAL; 556 557 ret = __sysfs_match_string(e->items, e->num_items, buf); 558 if (ret < 0) 559 return ret; 560 561 ret = e->set(indio_dev, chan, ret); 562 return ret ? ret : len; 563 } 564 EXPORT_SYMBOL_GPL(iio_enum_write); 565 566 static const struct iio_mount_matrix iio_mount_idmatrix = { 567 .rotation = { 568 "1", "0", "0", 569 "0", "1", "0", 570 "0", "0", "1" 571 } 572 }; 573 574 static int iio_setup_mount_idmatrix(const struct device *dev, 575 struct iio_mount_matrix *matrix) 576 { 577 *matrix = iio_mount_idmatrix; 578 dev_info(dev, "mounting matrix not found: using identity...\n"); 579 return 0; 580 } 581 582 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, 583 const struct iio_chan_spec *chan, char *buf) 584 { 585 const struct iio_mount_matrix *mtx; 586 587 mtx = ((iio_get_mount_matrix_t *)priv)(indio_dev, chan); 588 if (IS_ERR(mtx)) 589 return PTR_ERR(mtx); 590 591 if (!mtx) 592 mtx = &iio_mount_idmatrix; 593 594 return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n", 595 mtx->rotation[0], mtx->rotation[1], mtx->rotation[2], 596 mtx->rotation[3], mtx->rotation[4], mtx->rotation[5], 597 mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]); 598 } 599 EXPORT_SYMBOL_GPL(iio_show_mount_matrix); 600 601 /** 602 * iio_read_mount_matrix() - retrieve iio device mounting matrix from 603 * device "mount-matrix" property 604 * @dev: device the mounting matrix property is assigned to 605 * @matrix: where to store retrieved matrix 606 * 607 * If device is assigned no mounting matrix property, a default 3x3 identity 608 * matrix will be filled in. 609 * 610 * Returns: 0 if success, or a negative error code on failure. 611 */ 612 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix) 613 { 614 size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation); 615 int err; 616 617 err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len); 618 if (err == len) 619 return 0; 620 621 if (err >= 0) 622 /* Invalid number of matrix entries. */ 623 return -EINVAL; 624 625 if (err != -EINVAL) 626 /* Invalid matrix declaration format. */ 627 return err; 628 629 /* Matrix was not declared at all: fallback to identity. */ 630 return iio_setup_mount_idmatrix(dev, matrix); 631 } 632 EXPORT_SYMBOL(iio_read_mount_matrix); 633 634 static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type, 635 int size, const int *vals) 636 { 637 int tmp0, tmp1; 638 s64 tmp2; 639 bool scale_db = false; 640 641 switch (type) { 642 case IIO_VAL_INT: 643 return sysfs_emit_at(buf, offset, "%d", vals[0]); 644 case IIO_VAL_INT_PLUS_MICRO_DB: 645 scale_db = true; 646 fallthrough; 647 case IIO_VAL_INT_PLUS_MICRO: 648 if (vals[1] < 0) 649 return sysfs_emit_at(buf, offset, "-%d.%06u%s", 650 abs(vals[0]), -vals[1], 651 scale_db ? " dB" : ""); 652 else 653 return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0], 654 vals[1], scale_db ? " dB" : ""); 655 case IIO_VAL_INT_PLUS_NANO: 656 if (vals[1] < 0) 657 return sysfs_emit_at(buf, offset, "-%d.%09u", 658 abs(vals[0]), -vals[1]); 659 else 660 return sysfs_emit_at(buf, offset, "%d.%09u", vals[0], 661 vals[1]); 662 case IIO_VAL_FRACTIONAL: 663 tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]); 664 tmp1 = vals[1]; 665 tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1); 666 if ((tmp2 < 0) && (tmp0 == 0)) 667 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1)); 668 else 669 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0, 670 abs(tmp1)); 671 case IIO_VAL_FRACTIONAL_LOG2: 672 tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]); 673 tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1); 674 if (tmp0 == 0 && tmp2 < 0) 675 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1)); 676 else 677 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0, 678 abs(tmp1)); 679 case IIO_VAL_INT_MULTIPLE: 680 { 681 int i; 682 int l = 0; 683 684 for (i = 0; i < size; ++i) 685 l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]); 686 return l; 687 } 688 case IIO_VAL_CHAR: 689 return sysfs_emit_at(buf, offset, "%c", (char)vals[0]); 690 case IIO_VAL_INT_64: 691 tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]); 692 return sysfs_emit_at(buf, offset, "%lld", tmp2); 693 default: 694 return 0; 695 } 696 } 697 698 /** 699 * iio_format_value() - Formats a IIO value into its string representation 700 * @buf: The buffer to which the formatted value gets written 701 * which is assumed to be big enough (i.e. PAGE_SIZE). 702 * @type: One of the IIO_VAL_* constants. This decides how the val 703 * and val2 parameters are formatted. 704 * @size: Number of IIO value entries contained in vals 705 * @vals: Pointer to the values, exact meaning depends on the 706 * type parameter. 707 * 708 * Returns: 709 * 0 by default, a negative number on failure or the total number of characters 710 * written for a type that belongs to the IIO_VAL_* constant. 711 */ 712 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) 713 { 714 ssize_t len; 715 716 len = __iio_format_value(buf, 0, type, size, vals); 717 if (len >= PAGE_SIZE - 1) 718 return -EFBIG; 719 720 return len + sysfs_emit_at(buf, len, "\n"); 721 } 722 EXPORT_SYMBOL_GPL(iio_format_value); 723 724 static ssize_t iio_read_channel_label(struct device *dev, 725 struct device_attribute *attr, 726 char *buf) 727 { 728 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 729 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 730 731 if (indio_dev->info->read_label) 732 return indio_dev->info->read_label(indio_dev, this_attr->c, buf); 733 734 if (this_attr->c->extend_name) 735 return sysfs_emit(buf, "%s\n", this_attr->c->extend_name); 736 737 return -EINVAL; 738 } 739 740 static ssize_t iio_read_channel_info(struct device *dev, 741 struct device_attribute *attr, 742 char *buf) 743 { 744 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 745 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 746 int vals[INDIO_MAX_RAW_ELEMENTS]; 747 int ret; 748 int val_len = 2; 749 750 if (indio_dev->info->read_raw_multi) 751 ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c, 752 INDIO_MAX_RAW_ELEMENTS, 753 vals, &val_len, 754 this_attr->address); 755 else if (indio_dev->info->read_raw) 756 ret = indio_dev->info->read_raw(indio_dev, this_attr->c, 757 &vals[0], &vals[1], this_attr->address); 758 else 759 return -EINVAL; 760 761 if (ret < 0) 762 return ret; 763 764 return iio_format_value(buf, ret, val_len, vals); 765 } 766 767 static ssize_t iio_format_list(char *buf, const int *vals, int type, int length, 768 const char *prefix, const char *suffix) 769 { 770 ssize_t len; 771 int stride; 772 int i; 773 774 switch (type) { 775 case IIO_VAL_INT: 776 stride = 1; 777 break; 778 default: 779 stride = 2; 780 break; 781 } 782 783 len = sysfs_emit(buf, prefix); 784 785 for (i = 0; i <= length - stride; i += stride) { 786 if (i != 0) { 787 len += sysfs_emit_at(buf, len, " "); 788 if (len >= PAGE_SIZE) 789 return -EFBIG; 790 } 791 792 len += __iio_format_value(buf, len, type, stride, &vals[i]); 793 if (len >= PAGE_SIZE) 794 return -EFBIG; 795 } 796 797 len += sysfs_emit_at(buf, len, "%s\n", suffix); 798 799 return len; 800 } 801 802 static ssize_t iio_format_avail_list(char *buf, const int *vals, 803 int type, int length) 804 { 805 806 return iio_format_list(buf, vals, type, length, "", ""); 807 } 808 809 static ssize_t iio_format_avail_range(char *buf, const int *vals, int type) 810 { 811 int length; 812 813 /* 814 * length refers to the array size , not the number of elements. 815 * The purpose is to print the range [min , step ,max] so length should 816 * be 3 in case of int, and 6 for other types. 817 */ 818 switch (type) { 819 case IIO_VAL_INT: 820 length = 3; 821 break; 822 default: 823 length = 6; 824 break; 825 } 826 827 return iio_format_list(buf, vals, type, length, "[", "]"); 828 } 829 830 static ssize_t iio_read_channel_info_avail(struct device *dev, 831 struct device_attribute *attr, 832 char *buf) 833 { 834 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 835 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 836 const int *vals; 837 int ret; 838 int length; 839 int type; 840 841 if (!indio_dev->info->read_avail) 842 return -EINVAL; 843 844 ret = indio_dev->info->read_avail(indio_dev, this_attr->c, 845 &vals, &type, &length, 846 this_attr->address); 847 848 if (ret < 0) 849 return ret; 850 switch (ret) { 851 case IIO_AVAIL_LIST: 852 return iio_format_avail_list(buf, vals, type, length); 853 case IIO_AVAIL_RANGE: 854 return iio_format_avail_range(buf, vals, type); 855 default: 856 return -EINVAL; 857 } 858 } 859 860 /** 861 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string 862 * @str: The string to parse 863 * @fract_mult: Multiplier for the first decimal place, should be a power of 10 864 * @integer: The integer part of the number 865 * @fract: The fractional part of the number 866 * @scale_db: True if this should parse as dB 867 * 868 * Returns: 869 * 0 on success, or a negative error code if the string could not be parsed. 870 */ 871 static int __iio_str_to_fixpoint(const char *str, int fract_mult, 872 int *integer, int *fract, bool scale_db) 873 { 874 int i = 0, f = 0; 875 bool integer_part = true, negative = false; 876 877 if (fract_mult == 0) { 878 *fract = 0; 879 880 return kstrtoint(str, 0, integer); 881 } 882 883 if (str[0] == '-') { 884 negative = true; 885 str++; 886 } else if (str[0] == '+') { 887 str++; 888 } 889 890 while (*str) { 891 if ('0' <= *str && *str <= '9') { 892 if (integer_part) { 893 i = i * 10 + *str - '0'; 894 } else { 895 f += fract_mult * (*str - '0'); 896 fract_mult /= 10; 897 } 898 } else if (*str == '\n') { 899 if (*(str + 1) == '\0') 900 break; 901 return -EINVAL; 902 } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) { 903 /* Ignore the dB suffix */ 904 str += sizeof(" dB") - 1; 905 continue; 906 } else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) { 907 /* Ignore the dB suffix */ 908 str += sizeof("dB") - 1; 909 continue; 910 } else if (*str == '.' && integer_part) { 911 integer_part = false; 912 } else { 913 return -EINVAL; 914 } 915 str++; 916 } 917 918 if (negative) { 919 if (i) 920 i = -i; 921 else 922 f = -f; 923 } 924 925 *integer = i; 926 *fract = f; 927 928 return 0; 929 } 930 931 /** 932 * iio_str_to_fixpoint() - Parse a fixed-point number from a string 933 * @str: The string to parse 934 * @fract_mult: Multiplier for the first decimal place, should be a power of 10 935 * @integer: The integer part of the number 936 * @fract: The fractional part of the number 937 * 938 * Returns: 939 * 0 on success, or a negative error code if the string could not be parsed. 940 */ 941 int iio_str_to_fixpoint(const char *str, int fract_mult, 942 int *integer, int *fract) 943 { 944 return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false); 945 } 946 EXPORT_SYMBOL_GPL(iio_str_to_fixpoint); 947 948 static ssize_t iio_write_channel_info(struct device *dev, 949 struct device_attribute *attr, 950 const char *buf, 951 size_t len) 952 { 953 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 954 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 955 int ret, fract_mult = 100000; 956 int integer, fract = 0; 957 bool is_char = false; 958 bool scale_db = false; 959 960 /* Assumes decimal - precision based on number of digits */ 961 if (!indio_dev->info->write_raw) 962 return -EINVAL; 963 964 if (indio_dev->info->write_raw_get_fmt) 965 switch (indio_dev->info->write_raw_get_fmt(indio_dev, 966 this_attr->c, this_attr->address)) { 967 case IIO_VAL_INT: 968 fract_mult = 0; 969 break; 970 case IIO_VAL_INT_PLUS_MICRO_DB: 971 scale_db = true; 972 fallthrough; 973 case IIO_VAL_INT_PLUS_MICRO: 974 fract_mult = 100000; 975 break; 976 case IIO_VAL_INT_PLUS_NANO: 977 fract_mult = 100000000; 978 break; 979 case IIO_VAL_CHAR: 980 is_char = true; 981 break; 982 default: 983 return -EINVAL; 984 } 985 986 if (is_char) { 987 char ch; 988 989 if (sscanf(buf, "%c", &ch) != 1) 990 return -EINVAL; 991 integer = ch; 992 } else { 993 ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract, 994 scale_db); 995 if (ret) 996 return ret; 997 } 998 999 ret = indio_dev->info->write_raw(indio_dev, this_attr->c, 1000 integer, fract, this_attr->address); 1001 if (ret) 1002 return ret; 1003 1004 return len; 1005 } 1006 1007 static 1008 int __iio_device_attr_init(struct device_attribute *dev_attr, 1009 const char *postfix, 1010 struct iio_chan_spec const *chan, 1011 ssize_t (*readfunc)(struct device *dev, 1012 struct device_attribute *attr, 1013 char *buf), 1014 ssize_t (*writefunc)(struct device *dev, 1015 struct device_attribute *attr, 1016 const char *buf, 1017 size_t len), 1018 enum iio_shared_by shared_by) 1019 { 1020 int ret = 0; 1021 char *name = NULL; 1022 char *full_postfix; 1023 1024 sysfs_attr_init(&dev_attr->attr); 1025 1026 /* Build up postfix of <extend_name>_<modifier>_postfix */ 1027 if (chan->modified && (shared_by == IIO_SEPARATE)) { 1028 if (chan->extend_name) 1029 full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s", 1030 iio_modifier_names[chan->channel2], 1031 chan->extend_name, 1032 postfix); 1033 else 1034 full_postfix = kasprintf(GFP_KERNEL, "%s_%s", 1035 iio_modifier_names[chan->channel2], 1036 postfix); 1037 } else { 1038 if (chan->extend_name == NULL || shared_by != IIO_SEPARATE) 1039 full_postfix = kstrdup(postfix, GFP_KERNEL); 1040 else 1041 full_postfix = kasprintf(GFP_KERNEL, 1042 "%s_%s", 1043 chan->extend_name, 1044 postfix); 1045 } 1046 if (full_postfix == NULL) 1047 return -ENOMEM; 1048 1049 if (chan->differential) { /* Differential can not have modifier */ 1050 switch (shared_by) { 1051 case IIO_SHARED_BY_ALL: 1052 name = kasprintf(GFP_KERNEL, "%s", full_postfix); 1053 break; 1054 case IIO_SHARED_BY_DIR: 1055 name = kasprintf(GFP_KERNEL, "%s_%s", 1056 iio_direction[chan->output], 1057 full_postfix); 1058 break; 1059 case IIO_SHARED_BY_TYPE: 1060 name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s", 1061 iio_direction[chan->output], 1062 iio_chan_type_name_spec[chan->type], 1063 iio_chan_type_name_spec[chan->type], 1064 full_postfix); 1065 break; 1066 case IIO_SEPARATE: 1067 if (!chan->indexed) { 1068 WARN(1, "Differential channels must be indexed\n"); 1069 ret = -EINVAL; 1070 goto error_free_full_postfix; 1071 } 1072 name = kasprintf(GFP_KERNEL, 1073 "%s_%s%d-%s%d_%s", 1074 iio_direction[chan->output], 1075 iio_chan_type_name_spec[chan->type], 1076 chan->channel, 1077 iio_chan_type_name_spec[chan->type], 1078 chan->channel2, 1079 full_postfix); 1080 break; 1081 } 1082 } else { /* Single ended */ 1083 switch (shared_by) { 1084 case IIO_SHARED_BY_ALL: 1085 name = kasprintf(GFP_KERNEL, "%s", full_postfix); 1086 break; 1087 case IIO_SHARED_BY_DIR: 1088 name = kasprintf(GFP_KERNEL, "%s_%s", 1089 iio_direction[chan->output], 1090 full_postfix); 1091 break; 1092 case IIO_SHARED_BY_TYPE: 1093 name = kasprintf(GFP_KERNEL, "%s_%s_%s", 1094 iio_direction[chan->output], 1095 iio_chan_type_name_spec[chan->type], 1096 full_postfix); 1097 break; 1098 1099 case IIO_SEPARATE: 1100 if (chan->indexed) 1101 name = kasprintf(GFP_KERNEL, "%s_%s%d_%s", 1102 iio_direction[chan->output], 1103 iio_chan_type_name_spec[chan->type], 1104 chan->channel, 1105 full_postfix); 1106 else 1107 name = kasprintf(GFP_KERNEL, "%s_%s_%s", 1108 iio_direction[chan->output], 1109 iio_chan_type_name_spec[chan->type], 1110 full_postfix); 1111 break; 1112 } 1113 } 1114 if (name == NULL) { 1115 ret = -ENOMEM; 1116 goto error_free_full_postfix; 1117 } 1118 dev_attr->attr.name = name; 1119 1120 if (readfunc) { 1121 dev_attr->attr.mode |= 0444; 1122 dev_attr->show = readfunc; 1123 } 1124 1125 if (writefunc) { 1126 dev_attr->attr.mode |= 0200; 1127 dev_attr->store = writefunc; 1128 } 1129 1130 error_free_full_postfix: 1131 kfree(full_postfix); 1132 1133 return ret; 1134 } 1135 1136 static void __iio_device_attr_deinit(struct device_attribute *dev_attr) 1137 { 1138 kfree(dev_attr->attr.name); 1139 } 1140 1141 int __iio_add_chan_devattr(const char *postfix, 1142 struct iio_chan_spec const *chan, 1143 ssize_t (*readfunc)(struct device *dev, 1144 struct device_attribute *attr, 1145 char *buf), 1146 ssize_t (*writefunc)(struct device *dev, 1147 struct device_attribute *attr, 1148 const char *buf, 1149 size_t len), 1150 u64 mask, 1151 enum iio_shared_by shared_by, 1152 struct device *dev, 1153 struct iio_buffer *buffer, 1154 struct list_head *attr_list) 1155 { 1156 int ret; 1157 struct iio_dev_attr *iio_attr, *t; 1158 1159 iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL); 1160 if (iio_attr == NULL) 1161 return -ENOMEM; 1162 ret = __iio_device_attr_init(&iio_attr->dev_attr, 1163 postfix, chan, 1164 readfunc, writefunc, shared_by); 1165 if (ret) 1166 goto error_iio_dev_attr_free; 1167 iio_attr->c = chan; 1168 iio_attr->address = mask; 1169 iio_attr->buffer = buffer; 1170 list_for_each_entry(t, attr_list, l) 1171 if (strcmp(t->dev_attr.attr.name, 1172 iio_attr->dev_attr.attr.name) == 0) { 1173 if (shared_by == IIO_SEPARATE) 1174 dev_err(dev, "tried to double register : %s\n", 1175 t->dev_attr.attr.name); 1176 ret = -EBUSY; 1177 goto error_device_attr_deinit; 1178 } 1179 list_add(&iio_attr->l, attr_list); 1180 1181 return 0; 1182 1183 error_device_attr_deinit: 1184 __iio_device_attr_deinit(&iio_attr->dev_attr); 1185 error_iio_dev_attr_free: 1186 kfree(iio_attr); 1187 return ret; 1188 } 1189 1190 static int iio_device_add_channel_label(struct iio_dev *indio_dev, 1191 struct iio_chan_spec const *chan) 1192 { 1193 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1194 int ret; 1195 1196 if (!indio_dev->info->read_label && !chan->extend_name) 1197 return 0; 1198 1199 ret = __iio_add_chan_devattr("label", 1200 chan, 1201 &iio_read_channel_label, 1202 NULL, 1203 0, 1204 IIO_SEPARATE, 1205 &indio_dev->dev, 1206 NULL, 1207 &iio_dev_opaque->channel_attr_list); 1208 if (ret < 0) 1209 return ret; 1210 1211 return 1; 1212 } 1213 1214 static int iio_device_add_info_mask_type(struct iio_dev *indio_dev, 1215 struct iio_chan_spec const *chan, 1216 enum iio_shared_by shared_by, 1217 const long *infomask) 1218 { 1219 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1220 int i, ret, attrcount = 0; 1221 1222 for_each_set_bit(i, infomask, sizeof(*infomask)*8) { 1223 if (i >= ARRAY_SIZE(iio_chan_info_postfix)) 1224 return -EINVAL; 1225 ret = __iio_add_chan_devattr(iio_chan_info_postfix[i], 1226 chan, 1227 &iio_read_channel_info, 1228 &iio_write_channel_info, 1229 i, 1230 shared_by, 1231 &indio_dev->dev, 1232 NULL, 1233 &iio_dev_opaque->channel_attr_list); 1234 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) 1235 continue; 1236 if (ret < 0) 1237 return ret; 1238 attrcount++; 1239 } 1240 1241 return attrcount; 1242 } 1243 1244 static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev, 1245 struct iio_chan_spec const *chan, 1246 enum iio_shared_by shared_by, 1247 const long *infomask) 1248 { 1249 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1250 int i, ret, attrcount = 0; 1251 char *avail_postfix; 1252 1253 for_each_set_bit(i, infomask, sizeof(*infomask) * 8) { 1254 if (i >= ARRAY_SIZE(iio_chan_info_postfix)) 1255 return -EINVAL; 1256 avail_postfix = kasprintf(GFP_KERNEL, 1257 "%s_available", 1258 iio_chan_info_postfix[i]); 1259 if (!avail_postfix) 1260 return -ENOMEM; 1261 1262 ret = __iio_add_chan_devattr(avail_postfix, 1263 chan, 1264 &iio_read_channel_info_avail, 1265 NULL, 1266 i, 1267 shared_by, 1268 &indio_dev->dev, 1269 NULL, 1270 &iio_dev_opaque->channel_attr_list); 1271 kfree(avail_postfix); 1272 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) 1273 continue; 1274 if (ret < 0) 1275 return ret; 1276 attrcount++; 1277 } 1278 1279 return attrcount; 1280 } 1281 1282 static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, 1283 struct iio_chan_spec const *chan) 1284 { 1285 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1286 int ret, attrcount = 0; 1287 const struct iio_chan_spec_ext_info *ext_info; 1288 1289 if (chan->channel < 0) 1290 return 0; 1291 ret = iio_device_add_info_mask_type(indio_dev, chan, 1292 IIO_SEPARATE, 1293 &chan->info_mask_separate); 1294 if (ret < 0) 1295 return ret; 1296 attrcount += ret; 1297 1298 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1299 IIO_SEPARATE, 1300 &chan->info_mask_separate_available); 1301 if (ret < 0) 1302 return ret; 1303 attrcount += ret; 1304 1305 ret = iio_device_add_info_mask_type(indio_dev, chan, 1306 IIO_SHARED_BY_TYPE, 1307 &chan->info_mask_shared_by_type); 1308 if (ret < 0) 1309 return ret; 1310 attrcount += ret; 1311 1312 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1313 IIO_SHARED_BY_TYPE, 1314 &chan->info_mask_shared_by_type_available); 1315 if (ret < 0) 1316 return ret; 1317 attrcount += ret; 1318 1319 ret = iio_device_add_info_mask_type(indio_dev, chan, 1320 IIO_SHARED_BY_DIR, 1321 &chan->info_mask_shared_by_dir); 1322 if (ret < 0) 1323 return ret; 1324 attrcount += ret; 1325 1326 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1327 IIO_SHARED_BY_DIR, 1328 &chan->info_mask_shared_by_dir_available); 1329 if (ret < 0) 1330 return ret; 1331 attrcount += ret; 1332 1333 ret = iio_device_add_info_mask_type(indio_dev, chan, 1334 IIO_SHARED_BY_ALL, 1335 &chan->info_mask_shared_by_all); 1336 if (ret < 0) 1337 return ret; 1338 attrcount += ret; 1339 1340 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1341 IIO_SHARED_BY_ALL, 1342 &chan->info_mask_shared_by_all_available); 1343 if (ret < 0) 1344 return ret; 1345 attrcount += ret; 1346 1347 ret = iio_device_add_channel_label(indio_dev, chan); 1348 if (ret < 0) 1349 return ret; 1350 attrcount += ret; 1351 1352 if (chan->ext_info) { 1353 unsigned int i = 0; 1354 1355 for (ext_info = chan->ext_info; ext_info->name; ext_info++) { 1356 ret = __iio_add_chan_devattr(ext_info->name, 1357 chan, 1358 ext_info->read ? 1359 &iio_read_channel_ext_info : NULL, 1360 ext_info->write ? 1361 &iio_write_channel_ext_info : NULL, 1362 i, 1363 ext_info->shared, 1364 &indio_dev->dev, 1365 NULL, 1366 &iio_dev_opaque->channel_attr_list); 1367 i++; 1368 if (ret == -EBUSY && ext_info->shared) 1369 continue; 1370 1371 if (ret) 1372 return ret; 1373 1374 attrcount++; 1375 } 1376 } 1377 1378 return attrcount; 1379 } 1380 1381 /** 1382 * iio_free_chan_devattr_list() - Free a list of IIO device attributes 1383 * @attr_list: List of IIO device attributes 1384 * 1385 * This function frees the memory allocated for each of the IIO device 1386 * attributes in the list. 1387 */ 1388 void iio_free_chan_devattr_list(struct list_head *attr_list) 1389 { 1390 struct iio_dev_attr *p, *n; 1391 1392 list_for_each_entry_safe(p, n, attr_list, l) { 1393 kfree_const(p->dev_attr.attr.name); 1394 list_del(&p->l); 1395 kfree(p); 1396 } 1397 } 1398 1399 static ssize_t name_show(struct device *dev, struct device_attribute *attr, 1400 char *buf) 1401 { 1402 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1403 1404 return sysfs_emit(buf, "%s\n", indio_dev->name); 1405 } 1406 1407 static DEVICE_ATTR_RO(name); 1408 1409 static ssize_t label_show(struct device *dev, struct device_attribute *attr, 1410 char *buf) 1411 { 1412 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1413 1414 return sysfs_emit(buf, "%s\n", indio_dev->label); 1415 } 1416 1417 static DEVICE_ATTR_RO(label); 1418 1419 static const char * const clock_names[] = { 1420 [CLOCK_REALTIME] = "realtime", 1421 [CLOCK_MONOTONIC] = "monotonic", 1422 [CLOCK_PROCESS_CPUTIME_ID] = "process_cputime_id", 1423 [CLOCK_THREAD_CPUTIME_ID] = "thread_cputime_id", 1424 [CLOCK_MONOTONIC_RAW] = "monotonic_raw", 1425 [CLOCK_REALTIME_COARSE] = "realtime_coarse", 1426 [CLOCK_MONOTONIC_COARSE] = "monotonic_coarse", 1427 [CLOCK_BOOTTIME] = "boottime", 1428 [CLOCK_REALTIME_ALARM] = "realtime_alarm", 1429 [CLOCK_BOOTTIME_ALARM] = "boottime_alarm", 1430 [CLOCK_SGI_CYCLE] = "sgi_cycle", 1431 [CLOCK_TAI] = "tai", 1432 }; 1433 1434 static ssize_t current_timestamp_clock_show(struct device *dev, 1435 struct device_attribute *attr, 1436 char *buf) 1437 { 1438 const struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1439 const clockid_t clk = iio_device_get_clock(indio_dev); 1440 1441 switch (clk) { 1442 case CLOCK_REALTIME: 1443 case CLOCK_MONOTONIC: 1444 case CLOCK_MONOTONIC_RAW: 1445 case CLOCK_REALTIME_COARSE: 1446 case CLOCK_MONOTONIC_COARSE: 1447 case CLOCK_BOOTTIME: 1448 case CLOCK_TAI: 1449 break; 1450 default: 1451 BUG(); 1452 } 1453 1454 return sysfs_emit(buf, "%s\n", clock_names[clk]); 1455 } 1456 1457 static ssize_t current_timestamp_clock_store(struct device *dev, 1458 struct device_attribute *attr, 1459 const char *buf, size_t len) 1460 { 1461 clockid_t clk; 1462 int ret; 1463 1464 ret = sysfs_match_string(clock_names, buf); 1465 if (ret < 0) 1466 return ret; 1467 clk = ret; 1468 1469 switch (clk) { 1470 case CLOCK_REALTIME: 1471 case CLOCK_MONOTONIC: 1472 case CLOCK_MONOTONIC_RAW: 1473 case CLOCK_REALTIME_COARSE: 1474 case CLOCK_MONOTONIC_COARSE: 1475 case CLOCK_BOOTTIME: 1476 case CLOCK_TAI: 1477 break; 1478 default: 1479 return -EINVAL; 1480 } 1481 1482 ret = iio_device_set_clock(dev_to_iio_dev(dev), clk); 1483 if (ret) 1484 return ret; 1485 1486 return len; 1487 } 1488 1489 int iio_device_register_sysfs_group(struct iio_dev *indio_dev, 1490 const struct attribute_group *group) 1491 { 1492 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1493 const struct attribute_group **new, **old = iio_dev_opaque->groups; 1494 unsigned int cnt = iio_dev_opaque->groupcounter; 1495 1496 new = krealloc_array(old, cnt + 2, sizeof(*new), GFP_KERNEL); 1497 if (!new) 1498 return -ENOMEM; 1499 1500 new[iio_dev_opaque->groupcounter++] = group; 1501 new[iio_dev_opaque->groupcounter] = NULL; 1502 1503 iio_dev_opaque->groups = new; 1504 1505 return 0; 1506 } 1507 1508 static DEVICE_ATTR_RW(current_timestamp_clock); 1509 1510 static int iio_device_register_sysfs(struct iio_dev *indio_dev) 1511 { 1512 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1513 int i, ret = 0, attrcount, attrn, attrcount_orig = 0; 1514 struct iio_dev_attr *p; 1515 struct attribute **attr, *clk = NULL; 1516 1517 /* First count elements in any existing group */ 1518 if (indio_dev->info->attrs) { 1519 attr = indio_dev->info->attrs->attrs; 1520 while (*attr++ != NULL) 1521 attrcount_orig++; 1522 } 1523 attrcount = attrcount_orig; 1524 /* 1525 * New channel registration method - relies on the fact a group does 1526 * not need to be initialized if its name is NULL. 1527 */ 1528 if (indio_dev->channels) 1529 for (i = 0; i < indio_dev->num_channels; i++) { 1530 const struct iio_chan_spec *chan = 1531 &indio_dev->channels[i]; 1532 1533 if (chan->type == IIO_TIMESTAMP) 1534 clk = &dev_attr_current_timestamp_clock.attr; 1535 1536 ret = iio_device_add_channel_sysfs(indio_dev, chan); 1537 if (ret < 0) 1538 goto error_clear_attrs; 1539 attrcount += ret; 1540 } 1541 1542 if (iio_dev_opaque->event_interface) 1543 clk = &dev_attr_current_timestamp_clock.attr; 1544 1545 if (indio_dev->name) 1546 attrcount++; 1547 if (indio_dev->label) 1548 attrcount++; 1549 if (clk) 1550 attrcount++; 1551 1552 iio_dev_opaque->chan_attr_group.attrs = 1553 kcalloc(attrcount + 1, 1554 sizeof(iio_dev_opaque->chan_attr_group.attrs[0]), 1555 GFP_KERNEL); 1556 if (iio_dev_opaque->chan_attr_group.attrs == NULL) { 1557 ret = -ENOMEM; 1558 goto error_clear_attrs; 1559 } 1560 /* Copy across original attributes, and point to original binary attributes */ 1561 if (indio_dev->info->attrs) { 1562 memcpy(iio_dev_opaque->chan_attr_group.attrs, 1563 indio_dev->info->attrs->attrs, 1564 sizeof(iio_dev_opaque->chan_attr_group.attrs[0]) 1565 *attrcount_orig); 1566 iio_dev_opaque->chan_attr_group.is_visible = 1567 indio_dev->info->attrs->is_visible; 1568 iio_dev_opaque->chan_attr_group.bin_attrs = 1569 indio_dev->info->attrs->bin_attrs; 1570 } 1571 attrn = attrcount_orig; 1572 /* Add all elements from the list. */ 1573 list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l) 1574 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr; 1575 if (indio_dev->name) 1576 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr; 1577 if (indio_dev->label) 1578 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr; 1579 if (clk) 1580 iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk; 1581 1582 ret = iio_device_register_sysfs_group(indio_dev, 1583 &iio_dev_opaque->chan_attr_group); 1584 if (ret) 1585 goto error_free_chan_attrs; 1586 1587 return 0; 1588 1589 error_free_chan_attrs: 1590 kfree(iio_dev_opaque->chan_attr_group.attrs); 1591 iio_dev_opaque->chan_attr_group.attrs = NULL; 1592 error_clear_attrs: 1593 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); 1594 1595 return ret; 1596 } 1597 1598 static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) 1599 { 1600 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1601 1602 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); 1603 kfree(iio_dev_opaque->chan_attr_group.attrs); 1604 iio_dev_opaque->chan_attr_group.attrs = NULL; 1605 kfree(iio_dev_opaque->groups); 1606 iio_dev_opaque->groups = NULL; 1607 } 1608 1609 static void iio_dev_release(struct device *device) 1610 { 1611 struct iio_dev *indio_dev = dev_to_iio_dev(device); 1612 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1613 1614 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) 1615 iio_device_unregister_trigger_consumer(indio_dev); 1616 iio_device_unregister_eventset(indio_dev); 1617 iio_device_unregister_sysfs(indio_dev); 1618 1619 iio_device_detach_buffers(indio_dev); 1620 1621 lockdep_unregister_key(&iio_dev_opaque->mlock_key); 1622 1623 ida_free(&iio_ida, iio_dev_opaque->id); 1624 kfree(iio_dev_opaque); 1625 } 1626 1627 const struct device_type iio_device_type = { 1628 .name = "iio_device", 1629 .release = iio_dev_release, 1630 }; 1631 1632 /** 1633 * iio_device_alloc() - allocate an iio_dev from a driver 1634 * @parent: Parent device. 1635 * @sizeof_priv: Space to allocate for private structure. 1636 * 1637 * Returns: 1638 * Pointer to allocated iio_dev on success, NULL on failure. 1639 */ 1640 struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) 1641 { 1642 struct iio_dev_opaque *iio_dev_opaque; 1643 struct iio_dev *indio_dev; 1644 size_t alloc_size; 1645 1646 alloc_size = sizeof(struct iio_dev_opaque); 1647 if (sizeof_priv) { 1648 alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN); 1649 alloc_size += sizeof_priv; 1650 } 1651 1652 iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL); 1653 if (!iio_dev_opaque) 1654 return NULL; 1655 1656 indio_dev = &iio_dev_opaque->indio_dev; 1657 1658 if (sizeof_priv) 1659 indio_dev->priv = (char *)iio_dev_opaque + 1660 ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN); 1661 1662 indio_dev->dev.parent = parent; 1663 indio_dev->dev.type = &iio_device_type; 1664 indio_dev->dev.bus = &iio_bus_type; 1665 device_initialize(&indio_dev->dev); 1666 mutex_init(&iio_dev_opaque->mlock); 1667 mutex_init(&iio_dev_opaque->info_exist_lock); 1668 INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list); 1669 1670 iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL); 1671 if (iio_dev_opaque->id < 0) { 1672 /* cannot use a dev_err as the name isn't available */ 1673 pr_err("failed to get device id\n"); 1674 kfree(iio_dev_opaque); 1675 return NULL; 1676 } 1677 1678 if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) { 1679 ida_free(&iio_ida, iio_dev_opaque->id); 1680 kfree(iio_dev_opaque); 1681 return NULL; 1682 } 1683 1684 INIT_LIST_HEAD(&iio_dev_opaque->buffer_list); 1685 INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers); 1686 1687 lockdep_register_key(&iio_dev_opaque->mlock_key); 1688 lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key); 1689 1690 return indio_dev; 1691 } 1692 EXPORT_SYMBOL(iio_device_alloc); 1693 1694 /** 1695 * iio_device_free() - free an iio_dev from a driver 1696 * @dev: the iio_dev associated with the device 1697 */ 1698 void iio_device_free(struct iio_dev *dev) 1699 { 1700 if (dev) 1701 put_device(&dev->dev); 1702 } 1703 EXPORT_SYMBOL(iio_device_free); 1704 1705 static void devm_iio_device_release(void *iio_dev) 1706 { 1707 iio_device_free(iio_dev); 1708 } 1709 1710 /** 1711 * devm_iio_device_alloc - Resource-managed iio_device_alloc() 1712 * @parent: Device to allocate iio_dev for, and parent for this IIO device 1713 * @sizeof_priv: Space to allocate for private structure. 1714 * 1715 * Managed iio_device_alloc. iio_dev allocated with this function is 1716 * automatically freed on driver detach. 1717 * 1718 * Returns: 1719 * Pointer to allocated iio_dev on success, NULL on failure. 1720 */ 1721 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv) 1722 { 1723 struct iio_dev *iio_dev; 1724 int ret; 1725 1726 iio_dev = iio_device_alloc(parent, sizeof_priv); 1727 if (!iio_dev) 1728 return NULL; 1729 1730 ret = devm_add_action_or_reset(parent, devm_iio_device_release, 1731 iio_dev); 1732 if (ret) 1733 return NULL; 1734 1735 return iio_dev; 1736 } 1737 EXPORT_SYMBOL_GPL(devm_iio_device_alloc); 1738 1739 /** 1740 * iio_chrdev_open() - chrdev file open for buffer access and ioctls 1741 * @inode: Inode structure for identifying the device in the file system 1742 * @filp: File structure for iio device used to keep and later access 1743 * private data 1744 * 1745 * Returns: 0 on success or -EBUSY if the device is already opened 1746 */ 1747 static int iio_chrdev_open(struct inode *inode, struct file *filp) 1748 { 1749 struct iio_dev_opaque *iio_dev_opaque = 1750 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev); 1751 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev; 1752 struct iio_dev_buffer_pair *ib; 1753 1754 if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags)) 1755 return -EBUSY; 1756 1757 iio_device_get(indio_dev); 1758 1759 ib = kmalloc(sizeof(*ib), GFP_KERNEL); 1760 if (!ib) { 1761 iio_device_put(indio_dev); 1762 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags); 1763 return -ENOMEM; 1764 } 1765 1766 ib->indio_dev = indio_dev; 1767 ib->buffer = indio_dev->buffer; 1768 1769 filp->private_data = ib; 1770 1771 return 0; 1772 } 1773 1774 /** 1775 * iio_chrdev_release() - chrdev file close buffer access and ioctls 1776 * @inode: Inode structure pointer for the char device 1777 * @filp: File structure pointer for the char device 1778 * 1779 * Returns: 0 for successful release. 1780 */ 1781 static int iio_chrdev_release(struct inode *inode, struct file *filp) 1782 { 1783 struct iio_dev_buffer_pair *ib = filp->private_data; 1784 struct iio_dev_opaque *iio_dev_opaque = 1785 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev); 1786 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev; 1787 1788 kfree(ib); 1789 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags); 1790 iio_device_put(indio_dev); 1791 1792 return 0; 1793 } 1794 1795 void iio_device_ioctl_handler_register(struct iio_dev *indio_dev, 1796 struct iio_ioctl_handler *h) 1797 { 1798 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1799 1800 list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers); 1801 } 1802 1803 void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h) 1804 { 1805 list_del(&h->entry); 1806 } 1807 1808 static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 1809 { 1810 struct iio_dev_buffer_pair *ib = filp->private_data; 1811 struct iio_dev *indio_dev = ib->indio_dev; 1812 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1813 struct iio_ioctl_handler *h; 1814 int ret = -ENODEV; 1815 1816 mutex_lock(&iio_dev_opaque->info_exist_lock); 1817 1818 /* 1819 * The NULL check here is required to prevent crashing when a device 1820 * is being removed while userspace would still have open file handles 1821 * to try to access this device. 1822 */ 1823 if (!indio_dev->info) 1824 goto out_unlock; 1825 1826 list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) { 1827 ret = h->ioctl(indio_dev, filp, cmd, arg); 1828 if (ret != IIO_IOCTL_UNHANDLED) 1829 break; 1830 } 1831 1832 if (ret == IIO_IOCTL_UNHANDLED) 1833 ret = -ENODEV; 1834 1835 out_unlock: 1836 mutex_unlock(&iio_dev_opaque->info_exist_lock); 1837 1838 return ret; 1839 } 1840 1841 static const struct file_operations iio_buffer_fileops = { 1842 .owner = THIS_MODULE, 1843 .llseek = noop_llseek, 1844 .read = iio_buffer_read_outer_addr, 1845 .write = iio_buffer_write_outer_addr, 1846 .poll = iio_buffer_poll_addr, 1847 .unlocked_ioctl = iio_ioctl, 1848 .compat_ioctl = compat_ptr_ioctl, 1849 .open = iio_chrdev_open, 1850 .release = iio_chrdev_release, 1851 }; 1852 1853 static const struct file_operations iio_event_fileops = { 1854 .owner = THIS_MODULE, 1855 .llseek = noop_llseek, 1856 .unlocked_ioctl = iio_ioctl, 1857 .compat_ioctl = compat_ptr_ioctl, 1858 .open = iio_chrdev_open, 1859 .release = iio_chrdev_release, 1860 }; 1861 1862 static int iio_check_unique_scan_index(struct iio_dev *indio_dev) 1863 { 1864 int i, j; 1865 const struct iio_chan_spec *channels = indio_dev->channels; 1866 1867 if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES)) 1868 return 0; 1869 1870 for (i = 0; i < indio_dev->num_channels - 1; i++) { 1871 if (channels[i].scan_index < 0) 1872 continue; 1873 for (j = i + 1; j < indio_dev->num_channels; j++) 1874 if (channels[i].scan_index == channels[j].scan_index) { 1875 dev_err(&indio_dev->dev, 1876 "Duplicate scan index %d\n", 1877 channels[i].scan_index); 1878 return -EINVAL; 1879 } 1880 } 1881 1882 return 0; 1883 } 1884 1885 static int iio_check_extended_name(const struct iio_dev *indio_dev) 1886 { 1887 unsigned int i; 1888 1889 if (!indio_dev->info->read_label) 1890 return 0; 1891 1892 for (i = 0; i < indio_dev->num_channels; i++) { 1893 if (indio_dev->channels[i].extend_name) { 1894 dev_err(&indio_dev->dev, 1895 "Cannot use labels and extend_name at the same time\n"); 1896 return -EINVAL; 1897 } 1898 } 1899 1900 return 0; 1901 } 1902 1903 static const struct iio_buffer_setup_ops noop_ring_setup_ops; 1904 1905 int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) 1906 { 1907 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1908 struct fwnode_handle *fwnode = NULL; 1909 int ret; 1910 1911 if (!indio_dev->info) 1912 return -EINVAL; 1913 1914 iio_dev_opaque->driver_module = this_mod; 1915 1916 /* If the calling driver did not initialize firmware node, do it here */ 1917 if (dev_fwnode(&indio_dev->dev)) 1918 fwnode = dev_fwnode(&indio_dev->dev); 1919 /* The default dummy IIO device has no parent */ 1920 else if (indio_dev->dev.parent) 1921 fwnode = dev_fwnode(indio_dev->dev.parent); 1922 device_set_node(&indio_dev->dev, fwnode); 1923 1924 fwnode_property_read_string(fwnode, "label", &indio_dev->label); 1925 1926 ret = iio_check_unique_scan_index(indio_dev); 1927 if (ret < 0) 1928 return ret; 1929 1930 ret = iio_check_extended_name(indio_dev); 1931 if (ret < 0) 1932 return ret; 1933 1934 iio_device_register_debugfs(indio_dev); 1935 1936 ret = iio_buffers_alloc_sysfs_and_mask(indio_dev); 1937 if (ret) { 1938 dev_err(indio_dev->dev.parent, 1939 "Failed to create buffer sysfs interfaces\n"); 1940 goto error_unreg_debugfs; 1941 } 1942 1943 ret = iio_device_register_sysfs(indio_dev); 1944 if (ret) { 1945 dev_err(indio_dev->dev.parent, 1946 "Failed to register sysfs interfaces\n"); 1947 goto error_buffer_free_sysfs; 1948 } 1949 ret = iio_device_register_eventset(indio_dev); 1950 if (ret) { 1951 dev_err(indio_dev->dev.parent, 1952 "Failed to register event set\n"); 1953 goto error_free_sysfs; 1954 } 1955 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) 1956 iio_device_register_trigger_consumer(indio_dev); 1957 1958 if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) && 1959 indio_dev->setup_ops == NULL) 1960 indio_dev->setup_ops = &noop_ring_setup_ops; 1961 1962 if (iio_dev_opaque->attached_buffers_cnt) 1963 cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops); 1964 else if (iio_dev_opaque->event_interface) 1965 cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops); 1966 1967 if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) { 1968 indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id); 1969 iio_dev_opaque->chrdev.owner = this_mod; 1970 } 1971 1972 /* assign device groups now; they should be all registered now */ 1973 indio_dev->dev.groups = iio_dev_opaque->groups; 1974 1975 ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev); 1976 if (ret < 0) 1977 goto error_unreg_eventset; 1978 1979 return 0; 1980 1981 error_unreg_eventset: 1982 iio_device_unregister_eventset(indio_dev); 1983 error_free_sysfs: 1984 iio_device_unregister_sysfs(indio_dev); 1985 error_buffer_free_sysfs: 1986 iio_buffers_free_sysfs_and_mask(indio_dev); 1987 error_unreg_debugfs: 1988 iio_device_unregister_debugfs(indio_dev); 1989 return ret; 1990 } 1991 EXPORT_SYMBOL(__iio_device_register); 1992 1993 /** 1994 * iio_device_unregister() - unregister a device from the IIO subsystem 1995 * @indio_dev: Device structure representing the device. 1996 */ 1997 void iio_device_unregister(struct iio_dev *indio_dev) 1998 { 1999 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2000 2001 cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev); 2002 2003 mutex_lock(&iio_dev_opaque->info_exist_lock); 2004 2005 iio_device_unregister_debugfs(indio_dev); 2006 2007 iio_disable_all_buffers(indio_dev); 2008 2009 indio_dev->info = NULL; 2010 2011 iio_device_wakeup_eventset(indio_dev); 2012 iio_buffer_wakeup_poll(indio_dev); 2013 2014 mutex_unlock(&iio_dev_opaque->info_exist_lock); 2015 2016 iio_buffers_free_sysfs_and_mask(indio_dev); 2017 } 2018 EXPORT_SYMBOL(iio_device_unregister); 2019 2020 static void devm_iio_device_unreg(void *indio_dev) 2021 { 2022 iio_device_unregister(indio_dev); 2023 } 2024 2025 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, 2026 struct module *this_mod) 2027 { 2028 int ret; 2029 2030 ret = __iio_device_register(indio_dev, this_mod); 2031 if (ret) 2032 return ret; 2033 2034 return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev); 2035 } 2036 EXPORT_SYMBOL_GPL(__devm_iio_device_register); 2037 2038 /** 2039 * iio_device_claim_direct_mode - Keep device in direct mode 2040 * @indio_dev: the iio_dev associated with the device 2041 * 2042 * If the device is in direct mode it is guaranteed to stay 2043 * that way until iio_device_release_direct_mode() is called. 2044 * 2045 * Use with iio_device_release_direct_mode() 2046 * 2047 * Returns: 0 on success, -EBUSY on failure. 2048 */ 2049 int iio_device_claim_direct_mode(struct iio_dev *indio_dev) 2050 { 2051 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2052 2053 mutex_lock(&iio_dev_opaque->mlock); 2054 2055 if (iio_buffer_enabled(indio_dev)) { 2056 mutex_unlock(&iio_dev_opaque->mlock); 2057 return -EBUSY; 2058 } 2059 return 0; 2060 } 2061 EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode); 2062 2063 /** 2064 * iio_device_release_direct_mode - releases claim on direct mode 2065 * @indio_dev: the iio_dev associated with the device 2066 * 2067 * Release the claim. Device is no longer guaranteed to stay 2068 * in direct mode. 2069 * 2070 * Use with iio_device_claim_direct_mode() 2071 */ 2072 void iio_device_release_direct_mode(struct iio_dev *indio_dev) 2073 { 2074 mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock); 2075 } 2076 EXPORT_SYMBOL_GPL(iio_device_release_direct_mode); 2077 2078 /** 2079 * iio_device_claim_buffer_mode - Keep device in buffer mode 2080 * @indio_dev: the iio_dev associated with the device 2081 * 2082 * If the device is in buffer mode it is guaranteed to stay 2083 * that way until iio_device_release_buffer_mode() is called. 2084 * 2085 * Use with iio_device_release_buffer_mode(). 2086 * 2087 * Returns: 0 on success, -EBUSY on failure. 2088 */ 2089 int iio_device_claim_buffer_mode(struct iio_dev *indio_dev) 2090 { 2091 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2092 2093 mutex_lock(&iio_dev_opaque->mlock); 2094 2095 if (iio_buffer_enabled(indio_dev)) 2096 return 0; 2097 2098 mutex_unlock(&iio_dev_opaque->mlock); 2099 return -EBUSY; 2100 } 2101 EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode); 2102 2103 /** 2104 * iio_device_release_buffer_mode - releases claim on buffer mode 2105 * @indio_dev: the iio_dev associated with the device 2106 * 2107 * Release the claim. Device is no longer guaranteed to stay 2108 * in buffer mode. 2109 * 2110 * Use with iio_device_claim_buffer_mode(). 2111 */ 2112 void iio_device_release_buffer_mode(struct iio_dev *indio_dev) 2113 { 2114 mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock); 2115 } 2116 EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode); 2117 2118 /** 2119 * iio_device_get_current_mode() - helper function providing read-only access to 2120 * the opaque @currentmode variable 2121 * @indio_dev: IIO device structure for device 2122 */ 2123 int iio_device_get_current_mode(struct iio_dev *indio_dev) 2124 { 2125 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2126 2127 return iio_dev_opaque->currentmode; 2128 } 2129 EXPORT_SYMBOL_GPL(iio_device_get_current_mode); 2130 2131 subsys_initcall(iio_init); 2132 module_exit(iio_exit); 2133 2134 MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>"); 2135 MODULE_DESCRIPTION("Industrial I/O core"); 2136 MODULE_LICENSE("GPL"); 2137