1 // SPDX-License-Identifier: GPL-2.0-only 2 /* The industrial I/O core 3 * 4 * Copyright (c) 2008 Jonathan Cameron 5 * 6 * Based on elements of hwmon and input subsystems. 7 */ 8 9 #define pr_fmt(fmt) "iio-core: " fmt 10 11 #include <linux/kernel.h> 12 #include <linux/module.h> 13 #include <linux/idr.h> 14 #include <linux/kdev_t.h> 15 #include <linux/err.h> 16 #include <linux/device.h> 17 #include <linux/fs.h> 18 #include <linux/poll.h> 19 #include <linux/property.h> 20 #include <linux/sched.h> 21 #include <linux/wait.h> 22 #include <linux/cdev.h> 23 #include <linux/slab.h> 24 #include <linux/anon_inodes.h> 25 #include <linux/debugfs.h> 26 #include <linux/mutex.h> 27 #include <linux/iio/iio.h> 28 #include <linux/iio/iio-opaque.h> 29 #include "iio_core.h" 30 #include "iio_core_trigger.h" 31 #include <linux/iio/sysfs.h> 32 #include <linux/iio/events.h> 33 #include <linux/iio/buffer.h> 34 #include <linux/iio/buffer_impl.h> 35 36 /* IDA to assign each registered device a unique id */ 37 static DEFINE_IDA(iio_ida); 38 39 static dev_t iio_devt; 40 41 #define IIO_DEV_MAX 256 42 struct bus_type iio_bus_type = { 43 .name = "iio", 44 }; 45 EXPORT_SYMBOL(iio_bus_type); 46 47 static struct dentry *iio_debugfs_dentry; 48 49 static const char * const iio_direction[] = { 50 [0] = "in", 51 [1] = "out", 52 }; 53 54 static const char * const iio_chan_type_name_spec[] = { 55 [IIO_VOLTAGE] = "voltage", 56 [IIO_CURRENT] = "current", 57 [IIO_POWER] = "power", 58 [IIO_ACCEL] = "accel", 59 [IIO_ANGL_VEL] = "anglvel", 60 [IIO_MAGN] = "magn", 61 [IIO_LIGHT] = "illuminance", 62 [IIO_INTENSITY] = "intensity", 63 [IIO_PROXIMITY] = "proximity", 64 [IIO_TEMP] = "temp", 65 [IIO_INCLI] = "incli", 66 [IIO_ROT] = "rot", 67 [IIO_ANGL] = "angl", 68 [IIO_TIMESTAMP] = "timestamp", 69 [IIO_CAPACITANCE] = "capacitance", 70 [IIO_ALTVOLTAGE] = "altvoltage", 71 [IIO_CCT] = "cct", 72 [IIO_PRESSURE] = "pressure", 73 [IIO_HUMIDITYRELATIVE] = "humidityrelative", 74 [IIO_ACTIVITY] = "activity", 75 [IIO_STEPS] = "steps", 76 [IIO_ENERGY] = "energy", 77 [IIO_DISTANCE] = "distance", 78 [IIO_VELOCITY] = "velocity", 79 [IIO_CONCENTRATION] = "concentration", 80 [IIO_RESISTANCE] = "resistance", 81 [IIO_PH] = "ph", 82 [IIO_UVINDEX] = "uvindex", 83 [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity", 84 [IIO_COUNT] = "count", 85 [IIO_INDEX] = "index", 86 [IIO_GRAVITY] = "gravity", 87 [IIO_POSITIONRELATIVE] = "positionrelative", 88 [IIO_PHASE] = "phase", 89 [IIO_MASSCONCENTRATION] = "massconcentration", 90 }; 91 92 static const char * const iio_modifier_names[] = { 93 [IIO_MOD_X] = "x", 94 [IIO_MOD_Y] = "y", 95 [IIO_MOD_Z] = "z", 96 [IIO_MOD_X_AND_Y] = "x&y", 97 [IIO_MOD_X_AND_Z] = "x&z", 98 [IIO_MOD_Y_AND_Z] = "y&z", 99 [IIO_MOD_X_AND_Y_AND_Z] = "x&y&z", 100 [IIO_MOD_X_OR_Y] = "x|y", 101 [IIO_MOD_X_OR_Z] = "x|z", 102 [IIO_MOD_Y_OR_Z] = "y|z", 103 [IIO_MOD_X_OR_Y_OR_Z] = "x|y|z", 104 [IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)", 105 [IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2", 106 [IIO_MOD_LIGHT_BOTH] = "both", 107 [IIO_MOD_LIGHT_IR] = "ir", 108 [IIO_MOD_LIGHT_CLEAR] = "clear", 109 [IIO_MOD_LIGHT_RED] = "red", 110 [IIO_MOD_LIGHT_GREEN] = "green", 111 [IIO_MOD_LIGHT_BLUE] = "blue", 112 [IIO_MOD_LIGHT_UV] = "uv", 113 [IIO_MOD_LIGHT_DUV] = "duv", 114 [IIO_MOD_QUATERNION] = "quaternion", 115 [IIO_MOD_TEMP_AMBIENT] = "ambient", 116 [IIO_MOD_TEMP_OBJECT] = "object", 117 [IIO_MOD_NORTH_MAGN] = "from_north_magnetic", 118 [IIO_MOD_NORTH_TRUE] = "from_north_true", 119 [IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp", 120 
[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp", 121 [IIO_MOD_RUNNING] = "running", 122 [IIO_MOD_JOGGING] = "jogging", 123 [IIO_MOD_WALKING] = "walking", 124 [IIO_MOD_STILL] = "still", 125 [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)", 126 [IIO_MOD_I] = "i", 127 [IIO_MOD_Q] = "q", 128 [IIO_MOD_CO2] = "co2", 129 [IIO_MOD_VOC] = "voc", 130 [IIO_MOD_PM1] = "pm1", 131 [IIO_MOD_PM2P5] = "pm2p5", 132 [IIO_MOD_PM4] = "pm4", 133 [IIO_MOD_PM10] = "pm10", 134 [IIO_MOD_ETHANOL] = "ethanol", 135 [IIO_MOD_H2] = "h2", 136 [IIO_MOD_O2] = "o2", 137 [IIO_MOD_LINEAR_X] = "linear_x", 138 [IIO_MOD_LINEAR_Y] = "linear_y", 139 [IIO_MOD_LINEAR_Z] = "linear_z", 140 [IIO_MOD_PITCH] = "pitch", 141 [IIO_MOD_YAW] = "yaw", 142 [IIO_MOD_ROLL] = "roll", 143 }; 144 145 /* relies on pairs of these shared then separate */ 146 static const char * const iio_chan_info_postfix[] = { 147 [IIO_CHAN_INFO_RAW] = "raw", 148 [IIO_CHAN_INFO_PROCESSED] = "input", 149 [IIO_CHAN_INFO_SCALE] = "scale", 150 [IIO_CHAN_INFO_OFFSET] = "offset", 151 [IIO_CHAN_INFO_CALIBSCALE] = "calibscale", 152 [IIO_CHAN_INFO_CALIBBIAS] = "calibbias", 153 [IIO_CHAN_INFO_PEAK] = "peak_raw", 154 [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale", 155 [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw", 156 [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw", 157 [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY] 158 = "filter_low_pass_3db_frequency", 159 [IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY] 160 = "filter_high_pass_3db_frequency", 161 [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency", 162 [IIO_CHAN_INFO_FREQUENCY] = "frequency", 163 [IIO_CHAN_INFO_PHASE] = "phase", 164 [IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain", 165 [IIO_CHAN_INFO_HYSTERESIS] = "hysteresis", 166 [IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative", 167 [IIO_CHAN_INFO_INT_TIME] = "integration_time", 168 [IIO_CHAN_INFO_ENABLE] = "en", 169 [IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight", 170 [IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight", 171 [IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count", 172 [IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time", 173 [IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity", 174 [IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio", 175 [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type", 176 [IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient", 177 [IIO_CHAN_INFO_ZEROPOINT] = "zeropoint", 178 }; 179 /** 180 * iio_device_id() - query the unique ID for the device 181 * @indio_dev: Device structure whose ID is being queried 182 * 183 * The IIO device ID is a unique index used for example for the naming 184 * of the character device /dev/iio\:device[ID] 185 */ 186 int iio_device_id(struct iio_dev *indio_dev) 187 { 188 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 189 190 return iio_dev_opaque->id; 191 } 192 EXPORT_SYMBOL_GPL(iio_device_id); 193 194 /** 195 * iio_buffer_enabled() - helper function to test if the buffer is enabled 196 * @indio_dev: IIO device structure for device 197 */ 198 bool iio_buffer_enabled(struct iio_dev *indio_dev) 199 { 200 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 201 202 return iio_dev_opaque->currentmode 203 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | 204 INDIO_BUFFER_SOFTWARE); 205 } 206 EXPORT_SYMBOL_GPL(iio_buffer_enabled); 207 208 /** 209 * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps 210 * @array: array of strings 211 * @n: number of strings in the array 212 * @str: string to match with 213 * 214 * Returns index of @str 
in the @array or -EINVAL, similar to match_string(). 215 * Uses sysfs_streq instead of strcmp for matching. 216 * 217 * This routine will look for a string in an array of strings. 218 * The search will continue until the element is found or the n-th element 219 * is reached, regardless of any NULL elements in the array. 220 */ 221 static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n, 222 const char *str) 223 { 224 const char *item; 225 int index; 226 227 for (index = 0; index < n; index++) { 228 item = array[index]; 229 if (!item) 230 continue; 231 if (sysfs_streq(item, str)) 232 return index; 233 } 234 235 return -EINVAL; 236 } 237 238 #if defined(CONFIG_DEBUG_FS) 239 /* 240 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for 241 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined 242 */ 243 struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) 244 { 245 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 246 247 return iio_dev_opaque->debugfs_dentry; 248 } 249 EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry); 250 #endif 251 252 /** 253 * iio_find_channel_from_si() - get channel from its scan index 254 * @indio_dev: device 255 * @si: scan index to match 256 */ 257 const struct iio_chan_spec 258 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si) 259 { 260 int i; 261 262 for (i = 0; i < indio_dev->num_channels; i++) 263 if (indio_dev->channels[i].scan_index == si) 264 return &indio_dev->channels[i]; 265 return NULL; 266 } 267 268 /* This turns up an awful lot */ 269 ssize_t iio_read_const_attr(struct device *dev, 270 struct device_attribute *attr, 271 char *buf) 272 { 273 return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string); 274 } 275 EXPORT_SYMBOL(iio_read_const_attr); 276 277 /** 278 * iio_device_set_clock() - Set current timestamping clock for the device 279 * @indio_dev: IIO device structure containing the device 280 * @clock_id: timestamping clock posix identifier to set. 
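 *
 * Only the clock ids handled by iio_get_time_ns() below are meaningful
 * here: CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_MONOTONIC_RAW,
 * CLOCK_REALTIME_COARSE, CLOCK_MONOTONIC_COARSE, CLOCK_BOOTTIME and
 * CLOCK_TAI. A minimal driver-side sketch (illustrative only):
 *
 *	ret = iio_device_set_clock(indio_dev, CLOCK_MONOTONIC);
 *	if (ret)
 *		return ret;
 *
 * Returns 0 on success, or -EBUSY if the device's events or buffer are
 * currently enabled.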
281 */ 282 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) 283 { 284 int ret; 285 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 286 const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; 287 288 ret = mutex_lock_interruptible(&iio_dev_opaque->mlock); 289 if (ret) 290 return ret; 291 if ((ev_int && iio_event_enabled(ev_int)) || 292 iio_buffer_enabled(indio_dev)) { 293 mutex_unlock(&iio_dev_opaque->mlock); 294 return -EBUSY; 295 } 296 iio_dev_opaque->clock_id = clock_id; 297 mutex_unlock(&iio_dev_opaque->mlock); 298 299 return 0; 300 } 301 EXPORT_SYMBOL(iio_device_set_clock); 302 303 /** 304 * iio_device_get_clock() - Retrieve current timestamping clock for the device 305 * @indio_dev: IIO device structure containing the device 306 */ 307 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) 308 { 309 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 310 311 return iio_dev_opaque->clock_id; 312 } 313 EXPORT_SYMBOL(iio_device_get_clock); 314 315 /** 316 * iio_get_time_ns() - utility function to get a time stamp for events etc 317 * @indio_dev: device 318 */ 319 s64 iio_get_time_ns(const struct iio_dev *indio_dev) 320 { 321 struct timespec64 tp; 322 323 switch (iio_device_get_clock(indio_dev)) { 324 case CLOCK_REALTIME: 325 return ktime_get_real_ns(); 326 case CLOCK_MONOTONIC: 327 return ktime_get_ns(); 328 case CLOCK_MONOTONIC_RAW: 329 return ktime_get_raw_ns(); 330 case CLOCK_REALTIME_COARSE: 331 return ktime_to_ns(ktime_get_coarse_real()); 332 case CLOCK_MONOTONIC_COARSE: 333 ktime_get_coarse_ts64(&tp); 334 return timespec64_to_ns(&tp); 335 case CLOCK_BOOTTIME: 336 return ktime_get_boottime_ns(); 337 case CLOCK_TAI: 338 return ktime_get_clocktai_ns(); 339 default: 340 BUG(); 341 } 342 } 343 EXPORT_SYMBOL(iio_get_time_ns); 344 345 static int __init iio_init(void) 346 { 347 int ret; 348 349 /* Register sysfs bus */ 350 ret = bus_register(&iio_bus_type); 351 if (ret < 0) { 352 pr_err("could not register bus type\n"); 353 goto error_nothing; 354 } 355 356 ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio"); 357 if (ret < 0) { 358 pr_err("failed to allocate char dev region\n"); 359 goto error_unregister_bus_type; 360 } 361 362 iio_debugfs_dentry = debugfs_create_dir("iio", NULL); 363 364 return 0; 365 366 error_unregister_bus_type: 367 bus_unregister(&iio_bus_type); 368 error_nothing: 369 return ret; 370 } 371 372 static void __exit iio_exit(void) 373 { 374 if (iio_devt) 375 unregister_chrdev_region(iio_devt, IIO_DEV_MAX); 376 bus_unregister(&iio_bus_type); 377 debugfs_remove(iio_debugfs_dentry); 378 } 379 380 #if defined(CONFIG_DEBUG_FS) 381 static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, 382 size_t count, loff_t *ppos) 383 { 384 struct iio_dev *indio_dev = file->private_data; 385 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 386 unsigned int val = 0; 387 int ret; 388 389 if (*ppos > 0) 390 return simple_read_from_buffer(userbuf, count, ppos, 391 iio_dev_opaque->read_buf, 392 iio_dev_opaque->read_buf_len); 393 394 ret = indio_dev->info->debugfs_reg_access(indio_dev, 395 iio_dev_opaque->cached_reg_addr, 396 0, &val); 397 if (ret) { 398 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); 399 return ret; 400 } 401 402 iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf, 403 sizeof(iio_dev_opaque->read_buf), 404 "0x%X\n", val); 405 406 return simple_read_from_buffer(userbuf, count, ppos, 407 iio_dev_opaque->read_buf, 
				       iio_dev_opaque->read_buf_len);
}

static ssize_t iio_debugfs_write_reg(struct file *file,
		const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int reg, val;
	char buf[80];
	int ret;

	count = min_t(size_t, count, (sizeof(buf)-1));
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		iio_dev_opaque->cached_reg_addr = reg;
		break;
	case 2:
		iio_dev_opaque->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}

static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (indio_dev->info->debugfs_reg_access == NULL)
		return;

	if (!iio_debugfs_dentry)
		return;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_dev_opaque->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);

	debugfs_create_file("direct_reg_access", 0644,
			    iio_dev_opaque->debugfs_dentry, indio_dev,
			    &iio_debugfs_reg_fops);
}
#else
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */

static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}

static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->write(indio_dev, ext_info->private,
			       this_attr->c, buf, len);
}

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	size_t len = 0;

	if (!e->num_items)
		return 0;

	for (i = 0; i < e->num_items; ++i) {
		if (!e->items[i])
			continue;
		len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
	}

	/* replace last space with a newline */
	buf[len - 1] = '\n';

	return len;
}
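/*
 * Illustrative sketch (not part of this file) of how a driver wires the
 * iio_enum_* helpers here into a channel's ext_info table. The names
 * my_modes, my_mode_enum, my_get_mode, my_set_mode and my_ext_info are
 * hypothetical:
 *
 *	static const char * const my_modes[] = { "normal", "turbo" };
 *
 *	static const struct iio_enum my_mode_enum = {
 *		.items = my_modes,
 *		.num_items = ARRAY_SIZE(my_modes),
 *		.get = my_get_mode,
 *		.set = my_set_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info my_ext_info[] = {
 *		{
 *			.name = "mode",
 *			.shared = IIO_SEPARATE,
 *			.read = iio_enum_read,
 *			.write = iio_enum_write,
 *			.private = (uintptr_t)&my_mode_enum,
 *		},
 *		{
 *			.name = "mode_available",
 *			.shared = IIO_SHARED_BY_TYPE,
 *			.read = iio_enum_available_read,
 *			.private = (uintptr_t)&my_mode_enum,
 *		},
 *		{ }
 *	};
 *
 * The IIO_ENUM() and IIO_ENUM_AVAILABLE() macros in <linux/iio/sysfs.h>
 * generate equivalent entries.
 */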
EXPORT_SYMBOL_GPL(iio_enum_available_read); 542 543 ssize_t iio_enum_read(struct iio_dev *indio_dev, 544 uintptr_t priv, const struct iio_chan_spec *chan, char *buf) 545 { 546 const struct iio_enum *e = (const struct iio_enum *)priv; 547 int i; 548 549 if (!e->get) 550 return -EINVAL; 551 552 i = e->get(indio_dev, chan); 553 if (i < 0) 554 return i; 555 else if (i >= e->num_items || !e->items[i]) 556 return -EINVAL; 557 558 return sysfs_emit(buf, "%s\n", e->items[i]); 559 } 560 EXPORT_SYMBOL_GPL(iio_enum_read); 561 562 ssize_t iio_enum_write(struct iio_dev *indio_dev, 563 uintptr_t priv, const struct iio_chan_spec *chan, const char *buf, 564 size_t len) 565 { 566 const struct iio_enum *e = (const struct iio_enum *)priv; 567 int ret; 568 569 if (!e->set) 570 return -EINVAL; 571 572 ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf); 573 if (ret < 0) 574 return ret; 575 576 ret = e->set(indio_dev, chan, ret); 577 return ret ? ret : len; 578 } 579 EXPORT_SYMBOL_GPL(iio_enum_write); 580 581 static const struct iio_mount_matrix iio_mount_idmatrix = { 582 .rotation = { 583 "1", "0", "0", 584 "0", "1", "0", 585 "0", "0", "1" 586 } 587 }; 588 589 static int iio_setup_mount_idmatrix(const struct device *dev, 590 struct iio_mount_matrix *matrix) 591 { 592 *matrix = iio_mount_idmatrix; 593 dev_info(dev, "mounting matrix not found: using identity...\n"); 594 return 0; 595 } 596 597 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, 598 const struct iio_chan_spec *chan, char *buf) 599 { 600 const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *) 601 priv)(indio_dev, chan); 602 603 if (IS_ERR(mtx)) 604 return PTR_ERR(mtx); 605 606 if (!mtx) 607 mtx = &iio_mount_idmatrix; 608 609 return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n", 610 mtx->rotation[0], mtx->rotation[1], mtx->rotation[2], 611 mtx->rotation[3], mtx->rotation[4], mtx->rotation[5], 612 mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]); 613 } 614 EXPORT_SYMBOL_GPL(iio_show_mount_matrix); 615 616 /** 617 * iio_read_mount_matrix() - retrieve iio device mounting matrix from 618 * device "mount-matrix" property 619 * @dev: device the mounting matrix property is assigned to 620 * @matrix: where to store retrieved matrix 621 * 622 * If device is assigned no mounting matrix property, a default 3x3 identity 623 * matrix will be filled in. 624 * 625 * Return: 0 if success, or a negative error code on failure. 626 */ 627 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix) 628 { 629 size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation); 630 int err; 631 632 err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len); 633 if (err == len) 634 return 0; 635 636 if (err >= 0) 637 /* Invalid number of matrix entries. */ 638 return -EINVAL; 639 640 if (err != -EINVAL) 641 /* Invalid matrix declaration format. */ 642 return err; 643 644 /* Matrix was not declared at all: fallback to identity. 
*/ 645 return iio_setup_mount_idmatrix(dev, matrix); 646 } 647 EXPORT_SYMBOL(iio_read_mount_matrix); 648 649 static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type, 650 int size, const int *vals) 651 { 652 int tmp0, tmp1; 653 s64 tmp2; 654 bool scale_db = false; 655 656 switch (type) { 657 case IIO_VAL_INT: 658 return sysfs_emit_at(buf, offset, "%d", vals[0]); 659 case IIO_VAL_INT_PLUS_MICRO_DB: 660 scale_db = true; 661 fallthrough; 662 case IIO_VAL_INT_PLUS_MICRO: 663 if (vals[1] < 0) 664 return sysfs_emit_at(buf, offset, "-%d.%06u%s", 665 abs(vals[0]), -vals[1], 666 scale_db ? " dB" : ""); 667 else 668 return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0], 669 vals[1], scale_db ? " dB" : ""); 670 case IIO_VAL_INT_PLUS_NANO: 671 if (vals[1] < 0) 672 return sysfs_emit_at(buf, offset, "-%d.%09u", 673 abs(vals[0]), -vals[1]); 674 else 675 return sysfs_emit_at(buf, offset, "%d.%09u", vals[0], 676 vals[1]); 677 case IIO_VAL_FRACTIONAL: 678 tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]); 679 tmp1 = vals[1]; 680 tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1); 681 if ((tmp2 < 0) && (tmp0 == 0)) 682 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1)); 683 else 684 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0, 685 abs(tmp1)); 686 case IIO_VAL_FRACTIONAL_LOG2: 687 tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]); 688 tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1); 689 if (tmp0 == 0 && tmp2 < 0) 690 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1)); 691 else 692 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0, 693 abs(tmp1)); 694 case IIO_VAL_INT_MULTIPLE: 695 { 696 int i; 697 int l = 0; 698 699 for (i = 0; i < size; ++i) 700 l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]); 701 return l; 702 } 703 case IIO_VAL_CHAR: 704 return sysfs_emit_at(buf, offset, "%c", (char)vals[0]); 705 case IIO_VAL_INT_64: 706 tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]); 707 return sysfs_emit_at(buf, offset, "%lld", tmp2); 708 default: 709 return 0; 710 } 711 } 712 713 /** 714 * iio_format_value() - Formats a IIO value into its string representation 715 * @buf: The buffer to which the formatted value gets written 716 * which is assumed to be big enough (i.e. PAGE_SIZE). 717 * @type: One of the IIO_VAL_* constants. This decides how the val 718 * and val2 parameters are formatted. 719 * @size: Number of IIO value entries contained in vals 720 * @vals: Pointer to the values, exact meaning depends on the 721 * type parameter. 722 * 723 * Return: 0 by default, a negative number on failure or the 724 * total number of characters written for a type that belongs 725 * to the IIO_VAL_* constant. 
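 *
 * For illustration: IIO_VAL_INT_PLUS_MICRO with vals = { 1, 500000 } is
 * emitted as "1.500000\n", and IIO_VAL_FRACTIONAL with vals = { 1, 4 } as
 * "0.250000000\n".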
726 */ 727 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) 728 { 729 ssize_t len; 730 731 len = __iio_format_value(buf, 0, type, size, vals); 732 if (len >= PAGE_SIZE - 1) 733 return -EFBIG; 734 735 return len + sysfs_emit_at(buf, len, "\n"); 736 } 737 EXPORT_SYMBOL_GPL(iio_format_value); 738 739 static ssize_t iio_read_channel_label(struct device *dev, 740 struct device_attribute *attr, 741 char *buf) 742 { 743 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 744 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 745 746 if (indio_dev->info->read_label) 747 return indio_dev->info->read_label(indio_dev, this_attr->c, buf); 748 749 if (this_attr->c->extend_name) 750 return sysfs_emit(buf, "%s\n", this_attr->c->extend_name); 751 752 return -EINVAL; 753 } 754 755 static ssize_t iio_read_channel_info(struct device *dev, 756 struct device_attribute *attr, 757 char *buf) 758 { 759 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 760 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 761 int vals[INDIO_MAX_RAW_ELEMENTS]; 762 int ret; 763 int val_len = 2; 764 765 if (indio_dev->info->read_raw_multi) 766 ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c, 767 INDIO_MAX_RAW_ELEMENTS, 768 vals, &val_len, 769 this_attr->address); 770 else 771 ret = indio_dev->info->read_raw(indio_dev, this_attr->c, 772 &vals[0], &vals[1], this_attr->address); 773 774 if (ret < 0) 775 return ret; 776 777 return iio_format_value(buf, ret, val_len, vals); 778 } 779 780 static ssize_t iio_format_list(char *buf, const int *vals, int type, int length, 781 const char *prefix, const char *suffix) 782 { 783 ssize_t len; 784 int stride; 785 int i; 786 787 switch (type) { 788 case IIO_VAL_INT: 789 stride = 1; 790 break; 791 default: 792 stride = 2; 793 break; 794 } 795 796 len = sysfs_emit(buf, prefix); 797 798 for (i = 0; i <= length - stride; i += stride) { 799 if (i != 0) { 800 len += sysfs_emit_at(buf, len, " "); 801 if (len >= PAGE_SIZE) 802 return -EFBIG; 803 } 804 805 len += __iio_format_value(buf, len, type, stride, &vals[i]); 806 if (len >= PAGE_SIZE) 807 return -EFBIG; 808 } 809 810 len += sysfs_emit_at(buf, len, "%s\n", suffix); 811 812 return len; 813 } 814 815 static ssize_t iio_format_avail_list(char *buf, const int *vals, 816 int type, int length) 817 { 818 819 return iio_format_list(buf, vals, type, length, "", ""); 820 } 821 822 static ssize_t iio_format_avail_range(char *buf, const int *vals, int type) 823 { 824 int length; 825 826 /* 827 * length refers to the array size , not the number of elements. 828 * The purpose is to print the range [min , step ,max] so length should 829 * be 3 in case of int, and 6 for other types. 
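	 * For example, an IIO_VAL_INT range with vals = { 0, 1, 100 } is
	 * printed as "[0 1 100]"; for other types each of min, step and max
	 * occupies two consecutive entries of vals.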
830 */ 831 switch (type) { 832 case IIO_VAL_INT: 833 length = 3; 834 break; 835 default: 836 length = 6; 837 break; 838 } 839 840 return iio_format_list(buf, vals, type, length, "[", "]"); 841 } 842 843 static ssize_t iio_read_channel_info_avail(struct device *dev, 844 struct device_attribute *attr, 845 char *buf) 846 { 847 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 848 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 849 const int *vals; 850 int ret; 851 int length; 852 int type; 853 854 ret = indio_dev->info->read_avail(indio_dev, this_attr->c, 855 &vals, &type, &length, 856 this_attr->address); 857 858 if (ret < 0) 859 return ret; 860 switch (ret) { 861 case IIO_AVAIL_LIST: 862 return iio_format_avail_list(buf, vals, type, length); 863 case IIO_AVAIL_RANGE: 864 return iio_format_avail_range(buf, vals, type); 865 default: 866 return -EINVAL; 867 } 868 } 869 870 /** 871 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string 872 * @str: The string to parse 873 * @fract_mult: Multiplier for the first decimal place, should be a power of 10 874 * @integer: The integer part of the number 875 * @fract: The fractional part of the number 876 * @scale_db: True if this should parse as dB 877 * 878 * Returns 0 on success, or a negative error code if the string could not be 879 * parsed. 880 */ 881 static int __iio_str_to_fixpoint(const char *str, int fract_mult, 882 int *integer, int *fract, bool scale_db) 883 { 884 int i = 0, f = 0; 885 bool integer_part = true, negative = false; 886 887 if (fract_mult == 0) { 888 *fract = 0; 889 890 return kstrtoint(str, 0, integer); 891 } 892 893 if (str[0] == '-') { 894 negative = true; 895 str++; 896 } else if (str[0] == '+') { 897 str++; 898 } 899 900 while (*str) { 901 if ('0' <= *str && *str <= '9') { 902 if (integer_part) { 903 i = i * 10 + *str - '0'; 904 } else { 905 f += fract_mult * (*str - '0'); 906 fract_mult /= 10; 907 } 908 } else if (*str == '\n') { 909 if (*(str + 1) == '\0') 910 break; 911 return -EINVAL; 912 } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) { 913 /* Ignore the dB suffix */ 914 str += sizeof(" dB") - 1; 915 continue; 916 } else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) { 917 /* Ignore the dB suffix */ 918 str += sizeof("dB") - 1; 919 continue; 920 } else if (*str == '.' && integer_part) { 921 integer_part = false; 922 } else { 923 return -EINVAL; 924 } 925 str++; 926 } 927 928 if (negative) { 929 if (i) 930 i = -i; 931 else 932 f = -f; 933 } 934 935 *integer = i; 936 *fract = f; 937 938 return 0; 939 } 940 941 /** 942 * iio_str_to_fixpoint() - Parse a fixed-point number from a string 943 * @str: The string to parse 944 * @fract_mult: Multiplier for the first decimal place, should be a power of 10 945 * @integer: The integer part of the number 946 * @fract: The fractional part of the number 947 * 948 * Returns 0 on success, or a negative error code if the string could not be 949 * parsed. 
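 *
 * For illustration, parsing "1.25" with a @fract_mult of 100000 (micro
 * precision) yields *integer == 1 and *fract == 250000; "-0.5" yields
 * *integer == 0 and *fract == -500000. A @fract_mult of 0 parses the
 * string as a plain integer and sets *fract to 0.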
950 */ 951 int iio_str_to_fixpoint(const char *str, int fract_mult, 952 int *integer, int *fract) 953 { 954 return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false); 955 } 956 EXPORT_SYMBOL_GPL(iio_str_to_fixpoint); 957 958 static ssize_t iio_write_channel_info(struct device *dev, 959 struct device_attribute *attr, 960 const char *buf, 961 size_t len) 962 { 963 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 964 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 965 int ret, fract_mult = 100000; 966 int integer, fract = 0; 967 bool is_char = false; 968 bool scale_db = false; 969 970 /* Assumes decimal - precision based on number of digits */ 971 if (!indio_dev->info->write_raw) 972 return -EINVAL; 973 974 if (indio_dev->info->write_raw_get_fmt) 975 switch (indio_dev->info->write_raw_get_fmt(indio_dev, 976 this_attr->c, this_attr->address)) { 977 case IIO_VAL_INT: 978 fract_mult = 0; 979 break; 980 case IIO_VAL_INT_PLUS_MICRO_DB: 981 scale_db = true; 982 fallthrough; 983 case IIO_VAL_INT_PLUS_MICRO: 984 fract_mult = 100000; 985 break; 986 case IIO_VAL_INT_PLUS_NANO: 987 fract_mult = 100000000; 988 break; 989 case IIO_VAL_CHAR: 990 is_char = true; 991 break; 992 default: 993 return -EINVAL; 994 } 995 996 if (is_char) { 997 char ch; 998 999 if (sscanf(buf, "%c", &ch) != 1) 1000 return -EINVAL; 1001 integer = ch; 1002 } else { 1003 ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract, 1004 scale_db); 1005 if (ret) 1006 return ret; 1007 } 1008 1009 ret = indio_dev->info->write_raw(indio_dev, this_attr->c, 1010 integer, fract, this_attr->address); 1011 if (ret) 1012 return ret; 1013 1014 return len; 1015 } 1016 1017 static 1018 int __iio_device_attr_init(struct device_attribute *dev_attr, 1019 const char *postfix, 1020 struct iio_chan_spec const *chan, 1021 ssize_t (*readfunc)(struct device *dev, 1022 struct device_attribute *attr, 1023 char *buf), 1024 ssize_t (*writefunc)(struct device *dev, 1025 struct device_attribute *attr, 1026 const char *buf, 1027 size_t len), 1028 enum iio_shared_by shared_by) 1029 { 1030 int ret = 0; 1031 char *name = NULL; 1032 char *full_postfix; 1033 1034 sysfs_attr_init(&dev_attr->attr); 1035 1036 /* Build up postfix of <extend_name>_<modifier>_postfix */ 1037 if (chan->modified && (shared_by == IIO_SEPARATE)) { 1038 if (chan->extend_name) 1039 full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s", 1040 iio_modifier_names[chan 1041 ->channel2], 1042 chan->extend_name, 1043 postfix); 1044 else 1045 full_postfix = kasprintf(GFP_KERNEL, "%s_%s", 1046 iio_modifier_names[chan 1047 ->channel2], 1048 postfix); 1049 } else { 1050 if (chan->extend_name == NULL || shared_by != IIO_SEPARATE) 1051 full_postfix = kstrdup(postfix, GFP_KERNEL); 1052 else 1053 full_postfix = kasprintf(GFP_KERNEL, 1054 "%s_%s", 1055 chan->extend_name, 1056 postfix); 1057 } 1058 if (full_postfix == NULL) 1059 return -ENOMEM; 1060 1061 if (chan->differential) { /* Differential can not have modifier */ 1062 switch (shared_by) { 1063 case IIO_SHARED_BY_ALL: 1064 name = kasprintf(GFP_KERNEL, "%s", full_postfix); 1065 break; 1066 case IIO_SHARED_BY_DIR: 1067 name = kasprintf(GFP_KERNEL, "%s_%s", 1068 iio_direction[chan->output], 1069 full_postfix); 1070 break; 1071 case IIO_SHARED_BY_TYPE: 1072 name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s", 1073 iio_direction[chan->output], 1074 iio_chan_type_name_spec[chan->type], 1075 iio_chan_type_name_spec[chan->type], 1076 full_postfix); 1077 break; 1078 case IIO_SEPARATE: 1079 if (!chan->indexed) { 1080 WARN(1, "Differential channels must be 
indexed\n"); 1081 ret = -EINVAL; 1082 goto error_free_full_postfix; 1083 } 1084 name = kasprintf(GFP_KERNEL, 1085 "%s_%s%d-%s%d_%s", 1086 iio_direction[chan->output], 1087 iio_chan_type_name_spec[chan->type], 1088 chan->channel, 1089 iio_chan_type_name_spec[chan->type], 1090 chan->channel2, 1091 full_postfix); 1092 break; 1093 } 1094 } else { /* Single ended */ 1095 switch (shared_by) { 1096 case IIO_SHARED_BY_ALL: 1097 name = kasprintf(GFP_KERNEL, "%s", full_postfix); 1098 break; 1099 case IIO_SHARED_BY_DIR: 1100 name = kasprintf(GFP_KERNEL, "%s_%s", 1101 iio_direction[chan->output], 1102 full_postfix); 1103 break; 1104 case IIO_SHARED_BY_TYPE: 1105 name = kasprintf(GFP_KERNEL, "%s_%s_%s", 1106 iio_direction[chan->output], 1107 iio_chan_type_name_spec[chan->type], 1108 full_postfix); 1109 break; 1110 1111 case IIO_SEPARATE: 1112 if (chan->indexed) 1113 name = kasprintf(GFP_KERNEL, "%s_%s%d_%s", 1114 iio_direction[chan->output], 1115 iio_chan_type_name_spec[chan->type], 1116 chan->channel, 1117 full_postfix); 1118 else 1119 name = kasprintf(GFP_KERNEL, "%s_%s_%s", 1120 iio_direction[chan->output], 1121 iio_chan_type_name_spec[chan->type], 1122 full_postfix); 1123 break; 1124 } 1125 } 1126 if (name == NULL) { 1127 ret = -ENOMEM; 1128 goto error_free_full_postfix; 1129 } 1130 dev_attr->attr.name = name; 1131 1132 if (readfunc) { 1133 dev_attr->attr.mode |= 0444; 1134 dev_attr->show = readfunc; 1135 } 1136 1137 if (writefunc) { 1138 dev_attr->attr.mode |= 0200; 1139 dev_attr->store = writefunc; 1140 } 1141 1142 error_free_full_postfix: 1143 kfree(full_postfix); 1144 1145 return ret; 1146 } 1147 1148 static void __iio_device_attr_deinit(struct device_attribute *dev_attr) 1149 { 1150 kfree(dev_attr->attr.name); 1151 } 1152 1153 int __iio_add_chan_devattr(const char *postfix, 1154 struct iio_chan_spec const *chan, 1155 ssize_t (*readfunc)(struct device *dev, 1156 struct device_attribute *attr, 1157 char *buf), 1158 ssize_t (*writefunc)(struct device *dev, 1159 struct device_attribute *attr, 1160 const char *buf, 1161 size_t len), 1162 u64 mask, 1163 enum iio_shared_by shared_by, 1164 struct device *dev, 1165 struct iio_buffer *buffer, 1166 struct list_head *attr_list) 1167 { 1168 int ret; 1169 struct iio_dev_attr *iio_attr, *t; 1170 1171 iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL); 1172 if (iio_attr == NULL) 1173 return -ENOMEM; 1174 ret = __iio_device_attr_init(&iio_attr->dev_attr, 1175 postfix, chan, 1176 readfunc, writefunc, shared_by); 1177 if (ret) 1178 goto error_iio_dev_attr_free; 1179 iio_attr->c = chan; 1180 iio_attr->address = mask; 1181 iio_attr->buffer = buffer; 1182 list_for_each_entry(t, attr_list, l) 1183 if (strcmp(t->dev_attr.attr.name, 1184 iio_attr->dev_attr.attr.name) == 0) { 1185 if (shared_by == IIO_SEPARATE) 1186 dev_err(dev, "tried to double register : %s\n", 1187 t->dev_attr.attr.name); 1188 ret = -EBUSY; 1189 goto error_device_attr_deinit; 1190 } 1191 list_add(&iio_attr->l, attr_list); 1192 1193 return 0; 1194 1195 error_device_attr_deinit: 1196 __iio_device_attr_deinit(&iio_attr->dev_attr); 1197 error_iio_dev_attr_free: 1198 kfree(iio_attr); 1199 return ret; 1200 } 1201 1202 static int iio_device_add_channel_label(struct iio_dev *indio_dev, 1203 struct iio_chan_spec const *chan) 1204 { 1205 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1206 int ret; 1207 1208 if (!indio_dev->info->read_label && !chan->extend_name) 1209 return 0; 1210 1211 ret = __iio_add_chan_devattr("label", 1212 chan, 1213 &iio_read_channel_label, 1214 NULL, 1215 0, 1216 
IIO_SEPARATE, 1217 &indio_dev->dev, 1218 NULL, 1219 &iio_dev_opaque->channel_attr_list); 1220 if (ret < 0) 1221 return ret; 1222 1223 return 1; 1224 } 1225 1226 static int iio_device_add_info_mask_type(struct iio_dev *indio_dev, 1227 struct iio_chan_spec const *chan, 1228 enum iio_shared_by shared_by, 1229 const long *infomask) 1230 { 1231 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1232 int i, ret, attrcount = 0; 1233 1234 for_each_set_bit(i, infomask, sizeof(*infomask)*8) { 1235 if (i >= ARRAY_SIZE(iio_chan_info_postfix)) 1236 return -EINVAL; 1237 ret = __iio_add_chan_devattr(iio_chan_info_postfix[i], 1238 chan, 1239 &iio_read_channel_info, 1240 &iio_write_channel_info, 1241 i, 1242 shared_by, 1243 &indio_dev->dev, 1244 NULL, 1245 &iio_dev_opaque->channel_attr_list); 1246 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) 1247 continue; 1248 else if (ret < 0) 1249 return ret; 1250 attrcount++; 1251 } 1252 1253 return attrcount; 1254 } 1255 1256 static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev, 1257 struct iio_chan_spec const *chan, 1258 enum iio_shared_by shared_by, 1259 const long *infomask) 1260 { 1261 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1262 int i, ret, attrcount = 0; 1263 char *avail_postfix; 1264 1265 for_each_set_bit(i, infomask, sizeof(*infomask) * 8) { 1266 if (i >= ARRAY_SIZE(iio_chan_info_postfix)) 1267 return -EINVAL; 1268 avail_postfix = kasprintf(GFP_KERNEL, 1269 "%s_available", 1270 iio_chan_info_postfix[i]); 1271 if (!avail_postfix) 1272 return -ENOMEM; 1273 1274 ret = __iio_add_chan_devattr(avail_postfix, 1275 chan, 1276 &iio_read_channel_info_avail, 1277 NULL, 1278 i, 1279 shared_by, 1280 &indio_dev->dev, 1281 NULL, 1282 &iio_dev_opaque->channel_attr_list); 1283 kfree(avail_postfix); 1284 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) 1285 continue; 1286 else if (ret < 0) 1287 return ret; 1288 attrcount++; 1289 } 1290 1291 return attrcount; 1292 } 1293 1294 static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, 1295 struct iio_chan_spec const *chan) 1296 { 1297 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1298 int ret, attrcount = 0; 1299 const struct iio_chan_spec_ext_info *ext_info; 1300 1301 if (chan->channel < 0) 1302 return 0; 1303 ret = iio_device_add_info_mask_type(indio_dev, chan, 1304 IIO_SEPARATE, 1305 &chan->info_mask_separate); 1306 if (ret < 0) 1307 return ret; 1308 attrcount += ret; 1309 1310 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1311 IIO_SEPARATE, 1312 &chan->info_mask_separate_available); 1313 if (ret < 0) 1314 return ret; 1315 attrcount += ret; 1316 1317 ret = iio_device_add_info_mask_type(indio_dev, chan, 1318 IIO_SHARED_BY_TYPE, 1319 &chan->info_mask_shared_by_type); 1320 if (ret < 0) 1321 return ret; 1322 attrcount += ret; 1323 1324 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1325 IIO_SHARED_BY_TYPE, 1326 &chan->info_mask_shared_by_type_available); 1327 if (ret < 0) 1328 return ret; 1329 attrcount += ret; 1330 1331 ret = iio_device_add_info_mask_type(indio_dev, chan, 1332 IIO_SHARED_BY_DIR, 1333 &chan->info_mask_shared_by_dir); 1334 if (ret < 0) 1335 return ret; 1336 attrcount += ret; 1337 1338 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1339 IIO_SHARED_BY_DIR, 1340 &chan->info_mask_shared_by_dir_available); 1341 if (ret < 0) 1342 return ret; 1343 attrcount += ret; 1344 1345 ret = iio_device_add_info_mask_type(indio_dev, chan, 1346 IIO_SHARED_BY_ALL, 1347 
&chan->info_mask_shared_by_all); 1348 if (ret < 0) 1349 return ret; 1350 attrcount += ret; 1351 1352 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1353 IIO_SHARED_BY_ALL, 1354 &chan->info_mask_shared_by_all_available); 1355 if (ret < 0) 1356 return ret; 1357 attrcount += ret; 1358 1359 ret = iio_device_add_channel_label(indio_dev, chan); 1360 if (ret < 0) 1361 return ret; 1362 attrcount += ret; 1363 1364 if (chan->ext_info) { 1365 unsigned int i = 0; 1366 1367 for (ext_info = chan->ext_info; ext_info->name; ext_info++) { 1368 ret = __iio_add_chan_devattr(ext_info->name, 1369 chan, 1370 ext_info->read ? 1371 &iio_read_channel_ext_info : NULL, 1372 ext_info->write ? 1373 &iio_write_channel_ext_info : NULL, 1374 i, 1375 ext_info->shared, 1376 &indio_dev->dev, 1377 NULL, 1378 &iio_dev_opaque->channel_attr_list); 1379 i++; 1380 if (ret == -EBUSY && ext_info->shared) 1381 continue; 1382 1383 if (ret) 1384 return ret; 1385 1386 attrcount++; 1387 } 1388 } 1389 1390 return attrcount; 1391 } 1392 1393 /** 1394 * iio_free_chan_devattr_list() - Free a list of IIO device attributes 1395 * @attr_list: List of IIO device attributes 1396 * 1397 * This function frees the memory allocated for each of the IIO device 1398 * attributes in the list. 1399 */ 1400 void iio_free_chan_devattr_list(struct list_head *attr_list) 1401 { 1402 struct iio_dev_attr *p, *n; 1403 1404 list_for_each_entry_safe(p, n, attr_list, l) { 1405 kfree_const(p->dev_attr.attr.name); 1406 list_del(&p->l); 1407 kfree(p); 1408 } 1409 } 1410 1411 static ssize_t name_show(struct device *dev, struct device_attribute *attr, 1412 char *buf) 1413 { 1414 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1415 1416 return sysfs_emit(buf, "%s\n", indio_dev->name); 1417 } 1418 1419 static DEVICE_ATTR_RO(name); 1420 1421 static ssize_t label_show(struct device *dev, struct device_attribute *attr, 1422 char *buf) 1423 { 1424 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1425 1426 return sysfs_emit(buf, "%s\n", indio_dev->label); 1427 } 1428 1429 static DEVICE_ATTR_RO(label); 1430 1431 static ssize_t current_timestamp_clock_show(struct device *dev, 1432 struct device_attribute *attr, 1433 char *buf) 1434 { 1435 const struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1436 const clockid_t clk = iio_device_get_clock(indio_dev); 1437 const char *name; 1438 ssize_t sz; 1439 1440 switch (clk) { 1441 case CLOCK_REALTIME: 1442 name = "realtime\n"; 1443 sz = sizeof("realtime\n"); 1444 break; 1445 case CLOCK_MONOTONIC: 1446 name = "monotonic\n"; 1447 sz = sizeof("monotonic\n"); 1448 break; 1449 case CLOCK_MONOTONIC_RAW: 1450 name = "monotonic_raw\n"; 1451 sz = sizeof("monotonic_raw\n"); 1452 break; 1453 case CLOCK_REALTIME_COARSE: 1454 name = "realtime_coarse\n"; 1455 sz = sizeof("realtime_coarse\n"); 1456 break; 1457 case CLOCK_MONOTONIC_COARSE: 1458 name = "monotonic_coarse\n"; 1459 sz = sizeof("monotonic_coarse\n"); 1460 break; 1461 case CLOCK_BOOTTIME: 1462 name = "boottime\n"; 1463 sz = sizeof("boottime\n"); 1464 break; 1465 case CLOCK_TAI: 1466 name = "tai\n"; 1467 sz = sizeof("tai\n"); 1468 break; 1469 default: 1470 BUG(); 1471 } 1472 1473 memcpy(buf, name, sz); 1474 return sz; 1475 } 1476 1477 static ssize_t current_timestamp_clock_store(struct device *dev, 1478 struct device_attribute *attr, 1479 const char *buf, size_t len) 1480 { 1481 clockid_t clk; 1482 int ret; 1483 1484 if (sysfs_streq(buf, "realtime")) 1485 clk = CLOCK_REALTIME; 1486 else if (sysfs_streq(buf, "monotonic")) 1487 clk = CLOCK_MONOTONIC; 1488 else if (sysfs_streq(buf, 
"monotonic_raw")) 1489 clk = CLOCK_MONOTONIC_RAW; 1490 else if (sysfs_streq(buf, "realtime_coarse")) 1491 clk = CLOCK_REALTIME_COARSE; 1492 else if (sysfs_streq(buf, "monotonic_coarse")) 1493 clk = CLOCK_MONOTONIC_COARSE; 1494 else if (sysfs_streq(buf, "boottime")) 1495 clk = CLOCK_BOOTTIME; 1496 else if (sysfs_streq(buf, "tai")) 1497 clk = CLOCK_TAI; 1498 else 1499 return -EINVAL; 1500 1501 ret = iio_device_set_clock(dev_to_iio_dev(dev), clk); 1502 if (ret) 1503 return ret; 1504 1505 return len; 1506 } 1507 1508 int iio_device_register_sysfs_group(struct iio_dev *indio_dev, 1509 const struct attribute_group *group) 1510 { 1511 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1512 const struct attribute_group **new, **old = iio_dev_opaque->groups; 1513 unsigned int cnt = iio_dev_opaque->groupcounter; 1514 1515 new = krealloc(old, sizeof(*new) * (cnt + 2), GFP_KERNEL); 1516 if (!new) 1517 return -ENOMEM; 1518 1519 new[iio_dev_opaque->groupcounter++] = group; 1520 new[iio_dev_opaque->groupcounter] = NULL; 1521 1522 iio_dev_opaque->groups = new; 1523 1524 return 0; 1525 } 1526 1527 static DEVICE_ATTR_RW(current_timestamp_clock); 1528 1529 static int iio_device_register_sysfs(struct iio_dev *indio_dev) 1530 { 1531 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1532 int i, ret = 0, attrcount, attrn, attrcount_orig = 0; 1533 struct iio_dev_attr *p; 1534 struct attribute **attr, *clk = NULL; 1535 1536 /* First count elements in any existing group */ 1537 if (indio_dev->info->attrs) { 1538 attr = indio_dev->info->attrs->attrs; 1539 while (*attr++ != NULL) 1540 attrcount_orig++; 1541 } 1542 attrcount = attrcount_orig; 1543 /* 1544 * New channel registration method - relies on the fact a group does 1545 * not need to be initialized if its name is NULL. 1546 */ 1547 if (indio_dev->channels) 1548 for (i = 0; i < indio_dev->num_channels; i++) { 1549 const struct iio_chan_spec *chan = 1550 &indio_dev->channels[i]; 1551 1552 if (chan->type == IIO_TIMESTAMP) 1553 clk = &dev_attr_current_timestamp_clock.attr; 1554 1555 ret = iio_device_add_channel_sysfs(indio_dev, chan); 1556 if (ret < 0) 1557 goto error_clear_attrs; 1558 attrcount += ret; 1559 } 1560 1561 if (iio_dev_opaque->event_interface) 1562 clk = &dev_attr_current_timestamp_clock.attr; 1563 1564 if (indio_dev->name) 1565 attrcount++; 1566 if (indio_dev->label) 1567 attrcount++; 1568 if (clk) 1569 attrcount++; 1570 1571 iio_dev_opaque->chan_attr_group.attrs = 1572 kcalloc(attrcount + 1, 1573 sizeof(iio_dev_opaque->chan_attr_group.attrs[0]), 1574 GFP_KERNEL); 1575 if (iio_dev_opaque->chan_attr_group.attrs == NULL) { 1576 ret = -ENOMEM; 1577 goto error_clear_attrs; 1578 } 1579 /* Copy across original attributes, and point to original binary attributes */ 1580 if (indio_dev->info->attrs) { 1581 memcpy(iio_dev_opaque->chan_attr_group.attrs, 1582 indio_dev->info->attrs->attrs, 1583 sizeof(iio_dev_opaque->chan_attr_group.attrs[0]) 1584 *attrcount_orig); 1585 iio_dev_opaque->chan_attr_group.is_visible = 1586 indio_dev->info->attrs->is_visible; 1587 iio_dev_opaque->chan_attr_group.bin_attrs = 1588 indio_dev->info->attrs->bin_attrs; 1589 } 1590 attrn = attrcount_orig; 1591 /* Add all elements from the list. 
*/ 1592 list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l) 1593 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr; 1594 if (indio_dev->name) 1595 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr; 1596 if (indio_dev->label) 1597 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr; 1598 if (clk) 1599 iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk; 1600 1601 ret = iio_device_register_sysfs_group(indio_dev, 1602 &iio_dev_opaque->chan_attr_group); 1603 if (ret) 1604 goto error_clear_attrs; 1605 1606 return 0; 1607 1608 error_clear_attrs: 1609 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); 1610 1611 return ret; 1612 } 1613 1614 static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) 1615 { 1616 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1617 1618 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); 1619 kfree(iio_dev_opaque->chan_attr_group.attrs); 1620 iio_dev_opaque->chan_attr_group.attrs = NULL; 1621 kfree(iio_dev_opaque->groups); 1622 iio_dev_opaque->groups = NULL; 1623 } 1624 1625 static void iio_dev_release(struct device *device) 1626 { 1627 struct iio_dev *indio_dev = dev_to_iio_dev(device); 1628 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1629 1630 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) 1631 iio_device_unregister_trigger_consumer(indio_dev); 1632 iio_device_unregister_eventset(indio_dev); 1633 iio_device_unregister_sysfs(indio_dev); 1634 1635 iio_device_detach_buffers(indio_dev); 1636 1637 lockdep_unregister_key(&iio_dev_opaque->mlock_key); 1638 1639 ida_free(&iio_ida, iio_dev_opaque->id); 1640 kfree(iio_dev_opaque); 1641 } 1642 1643 const struct device_type iio_device_type = { 1644 .name = "iio_device", 1645 .release = iio_dev_release, 1646 }; 1647 1648 /** 1649 * iio_device_alloc() - allocate an iio_dev from a driver 1650 * @parent: Parent device. 1651 * @sizeof_priv: Space to allocate for private structure. 
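 *
 * The private area is allocated together with the struct iio_dev and is
 * aligned to IIO_DMA_MINALIGN; drivers reach it via iio_priv(). A minimal
 * sketch (struct my_state is a hypothetical driver type, not defined
 * here):
 *
 *	indio_dev = iio_device_alloc(parent, sizeof(struct my_state));
 *	if (!indio_dev)
 *		return -ENOMEM;
 *	st = iio_priv(indio_dev);
 *
 * Returns a pointer to the new struct iio_dev on success or NULL on
 * failure; release it with iio_device_free() (or use
 * devm_iio_device_alloc() instead).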
1652 **/ 1653 struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) 1654 { 1655 struct iio_dev_opaque *iio_dev_opaque; 1656 struct iio_dev *indio_dev; 1657 size_t alloc_size; 1658 1659 alloc_size = sizeof(struct iio_dev_opaque); 1660 if (sizeof_priv) { 1661 alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN); 1662 alloc_size += sizeof_priv; 1663 } 1664 1665 iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL); 1666 if (!iio_dev_opaque) 1667 return NULL; 1668 1669 indio_dev = &iio_dev_opaque->indio_dev; 1670 indio_dev->priv = (char *)iio_dev_opaque + 1671 ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN); 1672 1673 indio_dev->dev.parent = parent; 1674 indio_dev->dev.type = &iio_device_type; 1675 indio_dev->dev.bus = &iio_bus_type; 1676 device_initialize(&indio_dev->dev); 1677 mutex_init(&iio_dev_opaque->mlock); 1678 mutex_init(&iio_dev_opaque->info_exist_lock); 1679 INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list); 1680 1681 iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL); 1682 if (iio_dev_opaque->id < 0) { 1683 /* cannot use a dev_err as the name isn't available */ 1684 pr_err("failed to get device id\n"); 1685 kfree(iio_dev_opaque); 1686 return NULL; 1687 } 1688 1689 if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) { 1690 ida_free(&iio_ida, iio_dev_opaque->id); 1691 kfree(iio_dev_opaque); 1692 return NULL; 1693 } 1694 1695 INIT_LIST_HEAD(&iio_dev_opaque->buffer_list); 1696 INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers); 1697 1698 lockdep_register_key(&iio_dev_opaque->mlock_key); 1699 lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key); 1700 1701 return indio_dev; 1702 } 1703 EXPORT_SYMBOL(iio_device_alloc); 1704 1705 /** 1706 * iio_device_free() - free an iio_dev from a driver 1707 * @dev: the iio_dev associated with the device 1708 **/ 1709 void iio_device_free(struct iio_dev *dev) 1710 { 1711 if (dev) 1712 put_device(&dev->dev); 1713 } 1714 EXPORT_SYMBOL(iio_device_free); 1715 1716 static void devm_iio_device_release(void *iio_dev) 1717 { 1718 iio_device_free(iio_dev); 1719 } 1720 1721 /** 1722 * devm_iio_device_alloc - Resource-managed iio_device_alloc() 1723 * @parent: Device to allocate iio_dev for, and parent for this IIO device 1724 * @sizeof_priv: Space to allocate for private structure. 1725 * 1726 * Managed iio_device_alloc. iio_dev allocated with this function is 1727 * automatically freed on driver detach. 1728 * 1729 * RETURNS: 1730 * Pointer to allocated iio_dev on success, NULL on failure. 
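 *
 * A minimal, hypothetical probe() sketch (struct my_state and my_info are
 * placeholders, error handling trimmed):
 *
 *	indio_dev = devm_iio_device_alloc(&pdev->dev,
 *					  sizeof(struct my_state));
 *	if (!indio_dev)
 *		return -ENOMEM;
 *	indio_dev->name = "my_sensor";
 *	indio_dev->info = &my_info;
 *	indio_dev->modes = INDIO_DIRECT_MODE;
 *	return devm_iio_device_register(&pdev->dev, indio_dev);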
1731 */ 1732 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv) 1733 { 1734 struct iio_dev *iio_dev; 1735 int ret; 1736 1737 iio_dev = iio_device_alloc(parent, sizeof_priv); 1738 if (!iio_dev) 1739 return NULL; 1740 1741 ret = devm_add_action_or_reset(parent, devm_iio_device_release, 1742 iio_dev); 1743 if (ret) 1744 return NULL; 1745 1746 return iio_dev; 1747 } 1748 EXPORT_SYMBOL_GPL(devm_iio_device_alloc); 1749 1750 /** 1751 * iio_chrdev_open() - chrdev file open for buffer access and ioctls 1752 * @inode: Inode structure for identifying the device in the file system 1753 * @filp: File structure for iio device used to keep and later access 1754 * private data 1755 * 1756 * Return: 0 on success or -EBUSY if the device is already opened 1757 **/ 1758 static int iio_chrdev_open(struct inode *inode, struct file *filp) 1759 { 1760 struct iio_dev_opaque *iio_dev_opaque = 1761 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev); 1762 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev; 1763 struct iio_dev_buffer_pair *ib; 1764 1765 if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags)) 1766 return -EBUSY; 1767 1768 iio_device_get(indio_dev); 1769 1770 ib = kmalloc(sizeof(*ib), GFP_KERNEL); 1771 if (!ib) { 1772 iio_device_put(indio_dev); 1773 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags); 1774 return -ENOMEM; 1775 } 1776 1777 ib->indio_dev = indio_dev; 1778 ib->buffer = indio_dev->buffer; 1779 1780 filp->private_data = ib; 1781 1782 return 0; 1783 } 1784 1785 /** 1786 * iio_chrdev_release() - chrdev file close buffer access and ioctls 1787 * @inode: Inode structure pointer for the char device 1788 * @filp: File structure pointer for the char device 1789 * 1790 * Return: 0 for successful release 1791 */ 1792 static int iio_chrdev_release(struct inode *inode, struct file *filp) 1793 { 1794 struct iio_dev_buffer_pair *ib = filp->private_data; 1795 struct iio_dev_opaque *iio_dev_opaque = 1796 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev); 1797 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev; 1798 1799 kfree(ib); 1800 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags); 1801 iio_device_put(indio_dev); 1802 1803 return 0; 1804 } 1805 1806 void iio_device_ioctl_handler_register(struct iio_dev *indio_dev, 1807 struct iio_ioctl_handler *h) 1808 { 1809 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1810 1811 list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers); 1812 } 1813 1814 void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h) 1815 { 1816 list_del(&h->entry); 1817 } 1818 1819 static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 1820 { 1821 struct iio_dev_buffer_pair *ib = filp->private_data; 1822 struct iio_dev *indio_dev = ib->indio_dev; 1823 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1824 struct iio_ioctl_handler *h; 1825 int ret = -ENODEV; 1826 1827 mutex_lock(&iio_dev_opaque->info_exist_lock); 1828 1829 /** 1830 * The NULL check here is required to prevent crashing when a device 1831 * is being removed while userspace would still have open file handles 1832 * to try to access this device. 
1833 */ 1834 if (!indio_dev->info) 1835 goto out_unlock; 1836 1837 list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) { 1838 ret = h->ioctl(indio_dev, filp, cmd, arg); 1839 if (ret != IIO_IOCTL_UNHANDLED) 1840 break; 1841 } 1842 1843 if (ret == IIO_IOCTL_UNHANDLED) 1844 ret = -ENODEV; 1845 1846 out_unlock: 1847 mutex_unlock(&iio_dev_opaque->info_exist_lock); 1848 1849 return ret; 1850 } 1851 1852 static const struct file_operations iio_buffer_fileops = { 1853 .owner = THIS_MODULE, 1854 .llseek = noop_llseek, 1855 .read = iio_buffer_read_outer_addr, 1856 .write = iio_buffer_write_outer_addr, 1857 .poll = iio_buffer_poll_addr, 1858 .unlocked_ioctl = iio_ioctl, 1859 .compat_ioctl = compat_ptr_ioctl, 1860 .open = iio_chrdev_open, 1861 .release = iio_chrdev_release, 1862 }; 1863 1864 static const struct file_operations iio_event_fileops = { 1865 .owner = THIS_MODULE, 1866 .llseek = noop_llseek, 1867 .unlocked_ioctl = iio_ioctl, 1868 .compat_ioctl = compat_ptr_ioctl, 1869 .open = iio_chrdev_open, 1870 .release = iio_chrdev_release, 1871 }; 1872 1873 static int iio_check_unique_scan_index(struct iio_dev *indio_dev) 1874 { 1875 int i, j; 1876 const struct iio_chan_spec *channels = indio_dev->channels; 1877 1878 if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES)) 1879 return 0; 1880 1881 for (i = 0; i < indio_dev->num_channels - 1; i++) { 1882 if (channels[i].scan_index < 0) 1883 continue; 1884 for (j = i + 1; j < indio_dev->num_channels; j++) 1885 if (channels[i].scan_index == channels[j].scan_index) { 1886 dev_err(&indio_dev->dev, 1887 "Duplicate scan index %d\n", 1888 channels[i].scan_index); 1889 return -EINVAL; 1890 } 1891 } 1892 1893 return 0; 1894 } 1895 1896 static int iio_check_extended_name(const struct iio_dev *indio_dev) 1897 { 1898 unsigned int i; 1899 1900 if (!indio_dev->info->read_label) 1901 return 0; 1902 1903 for (i = 0; i < indio_dev->num_channels; i++) { 1904 if (indio_dev->channels[i].extend_name) { 1905 dev_err(&indio_dev->dev, 1906 "Cannot use labels and extend_name at the same time\n"); 1907 return -EINVAL; 1908 } 1909 } 1910 1911 return 0; 1912 } 1913 1914 static const struct iio_buffer_setup_ops noop_ring_setup_ops; 1915 1916 int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) 1917 { 1918 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1919 struct fwnode_handle *fwnode; 1920 int ret; 1921 1922 if (!indio_dev->info) 1923 return -EINVAL; 1924 1925 iio_dev_opaque->driver_module = this_mod; 1926 1927 /* If the calling driver did not initialize firmware node, do it here */ 1928 if (dev_fwnode(&indio_dev->dev)) 1929 fwnode = dev_fwnode(&indio_dev->dev); 1930 else 1931 fwnode = dev_fwnode(indio_dev->dev.parent); 1932 device_set_node(&indio_dev->dev, fwnode); 1933 1934 fwnode_property_read_string(fwnode, "label", &indio_dev->label); 1935 1936 ret = iio_check_unique_scan_index(indio_dev); 1937 if (ret < 0) 1938 return ret; 1939 1940 ret = iio_check_extended_name(indio_dev); 1941 if (ret < 0) 1942 return ret; 1943 1944 iio_device_register_debugfs(indio_dev); 1945 1946 ret = iio_buffers_alloc_sysfs_and_mask(indio_dev); 1947 if (ret) { 1948 dev_err(indio_dev->dev.parent, 1949 "Failed to create buffer sysfs interfaces\n"); 1950 goto error_unreg_debugfs; 1951 } 1952 1953 ret = iio_device_register_sysfs(indio_dev); 1954 if (ret) { 1955 dev_err(indio_dev->dev.parent, 1956 "Failed to register sysfs interfaces\n"); 1957 goto error_buffer_free_sysfs; 1958 } 1959 ret = iio_device_register_eventset(indio_dev); 1960 if (ret) { 1961 
dev_err(indio_dev->dev.parent, 1962 "Failed to register event set\n"); 1963 goto error_free_sysfs; 1964 } 1965 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) 1966 iio_device_register_trigger_consumer(indio_dev); 1967 1968 if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) && 1969 indio_dev->setup_ops == NULL) 1970 indio_dev->setup_ops = &noop_ring_setup_ops; 1971 1972 if (iio_dev_opaque->attached_buffers_cnt) 1973 cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops); 1974 else if (iio_dev_opaque->event_interface) 1975 cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops); 1976 1977 if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) { 1978 indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id); 1979 iio_dev_opaque->chrdev.owner = this_mod; 1980 } 1981 1982 /* assign device groups now; they should be all registered now */ 1983 indio_dev->dev.groups = iio_dev_opaque->groups; 1984 1985 ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev); 1986 if (ret < 0) 1987 goto error_unreg_eventset; 1988 1989 return 0; 1990 1991 error_unreg_eventset: 1992 iio_device_unregister_eventset(indio_dev); 1993 error_free_sysfs: 1994 iio_device_unregister_sysfs(indio_dev); 1995 error_buffer_free_sysfs: 1996 iio_buffers_free_sysfs_and_mask(indio_dev); 1997 error_unreg_debugfs: 1998 iio_device_unregister_debugfs(indio_dev); 1999 return ret; 2000 } 2001 EXPORT_SYMBOL(__iio_device_register); 2002 2003 /** 2004 * iio_device_unregister() - unregister a device from the IIO subsystem 2005 * @indio_dev: Device structure representing the device. 2006 **/ 2007 void iio_device_unregister(struct iio_dev *indio_dev) 2008 { 2009 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2010 2011 cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev); 2012 2013 mutex_lock(&iio_dev_opaque->info_exist_lock); 2014 2015 iio_device_unregister_debugfs(indio_dev); 2016 2017 iio_disable_all_buffers(indio_dev); 2018 2019 indio_dev->info = NULL; 2020 2021 iio_device_wakeup_eventset(indio_dev); 2022 iio_buffer_wakeup_poll(indio_dev); 2023 2024 mutex_unlock(&iio_dev_opaque->info_exist_lock); 2025 2026 iio_buffers_free_sysfs_and_mask(indio_dev); 2027 } 2028 EXPORT_SYMBOL(iio_device_unregister); 2029 2030 static void devm_iio_device_unreg(void *indio_dev) 2031 { 2032 iio_device_unregister(indio_dev); 2033 } 2034 2035 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, 2036 struct module *this_mod) 2037 { 2038 int ret; 2039 2040 ret = __iio_device_register(indio_dev, this_mod); 2041 if (ret) 2042 return ret; 2043 2044 return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev); 2045 } 2046 EXPORT_SYMBOL_GPL(__devm_iio_device_register); 2047 2048 /** 2049 * iio_device_claim_direct_mode - Keep device in direct mode 2050 * @indio_dev: the iio_dev associated with the device 2051 * 2052 * If the device is in direct mode it is guaranteed to stay 2053 * that way until iio_device_release_direct_mode() is called. 
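 *
 * A typical, illustrative pattern in a driver's read_raw() callback
 * (my_read_sensor() is a placeholder for the actual device access):
 *
 *	ret = iio_device_claim_direct_mode(indio_dev);
 *	if (ret)
 *		return ret;
 *	ret = my_read_sensor(indio_dev, chan, val);
 *	iio_device_release_direct_mode(indio_dev);
 *	return ret;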
2054 * 2055 * Use with iio_device_release_direct_mode() 2056 * 2057 * Returns: 0 on success, -EBUSY on failure 2058 */ 2059 int iio_device_claim_direct_mode(struct iio_dev *indio_dev) 2060 { 2061 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2062 2063 mutex_lock(&iio_dev_opaque->mlock); 2064 2065 if (iio_buffer_enabled(indio_dev)) { 2066 mutex_unlock(&iio_dev_opaque->mlock); 2067 return -EBUSY; 2068 } 2069 return 0; 2070 } 2071 EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode); 2072 2073 /** 2074 * iio_device_release_direct_mode - releases claim on direct mode 2075 * @indio_dev: the iio_dev associated with the device 2076 * 2077 * Release the claim. Device is no longer guaranteed to stay 2078 * in direct mode. 2079 * 2080 * Use with iio_device_claim_direct_mode() 2081 */ 2082 void iio_device_release_direct_mode(struct iio_dev *indio_dev) 2083 { 2084 mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock); 2085 } 2086 EXPORT_SYMBOL_GPL(iio_device_release_direct_mode); 2087 2088 /** 2089 * iio_device_claim_buffer_mode - Keep device in buffer mode 2090 * @indio_dev: the iio_dev associated with the device 2091 * 2092 * If the device is in buffer mode it is guaranteed to stay 2093 * that way until iio_device_release_buffer_mode() is called. 2094 * 2095 * Use with iio_device_release_buffer_mode(). 2096 * 2097 * Returns: 0 on success, -EBUSY on failure. 2098 */ 2099 int iio_device_claim_buffer_mode(struct iio_dev *indio_dev) 2100 { 2101 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2102 2103 mutex_lock(&iio_dev_opaque->mlock); 2104 2105 if (iio_buffer_enabled(indio_dev)) 2106 return 0; 2107 2108 mutex_unlock(&iio_dev_opaque->mlock); 2109 return -EBUSY; 2110 } 2111 EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode); 2112 2113 /** 2114 * iio_device_release_buffer_mode - releases claim on buffer mode 2115 * @indio_dev: the iio_dev associated with the device 2116 * 2117 * Release the claim. Device is no longer guaranteed to stay 2118 * in buffer mode. 2119 * 2120 * Use with iio_device_claim_buffer_mode(). 2121 */ 2122 void iio_device_release_buffer_mode(struct iio_dev *indio_dev) 2123 { 2124 mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock); 2125 } 2126 EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode); 2127 2128 /** 2129 * iio_device_get_current_mode() - helper function providing read-only access to 2130 * the opaque @currentmode variable 2131 * @indio_dev: IIO device structure for device 2132 */ 2133 int iio_device_get_current_mode(struct iio_dev *indio_dev) 2134 { 2135 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2136 2137 return iio_dev_opaque->currentmode; 2138 } 2139 EXPORT_SYMBOL_GPL(iio_device_get_current_mode); 2140 2141 subsys_initcall(iio_init); 2142 module_exit(iio_exit); 2143 2144 MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>"); 2145 MODULE_DESCRIPTION("Industrial I/O core"); 2146 MODULE_LICENSE("GPL"); 2147