// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#define pr_fmt(fmt) "iio-core: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/property.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static struct dentry *iio_debugfs_dentry;

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY] = "gravity",
	[IIO_POSITIONRELATIVE] = "positionrelative",
	[IIO_PHASE] = "phase",
	[IIO_MASSCONCENTRATION] = "massconcentration",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_LIGHT_DUV] = "duv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
	[IIO_MOD_RUNNING] = "running",
	[IIO_MOD_JOGGING] = "jogging",
	[IIO_MOD_WALKING] = "walking",
	[IIO_MOD_STILL] = "still",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
	[IIO_MOD_I] = "i",
	[IIO_MOD_Q] = "q",
	[IIO_MOD_CO2] = "co2",
	[IIO_MOD_VOC] = "voc",
	[IIO_MOD_PM1] = "pm1",
	[IIO_MOD_PM2P5] = "pm2p5",
	[IIO_MOD_PM4] = "pm4",
	[IIO_MOD_PM10] = "pm10",
	[IIO_MOD_ETHANOL] = "ethanol",
	[IIO_MOD_H2] = "h2",
	[IIO_MOD_O2] = "o2",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
	= "filter_high_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
	[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
	[IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
	[IIO_CHAN_INFO_INT_TIME] = "integration_time",
	[IIO_CHAN_INFO_ENABLE] = "en",
	[IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
	[IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
	[IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
	[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
	[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
	[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
	[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
	[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
};
/**
 * iio_device_id() - query the unique ID for the device
 * @indio_dev: Device structure whose ID is being queried
 *
 * The IIO device ID is a unique index used for example for the naming
 * of the character device /dev/iio\:device[ID]
 */
int iio_device_id(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->id;
}
EXPORT_SYMBOL_GPL(iio_device_id);

/**
 * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
 * @array: array of strings
 * @n: number of strings in the array
 * @str: string to match with
 *
 * Returns index of @str in the @array or -EINVAL, similar to match_string().
 * Uses sysfs_streq instead of strcmp for matching.
 *
 * This routine will look for a string in an array of strings.
 * The search will continue until the element is found or the n-th element
 * is reached, regardless of any NULL elements in the array.
 */
static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
					    const char *str)
{
	const char *item;
	int index;

	for (index = 0; index < n; index++) {
		item = array[index];
		if (!item)
			continue;
		if (sysfs_streq(item, str))
			return index;
	}

	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)
/*
 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
 */
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	return iio_dev_opaque->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
#endif

/**
 * iio_find_channel_from_si() - get channel from its scan index
 * @indio_dev: device
 * @si: scan index to match
 */
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
	int i;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].scan_index == si)
			return &indio_dev->channels[i];
	return NULL;
}

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);

/**
 * iio_device_set_clock() - Set current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 * @clock_id: timestamping clock posix identifier to set.
 */
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
	int ret;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	ret = mutex_lock_interruptible(&indio_dev->mlock);
	if (ret)
		return ret;
	if ((ev_int && iio_event_enabled(ev_int)) ||
	    iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	iio_dev_opaque->clock_id = clock_id;
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_device_set_clock);

/**
 * iio_device_get_clock() - Retrieve current timestamping clock for the device
 * @indio_dev: IIO device structure containing the device
 */
clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->clock_id;
}
EXPORT_SYMBOL(iio_device_get_clock);

/**
 * iio_get_time_ns() - utility function to get a time stamp for events etc
 * @indio_dev: device
 */
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
	struct timespec64 tp;

	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
		return ktime_get_real_ns();
	case CLOCK_MONOTONIC:
		return ktime_get_ns();
	case CLOCK_MONOTONIC_RAW:
		return ktime_get_raw_ns();
	case CLOCK_REALTIME_COARSE:
		return ktime_to_ns(ktime_get_coarse_real());
	case CLOCK_MONOTONIC_COARSE:
		ktime_get_coarse_ts64(&tp);
		return timespec64_to_ns(&tp);
	case CLOCK_BOOTTIME:
		return ktime_get_boottime_ns();
	case CLOCK_TAI:
		return ktime_get_clocktai_ns();
	default:
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_ns);
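
/*
 * Illustrative sketch (not part of this file): a driver normally timestamps
 * samples with iio_get_time_ns() so that the value follows whatever clock
 * userspace selected through iio_device_set_clock() / the
 * current_timestamp_clock attribute. The foo_* names and the scan buffer
 * below are hypothetical.
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		s64 ts = iio_get_time_ns(indio_dev);
 *
 *		foo_fill_scan(indio_dev);
 *		iio_push_to_buffers_with_timestamp(indio_dev, foo_scan_buf, ts);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */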

/**
 * iio_get_time_res() - utility function to get time stamp clock resolution in
 *			nano seconds.
 * @indio_dev: device
 */
unsigned int iio_get_time_res(const struct iio_dev *indio_dev)
{
	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		return hrtimer_resolution;
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
		return LOW_RES_NSEC;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_res);

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		pr_err("could not register bus type\n");
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		goto error_unregister_bus_type;
	}

	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}

#if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
				    size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned val = 0;
	int ret;

	if (*ppos > 0)
		return simple_read_from_buffer(userbuf, count, ppos,
					       iio_dev_opaque->read_buf,
					       iio_dev_opaque->read_buf_len);

	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  iio_dev_opaque->cached_reg_addr,
						  0, &val);
	if (ret) {
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
		return ret;
	}

	iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
						sizeof(iio_dev_opaque->read_buf),
						"0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos,
				       iio_dev_opaque->read_buf,
				       iio_dev_opaque->read_buf_len);
}

static ssize_t iio_debugfs_write_reg(struct file *file,
		     const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned reg, val;
	char buf[80];
	int ret;

	count = min_t(size_t, count, (sizeof(buf)-1));
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		iio_dev_opaque->cached_reg_addr = reg;
		break;
	case 2:
		iio_dev_opaque->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}
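
/*
 * Illustrative sketch (not part of this file): the debugfs_reg_access()
 * callback invoked above follows the convention that a NULL @readval means
 * "write @writeval to @reg", otherwise "read @reg into the location pointed
 * to by @readval". A regmap-based driver (foo_state and its regmap member
 * are hypothetical) could wire it up like this:
 *
 *	static int foo_reg_access(struct iio_dev *indio_dev, unsigned int reg,
 *				  unsigned int writeval, unsigned int *readval)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		if (readval)
 *			return regmap_read(st->regmap, reg, readval);
 *
 *		return regmap_write(st->regmap, reg, writeval);
 *	}
 *
 * From userspace, "echo 0x10 > direct_reg_access" caches the address for a
 * later "cat", while "echo 0x10 0xff > direct_reg_access" performs a write.
 */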

static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (indio_dev->info->debugfs_reg_access == NULL)
		return;

	if (!iio_debugfs_dentry)
		return;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_dev_opaque->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);

	debugfs_create_file("direct_reg_access", 0644,
			    iio_dev_opaque->debugfs_dentry, indio_dev,
			    &iio_debugfs_reg_fops);
}
#else
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */

static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}

static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->write(indio_dev, ext_info->private,
			       this_attr->c, buf, len);
}

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	unsigned int i;
	size_t len = 0;

	if (!e->num_items)
		return 0;

	for (i = 0; i < e->num_items; ++i) {
		if (!e->items[i])
			continue;
		len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
	}

	/* replace last space with a newline */
	buf[len - 1] = '\n';

	return len;
}
EXPORT_SYMBOL_GPL(iio_enum_available_read);

ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int i;

	if (!e->get)
		return -EINVAL;

	i = e->get(indio_dev, chan);
	if (i < 0)
		return i;
	else if (i >= e->num_items || !e->items[i])
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", e->items[i]);
}
EXPORT_SYMBOL_GPL(iio_enum_read);

ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len)
{
	const struct iio_enum *e = (const struct iio_enum *)priv;
	int ret;

	if (!e->set)
		return -EINVAL;

	ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf);
	if (ret < 0)
		return ret;

	ret = e->set(indio_dev, chan, ret);
	return ret ? ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);
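
/*
 * Illustrative sketch (not part of this file): a driver typically exposes the
 * iio_enum_* helpers above through a struct iio_enum and ext_info entries
 * (usually via the IIO_ENUM()/IIO_ENUM_AVAILABLE() macros from
 * <linux/iio/sysfs.h>). All foo_* names below are hypothetical.
 *
 *	static const char * const foo_modes[] = { "normal", "burst" };
 *
 *	static const struct iio_enum foo_mode_enum = {
 *		.items = foo_modes,
 *		.num_items = ARRAY_SIZE(foo_modes),
 *		.get = foo_get_mode,
 *		.set = foo_set_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		{
 *			.name = "mode",
 *			.shared = IIO_SEPARATE,
 *			.read = iio_enum_read,
 *			.write = iio_enum_write,
 *			.private = (uintptr_t)&foo_mode_enum,
 *		},
 *		{
 *			.name = "mode_available",
 *			.shared = IIO_SHARED_BY_TYPE,
 *			.read = iio_enum_available_read,
 *			.private = (uintptr_t)&foo_mode_enum,
 *		},
 *		{ }
 *	};
 */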

static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};

static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
{
	*matrix = iio_mount_idmatrix;
	dev_info(dev, "mounting matrix not found: using identity...\n");
	return 0;
}

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
					      priv)(indio_dev, chan);

	if (IS_ERR(mtx))
		return PTR_ERR(mtx);

	if (!mtx)
		mtx = &iio_mount_idmatrix;

	return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
			  mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
			  mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
			  mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
}
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);

/**
 * iio_read_mount_matrix() - retrieve iio device mounting matrix from
 *                           device "mount-matrix" property
 * @dev:	device the mounting matrix property is assigned to
 * @matrix:	where to store retrieved matrix
 *
 * If device is assigned no mounting matrix property, a default 3x3 identity
 * matrix will be filled in.
 *
 * Return: 0 if success, or a negative error code on failure.
 */
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
{
	size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
	int err;

	err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
	if (err == len)
		return 0;

	if (err >= 0)
		/* Invalid number of matrix entries. */
		return -EINVAL;

	if (err != -EINVAL)
		/* Invalid matrix declaration format. */
		return err;

	/* Matrix was not declared at all: fallback to identity. */
	return iio_setup_mount_idmatrix(dev, matrix);
}
EXPORT_SYMBOL(iio_read_mount_matrix);

static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
				  int size, const int *vals)
{
	int tmp0, tmp1;
	s64 tmp2;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return sysfs_emit_at(buf, offset, "%d", vals[0]);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		fallthrough;
	case IIO_VAL_INT_PLUS_MICRO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%06u%s",
					     abs(vals[0]), -vals[1],
					     scale_db ? " dB" : "");
		else
			return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
					     vals[1], scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%09u",
					     abs(vals[0]), -vals[1]);
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
					     vals[1]);
	case IIO_VAL_FRACTIONAL:
		tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
		tmp1 = vals[1];
		tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
		if ((tmp2 < 0) && (tmp0 == 0))
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_FRACTIONAL_LOG2:
		tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
		tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
		if (tmp0 == 0 && tmp2 < 0)
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_INT_MULTIPLE:
	{
		int i;
		int l = 0;

		for (i = 0; i < size; ++i)
			l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
		return l;
	}
	case IIO_VAL_CHAR:
		return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
	default:
		return 0;
	}
}

/**
 * iio_format_value() - Formats a IIO value into its string representation
 * @buf:	The buffer to which the formatted value gets written
 *		which is assumed to be big enough (i.e. PAGE_SIZE).
 * @type:	One of the IIO_VAL_* constants. This decides how the val
 *		and val2 parameters are formatted.
 * @size:	Number of IIO value entries contained in vals
 * @vals:	Pointer to the values, exact meaning depends on the
 *		type parameter.
 *
 * Return: 0 by default, a negative number on failure or the
 *	   total number of characters written for a type that belongs
 *	   to the IIO_VAL_* constant.
 */
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
	ssize_t len;

	len = __iio_format_value(buf, 0, type, size, vals);
	if (len >= PAGE_SIZE - 1)
		return -EFBIG;

	return len + sysfs_emit_at(buf, len, "\n");
}
EXPORT_SYMBOL_GPL(iio_format_value);

static ssize_t iio_read_channel_label(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	if (indio_dev->info->read_label)
		return indio_dev->info->read_label(indio_dev, this_attr->c, buf);

	if (this_attr->c->extend_name)
		return sprintf(buf, "%s\n", this_attr->c->extend_name);

	return -EINVAL;
}

static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (indio_dev->info->read_raw_multi)
		ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
						      INDIO_MAX_RAW_ELEMENTS,
						      vals, &val_len,
						      this_attr->address);
	else
		ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
						&vals[0], &vals[1], this_attr->address);

	if (ret < 0)
		return ret;

	return iio_format_value(buf, ret, val_len, vals);
}

static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
			       const char *prefix, const char *suffix)
{
	ssize_t len;
	int stride;
	int i;

	switch (type) {
	case IIO_VAL_INT:
		stride = 1;
		break;
	default:
		stride = 2;
		break;
	}

	len = sysfs_emit(buf, prefix);

	for (i = 0; i <= length - stride; i += stride) {
		if (i != 0) {
			len += sysfs_emit_at(buf, len, " ");
			if (len >= PAGE_SIZE)
				return -EFBIG;
		}

		len += __iio_format_value(buf, len, type, stride, &vals[i]);
		if (len >= PAGE_SIZE)
			return -EFBIG;
	}

	len += sysfs_emit_at(buf, len, "%s\n", suffix);

	return len;
}

static ssize_t iio_format_avail_list(char *buf, const int *vals,
				     int type, int length)
{

	return iio_format_list(buf, vals, type, length, "", "");
}

static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
{
	return iio_format_list(buf, vals, type, 3, "[", "]");
}

static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const int *vals;
	int ret;
	int length;
	int type;

	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
					  &vals, &type, &length,
					  this_attr->address);

	if (ret < 0)
		return ret;
	switch (ret) {
	case IIO_AVAIL_LIST:
		return iio_format_avail_list(buf, vals, type, length);
	case IIO_AVAIL_RANGE:
		return iio_format_avail_range(buf, vals, type);
	default:
		return -EINVAL;
	}
}
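
/*
 * Illustrative examples (not part of this file) of what iio_format_value()
 * and the *_available helpers above emit; the numbers are made up:
 *
 *	IIO_VAL_INT,               vals = { 42 }        -> "42\n"
 *	IIO_VAL_INT_PLUS_MICRO,    vals = { 1, 500000 } -> "1.500000\n"
 *	IIO_VAL_INT_PLUS_MICRO_DB, vals = { 3, 10000 }  -> "3.010000 dB\n"
 *	IIO_VAL_FRACTIONAL,        vals = { 1, 4 }      -> "0.250000000\n"
 *	IIO_VAL_FRACTIONAL_LOG2,   vals = { 10, 2 }     -> "2.500000000\n"
 *	IIO_AVAIL_RANGE with IIO_VAL_INT, vals = { 0, 1, 255 } -> "[0 1 255]\n"
 */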

/**
 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 * @scale_db: True if this should parse as dB
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
				 int *integer, int *fract, bool scale_db)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			if (*(str + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof(" dB") - 1;
			continue;
		} else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof("dB") - 1;
			continue;
		} else if (*str == '.' && integer_part) {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}

/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * Returns 0 on success, or a negative error code if the string could not be
 * parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);

static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract = 0;
	bool is_char = false;
	bool scale_db = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO_DB:
			scale_db = true;
			fallthrough;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		case IIO_VAL_CHAR:
			is_char = true;
			break;
		default:
			return -EINVAL;
		}

	if (is_char) {
		char ch;

		if (sscanf(buf, "%c", &ch) != 1)
			return -EINVAL;
		integer = ch;
	} else {
		ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
					    scale_db);
		if (ret)
			return ret;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}
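
/*
 * Illustrative sketch (not part of this file): the precision used when
 * parsing the value written above is chosen by the driver's optional
 * write_raw_get_fmt() callback. With IIO_VAL_INT_PLUS_MICRO a string such
 * as "1.5\n" is parsed to integer = 1, fract = 500000; with
 * IIO_VAL_INT_PLUS_NANO, "-0.5" becomes integer = 0, fract = -500000000.
 * A hypothetical callback:
 *
 *	static int foo_write_raw_get_fmt(struct iio_dev *indio_dev,
 *					 struct iio_chan_spec const *chan,
 *					 long mask)
 *	{
 *		switch (mask) {
 *		case IIO_CHAN_INFO_CALIBBIAS:
 *			return IIO_VAL_INT;
 *		case IIO_CHAN_INFO_SCALE:
 *			return IIO_VAL_INT_PLUS_NANO;
 *		default:
 *			return IIO_VAL_INT_PLUS_MICRO;
 *		}
 *	}
 */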

static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;
	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			name = kasprintf(GFP_KERNEL,
					 "%s_%s%d-%s%d_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 chan->channel,
					 iio_chan_type_name_spec[chan->type],
					 chan->channel2,
					 full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;

		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 chan->channel,
						 full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}

error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct iio_buffer *buffer,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	iio_attr->buffer = buffer;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
	return ret;
}

static int iio_device_add_channel_label(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;

	if (!indio_dev->info->read_label && !chan->extend_name)
		return 0;

	ret = __iio_add_chan_devattr("label",
				     chan,
				     &iio_read_channel_label,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     NULL,
				     &iio_dev_opaque->channel_attr_list);
	if (ret < 0)
		return ret;

	return 1;
}

static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;

	for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;
	char *avail_postfix;

	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		avail_postfix = kasprintf(GFP_KERNEL,
					  "%s_available",
					  iio_chan_info_postfix[i]);
		if (!avail_postfix)
			return -ENOMEM;

		ret = __iio_add_chan_devattr(avail_postfix,
					     chan,
					     &iio_read_channel_info_avail,
					     NULL,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		kfree(avail_postfix);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		else if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SEPARATE,
						  &chan->info_mask_separate_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_TYPE,
						  &chan->info_mask_shared_by_type_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_DIR,
						  &chan->info_mask_shared_by_dir_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_ALL,
						  &chan->info_mask_shared_by_all_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_channel_label(indio_dev, chan);
	if (ret < 0)
		return ret;
	attrcount += ret;

	if (chan->ext_info) {
		unsigned int i = 0;
		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
						     chan,
						     ext_info->read ?
						     &iio_read_channel_ext_info : NULL,
						     ext_info->write ?
						     &iio_write_channel_ext_info : NULL,
						     i,
						     ext_info->shared,
						     &indio_dev->dev,
						     NULL,
						     &iio_dev_opaque->channel_attr_list);
			i++;
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}
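
/*
 * Illustrative example (not part of this file) of the sysfs names generated
 * by iio_device_add_channel_sysfs() above for a hypothetical channel:
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 *			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 *			.info_mask_shared_by_type_available =
 *				BIT(IIO_CHAN_INFO_SCALE),
 *		},
 *	};
 *
 * yields "in_voltage0_raw" (separate), "in_voltage_scale" (shared by type)
 * and "in_voltage_scale_available".
 */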

/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list.
 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, attr_list, l) {
		kfree_const(p->dev_attr.attr.name);
		list_del(&p->l);
		kfree(p);
	}
}

static ssize_t iio_show_dev_name(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sysfs_emit(buf, "%s\n", indio_dev->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);

static ssize_t iio_show_dev_label(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sysfs_emit(buf, "%s\n", indio_dev->label);
}

static DEVICE_ATTR(label, S_IRUGO, iio_show_dev_label, NULL);

static ssize_t iio_show_timestamp_clock(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	const clockid_t clk = iio_device_get_clock(indio_dev);
	const char *name;
	ssize_t sz;

	switch (clk) {
	case CLOCK_REALTIME:
		name = "realtime\n";
		sz = sizeof("realtime\n");
		break;
	case CLOCK_MONOTONIC:
		name = "monotonic\n";
		sz = sizeof("monotonic\n");
		break;
	case CLOCK_MONOTONIC_RAW:
		name = "monotonic_raw\n";
		sz = sizeof("monotonic_raw\n");
		break;
	case CLOCK_REALTIME_COARSE:
		name = "realtime_coarse\n";
		sz = sizeof("realtime_coarse\n");
		break;
	case CLOCK_MONOTONIC_COARSE:
		name = "monotonic_coarse\n";
		sz = sizeof("monotonic_coarse\n");
		break;
	case CLOCK_BOOTTIME:
		name = "boottime\n";
		sz = sizeof("boottime\n");
		break;
	case CLOCK_TAI:
		name = "tai\n";
		sz = sizeof("tai\n");
		break;
	default:
		BUG();
	}

	memcpy(buf, name, sz);
	return sz;
}

static ssize_t iio_store_timestamp_clock(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	clockid_t clk;
	int ret;

	if (sysfs_streq(buf, "realtime"))
		clk = CLOCK_REALTIME;
	else if (sysfs_streq(buf, "monotonic"))
		clk = CLOCK_MONOTONIC;
	else if (sysfs_streq(buf, "monotonic_raw"))
		clk = CLOCK_MONOTONIC_RAW;
	else if (sysfs_streq(buf, "realtime_coarse"))
		clk = CLOCK_REALTIME_COARSE;
	else if (sysfs_streq(buf, "monotonic_coarse"))
		clk = CLOCK_MONOTONIC_COARSE;
	else if (sysfs_streq(buf, "boottime"))
		clk = CLOCK_BOOTTIME;
	else if (sysfs_streq(buf, "tai"))
		clk = CLOCK_TAI;
	else
		return -EINVAL;

	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
	if (ret)
		return ret;

	return len;
}

int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
				    const struct attribute_group *group)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct attribute_group **new, **old = iio_dev_opaque->groups;
	unsigned int cnt = iio_dev_opaque->groupcounter;

	new = krealloc(old, sizeof(*new) * (cnt + 2), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new[iio_dev_opaque->groupcounter++] = group;
	new[iio_dev_opaque->groupcounter] = NULL;

	iio_dev_opaque->groups = new;

	return 0;
}

static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR,
		   iio_show_timestamp_clock, iio_store_timestamp_clock);

static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (iio_dev_opaque->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (indio_dev->label)
		attrcount++;
	if (clk)
		attrcount++;

	iio_dev_opaque->chan_attr_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
			GFP_KERNEL);
	if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (indio_dev->info->attrs) {
		memcpy(iio_dev_opaque->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
		       *attrcount_orig);
		iio_dev_opaque->chan_attr_group.is_visible =
			indio_dev->info->attrs->is_visible;
	}
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
	if (indio_dev->label)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
	if (clk)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;

	ret = iio_device_register_sysfs_group(indio_dev,
					      &iio_dev_opaque->chan_attr_group);
	if (ret)
		goto error_clear_attrs;

	return 0;

error_clear_attrs:
	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
	kfree(iio_dev_opaque->chan_attr_group.attrs);
	iio_dev_opaque->chan_attr_group.attrs = NULL;
	kfree(iio_dev_opaque->groups);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);

	iio_device_detach_buffers(indio_dev);

	ida_simple_remove(&iio_ida, iio_dev_opaque->id);
	kfree(iio_dev_opaque);
}

struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

/**
 * iio_device_alloc() - allocate an iio_dev from a driver
 * @parent:		Parent device.
 * @sizeof_priv:	Space to allocate for private structure.
 **/
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev_opaque *iio_dev_opaque;
	struct iio_dev *indio_dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev_opaque);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}

	iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
	if (!iio_dev_opaque)
		return NULL;

	indio_dev = &iio_dev_opaque->indio_dev;
	indio_dev->priv = (char *)iio_dev_opaque +
		ALIGN(sizeof(struct iio_dev_opaque), IIO_ALIGN);

	indio_dev->dev.parent = parent;
	indio_dev->dev.type = &iio_device_type;
	indio_dev->dev.bus = &iio_bus_type;
	device_initialize(&indio_dev->dev);
	iio_device_set_drvdata(indio_dev, (void *)indio_dev);
	mutex_init(&indio_dev->mlock);
	mutex_init(&iio_dev_opaque->info_exist_lock);
	INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);

	iio_dev_opaque->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
	if (iio_dev_opaque->id < 0) {
		/* cannot use a dev_err as the name isn't available */
		pr_err("failed to get device id\n");
		kfree(iio_dev_opaque);
		return NULL;
	}
	dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id);
	INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
	INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);

	return indio_dev;
}
EXPORT_SYMBOL(iio_device_alloc);

/**
 * iio_device_free() - free an iio_dev from a driver
 * @dev:		the iio_dev associated with the device
 **/
void iio_device_free(struct iio_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);

static void devm_iio_device_release(void *iio_dev)
{
	iio_device_free(iio_dev);
}
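
/*
 * Illustrative sketch (not part of this file): a typical driver allocates its
 * iio_dev together with driver private state, using devm_iio_device_alloc()
 * (defined just below), and retrieves that state with iio_priv(). foo_state
 * and the surrounding platform driver shape are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */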

/**
 * devm_iio_device_alloc - Resource-managed iio_device_alloc()
 * @parent:		Device to allocate iio_dev for, and parent for this IIO device
 * @sizeof_priv:	Space to allocate for private structure.
 *
 * Managed iio_device_alloc. iio_dev allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev *iio_dev;
	int ret;

	iio_dev = iio_device_alloc(parent, sizeof_priv);
	if (!iio_dev)
		return NULL;

	ret = devm_add_action_or_reset(parent, devm_iio_device_release,
				       iio_dev);
	if (ret)
		return NULL;

	return iio_dev;
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);

/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 * @inode:	Inode structure for identifying the device in the file system
 * @filp:	File structure for iio device used to keep and later access
 *		private data
 *
 * Return: 0 on success or -EBUSY if the device is already opened
 **/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
	struct iio_dev_buffer_pair *ib;

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
		return -EBUSY;

	iio_device_get(indio_dev);

	ib = kmalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		iio_device_put(indio_dev);
		clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
		return -ENOMEM;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = indio_dev->buffer;

	filp->private_data = ib;

	return 0;
}

/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 * @inode:	Inode structure pointer for the char device
 * @filp:	File structure pointer for the char device
 *
 * Return: 0 for successful release
 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
	iio_device_put(indio_dev);

	return 0;
}

void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
				       struct iio_ioctl_handler *h)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
}

void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
{
	list_del(&h->entry);
}

static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_ioctl_handler *h;
	int ret = -ENODEV;

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	/**
	 * The NULL check here is required to prevent crashing when a device
	 * is being removed while userspace would still have open file handles
	 * to try to access this device.
	 */
	if (!indio_dev->info)
		goto out_unlock;

	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
		ret = h->ioctl(indio_dev, filp, cmd, arg);
		if (ret != IIO_IOCTL_UNHANDLED)
			break;
	}

	if (ret == IIO_IOCTL_UNHANDLED)
		ret = -ENODEV;

out_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}

static const struct file_operations iio_buffer_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read_outer_addr,
	.poll = iio_buffer_poll_addr,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static const struct file_operations iio_event_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
	int i, j;
	const struct iio_chan_spec *channels = indio_dev->channels;

	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
		return 0;

	for (i = 0; i < indio_dev->num_channels - 1; i++) {
		if (channels[i].scan_index < 0)
			continue;
		for (j = i + 1; j < indio_dev->num_channels; j++)
			if (channels[i].scan_index == channels[j].scan_index) {
				dev_err(&indio_dev->dev,
					"Duplicate scan index %d\n",
					channels[i].scan_index);
				return -EINVAL;
			}
	}

	return 0;
}

static int iio_check_extended_name(const struct iio_dev *indio_dev)
{
	unsigned int i;

	if (!indio_dev->info->read_label)
		return 0;

	for (i = 0; i < indio_dev->num_channels; i++) {
		if (indio_dev->channels[i].extend_name) {
			dev_err(&indio_dev->dev,
				"Cannot use labels and extend_name at the same time\n");
			return -EINVAL;
		}
	}

	return 0;
}

static const struct iio_buffer_setup_ops noop_ring_setup_ops;

int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const char *label;
	int ret;

	if (!indio_dev->info)
		return -EINVAL;

	iio_dev_opaque->driver_module = this_mod;
	/* If the calling driver did not initialize of_node, do it here */
	if (!indio_dev->dev.of_node && indio_dev->dev.parent)
		indio_dev->dev.of_node = indio_dev->dev.parent->of_node;

	label = of_get_property(indio_dev->dev.of_node, "label", NULL);
	if (label)
		indio_dev->label = label;

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	ret = iio_check_extended_name(indio_dev);
	if (ret < 0)
		return ret;

	iio_device_register_debugfs(indio_dev);

	ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_register_trigger_consumer(indio_dev);

	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
	    indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	if (iio_dev_opaque->attached_buffers_cnt)
		cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
	else if (iio_dev_opaque->event_interface)
		cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);

	if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
		indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
		iio_dev_opaque->chrdev.owner = this_mod;
	}

	/* assign device groups now; they should be all registered now */
	indio_dev->dev.groups = iio_dev_opaque->groups;

	ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;

	return 0;

error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(__iio_device_register);

/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev:		Device structure representing the device.
 **/
void iio_device_unregister(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	iio_device_unregister_debugfs(indio_dev);

	iio_disable_all_buffers(indio_dev);

	indio_dev->info = NULL;

	iio_device_wakeup_eventset(indio_dev);
	iio_buffer_wakeup_poll(indio_dev);

	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);

static void devm_iio_device_unreg(void *indio_dev)
{
	iio_device_unregister(indio_dev);
}

int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod)
{
	int ret;

	ret = __iio_device_register(indio_dev, this_mod);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
}
EXPORT_SYMBOL_GPL(__devm_iio_device_register);

/**
 * iio_device_claim_direct_mode - Keep device in direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * If the device is in direct mode it is guaranteed to stay
 * that way until iio_device_release_direct_mode() is called.
 *
 * Use with iio_device_release_direct_mode()
 *
 * Returns: 0 on success, -EBUSY on failure
 */
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
	mutex_lock(&indio_dev->mlock);

	if (iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
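
/*
 * Illustrative sketch (not part of this file): the usual pattern in a
 * driver's read_raw() callback, keeping the device out of buffered mode for
 * the duration of a one-shot conversion; foo_read_channel() is hypothetical.
 *
 *	static int foo_read_raw(struct iio_dev *indio_dev,
 *				struct iio_chan_spec const *chan,
 *				int *val, int *val2, long mask)
 *	{
 *		int ret;
 *
 *		ret = iio_device_claim_direct_mode(indio_dev);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_read_channel(indio_dev, chan, val);
 *		iio_device_release_direct_mode(indio_dev);
 *		if (ret)
 *			return ret;
 *
 *		return IIO_VAL_INT;
 *	}
 */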

/**
 * iio_device_release_direct_mode - releases claim on direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Use with iio_device_claim_direct_mode()
 */
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&indio_dev->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");